\begin{document}
\begin{frontmatter}
\title{On sequences covering all rainbow $k$-progressions} \runtitle{On sequences covering all rainbow $k$-progressions}
\begin{aug} \author{\fnms{Leonardo} \snm{Alese}\thanksref{t1,t2}\ead[label=e1]{[email protected]}}, \address{Graz University of Technology\\ Institute of Geometry\\ Kopernikusgasse 24, 8010 Graz, Austria\\ \printead{e1}}
\author{\fnms{Stefan} \snm{Lendl}\thanksref{t1}\ead[label=e2]{[email protected]}} \address{Graz University of Technology\\ Institute of Discrete Mathematics\\ Steyrergasse 30, 8010 Graz, Austria\\ \printead{e2}} \and \author{\fnms{Paul} \snm{Tabatabai}\ead[label=e3]{[email protected]}} \address{Graz University of Technology\\ 8010 Graz\\ \printead{e3}}
\thankstext{t1}{The authors acknowledge the support of the Austrian Science Fund (FWF): W1230, Doctoral Program ``Discrete Mathematics''.} \thankstext{t2}{The author acknowledges the support of SFB-Transregio 109 ``Discretization in Geometry \& Dynamics'' funded by DFG and FWF (I 2978).}
\runauthor{Alese, Lendl, Tabatabai}
\end{aug}
\begin{abstract}
Let $\text{ac}(n,k)$ denote the smallest positive integer
with the property that there exists an $n$-colouring $f$ of
$\{1,\dots,\text{ac}(n,k)\}$ such that for every $k$-subset
$R \subseteq \{1, \dots, n\}$ there exists an (arithmetic)
$k$\nobreakdash-progression $A$ in $\{1,\dots,\text{ac}(n,k)\}$
with $\{f(a) : a \in A\} = R$.
Determining the behaviour of the function $\text{ac}(n,k)$
is a previously unstudied problem.
We use the first moment method to give
an asymptotic upper bound for $\text{ac}(n,k)$ for the case $k = o(n^{1/{5}})$.
\end{abstract}
\end{frontmatter} \section{Introduction} \label{intro}
Let $a, k, d \in \mathbb{N}$. The set $A = \{a, a +d, a+2d, \dots, a+(k-1)d\}$ is called an (arithmetic) $k$-progression. We say $A$ has \emph{common difference} $d$.
Let $n, N \in \mathbb{N}$ ($n \leq N$) and let $f:[N]\rightarrow [n]$ be an $n$-colouring of $[N]$. Let $R \in \binom{[n]}{k}$ be a $k$-subset of $[n]$. We say a $k$-progression $A$ in $[N]$ is \emph{$R$-coloured} if $\{ f(a) : a \in A \} = R$. We call such a $k$-progression a \emph{rainbow $k$-progression}. We say $f$ \emph{covers} $R$ if there is a $k$-progression in $[N]$ that is $R$-coloured. \begin{ex}
The $6$-colouring
$ f = (4,6,5,1,3,4,2,5,6,3,1,4)$
of the interval $\{1,2,\dots,12\}$
covers every $3$-subset of $\{1,\dots,6\}$; we give
examples for some subsets:
\begin{alignat*}{1}
\{1,2,3\}\text{: } (4,6,5,\bm{1},3,4,\bm{2},5,6,\bm{3},1,4) \ \ \ \\[-0.35em]
\{3,4,5\}\text{: } (\bm{4},6,\bm{5},1,\bm{3},4,2,5,6,3,1,4) \ \ \ \\[-0.35em]
\{3,4,6\}\text{: } (\bm{4},6,5,1,\bm{3},4,2,5,\bm{6},3,1,4) \ \ \ \\[-0.35em]
\{2,5,6\}\text{: } (4,6,5,1,3,4,\bm{2},\bm{5},\bm{6},3,1,4) \ \ \
\end{alignat*} \end{ex}
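The covering claim in this example can also be verified mechanically. The following short brute-force script (an illustrative sketch, not part of the argument; the variable names are ours) enumerates all arithmetic $3$-progressions in $\{1,\dots,12\}$ and records which $3$-subsets of $\{1,\dots,6\}$ they realise under $f$:
\begin{verbatim}
from itertools import combinations

f = [4, 6, 5, 1, 3, 4, 2, 5, 6, 3, 1, 4]   # f(i) is f[i-1]
N, n, k = len(f), 6, 3

covered = set()
for a in range(1, N + 1):
    for d in range(1, N):
        prog = [a + j * d for j in range(k)]
        if prog[-1] > N:
            break
        colours = {f[p - 1] for p in prog}
        if len(colours) == k:              # the progression is rainbow
            covered.add(frozenset(colours))

# True exactly when every 3-subset of {1,...,6} is covered
print(len(covered) == len(list(combinations(range(1, n + 1), k))))
\end{verbatim}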
For $n,k \in \mathbb{N}$ (where $k \leq n$), let $\text{ac}(n,k)$ denote the smallest positive integer such that there exists an $n$-colouring $f$ of $[\text{ac}(n,k)] = \{ 1,2,\dots, \text{ac}(n,k) \}$ that covers every $k$-subset of $[n]$.
Among related problems, the anti-van der Waerden numbers $\text{aw}([N],k)$ are well-studied in Ramsey theory. The number $\text{aw}([N],k)$ is defined to be the smallest positive integer $r$ such that every surjective $r$-colouring of $[N]$ contains at least one rainbow $k$-progression.
Butler~et~al.~\cite{butler} calculate exact values of $\text{aw}([N],k)$ for small values of $N$ and $k$ and give asymptotic results. Berikkyzy~et~al.~\cite{berikkyzy2016anti} give an exact formula for $\text{aw}([N],3)$, proving a conjecture of Butler~et~al.~\cite{butler}. Young~\cite{young2016rainbow} and Schulte~et~al.~\cite{young2018graph} study generalizations of this problem to finite abelian groups and graphs, respectively.
The problem of studying anti-van der Waerden numbers is about finding colourings avoiding all rainbow $k$-progressions. Conversely, the problem we study in this work is about finding colourings that \emph{do not} avoid \emph{any} rainbow $k$-progressions.
A wide range of problems about covering all $k$-subsets of $[n]$ on various structures has been studied~\cite{chung1992universal, blackburn2012existence, cover}.
We prove the following asymptotic result. \begin{theorem-non}
\label{maintheorem}
As $n$ tends to infinity, we have
$$\text{ac}(n,k) = \Omega\left(\sqrt{k\binom{n}{k}} \right).$$
If $k = k(n) = o(n^{1/{5}})$, we have
$$\text{ac}(n,k) = \mathcal{O}\left(\log n \cdot
e^{k/{2}}\cdot k^{-k/{2}+5/{4}}\cdot n^{k/{2}}\right).$$ \end{theorem-non} Comparing the asymptotic upper and lower bounds for the case $k~=~o(n^{1/{5}})$, we see that they differ by a factor of $k \log n$.
The proof of the theorem is given in Section~\ref{secthmproof}. Its main tool, Lemma~\ref{mainlemma}, is proved in Section~\ref{lemmaproof} by bounding from below the expected number of $k$-subsets of $[n]$ covered by a random colouring.
\section{Proof of Theorem}\label{secthmproof}
All asymptotics are to be understood with respect to $n$, where $n$ tends to infinity.
The lower bound in the theorem is a consequence of the fact that an $n$-colouring of $[N]$ can only cover all $k$-subsets of $[n]$ if $[N]$ contains at least $\binom{n}{k}$ $k$-progressions.
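In more detail, $[N]$ contains $\frac{N^2}{2k-2} + \mathcal{O}(N)$ arithmetic $k$-progressions (see Lemma~\ref{formulas} below), so a covering colouring requires
$$\frac{N^{2}}{2k-2} + \mathcal{O}(N) \;\geq\; \binom{n}{k}, \qquad \text{and therefore} \qquad N = \Omega\left(\sqrt{k\binom{n}{k}}\right).$$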
The remainder of this section is dedicated to proving the upper bound given in the theorem. To this end, let $k = k(n) = o(n^{1/{5}})$ as in the statement of the theorem, and set $N = N(n) = \left \lceil \sqrt{2}\sqrt{\frac{k-1}{k!}}\cdot n^{k/{2}} \right \rceil$.
The proof of the following lemma is given in Section~\ref{lemmaproof}. \begin{lemma}
\label{mainlemma}
Let
$\mathcal{F} \subseteq \binom{[n]}{k}$
be a family of $k$-subsets of $[n]$. There exists an $n$-colouring $f^*$ of $[N]$
such that the number of sets of $\mathcal{F}$ that are covered by $f^*$
is at least
$|\mathcal{F}|\left(\frac{1}{2} + o(1) \right).$ \end{lemma}
It follows that there exists an $n$-colouring $g_{0}$ of $[N]$ that covers at least $\binom{n}{k}\left(\frac{1}{2} + o(1) \right)$ of the sets of $\mathcal{F}_0 := \binom{[n]}{k}$.
Let $\mathcal{F}_1$ be the family of sets of $\mathcal{F}_0$ that have not been covered by $g_{0}$. Applying Lemma~\ref{mainlemma} again, we obtain an $n$-colouring $g_{1}$ of $[N]$
that covers at least $|\mathcal{F}_1|\left(\frac{1}{2} + o(1) \right)$ of the sets of $\mathcal{F}_1$. We repeat this process $r$ times, by defining $\mathcal{F}_i$ to be the family of $k$-subsets of $[n]$ not yet covered by any of the colourings $g_{0}, \dots,g_{i-1}$.
After $r$ iterations, the number of $k$-subsets of $[n]$ that are not covered by any of the constructed colourings is at most
$|\mathcal{F}_0| \left(\frac{1}{2} + o(1) \right)^r$. Setting $r = r(n,k) = \left \lceil \alpha \cdot k \log n \right \rceil$, where $\alpha > \frac{1}{\log(2)}$, we get
$$|\mathcal{F}_0| \left(\frac{1}{2} + o(1) \right)^{r(n)} = \binom{n}{k}\left(\frac{1}{2} + o(1) \right)^{r(n)} = o(1).$$ Thus, for sufficiently large $n$, after $r(n)$ iterations, every $k$-subset of $[n]$ is covered by at least one of the colourings $$g_{0}, g_{1}, \dots, g_{r(n)-1}.$$
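For completeness, the decay in the last display can be made explicit: since $\binom{n}{k} \leq n^{k}$ and $r \geq \alpha k \log n$, we have
$$\binom{n}{k}\left(\frac{1}{2} + o(1) \right)^{r} \;\leq\; \exp\Bigl(k \log n - \alpha k \log n \left(\log 2 + o(1)\right)\Bigr) \;\longrightarrow\; 0,$$
because $\alpha \log 2 > 1$.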
From the colourings $g_{0}, g_{1}, \dots, g_{r(n)-1}$ we construct an $n$-colouring $g$ of $S := [r(n) \cdot N]$. We split $S$ into $r(n)$ intervals of length $N$ and colour each of these intervals with the corresponding colouring $g_{i}$. Formally, we set $$g\left(i\cdot N+s\right) = g_{i}(s) \ \ \ i \in \{ 0, \dots, r(n)-1 \}, \ s \in [N].$$ The colouring $g$ is an $n$-colouring of $S~=~\left[\left \lceil \alpha\cdot k \log n \right \rceil \cdot \left \lceil \sqrt{2}\sqrt{\frac{k-1}{k!}} \cdot n^{k/{2}} \right \rceil \right]$ that covers all $k$-subsets of $[n]$. It follows that \begin{align*}
\text{ac}(n,k) = \mathcal{O}\left(k \cdot \log n \cdot
\sqrt{\frac{k-1}{k!}} \cdot
n^{k/{2}} \right). \end{align*} If $k = o(n^{1/{5}})$ tends to infinity as $n \rightarrow \infty$, $$\text{ac}(n,k) = \mathcal{O}\left(\log n \cdot e^{k/{2}}\cdot k^{-k/{2}+5/{4}}\cdot n^{k/{2}}\right)$$ holds.
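The last simplification is a routine application of Stirling's formula: using $\sqrt{k!} = \Theta\left((2\pi k)^{1/4}(k/e)^{k/2}\right)$ and $k \rightarrow \infty$,
$$k \cdot \sqrt{\frac{k-1}{k!}} \;=\; \Theta\left(\frac{k^{3/2}}{(2\pi k)^{1/4}\,(k/e)^{k/2}}\right) \;=\; \Theta\left(e^{k/2}\cdot k^{-k/2+5/4}\right).$$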
\section{Proof of Lemma \ref{mainlemma} using the probabilistic method} \label{lemmaproof}
For $n, N, k \in \mathbb{N}$ (where $k \leq n \leq N$) let $f$ be a random $n$-colouring of $[N]$ (chosen uniformly at random from all such colourings). For each $R \in \binom{[n]}{k}$ let $X_R$ be the indicator variable of the event \emph{``$f$ covers $R$''}. Given a $k$-progression $A$ in $[N]$, let $Y_{A,R}$ be the event
\emph{``The progression $A$ is $R$-coloured''}.
We are interested in the random variable $\sum_{R \in \binom{[n]}{k}} X_R,$
which counts the number of $k$-subsets of $[n]$ that are covered by $f$.
For the sake of brevity, let $\text{AP}_k(N)$ denote the set of all $k$-progressions in $[N]$ and $\mathcal{H}_k(N) = \binom{\text{AP}_k(N)}{2}$ denote the set of all unordered pairs of $k$\nobreakdash-progressions in $[N]$. Note that $X_R$ is the indicator variable of the event $\bigcup\limits_{A \in \text{AP}_k(N)} Y_{A,R}$.
Using a Bonferroni inequality we obtain the following lower bound for $\mathbb{E}X_R$. \begin{lemma} \label{expression}
For every $k$-subset $R$ of $[n]$, the following holds: \begin{align*} &\mathbb{E}X_R = \mathbb{P}(X_R = 1) = \mathbb{P}\bigg(\bigcup\limits_{A \in \text{AP}_k(N)} Y_{A,R}\bigg) \\ & \geq \sum_{A \in \text{AP}_k(N)}\mathbb{P}(Y_{A,R}) - \sum_{\{ A,B \} \in \mathcal{H}_k(N)} \mathbb{P}\left(Y_{A,R} \cap Y_{B,R}\right) \\
& = \sum_{A \in \text{AP}_k(N)} \frac{k!}{n^{k}} -
\sum_{i = 0}^{k-1} \sum_{\substack{\{ A,B \} \in \mathcal{H}_k(N) \\ |A \cap B| = i}} \frac{k!(k-i)!}{n^{2k-i}}. \end{align*} \begin{flushright} $\square$ \end{flushright} \end{lemma}
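For the reader's convenience we sketch the counts behind the last equality. A fixed $k$-progression $A$ is $R$-coloured for exactly $k!$ of the $n^{k}$ equally likely colourings of its elements. If $|A \cap B| = i$, then both $A$ and $B$ are $R$-coloured for exactly $k!\,(k-i)!$ of the $n^{2k-i}$ colourings of $A \cup B$: colour $A$ bijectively with $R$ in $k!$ ways, after which the $k-i$ elements of $B \setminus A$ must receive the $k-i$ colours of $R$ not already used on $A \cap B$, in $(k-i)!$ ways. Hence
$$\mathbb{P}(Y_{A,R}) = \frac{k!}{n^{k}} \qquad \text{and} \qquad \mathbb{P}\left(Y_{A,R} \cap Y_{B,R}\right) = \frac{k!\,(k-i)!}{n^{2k-i}} \quad \text{whenever } |A \cap B| = i.$$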
To evaluate the lower bound from Lemma \ref{expression}, we need to count the number $h(N,k) = |\text{AP}_k(N)|$ of $k$-progressions in $[N]$ and the numbers $h_i(N,k)$, defined as the number of unordered pairs of $k$-progressions in $[N]$ that intersect in exactly $i$ positions.
\begin{lemma} \label{formulas} As $N$ tends to infinity, the following asymptotic bounds hold: \begin{itemize}
\item $h(N,k) = \frac{N^2}{2k-2} + \mathcal{O}(N)$,
\item $h_0(N,k) \leq \binom{h(N,k)}{2} = \frac{N^4}{8(k-1)^2} + \mathcal{O}(N^3/{k})$,
\item $h_1(N,k) \leq h(N,k)k^2N = \mathcal{O}\left(N^3k\right)$,
\item $h_j(N,k) \leq \binom{N}{2} \binom{\binom{k}{2}}{2} = \mathcal{O}\left(N^2k^4\right) \text{ for } j \geq 2.$ \end{itemize} \begin{proof} The formula for $h(N,k)$ is obtained by counting the number of ways to choose the initial term and common difference of the progression. We bound $h_0(N,k)$ by the number of unordered pairs of $k$-progressions. The bound for $h_1(N,k)$ is obtained by fixing a $k$-progression and an element of that progression; there are at most $kN$ $k$-progressions containing this element. For each $j \geq 2$, $h_j(N,k)$ is bounded by the total number of pairs of $k$-progressions intersecting in at least two positions. For each pair of distinct elements there are at most $\binom{k}{2}$ $k$-progressions containing both of them.
\end{proof} \end{lemma}
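For instance, the first count can be made explicit by grouping the $k$-progressions in $[N]$ according to their common difference $d$: for each admissible $d$ there are exactly $N-(k-1)d$ initial terms keeping the progression inside $[N]$, so
$$h(N,k) \;=\; \sum_{d=1}^{\left\lfloor \frac{N-1}{k-1} \right\rfloor} \bigl(N - (k-1)d\bigr) \;=\; \frac{N^{2}}{2k-2} + \mathcal{O}(N).$$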
We are ready to evaluate the lower bound from Lemma \ref{expression}.
\begin{lemma} \label{longlemma}
Let $k = k(n) = o(n^{1/{5}})$ and let
$N = N(n) = \left \lceil \sqrt{2}\sqrt{\frac{k-1}{k!}}\cdot n^{k/{2}} \right \rceil$.
Let $f$ be a random $n$-colouring of $[N]$. Then,
for every $R \in \binom{[n]}{k}$ the inequality
$$\mathbb{E}X_R \geq \frac{1}{2} + o(1)$$
holds. \begin{proof} Using Lemma~\ref{expression} and the asymptotic bounds for $h$ and the $h_i$'s we get \begin{align*} \mathbb{E}X_R &\geq h(N) \frac{k!}{n^k} - h_0(N)\frac{k!k!}{n^{2k}} - h_1(N)\frac{k!(k-1)!}{n^{2k-1}} - \sum_{i=2}^{k-1} h_i(N)\frac{k!(k-i)!}{n^{2k-i}} \\
&\geq \left(\frac{N^2}{2k-2} + \mathcal{O}(N) \right) \frac{k!}{n^k}
- \left(\frac{N^4}{8(k-1)^2} + \mathcal{O}(N^3/{k}) \right)\frac{k!k!}{n^{2k}} \\
&\qquad- \mathcal{O}(N^3k)\frac{k!(k-1)!}{n^{2k-1}} -
\mathcal{O}(N^2k^4) \sum_{i=2}^{k-1} \frac{k!(k-i)!}{n^{2k-i}} =: L(n). \end{align*} Only the terms $\frac{N^2}{2k-2}\frac{k!}{n^k}$ and $\frac{N^4}{8(k-1)^2}\frac{k!k!}{n^{2k}}$ are asymptotically relevant. It follows from Stirling's formula that $\mathcal{O}(N) \frac{k!}{n^k} = o(1)$, $\mathcal{O}(N^3/{k})\frac{k!k!}{n^{2k}} = o(1)$, and $\mathcal{O}(N^3k)\frac{k!(k-1)!}{n^{2k-1}} = o(1)$. To see that $\mathcal{O}(N^2k^4)\sum_{i=2}^{k-1}\frac{k!(k-i)!}{n^{2k-i}} = o(1)$, we use the fact that the last term of the sum asymptotically dominates the sum of all other terms, together with the assumption $k~=~o(n^{1/{5}})$.
We are thus left with the following representation of $L(n)$: $$ L(n) = \frac{N^2}{2k-2} \frac{k!}{n^k} - \frac{N^4}{8(k-1)^2}\frac{k!k!}{n^{2k}} + o(1),$$ which, by our choice of $N$, gives $L(n) = \frac{1}{2} + o(1)$.
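Explicitly, the choice of $N$ gives $N^{2} = \frac{2(k-1)}{k!}\, n^{k}\left(1+o(1)\right)$, so
$$\frac{N^2}{2k-2}\frac{k!}{n^k} = 1 + o(1) \qquad \text{and} \qquad \frac{N^4}{8(k-1)^2}\frac{k!k!}{n^{2k}} = \frac{1}{2} + o(1).$$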
\end{proof} \end{lemma}
Lemma~\ref{mainlemma} follows from Lemma~\ref{longlemma} by linearity of expectation: for any family $\mathcal{F} \subseteq \binom{[n]}{k}$ we have $\mathbb{E}\sum_{R \in \mathcal{F}} X_R \geq |\mathcal{F}|\left(\frac{1}{2} + o(1) \right)$, and some colouring $f^*$ covers at least this expected number of sets of $\mathcal{F}$.
\section{Conclusion}
Various generalizations of the problem we studied are possible, by replacing $[N]$ by another structure endowed with a sensible definition of $k$-progression. Structures of interest include cycles $\mathbb{Z}_{N}$, abelian groups and graphs, which are already studied for anti-van der Waerden numbers.
\end{document}
\begin{document}
\publicationdetails{20}{2018}{1}{15}{3968} \maketitle \begin{abstract} We define a \emph{weakly threshold sequence} to be a degree sequence $d=(d_1,\dots,d_n)$ of a graph having the property that $\sum_{i \leq k} d_i \geq k(k-1)+\sum_{i > k} \min\{k,d_i\} - 1$ for all positive $k \leq \max\{i:d_i \geq i-1\}$. The \emph{weakly threshold graphs} are the realizations of the weakly threshold sequences. The weakly threshold graphs properly include the threshold graphs and satisfy pleasing extensions of many properties of threshold graphs. We demonstrate a majorization property of weakly threshold sequences and an iterative construction algorithm for weakly threshold graphs, as well as a forbidden induced subgraph characterization. We conclude by exactly enumerating weakly threshold sequences and graphs. \end{abstract}
\section{Introduction} \label{sec: intro}
The threshold graphs are a remarkable and well-studied class of graphs. As explained in the monograph devoted to them by Mahadev and Peled~\cite{MahadevPeled95}, these graphs have been independently rediscovered in diverse contexts, and they have a large number of equivalent characterizations. For example, Chv\'{a}tal and Hammer~\cite{ChvatalHammer73,ChvatalHammer77} defined threshold graphs as those graphs whose vertices can be labeled with nonnegative numerical values so that a set of vertices is an independent set if and only if the values of the included vertices sum to at most some predetermined value (the ``threshold''). Other characterizations of threshold graphs have dealt with characteristics ranging from construction algorithms to forbidden induced subgraphs to eigenvalues of the Laplacian matrix; see~\cite{MahadevPeled95} for a broad introduction.
One characterization of threshold graphs, due to Hammer, Ibaraki, and Simeone, concerns their degree sequences, which we call \emph{threshold sequences}. In this and all other results in this paper, we assume that degree sequences are indexed with their terms ordered from largest to smallest. Given a degree sequence $d=(d_1,\dots,d_n)$, we further define $m(d) = \max\{i: d_i \geq i-1\}$.
\begin{thm}[\cite{HammerIbarakiSimeone78}] \label{thm: threshold deg seq char} Let $d=(d_1,\dots,d_n)$ be the degree sequence of a graph $G$. The graph $G$ is a threshold graph if and only if \[\sum_{i=1}^k d_i = k(k-1) + \sum_{i>k} \min\{k,d_i\}\] for all $k \in \{1,\dots,m(d)\}$. \end{thm}
This theorem bears a strong resemblance to a well known theorem of Erd\H{o}s and Gallai characterizing graphic sequences. (The version stated here uses an improvement due to Hammer, Ibaraki, and Simeone.)
\begin{thm}[\cite{ErdosGallai60, HammerIbarakiSimeone78, HammerIbarakiSimeone81}] \label{thm: Erdos Gallai} A sequence $d=(d_1,\dots,d_n)$ of nonnegative integers, with even sum and terms in nonincreasing order, is the degree sequence of a simple graph if and only if \begin{equation}\label{eq: Erdos Gallai}\sum_{i=1}^k d_i \leq k(k-1) + \sum_{i>k} \min\{k,d_i\}\end{equation} for all $k \in \{1,\dots,m(d)\}$. \end{thm}
Thus Theorem~\ref{thm: threshold deg seq char} shows that threshold sequences are in one sense extremal examples among all degree sequences.
The Erd\H{o}s--Gallai inequalities of Theorem~\ref{thm: Erdos Gallai} are derived from the observation that the number of edges joining vertices with large degree to vertices of low degree cannot exceed the capacity of the low-degree vertices to accommodate these edges. As we might expect, in order for threshold sequences to satisfy these inequalities with equality, the adjacencies in a threshold graph are rigidly determined. In fact, one of the remarkable properties of threshold sequences is that each such sequence has exactly one labeled realization, and threshold sequences are the only degree sequences with this property~\cite{ChvatalHammer73, FulkersonHoffmanMcAndrew65}.
Stated another way, in a threshold graph the presence or absence of an edge between two vertices is uniquely determined by the degrees of those two vertices. In a recent paper~\cite{ForcedEdges}, the author characterized the circumstances under which an edge (or non-edge) is forced to appear in all realizations of a degree sequence. The answer can be stated in terms of the quantities \begin{equation} \label{eq: EG differences} \Delta_k(d) = k(k-1) + \sum_{i>k} \min\{k,d_i\} - \sum_{i=1}^k d_i \end{equation} for $1 \leq k \leq m(d)$, which we call the \emph{Erd\H{o}s--Gallai differences} of $d$. By Theorem~\ref{thm: Erdos Gallai} the Erd\H{o}s--Gallai differences are all nonnegative for any degree sequence. As shown in~\cite{ForcedEdges}, in order for an adjacency relationship to be constant among all labeled realizations of a degree sequence, it is necessary that an Erd\H{o}s--Gallai difference be at most 1.
Because of Theorem~\ref{thm: threshold deg seq char}, threshold sequences are precisely those degree sequences where all of the first $m(d)$ Erd\H{o}s--Gallai differences are 0. It is perhaps natural to wonder, though, what properties of threshold graphs may continue to hold in a more general form if this condition is relaxed somewhat. In light of the significance of Erd\H{o}s--Gallai differences of 1, at least in the degree sequence problem of~\cite{ForcedEdges}, we make a definition.
\begin{defn} A degree sequence $d$ is a \emph{weakly threshold sequence} if for all $k \in \{1,\dots,m(d)\}$ we have $\Delta_k(d) \leq 1$. If such is the case, then every realization of $d$ is called a \emph{weakly threshold graph}. \end{defn}
Since the four-vertex path is a weakly threshold graph but not a threshold graph, the class of weakly threshold graphs properly contains the class of threshold graphs.
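These conditions are easy to test computationally. The following small sketch (illustrative only, not part of the paper's arguments; the function names are ours) computes the Erd\H{o}s--Gallai differences of a nonincreasing sequence of nonnegative integers and classifies it using Theorems~\ref{thm: threshold deg seq char} and~\ref{thm: Erdos Gallai} together with the definition above:
\begin{verbatim}
def eg_differences(d):
    """Erdos-Gallai differences Delta_1(d),...,Delta_m(d) of a
    nonincreasing sequence d of nonnegative integers."""
    n = len(d)
    m = max(i for i in range(1, n + 1) if d[i - 1] >= i - 1)   # m(d)
    return [k * (k - 1) + sum(min(k, x) for x in d[k:]) - sum(d[:k])
            for k in range(1, m + 1)]

def classify(d):
    diffs = eg_differences(d)
    if sum(d) % 2 == 1 or any(x < 0 for x in diffs):
        return "not graphic"
    if all(x == 0 for x in diffs):
        return "threshold"
    if all(x <= 1 for x in diffs):
        return "weakly threshold"
    return "graphic, but not weakly threshold"

print(classify([2, 2, 1, 1]))        # degree sequence of P_4: weakly threshold
print(classify([4, 3, 2, 2, 1]))     # a threshold sequence
print(classify([3, 3, 1, 1, 1, 1]))  # the split graph H used later: not weakly threshold
\end{verbatim}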
In this paper, we review several characterizations of threshold graphs and show that for most of them, a more general property holds for weakly threshold graphs. In Section~\ref{sec: majorization} we establish some preliminary results on Erd\H{o}s--Gallai differences and show that, as for threshold graphs and threshold sequences, the weakly threshold graphs are split graphs, and the weakly threshold sequences have nearly symmetric Ferrers diagrams and appear at the top of the majorization order on degree sequences. In Section~\ref{sec: iter const} we examine iterative constructions of threshold and weakly threshold graphs. In Section~\ref{sec: forb subgr} we show that the weakly threshold graphs form a hereditary graph class and characterize them in terms of forbidden induced subgraphs; we see that weakly threshold graphs form a notable subclass of both the interval graphs and their complements. In Section~\ref{sec: enum} we enumerate the weakly threshold sequences and graphs and compare these numbers to those of the threshold graphs.
Throughout the paper, we will use $K_n$, $P_n$, and $C_n$, respectively, to denote the complete graph, the path, and the cycle with $n$ vertices. We use $V(G)$ to denote the vertex set of a graph $G$. The \emph{open neighborhood} of a vertex $v$ is the set of vertices adjacent to $v$; the \emph{closed neighborhood} of $v$ is the union of $\{v\}$ and the open neighborhood of $v$. Other terms and notation will be defined as they are encountered.
\section{Preliminaries and majorization} \label{sec: majorization}
In this section we focus on weakly threshold sequences, showing that they satisfy approximate versions of the Ferrers diagram symmetry and majorization properties of threshold sequences, which we describe below. Along the way we will also show that every weakly threshold graph is a split graph.
In discussing graph degree sequences, it has often proved useful to associate with a list $d$ of nonnegative integers its \emph{corrected Ferrers diagram} $C(d)$ (see, for example, the monograph~\cite{MahadevPeled95}, from which we adapt our notation and presentation). Assuming that $d=(d_1,\dots,d_n)$ and that the terms of $d$ are nonincreasing, we define $C(d)$ to be the $n\times n$ matrix with entries drawn from $\{0,1,\star\}$ such that the entries on the main diagonal all equal $\star$, and for each $i \in \{1,\dots,n\}$, the leftmost $d_i$ entries not on the main diagonal are equal to 1, with the remaining entries in the row each equaling 0.
Recall now our definition of $m(d)$, the \emph{corrected Durfee number of $d$}, from the previous section: \[m(d) = \max\{i:d_i \geq i-1\}.\] Pictorially, $m(d)$ represents the side length (measured in entries) of the largest square containing no 0 that occupies the top left corner of $C(d)$. (This square is called the \emph{corrected Durfee square} of $C(d)$.) As an example, in Figure~\ref{fig: C(11110) and C(2211)} we exhibit $C(s)$ and $C(s')$, where $s=(1,1,1,1,0)$ and $s'=(2,2,1,1)$; we see that $m(s) = m(s')=2$. \begin{figure}
\caption{The corrected Ferrers diagrams of $(1,1,1,1,0)$ and $(2,2,1,1)$}
\label{fig: C(11110) and C(2211)}
\end{figure}
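Written out entrywise from the definition, the two diagrams in Figure~\ref{fig: C(11110) and C(2211)} are
$$C(s)=\begin{pmatrix}\star&1&0&0&0\\ 1&\star&0&0&0\\ 1&0&\star&0&0\\ 1&0&0&\star&0\\ 0&0&0&0&\star\end{pmatrix}, \qquad C(s')=\begin{pmatrix}\star&1&1&0\\ 1&\star&1&0\\ 1&0&\star&0\\ 1&0&0&\star\end{pmatrix},$$
each with a $2 \times 2$ corrected Durfee square in its top left corner.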
Given two lists $p=(p_1,\dots,p_j)$ and $q=(q_1,\dots,q_k)$ of nonnegative integers, we say that \emph{$p$ majorizes $q$}, and we write $p \succeq q$, if $\sum_{i=1}^j p_i = \sum_{i=1}^k q_i$ and if for each positive integer $l$, $\sum_{i=1}^l p_i \geq \sum_{i=1}^l q_i$ (where undefined sequence terms are assumed to be 0).
It is well known that the partitions of any fixed nonnegative integer form a poset under the relation $\succeq$. Furthermore, if $p$ and $q$ are lists of positive integers (ignoring any 0's) that have the same sum, $p$ is a graph degree sequence, and $p \succeq q$, then $q$ is a degree sequence of a graph as well.
Threshold sequences have characterizations in terms of corrected Ferrers diagrams and majorization, as the following theorem shows. We will see shortly that relaxed versions of these statements hold for weakly threshold sequences.
\begin{thm}[see~{\cite[Theorem 3.2.2]{MahadevPeled95}}]\label{thm: deg seq props of threshold graphs} Let $d$ be a degree sequence. The following are equivalent. \begin{enumerate} \item[\textup{(i)}] The sequence $d$ is a threshold sequence. \item[\textup{(ii)}] The corrected Ferrers diagram $C(d)$ is a symmetric matrix. \item[\textup{(iii)}] If $e$ is a degree sequence and $e \succeq d$, then $d = e$. \end{enumerate} \end{thm}
In order to describe the corrected Ferrers diagrams of weakly threshold sequences, we first give a pictorial interpretation of the Erd\H{o}s--Gallai differences.
\begin{lem}\label{lem: EG diff via diagram} Let $d$ be a degree sequence. For all $k \in \{1,\dots,m(d)\}$, the Erd\H{o}s--Gallai difference $\Delta_k(d)$ equals $B_k(d)-R_k(d)$, where $B_k(d)$ is the number of 1's in the first $k$ columns of $C(d)$ that lie below the diagonal of stars, and $R_k(d)$ is the number of 1's in the first $k$ rows of $C(d)$ lying to the right of the diagonal of stars. \end{lem} \begin{proof} Fix $k \in \{1,\dots,m(d)\}$, and observe that $R_k(d)$ equals $\sum_{i=1}^k d_i - k(k-1)/2$, since $d_i \geq k-1 \geq i-1$ for each $i \leq k$ (as $k \leq m(d)$), so every cell to the left of the diagonal in the first $k$ rows contains a 1. Further note that $B_k(d)$ equals $k(k-1)/2 + \sum_{i>k} \min\{k,d_i\}$: the cells below the diagonal in the first $k$ rows contribute $k(k-1)/2$, and each row $i > k$ contributes $\min\{k,d_i\}$ 1's to the first $k$ columns. Subtracting $R_k(d)$ from $B_k(d)$ yields $\Delta_k(d)$. \end{proof}
Lemma~\ref{lem: EG diff via diagram}, together with Theorem~\ref{thm: threshold deg seq char}, links statements (i) and (ii) in Theorem~\ref{thm: deg seq props of threshold graphs} when we observe that each $1$ in $C(d)$ lies in one of the first $m(d)$ rows or one of the first $m(d)$ columns of the diagram. Note that a degree sequence $d$ is a threshold sequence if and only if each star in $C(d)$ has an equal number of 1's below it and to the right of it.
Lemma~\ref{lem: EG diff via diagram} provides us with a similar statement about $C(d)$ when $d$ is a weakly threshold sequence; in this case, the numbers of 1's below and to the right of each star can differ by at most 1, so $C(d)$ is ``almost symmetric.'' Furthermore, the instances where the numbers do differ for a given star are constrained by the fact that $0 \leq \Delta_k(d) \leq 1$ for all $k \in \{1,\dots,m(d)\}$; for instance, if the numbers do differ for two stars among the first $m(d)$ stars in $C(d)$, and they do not differ for any stars between these two, then it follows from Lemma~\ref{lem: EG diff via diagram} that one of these two stars must have one more 1 below it than to the right of it, and the other star must have the opposite situation.
Before discussing an analogue for statement (iii) in Theorem~\ref{thm: deg seq props of threshold graphs}, we mention another class of graphs with a degree sequence characterization. A graph is \emph{split} if its vertex set can be partitioned into (possibly empty) sets, where one is a clique and the other is an independent set. As shown in~\cite{HammerSimeone81}, a degree sequence $d$ is the degree sequence of a split graph if and only if $\Delta_m(d)=0$, where $m=m(d)$. It follows from Theorem~\ref{thm: threshold deg seq char} that threshold graphs are split graphs. We now consider weakly threshold graphs.
\begin{lem} \label{lem: Delta_m is even} If $d$ is a degree sequence and $m=m(d)$, then $\Delta_m(d)$ is an even number. \end{lem} \begin{proof} Since each $1$ in $C(d)$ lies in one of the first $m(d)$ rows or one of the first $m(d)$ columns of the diagram, and the sum of the terms of $d$ is an even number (the sum is twice the number of edges), it follows from Lemma~\ref{lem: EG diff via diagram} that $\Delta_m(d)$ is also even. \end{proof}
\begin{thm} \label{thm: WT graphs are split} Every weakly threshold graph is a split graph. \end{thm} \begin{proof} Let $d$ be the degree sequence of a weakly threshold graph $G$, and let $m=m(d)$. By definition, $\Delta_m(d) \leq 1$, so Lemma~\ref{lem: Delta_m is even} implies that $\Delta_m(d)=0$, which in turn implies that $G$ is a split graph. \end{proof}
We now discuss the majorization order on degree sequences. Statement (iii) of Theorem~\ref{thm: deg seq props of threshold graphs} implies that in the poset of degree sequences ordered by majorization, the threshold sequences are the maximal elements. Our next result shows that weakly threshold sequences, though they include degree sequences that are not maximal in this poset, do form an upward-closed subset of the poset. We first require some preliminary ideas.
A \emph{unit transformation} is an operation on a degree sequence $a=(a_1,\dots,a_n)$ that subtracts 1 from $a_i$ and adds 1 to $a_j$ for indices $i,j \in \{1,\dots,n\}$ such that $a_i \geq a_j+2$. Pictorially, if $a'$ is the resulting sequence (after reordering terms into descending order), then $C(a')$ is obtained by ``moving'' a 1 from one row of $C(a)$ down to the end of the nonzero entries in a lower row (replacing a 0 in that row and being replaced by a 0 in the original row). A well known result states that if $a \succeq b$, then $b$ can be obtained from $a$ after a finite sequence of unit transformations.
\begin{lem}\label{lem: EG diffs preserved upwards} If the degree sequence $d$ can be obtained by a unit transformation on a degree sequence $e$, and if $m'=\min\{m(d),m(e)\}$, then for each $k \in \{1,\dots,m'\}$ we have $\Delta_k(e) \leq \Delta_k(d)$. \end{lem} \begin{proof} Define $R_k(d)$ and $B_k(d)$ as in the statement of Lemma~\ref{lem: EG diff via diagram}. Since $d$ is obtained through a unit transformation on $e$, it follows that $R_k(d) \leq R_k(e) \leq B_k(e) \leq B_k(d)$; by Lemma~\ref{lem: EG diff via diagram}, we see that $\Delta_k(e) \leq \Delta_k(d)$. \end{proof}
\begin{lem}\label{lem: if m changes} Let $d$ and $e$ be degree sequences, and let $m=m(e)$ and $m'=m(d)$. Suppose that $e \succeq d$ and that $d$ is obtained by performing a single unit transformation on $e$. If $m' < m$, then $m=m'+1$ and $\Delta_{m}(e) = \Delta_{m'}(d)$. \end{lem} \begin{proof} Suppose that $d$ and $e$ are as described, with $m' < m$, and suppose that the unit transformation on $e$ that produces $d$ reduces $e_i$ by 1 and increases $e_j$ by 1. Now $e_m -1 \leq d_{m}< m-1 \leq e_m$, so $i=m$ and $e_m = m-1$ and $d_m = m-2$, while $d_l = e_l$ for $1 \leq l \leq m-1$. We then find that $d_{m-1} \geq d_m = m-2$, so $m-1 \leq m' < m$, which implies $m' = m-1$. When $l > m$, we have $e_l \leq e_{m+1} < m$, so $\min\{m,e_l\} = e_l$. Likewise, when $l>m'$ we have $\min\{m',d_l\} = d_l$. Then \begin{align*} \Delta_m(e) - \Delta_{m'}(d) &= m(m-1) - m'(m'-1) + \sum_{l > m} \min\{m,e_l\} - \sum_{l > m'} \min\{m',d_l\} - \sum_{l=1}^m e_l + \sum_{l=1}^{m'} d_l\\ &= 2(m-1) + e_j - d_j - d_m - e_m\\ &= 0. \qedhere \end{align*} \end{proof}
\begin{thm}\label{thm: WT sequences are upward closed} If $d$ is a weakly threshold sequence and $e$ is a degree sequence such that $e \succeq d$, then $e$ is also a weakly threshold sequence. \end{thm} \begin{proof} It suffices to prove the result in the case that $d$ is obtained via a single unit transformation on $e$; suppose that this is the case. If $m(e) \leq m(d)$, then the result follows inductively from Lemma~\ref{lem: EG diffs preserved upwards}. If instead $m(e)>m(d)$ then applying Lemmas~\ref{lem: EG diffs preserved upwards} and~\ref{lem: if m changes} inductively we find that each of $\Delta_1(e), \dots, \Delta_{m(e)}(e)$ is equal to one of $\Delta_1(d), \dots, \Delta_{m(d)}(d)$, and hence $e$ is a weakly threshold sequence. \end{proof}
\section{Iterative construction} \label{sec: iter const}
Threshold graphs have a characterization via a construction algorithm. A \emph{dominating vertex} is a vertex that is adjacent to all other vertices in the graph. An \emph{isolated vertex} is a vertex that is adjacent to none of the other vertices.
\begin{thm}[see~{{\cite[Theorem 1.2.4]{MahadevPeled95}}}] \label{thm: threshold dom/iso} A graph $G$ is a threshold graph if and only if $G$ can be obtained by beginning with a single vertex and iteratively adding either a dominating vertex or an isolated vertex. \end{thm}
In this section we show that weakly threshold graphs can be obtained from small initial graphs by repeatedly adding vertices; to generate all weakly threshold graphs we slightly relax the conditions on the adjacencies and non-adjacencies required of the added vertices.
Given a graph $G$ and a vertex $v$ of $G$, we say that $v$ is \emph{weakly dominating} if $v$ is adjacent to every other vertex of $G$ except for a single vertex that has minimum degree in $G$. The vertex $v$ is instead \emph{weakly isolated} if $v$ has no neighbors except for a single vertex that has maximum degree in $G$. A \emph{semi-joined $P_4$} in $G$ is a collection $P$ of 4 vertices that induce a subgraph isomorphic to $P_4$, in which the path midpoints are adjacent to every vertex not in $P$, and the path endpoints are adjacent to no vertex not in $P$. (Note that the midpoints of a semi-joined $P_4$ are weakly dominating vertices, and the endpoints are weakly isolated vertices.)
Most of this section will be devoted to establishing the following.
\begin{thm}\label{thm: char via Construction Algorithm} A graph $G$ is a weakly threshold graph if and only if $G$ can be obtained by beginning with a graph isomorphic to $K_1$ or to $P_4$ and iteratively adding to the graph either a dominating vertex, an isolated vertex, a weakly dominating vertex, a weakly isolated vertex, or a semi-joined $P_4$. \end{thm}
The additions in the theorem refer to new vertices added; in no case do we change any of the adjacency relationships that existed prior to the addition of a new vertex or set of vertices.
We present the proof of Theorem~\ref{thm: char via Construction Algorithm} in Section~\ref{subsec: pf of iter const}. In order to facilitate the proof and to lay groundwork for later sections, in Section~\ref{subsec: canon decomp} we introduce a decomposition scheme of graphs and degree sequences that will assist in analyzing the Erd\H{o}s--Gallai differences of a degree sequence. Following the proof of Theorem~\ref{thm: char via Construction Algorithm}, in Section~\ref{subsec: complements} we present a few of its consequences, including the fact that weakly threshold graphs are closed under complementation.
\subsection{The canonical decomposition of graphs and degree sequences} \label{subsec: canon decomp}
In this section we recall two composition operations and their accompanying decompositions. Both decompositions were introduced by Tyshkevich in multiple papers (our presentation is adapted from~\cite{Tyshkevich00}, which contains a summary and bibliography) and are called the \emph{canonical decomposition}; one is a decomposition of graphs, and the other is a natural translation of the ideas into the context of degree sequences. After describing the results of Tyshkevich, we use the canonical decomposition of a degree sequence to better understand its list of Erd\H{o}s--Gallai differences.
Let $(G,A,B)$ denote a triple consisting of a split graph $G$ and a partition $A,B$ of its vertex set into an independent set $A$ and a clique $B$ such that either $A$ or $B$ can be empty but $A \cup B \neq \emptyset$. Since the partition $A,B$ is fixed, we refer to this triple as a \emph{splitted graph}. Similarly, if $d$ is the degree sequence of a split graph having partition $A,B$, then we may form a \emph{splitted degree sequence} by writing the terms of $d$ with a semicolon separating the terms corresponding to vertices in $B$ from terms corresponding to vertices in $A$ (note that we can do this even while maintaining the terms in descending order). For example, the split graph with degree sequence $(3,3,2,1,1)$ has two distinct partitions of its vertex set into an independent set and a clique; the associated splitted degree sequences are $(3,3,2;1,1)$ and $(3,3;2,1,1)$.
Given a splitted graph $(G,A,B)$ and a graph $H$, each with nonempty vertex sets, we define the \emph{composition} $(G,A,B) \circ H$ to be the graph formed by taking the disjoint union of $G$ and $H$ and adding to it all edges joining vertices in $B$ to vertices in $V(H)$.
We can also compose two splitted graphs $(G,A,B)$ and $(H,C,D)$ by treating the second graph simply as a graph, though, if we wish, we can also think of the resulting graph $(G,A,B) \circ (H,C,D)$ as a splitted graph with independent set $A \cup C$ and clique $B \cup D$. We trust that context will make it clear whether the result of a composition is intended as a (non-partitioned) graph or a splitted graph. With these understandings, however, we note that the operation $\circ$ is associative.
We use the same notation $\circ$ to indicate the analogous composition of a splitted degree sequence with a graph degree sequence or splitted degree sequence. Here, if $d=(d_1,\dots,d_k;d_{k+1},\dots,d_n)$ and $e = (e_1,\dots,e_m)$, then $d \circ e$ is obtained by adding $m$ to each of the terms $d_1,\dots,d_k$, adding $k$ to each of the terms $e_1,\dots,e_m$, and combining these results with the (unchanged) terms $d_{k+1},\dots,d_n$ into a sequence in descending order. In symbols, \[(d_1,\dots,d_k;d_{k+1},\dots,d_n) \circ (e_1,\dots,e_m) = (d_1+m,\dots,d_k+m, e_1+k,\dots,e_m+k,d_{k+1},\dots,d_n).\] Clearly, the composition of the splitted degree sequence of $(G,A,B)$ and the degree sequence of a graph $H$ is the degree sequence of the graph $(G,A,B) \circ H$. (We can analogously define the composition of two splitted degree sequences and treat these compositions with the same understandings as with the compositions of graphs.)
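As a small computational illustration (a sketch, not part of the original presentation; the function name is ours), the composition of a splitted degree sequence with a degree sequence can be implemented directly from the displayed formula:
\begin{verbatim}
def compose(d_split, e):
    """Compose a splitted degree sequence, given as a pair
    (clique_part, independent_part), with a degree sequence e."""
    clique_part, indep_part = d_split
    k, m = len(clique_part), len(e)
    merged = ([x + m for x in clique_part]   # terms before the semicolon gain m
              + [y + k for y in e]           # terms of e gain k
              + list(indep_part))            # terms after the semicolon unchanged
    return sorted(merged, reverse=True)

print(compose(((2, 2), (1, 1)), (1, 1, 1, 1, 0)))
# -> [7, 7, 3, 3, 3, 3, 2, 1, 1], as in the example pictured below
\end{verbatim}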
We call a (splitted or non-partitioned) graph or degree sequence \emph{decomposable} if it can be written as the composition of other graphs or sequences; otherwise, it is \emph{indecomposable}.
Tyshkevich showed that graphs and degree sequences can be completely decomposed in a unique way, which we refer to as the \emph{canonical decomposition}.
\begin{thm}[\cite{Tyshkevich00}]\label{thm: canon decomp}\mbox{} \begin{enumerate} \item[\textup{(i)}] Every graph $G$ can be expressed as a composition \[(G_k,A_k,B_k)\circ \dots \circ (G_1,A_1,B_1) \circ G_0\] of indecomposable components; here the leftmost $k$ components are splitted graphs and the rightmost is a graph (we say that $k=0$ if $G$ is indecomposable). Moreover, this decomposition is unique in the sense that two canonical decompositions of a graph have the same number of components and have isomorphisms between corresponding components (that preserve splitting partitions, in the case of splitted graph components). \item[\textup{(ii)}] Every degree sequence $d$ can be uniquely expressed as a composition \[d=\alpha_k\circ \dots \circ \alpha_1 \circ \alpha_0\] of indecomposable components; here the leftmost $k$ components are splitted degree sequences and the rightmost is a degree sequence (we again say that $k=0$ if $d$ is indecomposable). \end{enumerate} \end{thm}
Our first application of the canonical decomposition will be to describe the Erd\H{o}s--Gallai differences of degree sequences. In~\cite{HeredUniII} the author presented a connection between the canonical components of a graph and the Erd\H{o}s--Gallai differences of its degree sequence that equal 0. We now describe a more general connection between Erd\H{o}s--Gallai differences and the composition operation $\circ$.
We begin by examining the corrected Ferrers diagrams of compositions. Suppose that $d$ is a splitted degree sequence with $n$ terms (with $k$ terms before its semicolon), and that $e$ is a degree sequence with $m$ terms. We form a new corrected Ferrers diagram by first dividing $C(d)$ into four blocks, with rows separated after the first $k$ rows and columns similarly separated. We move these blocks to the corresponding corners of a larger $(n+m)$-by-$(n+m)$ diagram, insert $C(e)$ in the middle of this diagram, and fill in the rest of the diagram with blocks of 1's above and to the left of the inserted copy of $C(e)$ and with blocks of 0's below and to the right. For example, if we let $d=(2,2;1,1)$ and $e=(1,1,1,1,0)$, then $C(d \circ e)$ is shown in Figure~\ref{fig: composition Ferrers}, with dashed lines illustrating the blocks (compare this diagram to those in Figure~\ref{fig: C(11110) and C(2211)}). It is straightforward to verify that if $b = d \circ e$, then the diagram constructed above is the corrected Ferrers diagram $C(b)$. \begin{figure}
\caption{The corrected Ferrers diagram of $(2,2;1,1)\circ(1,1,1,1,0) = (7,7,3,3,3,3,2,1,1)$.}
\label{fig: composition Ferrers}
\end{figure}
Note now that $m(b) = k + m(e)$. By the symmetry of the blocks of 1's and of 0's which pad the copy of $C(e)$, Lemma~\ref{lem: EG diff via diagram} implies that $\Delta_i(b)=\Delta_i(d)$ for $1 \leq i \leq k$, and for $1 \leq i \leq m(e)$ we have $\Delta_{k+i}(b) = \Delta_i(e)$. Hence, the list of the first $m(b)$ Erd\H{o}s--Gallai differences of $b$ is obtained by appending the Erd\H{o}s--Gallai differences of $e$ to the first $k$ Erd\H{o}s--Gallai differences of $d$.
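As a check, for $b = (2,2;1,1)\circ(1,1,1,1,0) = (7,7,3,3,3,3,2,1,1)$ we have $m(b) = 4$, and a direct computation gives
$$\bigl(\Delta_1(b),\Delta_2(b),\Delta_3(b),\Delta_4(b)\bigr) = (1,0,2,2),$$
which is indeed the first two Erd\H{o}s--Gallai differences $(1,0)$ of $(2,2,1,1)$ followed by the first two differences $(2,2)$ of $(1,1,1,1,0)$.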
More generally, we obtain the following by induction.
\begin{thm} \label{thm: concatenate EG diffs} Suppose that $d$ is the degree sequence of a graph, and $d = \alpha_k \circ \dots \circ \alpha_1 \circ \alpha_0$ is the canonical decomposition of $d$, where in each $\alpha_i$ there are $m_i$ terms appearing before the semicolon.
Suppose also that $S(d)$ is the sequence of integers beginning with the first $m_k$ Erd\H{o}s--Gallai differences of $\alpha_k$, followed by the first $m_{k-1}$ Erd\H{o}s--Gallai differences of $\alpha_{k-1}$, and so on, up through the first $m_1$ Erd\H{o}s--Gallai differences of $\alpha_1$, and ending with the first $m(\alpha_0)$ Erd\H{o}s--Gallai differences of $\alpha_0$.
The terms of $S(d)$, in this order, are precisely the first $m(d)$ Erd\H{o}s--Gallai differences of $d$. \end{thm}
\subsection{Proof of Theorem~\ref{thm: char via Construction Algorithm}} \label{subsec: pf of iter const}
We first prove that if a graph is weakly threshold then it can be built up through the operations described in the theorem. We begin with a lemma.
\begin{lem} \label{lem: max min degrees} If $d=(d_1,\dots,d_n)$ is the degree sequence of a weakly threshold graph $G$, then exactly one of the following is true: \begin{enumerate} \item[\textup{(a)}] $d_n=0$ and $G$ has an isolated vertex; \item[\textup{(b)}] $d_1=n-1$ and $G$ has a dominating vertex; \item[\textup{(c)}] $d_2 < d_1 = n-2$ and $d_{n-1} = d_n = 1$, and $G$ has a weakly isolated vertex; \item[\textup{(d)}] $d_1 = d_2 = n-2$ and $d_{n-1} > d_n = 1$, and $G$ has a weakly dominating vertex; \item[\textup{(e)}] $d_1=d_2=n-2$ and $d_{n-1}=d_n=1$ and $\Delta_2(d)=0$, and $G$ has four vertices forming a $P_4$ that is semi-joined to the rest of the graph. \end{enumerate} In each case, deleting the vertex (or four vertices, in the last case) described leaves a graph that is also weakly threshold. \end{lem} \begin{proof} Comparing the degree conditions in each case, we see that no two of the properties (a)--(e) can simultaneously hold.
Suppose that neither (a) nor (b) holds. Then $d_1 \leq n-2$ and $d_n \geq 1$; hence \[1 \geq \Delta_1(d) = 1\cdot 0 + (n-1)\cdot 1 - d_1,\] and $d_1=n-2$. We know $d_2 \neq 1$, since otherwise the terms of $d$ would then sum to an odd number, contradicting our assumption that $d$ is a degree sequence. Letting $p$ denote the number of terms of $d$ exactly equal to $1$, we can now write \[1 \geq \Delta_2(d) = 2 \cdot 1 + (n-2-p)\cdot 2 + p \cdot 1 - (n-2)-d_2.\] Then $n-2 = d_1 \geq d_2 \geq n-1-p$, so $p \geq 1$ and hence $d_n=1$. In fact, if $d_2 < d_1$ then $d_{n-1}=d_n=1$, so (c) holds; if instead $d_{n-1}>d_n$, then $d_2=d_1$ and (d) holds. Finally, if $d_1=d_2=n-2$ and $d_{n-1}=d_n = 1$, then $p \geq 2$. Since $d$ is a degree sequence, we also have $0 \leq \Delta_2(d) = 2-p$; we see that in fact $\Delta_2(d)=0$, and property (e) holds. \end{proof}
Lemma~\ref{lem: max min degrees} allows us to apply induction on the number of vertices in a weakly threshold graph. Each weakly threshold graph on up to four vertices is either isomorphic to $P_4$ or is a threshold graph, in which case Theorem~\ref{thm: threshold dom/iso} implies that the graph can be built up from $K_1$ by adding dominating and/or isolated vertices.
Suppose now that $G$ is an arbitrary weakly threshold graph on $n \geq 5$ vertices, and that every weakly threshold graph on fewer than $n$ vertices can be built up from $K_1$ or $P_4$ by iteratively adding a vertex or vertices as claimed. It follows from Lemma~\ref{lem: max min degrees} that $G$ may be obtained by adding a dominating, isolated, weakly dominating, or weakly isolated vertex, or adding a semi-joined $P_4$, to a weakly threshold graph on fewer than $n$ vertices, and so by the induction hypothesis $G$ can be built up in the desired way from $K_1$ or $P_4$. This completes our proof that weakly threshold graphs may each be constructed using the operations from Theorem~\ref{thm: char via Construction Algorithm}.
In order to prove the converse, we first present a simplifying lemma. Let $\mathcal{C}$ denote the set of graphs that may be built up from $K_1$ or $P_4$ through the operations described in Theorem~\ref{thm: char via Construction Algorithm}. Call these operations (adding a dominating vertex, an isolated vertex, a weakly dominating vertex, a weakly isolated vertex, or a semi-joined $P_4$) \emph{permissible operations}. Call the addition of a dominating vertex or an isolated vertex a Type 1 operation, call the addition of a weakly dominating vertex or weakly isolated vertex a Type 2 operation, and call the addition of a semi-joined $P_4$ a Type 3 operation. (We will use these terms in Sections~\ref{sec: forb subgr} and~\ref{sec: enum} as well.)
\begin{lem} \label{lem: weaklies placement} For any element $G$ of $\mathcal{C}$, there exists a sequence of permissible operations that constructs $G$ from $K_1$ or $P_4$ with the property that between any Type 1 operation and a later Type 2 operation, a Type 3 operation is performed.\end{lem} \begin{proof} We proceed by induction on the number $p$ of permissible operations needed to construct $G$; fix a sequence of operations $O_1,\dots,O_p$ that constructs $G$ from $K_1$ or $P_4$. If $p<2$ then the conclusion holds trivially.
Now suppose that $p=k+1$ for some integer $k \geq 1$, and all graphs in $\mathcal{C}$ that can be constructed from $K_1$ or $P_4$ using $k$ permissible operations can be constructed so that between any Type 1 operation and a later Type 2 operation, there occurs a Type 3 operation.
Let $G'$ be the graph on which the operation $O_{k+1}$ is performed to create $G$; by the induction hypothesis, we may assume that in the construction of $G'$ at least one Type 3 operation occurs between any Type 1 operation and a later Type 2 operation. The conclusion of the lemma holds for $G$ except possibly in the case that $O_{k+1}$ is a Type 2 operation and $O_{k}$ is a Type 1 operation, so assume that $O_k$ and $O_{k+1}$ are operations of these types. Further let $G''$ be the graph on which the operation $O_{k}$ is performed to create $G'$.
If $O_k$ is the addition of an isolated vertex and $O_{k+1}$ is the addition of a weakly isolated vertex, then $G$ can be formed by first adding a weakly isolated vertex to $G''$ (call the resulting graph $G^*$) and then adding an isolated vertex. The induction hypothesis applies to $G^*$, so some sequence of operations creating $G^*$ has Type 3 operations in all the appropriate places; this sequence, followed by adding an isolated vertex, is a sequence of operations creating $G$ that satisfies the claim of the lemma. A similar argument applies if $O_k$ and $O_{k+1}$ are the additions of a dominating vertex and a weakly dominating vertex, respectively.
If $O_k$ is the addition of an isolated vertex and $O_{k+1}$ is the addition of a weakly dominating vertex, then $G$ can be created by adding a dominating vertex to $G''$ (again call the resulting graph $G^*$) and then adding an isolated vertex. As before, we obtain a suitable construction of $G$ by appending the addition of an isolated vertex to a suitable construction of $G^*$. A similar argument handles the case that $O_k$ is the addition of a dominating vertex and $O_{k+1}$ is the addition of a weakly isolated vertex, completing the inductive step. \end{proof}
We can now prove that every graph in $\mathcal{C}$ is weakly threshold. We proceed by induction on the number of vertices.
By Theorem~\ref{thm: threshold dom/iso}, any graph in $\mathcal{C}$ on four or fewer vertices is either a threshold graph or $P_4$ and hence must be weakly threshold.
Now suppose that every graph in $\mathcal{C}$ with fewer than $n$ vertices, where $n \geq 5$, is weakly threshold, and let $G$ be a graph in $\mathcal{C}$ with $n$ vertices. Consider a sequence of permissible operations that produces $G$ from $K_1$ or $P_4$ and that has the property described in Lemma~\ref{lem: weaklies placement}. We proceed by cases according to the last-performed operation. Let $d=(d_1,\dots,d_n)$ be the degree sequence of $G$.
\noindent \emph{Case: The last operation in creating $G$ is a Type 1 operation or a Type 3 operation.}
Observe that graphs with a dominating vertex, isolated vertex, or semi-joined $P_4$ are all decomposable under $\circ$. Let $d$ be the degree sequence of $G$, and suppose that $G'$ is the graph that the last operation is performed on to yield $G$, and that $d'$ is the degree sequence of $G'$. If the last operation in constructing $G$ is the addition of a dominating vertex, then $d = (0;)\circ d'$. If the last operation is the addition of an isolated vertex, then $d=(;0)\circ d'$. Finally, if the last operation is the addition of a semi-joined $P_4$, then $d=(2,2;1,1)\circ d'$. By the induction hypothesis, $G'$ is a weakly threshold graph, so the first $m(d')$ Erd\H{o}s--Gallai differences of $d'$ are all $0$ or $1$. Let $s'$ denote the list of these differences, and suppose that $s$ is the list of the first $m(d)$ Erd\H{o}s--Gallai differences of $d$. It follows from Theorem~\ref{thm: concatenate EG diffs} that adding a dominating vertex forms $s$ by inserting a 0 at the beginning of $s'$, adding an isolated vertex yields $s=s'$, and adding a semi-joined $P_4$ forms $s$ by inserting the terms $1,0$ at the beginning of $s'$. In each case, each entry of $s$ is 0 or 1, so $G$ is weakly threshold.
\noindent \emph{Case: The last operation in creating $G$ adds a weakly dominating vertex.}
Let $v$ denote the added vertex, and let $G'=G-v$. The degree sequence of $G'$ is $d'=(d_2-1,\dots,d_{n-1}-1,d_n)$. By the induction hypothesis, $G'$ is a weakly threshold graph, so the first $m(d')$ Erd\H{o}s--Gallai differences of $d'$ are all $0$ or $1$.
By Lemma~\ref{lem: weaklies placement}, we may assume that a Type 3 operation was performed after the last Type 1 operation, if any Type 1 operation occurred; if none did, we may assume that the construction of $G$ began with $P_4$. Note now that when the construction algorithm began with $P_4$, or the last Type 3 operation was employed, the immediately resulting graph had minimum degree 1 and maximum degree equal to 2 less than the number of vertices. These properties are preserved by any Type 2 operations that follow, so we may assume that $G'$ has minimum degree 1 and maximum degree $n-3$.
We now compare the corrected Ferrers diagrams of $d$ and of $d'$. Observe that we may obtain $C(d)$ by first taking $C(d')$ and inserting a new first row and column containing $n-1$ copies of 1, as shown in the first diagram in Figure~\ref{fig: weakly dom diagram}. (In the diagrams we have shown a few specific entries, to emphasize the maximum and minimum degree in $d'$.) \begin{figure}
\caption{Additions to $C(d')$ in the construction of $C(d)$.}
\label{fig: weakly dom diagram}
\end{figure} The last row of $C(d')$ contains a single 1, followed by 0's and a terminal $\star$, and the last column of $C(d')$ contains only 0's and the final $\star$. Thus to complete the creation of $C(d)$ from $C(d')$, we interchange the 0 and the 1 in the last row of the augmented diagram.
Observe that $m(d) = m(d')+1$. Applying Lemma~\ref{lem: EG diff via diagram}, we see that $\Delta_1(d) = 1$, and $\Delta_{i}(d) = \Delta_{i-1}(d')$ for each $i$ such that $2 \leq i \leq m(d)$. It follows that $G$ is a weakly threshold graph.
\noindent \emph{Case: The last operation in creating $G$ adds a weakly isolated vertex.}
As before, let $v$ denote the added vertex, let $G'=G-v$, and let $d'$ denote the degree sequence of $G'$. By the induction hypothesis, $G'$ is a weakly threshold graph, and the first $m(d')$ Erd\H{o}s--Gallai differences of $d'$ are all $0$ or $1$. By the same argument as in the previous case, $G'$ has minimum degree 1 and maximum degree $n-3$.
We may obtain $C(d)$ in this case by first taking $C(d')$ and appending a new row and column each containing one copy of 1, as shown in the second diagram in Figure~\ref{fig: weakly dom diagram}. As a reminder, the diagram shows the single 0 in the top row of $C(d')$. To create $C(d)$, we interchange the 0 and the 1 in the first row of the augmented diagram.
Observe that $m(d) = m(d')$, and that by Lemma~\ref{lem: EG diff via diagram}, $\Delta_{i}(d) = \Delta_{i}(d')$ for each $i$ such that $1 \leq i \leq m(d)$. It follows that $G$ is a weakly threshold graph, and our proof of Theorem~\ref{thm: char via Construction Algorithm} is complete.
\subsection{Weakly threshold graphs and complementation} \label{subsec: complements}
The iterative construction in Theorem~\ref{thm: char via Construction Algorithm} allows us an easy conclusion about weakly threshold graphs and sequences not necessarily obvious from their definitions. Henceforth, let $\overline{G}$ denote the complement of a graph $G$. Also let $G\vee H$ and $G+H$ denote the join and disjoint union, respectively, of graphs $G$ and $H$. It is easy to see that for any graphs $G$ and $H$, $\overline{G \vee H} \cong \overline{G} + \overline{H}$.
\begin{thm}\label{thm: complementation} A graph is a weakly threshold graph if and only if its complement is. \end{thm} \begin{proof} The result follows by induction on the number of addition operations needed to construct a weakly threshold graph; first note that $K_1$ and $P_4$ are self-complementary. Adding a dominating vertex to a graph $G$ has the effect of simultaneously adding an isolated vertex to $\overline{G}$, i.e., $\overline{G \vee K_1} \cong \overline{G} + K_1$. Similarly, $\overline{G + K_1} \cong \overline{G} \vee K_1$, and additions of weakly dominating vertices and weakly isolated vertices have the same relationship. Finally, because $\overline{P_4} \cong P_4$, and complementation changes endpoints to midpoints and vice versa, adding a semi-joined $P_4$ to a graph has the effect of adding a semi-joined $P_4$ to the complement. Thus a graph can iteratively be constructed using these types of operations if and only if its complement can. \end{proof}
\section{A forbidden subgraph characterization} \label{sec: forb subgr}
In this section we show that the weakly threshold graphs form a hereditary graph class, i.e., the property of being a weakly threshold graph is preserved under taking induced subgraphs. This allows us to characterize these graphs in terms of a collection of minimal forbidden induced subgraphs, and it reveals a connection between weakly threshold graphs and interval graphs.
Given a graph $F$, we say that a graph $G$ is \emph{$F$-free} if no induced subgraph of $G$ is isomorphic to $F$. If $\mathcal{F}$ is a collection of graphs, then $G$ is \emph{$\mathcal{F}$-free} if $G$ is $F$-free for every element $F$ of $\mathcal{F}$. Let $2K_2$ denote $K_2+K_2$, and let $H$ and $S_3$ respectively denote the unique split graphs with degree sequences $(3,3,1,1,1,1)$ and $(3,3,3,1,1,1)$.
\begin{thm} \label{thm: forb subgr} A graph $G$ is a weakly threshold graph if and only if $G$ is $\{2K_2,C_4,C_5,H,\overline{H}, S_3,\overline{S_3}\}$-free. \end{thm} \begin{proof} In the following, let $\mathcal{F} = \{2K_2,C_4,C_5,H,\overline{H}, S_3,\overline{S_3}\}$.
Suppose first that $G$ is a weakly threshold graph. By Theorem~\ref{thm: WT graphs are split}, $G$ is a split graph. Since all split graphs are $\{2K_2,C_4,C_5\}$-free (this was proved by F\"{o}ldes and Hammer in~\cite{FoldesHammer76}), $G$ induces none of these three subgraphs.
By Theorem~\ref{thm: char via Construction Algorithm} we know that there is a sequence of operations $\mathcal{O}_1,\dots,\mathcal{O}_p$ of Types 1, 2, or 3 (as defined in the previous section) that create $G$ from $K_1$ or $P_4$. We prove that $G$ is $\mathcal{F}$-free by induction on $p$. Observe that if $p=0$, then $G$ is $K_1$ or $P_4$, both of which are $\mathcal{F}$-free.
Suppose now that $p=k+1$ for some nonnegative integer $k$, and assume that every weakly threshold graph that can be constructed from $K_1$ or $P_4$ via a sequence of $k$ addition operations is $\mathcal{F}$-free. Let $G'$ be the graph from which $G$ is created by applying the operation $\mathcal{O}_p$. By assumption, $G'$ is $\mathcal{F}$-free.
If $\mathcal{O}_p$ is a Type 1 or Type 3 operation, then $G = (G_1,A_1,B_1) \circ G'$, where $G_1$ is isomorphic to $K_1$ or $P_4$. Since $G_1$ and $G'$ are both $\mathcal{F}$-free, and we can verify that every graph in $\mathcal{F}$ is indecomposable, it follows that $G$ is $\mathcal{F}$-free as well.
Suppose instead that $\mathcal{O}_p$ is a Type 2 operation, and that $v$ is the vertex that is added to $G'$ to create $G$. Suppose also to the contrary that $G$ does induce an element of $\mathcal{F}$ other than $2K_2$, $C_4$, or $C_5$. Since this induced subgraph was not present in $G'$, it must contain the vertex $v$. Let $A,B$ be a partition of $V(G)$ into an independent set and a clique, respectively.
If $G$ contains an induced subgraph $F$ isomorphic to $H$, then the vertices of degree 1 in $F$ must belong to $A$, and the two other vertices belong to $B$. In the operation $\mathcal{O}_p$ the vertex $v$ cannot have been a weakly dominating vertex, since $v$ would have to be a dominating or weakly dominating vertex in $F$, and $H$ has no such vertex. Thus $v$ is a weakly isolated vertex in $G$ and hence is one of the vertices of $F$ in $A$. Let $w$ denote the neighbor of $v$ in $F$, and let $x$ denote the other vertex of degree 3 in $F$. Since $v$ was added to $G'$ as a weakly isolated vertex, this implies that $w$ was a vertex of maximum degree in $G'$, so in $G$ the degree of $w$ is larger than the degree of $x$. Since in $F$ the vertex $x$ has the same degree as $w$, the vertex $w$ must have a neighbor $y$ that $x$ does not; this vertex must belong to $A$, along with the vertices of degree 1 in $F$. However, the vertex $y$, together with the vertices of $F-v$, then induces $H$ in $G'$, a contradiction to the induction hypothesis.
If instead $G$ contains an induced subgraph $F$ isomorphic to $S_3$, then again the vertices of degree 1 in $F$ belong to $A$, while the vertices of degree 3 belong to $B$. Since $F$ contains no dominating or weakly dominating vertex, as before the vertex $v$ was added during $\mathcal{O}_p$ as a weakly isolated vertex, so $v$ is one of the vertices of $F$ in $A$. Let $w$ be the neighbor of $v$ in $F$, and denote the other vertices of $F$ in $B$ by $x$ and $x'$. Since $v$ was added as a weakly isolated vertex in $\mathcal{O}_p$, vertex $w$ has a higher degree in $G$ than do $x$ or $x'$. Since in $F$ the vertices $x$ and $x'$ have the same degree as $w$, each of these vertices must be non-adjacent to some neighbor of $w$ other than $v$. If some neighbor $y$ of $w$ other than $v$ is non-adjacent to both $x$ and $x'$, then $y$, together with the vertices of $F-v$, induces $S_3$ in $G'$, a contradiction to the induction hypothesis. Otherwise, $w$ has neighbors $y$, which is adjacent to $x$ but not $x'$, and $y'$, which is adjacent to $x'$ but not $x$. However, the vertices $y,y',x,x'$, and the two vertices non-adjacent to $w$ in $F$ then induce $H$ in $G-v$, which is another contradiction.
Finally, if $G$ contains an induced subgraph $F$ isomorphic to $\overline{H}$ or $\overline{S_3}$, then by Theorem~\ref{thm: complementation} we can apply the arguments of the last two paragraphs to $\overline{G}$, which must contain $H$ or $S_3$, to arrive at a similar contradiction. From these contradictions in every case we conclude that $G$ is $\mathcal{F}$-free, and in fact all weakly threshold graphs are as well.
We now prove that all $\mathcal{F}$-free graphs are weakly threshold graphs. We do this by induction on the number $n$ of vertices in an arbitrary $\mathcal{F}$-free graph $G$. Observe that all $\mathcal{F}$-free graphs on at most four vertices are threshold graphs or are isomorphic to $P_4$; any such graph is a weakly threshold graph.
Suppose now that $n \geq 5$, and assume that every $\mathcal{F}$-free graph with fewer than $n$ vertices is a weakly threshold graph. Note that if $G$ contains a dominating or isolated vertex $v$, then by the induction hypothesis the graph $G-v$ is a weakly threshold graph. As such it can be constructed by a sequence of operations as described in Theorem~\ref{thm: char via Construction Algorithm}; if we append to this sequence the addition of $v$ to the graph (a Type 1 operation), then Theorem~\ref{thm: char via Construction Algorithm} implies that $G$ is a weakly threshold graph as well. A similar conclusion holds if $G$ contains a semi-joined $P_4$. Suppose now that $G$ has no dominating or isolated vertex, and no semi-joined $P_4$.
Observe that since $G$ is $\{2K_2,C_4,C_5\}$-free, $G$ is split (see~\cite{FoldesHammer76}). Fix a partition $A,B$ of $V(G)$ into an independent set and a clique, respectively.
Let $v$ be a vertex of maximum degree in $G$; since $G$ is split, we may assume that $v$ is an element of $B$. We claim that the degree of $v$ is $n-2$. If this is not the case, then $v$ is non-adjacent to at least two vertices $w_1$ and $w_2$, which must belong to $A$. Since $G$ has no isolated vertices, the vertices $w_1$ and $w_2$ each have a neighbor in $B$. If they have a common neighbor $x$, then since $v$ had maximum degree, $v$ must have two neighbors that $x$ is not adjacent to. These two neighbors then must belong to $A$, and together with $v,w_1,w_2,x$ induce a subgraph isomorphic to $H$, a contradiction. If instead $w_1$ and $w_2$ have no common neighbor, then $w_1$ is adjacent to $x_1$, and $w_2$ is adjacent to $x_2$ for some $x_1,x_2 \in B$. Since $v$ has the maximum degree in $G$, this implies that $x_1$ and $x_2$ each have a non-neighbor among the neighbors of $v$. If $x_1$ and $x_2$ have such a non-neighbor in common, then this vertex and vertices $v,w_1,w_2,x_1,x_2$ induce a copy of $S_3$ in $G$, a contradiction. Thus $v$ has a neighbor $y_1$ adjacent to $x_1$ but not $x_2$, and a neighbor $y_2$ adjacent to $x_2$ but not $x_1$. However, then the vertices $w_1,w_2,x_1,x_2,y_1,y_2$ together induce a copy of $H$, again a contradiction. In light of all these contradictions, we conclude that $v$ cannot have two non-neighbors in $G$; hence, $v$ has degree $n-2$.
Let $z$ be a vertex of minimum degree in $G$. Note that $z$ is a vertex of maximum degree in the complement $\overline{G}$, and $\overline{G}$ is also split and $\mathcal{F}$-free since $\mathcal{F}$ is closed under taking complements. Furthermore, $\overline{G}$ also cannot contain any dominating or isolated vertices. Thus the arguments above show that $z$ has degree $n-2$ in $\overline{G}$ and hence $z$ has degree $1$ in $G$.
If $G$ has two vertices $v,v'$ of degree $n-2$ and two vertices $z,z'$ of degree 1, then we can verify that either $G$ is isomorphic to $P_4$ or the subgraph induced by $v,v',z,z'$ is a semi-joined $P_4$ in $G$. Since both these possibilities have already been handled previously, we assume now that $G$ has either a unique vertex of degree $n-2$ or a unique vertex of degree $1$.
Suppose that $G$ has a unique vertex $v$ of maximum degree $n-2$. If $v$ is adjacent to some vertex $z$ of degree 1, then $v$ has maximum degree in $G-z$, so we may obtain $G$ from the graph $G-z$ by adding a weakly isolated vertex (namely, attaching vertex $z$ to $v$). If $v$ is not adjacent to any vertex of degree 1, then the unique non-neighbor of $v$ has degree 1, the minimum degree of $G-v$ is still 1, and we may obtain $G$ from $G-v$ by adding a weakly dominating vertex (namely, $v$ itself).
We may apply similar arguments to the $\mathcal{F}$-free graph $\overline{G}$; we conclude that since $G$ has either a unique vertex of degree $n-2$ or a unique vertex of degree 1, $G$ may be obtained from $G-w$, where $w$ is some vertex of $G$, via a Type 2 operation. Since $G-w$ is $\mathcal{F}$-free, by the induction hypothesis $G-w$ is a weakly threshold graph and thus Theorem~\ref{thm: char via Construction Algorithm} applies; if we append the Type 2 operation that replaces $w$ to the sequence of permissible operations that constructs $G-w$, we see that $G$ can also be constructed from $K_1$ or $P_4$ by a sequence of permissible operations, implying that $G$ is weakly threshold. \end{proof}
Interestingly, the list $\mathcal{F}$ of forbidden subgraphs is strikingly similar to that of another hereditary family. Let $\mathcal{H}$ denote the class of graphs that are both interval graphs and complements of interval graphs. As noted in~\cite{ISGCI}, this class is equivalent to the class of split permutation graphs and is precisely the class of $\{2K_2,C_4,C_5,S_3,\overline{S_3},\text{rising sun},\text{co-rising sun}\}$-free graphs, where the rising sun and co-rising sun graphs are shown in Figure~\ref{fig: rising sun}. \begin{figure}
\caption{The rising sun graph and its complement.}
\label{fig: rising sun}
\end{figure} Note that $H$ and $\overline{H}$ can be obtained by deleting a single vertex from the rising sun graph and from its complement, respectively, so every $\{H,\overline{H}\}$-free graph is also free of the rising sun and the co-rising sun. Hence the weakly threshold graphs form a notable subclass of $\mathcal{H}$.
\section{Enumeration} \label{sec: enum}
In this section we count both the weakly threshold sequences and the weakly threshold graphs of order $n$. Our approach, which is essentially the same technique used by Tyshkevich in~\cite{Tyshkevich84} in enumerating matrogenic and matroidal graphs, will use the canonical decomposition as a convenient framework. For both sequences and graphs, we begin by finding recurrences that are satisfied respectively by the number of $n$-term sequences and $n$-vertex graphs in question. Using the generating functions of these sequences, together with the structure imposed in both contexts by the canonical decomposition, we obtain generating functions for the number of weakly threshold sequences and graphs, which we denote by $S(x)$ and by $W(x)$, respectively.
For $n \geq 4$, let $g_n$ be the number of indecomposable weakly threshold sequences with $n$ terms, and let $h_n$ be the number of indecomposable weakly threshold graphs with $n$ vertices. If \[G(x) = 2x + \sum_{k=4}^\infty g_kx^k \qquad \text{ and } \qquad H(x) = 2x + \sum_{k=4}^\infty h_k x^k,\] then $G(x)$ and $H(x)$ are the generating functions for the number of splitted indecomposable weakly threshold sequences and graphs, respectively, indexed by the number of terms or vertices. Note that in both equations the coefficient of $2$ in front of $x$ reflects that in a canonical component having a single vertex, this vertex may belong to either the clique or the independent set in the splitted graph.
We now discuss weakly threshold graphs, though analogous arguments apply to weakly threshold sequences. The sequence counting the $n$-vertex weakly threshold graphs having exactly $k$ canonically indecomposable components has generating function $W_k(x)$ given by \[W_k(x) = H(x)^{k-1}(H(x)-x),\] since, when the distributive law is applied to this product of sums, the coefficient of the resulting term of degree $n$ counts the ways to choose the $k$ indecomposable components of the canonical decomposition. Note here that the last factor in the expression above is $H(x)-x$, since if the rightmost component in the canonical decomposition is isomorphic to $K_1$, the isomorphism class is the same whether the vertex is in the clique or the independent set of the splitted graph.
Summing the functions $W_k(x)$, we arrive at the generating function $W(x)$ for the number of weakly threshold graphs: \begin{equation}\label{eq: W(x)} W(x) = \sum_{k=1}^\infty W_k(x) = \sum_{k=1}^\infty H(x)^{k-1}(H(x)-x) = \frac{H(x)-x}{1-H(x)}.\end{equation} Similarly, \begin{equation}\label{eq: S(x)} S(x) = \frac{G(x)-x}{1-G(x)}.\end{equation}
(Here and elsewhere we assume that $x$ belongs to some suitable interval of convergence.) In Sections~\ref{subsec: sequences} and~\ref{subsec: graphs} we will derive expressions for $G(x)$ and $H(x)$, respectively. Then, in Section~\ref{subsec: conclusion}, we will use~\eqref{eq: W(x)} and~\eqref{eq: S(x)} to obtain the generating functions $S(x)$ and $W(x)$ and comment on the numbers of weakly threshold sequences and graphs.
\subsection{Indecomposable weakly threshold sequences} \label{subsec: sequences}
In order to determine $G(x)$ we derive a recurrence for the sequence $(g_n)$. By direct observation we see that $g_4=1$, since $(2,2,1,1)$ is the unique indecomposable weakly threshold sequence with four terms. It follows from Theorem~\ref{thm: char via Construction Algorithm} that for any $n \geq 5$ we may obtain any $n$-term indecomposable weakly threshold sequence by choosing an $(n-1)$-term indecomposable weakly threshold sequence and either (i) appending a 1 to the end of the sequence and increasing the first term of the sequence by 1, or (ii) increasing the first $n-2$ terms of the sequence by 1 and then inserting another copy of the value $n-2$ at the beginning of the sequence. Thus $g_n = 2g_{n-1}$ and hence $g_n = 2^{n-4}$ for $n \geq 4$. We see that
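For illustration, applying (i) and (ii) to the unique four-term sequence $(2,2,1,1)$ produces
\[ (3,2,1,1,1) \qquad \text{and} \qquad (3,3,3,2,1), \]
respectively; one checks that these are the degree sequences of the chair and the kite graph, in agreement with $g_5 = 2$.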
\begin{equation} \label{eq: G(x)} G(x) = 2x + \sum_{k=4}^\infty 2^{k-4}x^k =2x + \frac{x^4}{1-2x} = \frac{2x - 4x^2+x^4}{1-2x}. \end{equation}
\subsection{Indecomposable weakly threshold graphs} \label{subsec: graphs}
We now count the isomorphism classes of indecomposable weakly threshold graphs on $n$ vertices. The arguments here will be more intricate than in Section~\ref{subsec: sequences}, since more than one weakly threshold graph can have the same degree sequence. In order to obtain a recurrence for $h_n$, we will first need stronger results on the iterative construction of indecomposable weakly threshold graphs, which we present in Lemmas~\ref{lem: constructing indecomp WT} and~\ref{lem: module cases}. As in the last section, we then use the recurrence to derive a closed form expression for the generating function $H(x)$.
As a preliminary step, we recall a characterization of indecomposable graphs due to the author and West.
\begin{lem}[{{\cite[Theorem 3.2]{BarrusWest12}}}] \label{lem: A4struc} A graph $G$ is canonically indecomposable if and only if for every pair $u, v$ of vertices there is a sequence $A_1,\dots,A_k$ of 4-element vertex subsets of $G$ such that $u$ and $v$ belong to $A_1$ and $A_k$, respectively, consecutive subsets in the sequence have nonempty intersection, and each $A_i$ is the vertex set of an induced subgraph isomorphic to $2K_2$, $C_4$, or $P_4$. \end{lem}
\begin{lem}\label{lem: constructing indecomp WT} A graph $G$ is a canonically indecomposable weakly threshold graph if and only if $G$ is isomorphic to $K_1$ or can be obtained by beginning with a graph isomorphic to $P_4$ and iteratively performing a sequence of Type 2 operations. \end{lem} \begin{proof} Suppose that $G$ is an indecomposable weakly threshold graph. By Theorem~\ref{thm: char via Construction Algorithm} and Lemma~\ref{lem: weaklies placement} we may assume that there exists a sequence $O_1,\dots,O_p$ of operations, each of which is Type 1, Type 2, or Type 3, that constructs $G$ from $K_1$ or $P_4$, in which a Type 3 operation occurs between any Type 1 operation and any Type 2 operation that follows it in the sequence. It is straightforward to see by induction on $p$ that if $G$ is constructed from $K_1$ and $p \geq 1$, or if $G$ is constructed from $P_4$ and any of the $O_i$ is a Type 1 or Type 3 operation, then $G$ is canonically decomposable, a contradiction. Thus either $G$ is isomorphic to $K_1$, or $O_1,\dots,O_p$ is a sequence of Type 2 operations that constructs $G$ from $P_4$.
Conversely, noting that $K_1$ is an indecomposable weakly threshold graph, suppose that $G$ is constructed from $P_4$ via a sequence $O_1,\dots,O_p$ of Type 2 operations. By Theorem~\ref{thm: char via Construction Algorithm} we see that $G$ is a weakly threshold graph. We now prove by induction on $p$ that $G$ is indecomposable. This is true if $p=0$, since $G$ is then isomorphic to $P_4$. Suppose that all graphs constructed from $P_4$ via $k$ Type 2 operations are indecomposable, where $k$ is some nonnegative integer, and suppose that $p=k+1$. Now consider the vertex $u$ added during the operation $O_{p}$. If $u$ is a weakly dominating vertex, then there is a vertex $v$ of degree 1 not adjacent to $u$. Let $w$ be the neighbor of $v$. Since $u$ is weakly dominating, $u$ is adjacent to $w$, and since $u$ has a degree at least as large as $w$, while $w$ has a neighbor that $u$ does not, $u$ must have a neighbor $x$ that $w$ does not. Recalling Theorem~\ref{thm: forb subgr}, we conclude that the subgraph of $G$ induced on $\{u,v,w,x\}$ is isomorphic to $P_4$. By assumption the graph $G-u$ is canonically indecomposable; it follows from Lemma~\ref{lem: A4struc} that $G$ is indecomposable as well. A similar argument holds if $u$ is a weakly isolated vertex, so by induction we conclude that $G$ must be an indecomposable weakly threshold graph. \end{proof}
In preparation for Lemma~\ref{lem: module cases}, we now present structural results on modules in canonically indecomposable split graphs. A \emph{module} in a graph $G$ is a set $M$ of vertices of $G$ such that for each $v \in V(G)-M$, the vertex $v$ is adjacent to either all or none of the vertices of $M$. The module $M$ is \emph{proper} if $M \neq V(G)$.
A well known theorem of Gallai~\cite{Gallai67} states that if a graph is neither disconnected nor the complement of a disconnected graph, then its maximal proper modules are disjoint; this result is the foundation for what is known as the \emph{modular decomposition} of a graph. The modular and canonical decompositions of a graph are usually distinct, though one connection is easy to verify: if $G$ has canonical decomposition $(G_k,A_k,B_k)\circ \dots \circ (G_1,A_1,B_1) \circ G_0$, then each set of the form $\bigcup_{i=0}^j V(G_i)$ is a module.
Despite this connection and our interest in modules in the results of this section, in what follows we will not refer to the modular decomposition of $G$, other than a quick application of Gallai's result during the proof of Lemma~\ref{lem: modules and twin sets}. As in previous sections, the terms `decomposable' and `indecomposable' will refer solely to the canonical decomposition.
\begin{lem}\label{lem: max modules} If $G$ is a canonically indecomposable split graph, then every maximal proper module $M$ of $G$ lies within the maximum clique or within the maximum independent set of $G$. \end{lem} \begin{proof} Let $G$ be a canonically indecomposable split graph with maximum clique $Q$ and maximum independent set $I$, and suppose to the contrary that there exists a maximal proper module $M$ such that $M$ includes a vertex $x$ from $Q$ and a vertex $y$ from $I$. Note that every vertex from $Q - M$ is adjacent to $x$ and hence must be adjacent to $y$, and every vertex from $I-M$ is non-adjacent to $y$ and hence must be non-adjacent to $x$. However, then we may write $(G,I,Q) = (G-M, I-M,Q-M) \circ (G[M], I \cap M, Q \cap M)$, which is a contradiction, since $M$ is a nonempty proper module and $G$ is indecomposable. \end{proof}
Two or more pairwise nonadjacent vertices in a graph are \emph{twins} if they have the same neighbors. Two or more pairwise adjacent vertices in a graph are \emph{clones} if they have the same closed neighborhoods.
\begin{lem}\label{lem: modules and twin sets} If $G$ is a canonically indecomposable split graph, then its maximal proper modules are disjoint, and two vertices belong to the same maximal proper module if and only if they are twins or clones. \end{lem} \begin{proof} Suppose that $G$ is canonically indecomposable and split. Since $G$ is split, it and its complement are both $2K_2$-free (see~\cite{FoldesHammer76}). This implies that if $G$ is disconnected, then all but one of the components are isolated vertices, which contradicts the indecomposability of $G$. A similar contradiction arises if $G$ is the complement of a disconnected graph, so both $G$ and its complement are connected. The modular decomposition theorem of Gallai mentioned previously then implies that the maximal proper modules of $G$ are disjoint.
It is clear that if two vertices are twins or clones, then they belong to the same maximal proper module. By Lemma~\ref{lem: max modules}, these modules lie within the maximum clique $Q$ or within the maximum independent set $I$ of $G$. If two vertices belong to the same maximal proper module $M$, then they have the same open or closed neighborhood, depending on whether $M$ is a subset of $I$ or of $Q$, respectively, and hence the vertices are twins or clones. \end{proof}
We will use Lemmas~\ref{lem: max min degrees} and~\ref{lem: modules and twin sets} multiple times without mention in proving the next lemma.
\begin{lem} \label{lem: module cases} If $G$ is an indecomposable weakly threshold graph with five or more vertices, then exactly one of the following is true of $G$: \begin{enumerate} \item[\textup{(i)}] there is a unique vertex $u$ of maximum degree, and the vertices of minimum degree comprise a maximal proper module with at least two vertices, each of which is adjacent to $u$; \item[\textup{(ii)}] there is a unique vertex $u$ of maximum degree, and the vertices of minimum degree belong to exactly two distinct maximal proper modules, one of which has size one and contains the unique vertex $v$ not adjacent to $u$; \item[\textup{(iii)}] there is a unique vertex $v$ of minimum degree, and the vertices of maximum degree comprise a maximal proper module with at least two vertices, each of which is non-adjacent to $v$; \item[\textup{(iv)}] there is a unique vertex $v$ of minimum degree, and the vertices of maximum degree belong to exactly two distinct maximal proper modules, one of which has size one and contains the unique vertex $u$ adjacent to $v$. \end{enumerate} \end{lem} \begin{proof} Let $G$ be an indecomposable weakly threshold graph with five or more vertices. By Lemma~\ref{lem: constructing indecomp WT}, $G$ may be constructed from an induced subgraph isomorphic to $P_4$ by iteratively applying a sequence $O_1,\dots,O_p$ of Type 2 operations. We proceed by induction on $p$. Observe that $p\geq 1$, since $G$ has at least five vertices; if $p=1$, then $G$ is isomorphic to the chair or kite graph, which respectively satisfy cases (ii) and (iv) of the claim.
Assume now that the claim holds for all indecomposable weakly threshold graphs constructed from $P_4$ via $k$ operations of Type 2, where $k$ is some natural number, and suppose that $p=k+1$. Let $w$ be the vertex added during the operation $O_p$. Note that graph complementation preserves modules and the properties of being indecomposable, split, and a weakly threshold graph. Furthermore, under graph complementation weakly dominating vertices become weakly isolated vertices, and vice versa. Thus we may replace $G$ by its complement if desired and assume that the vertex $w$ is a weakly isolated vertex. Let $G' = G-w$. By the induction hypothesis, $G'$ is described by one of the statements (i)--(iv).
We consider each of those cases in turn. If (i) holds for $G'$, then $G$ has a unique vertex of maximum degree, and $w$ is a twin of vertices having minimum degree in $G'$, creating a larger such module in $G$; hence (i) holds for $G$.
If (ii) holds for $G'$, then $G$ again has a unique vertex $u$ of maximum degree, and $w$ is a twin of the vertices of minimum degree adjacent to $u$, preserving the module of size one containing the vertex not adjacent to $u$; hence (ii) holds for $G$.
If (iii) holds for $G'$, then the addition of $w$ to $G'$ creates exactly two distinct maximal proper modules in $G$, each with just one vertex, that consist of vertices of minimum degree. Furthermore, the neighbor of $w$ is the unique vertex of maximum degree in $G$; hence (ii) holds for $G$.
Finally, if (iv) holds for $G'$, then let $v$ be the vertex of minimum degree in $G'$. In $G$, either $v$ and $w$ are twins, in which case (i) holds for $G$, or $v$ and $w$ have different neighbors, in which case (ii) holds for $G$. \end{proof}
Recall that for $n \geq 4$, $h_n$ denotes the number of indecomposable weakly threshold graphs on $n$ vertices. Lemma~\ref{lem: module cases} now allows us to derive a recurrence relation for the terms $h_n$.
\begin{thm}\label{thm: b recurrence} For all $n \geq 7$, we have $h_n = 3h_{n-1} - h_{n-2}$. \end{thm} \begin{proof} For each $n \geq 5$, let $b_n$ denote the number of indecomposable weakly threshold graphs $G$ with $n$ vertices in which the vertices of maximum degree comprise a single module of $G$, and the vertices of minimum degree likewise comprise a single module in $G$. Observe that $b_n$ counts the number of graphs with $n$ vertices that are described in Lemma~\ref{lem: module cases} in cases (i) and (iii). Further define $a_n = h_n - b_n$ for each integer $n \geq 5$.
It follows from Lemma \ref{lem: module cases} that the indecomposable weakly threshold graphs on $n \geq 5$ vertices can each be obtained by adding a weakly dominating vertex or a weakly isolated vertex to an indecomposable weakly threshold graph on $n-1$ vertices. Furthermore, since adding a weakly isolated vertex to a weakly threshold graph creates a graph with at least two vertices of degree 1 and a single vertex of maximum degree, and adding a weakly dominating vertex to a weakly threshold graph creates a graph with at least two vertices of degree $n-2$ and a unique vertex of degree 1, it will never be the case that we can obtain the same indecomposable weakly threshold graph on $n$ vertices by adding a weakly isolated vertex to one weakly threshold graph and adding a weakly dominating vertex to another.
Furthermore, we can determine the number of distinct isomorphism classes that are represented by graphs produced by adding a weakly isolated or weakly dominating vertex to a given indecomposable weakly threshold graph $H$, as we now describe. Our cases come from Lemma~\ref{lem: module cases}.
If $H$ is described by cases (i) or (iii), then up to isomorphism there is one way in which a weakly isolated vertex can be added to $H$, and exactly one way in which a weakly dominating vertex can be added.
If $H$ is described by case (ii), then there is one way to add a weakly isolated vertex, and up to isomorphism there are two ways to add a weakly dominating vertex---we may make the new vertex the clone of an already-existing vertex of maximum degree, or we may make the new weakly dominating vertex non-adjacent to a vertex of minimum degree adjacent to the vertex of maximum degree in $H$. Note that these two ways produce graphs in cases (iii) and (iv), respectively, which hence cannot be isomorphic. For similar reasons, if $H$ is described by case (iv), then there is one way to add a weakly dominating vertex and two ways (up to isomorphism) to add a weakly isolated vertex.
Thus $h_{n} = 2b_{n-1} + 3a_{n-1}$ for all $n \geq 6$. We now show that $b_n= h_{n-1}$ for all $n \geq 6$. Indeed, note that $b_{n}$ counts the number of $n$-vertex graphs described in cases (i) and (iii). There is a bijection between $n$-vertex graphs satisfying (i) and graphs on $n-1$ vertices satisfying (i) or (iv), given by identifying in the smaller graph a vertex of minimum degree whose neighbor belongs to a maximal proper module of size 1 and then creating a twin for the vertex of minimum degree. There is a similar bijection between $n$-vertex graphs satisfying (iii) and graphs on $n-1$ vertices satisfying (ii) or (iii), completing our proof that $b_n= h_{n-1}$ for all $n \geq 6$.
For $n \geq 7$ we can now conclude that \[h_n = 2b_{n-1} + 3a_{n-1} = 2h_{n-2} + 3(h_{n-1}-h_{n-2}) = 3h_{n-1} - h_{n-2}. \qedhere\] \end{proof}
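As an illustration of these relations, the two indecomposable weakly threshold graphs on five vertices, the chair and the kite, satisfy cases (ii) and (iv) of Lemma~\ref{lem: module cases}, respectively, so $b_5 = 0$ and $a_5 = 2$; the relation $h_6 = 2b_5 + 3a_5$ then gives $h_6 = 6$. Note that $b_5 = 0 \neq h_4$, which is why the recurrence in the theorem begins only at $n = 7$.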
Using standard techniques, from the recurrence for $h_n$ and the observed values $h_4=1$, $h_5=2$, and $h_6=6$, we can now derive a closed form expression for $H(x)$, obtaining \[H(x) = 2x + \frac{x^4-x^5+x^6}{1-3x+x^2} = \frac{2x-6x^2+2x^3+x^4-x^5+x^6}{1-3x+x^2}.\]
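As a consistency check, the recurrence and the initial values give
\[ H(x) = 2x + x^4 + 2x^5 + 6x^6 + 16x^7 + 42x^8 + \cdots, \]
since $h_7 = 3 \cdot 6 - 2 = 16$ and $h_8 = 3 \cdot 16 - 6 = 42$; these coefficients agree with the expansion of the rational function above.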
\subsection{General weakly threshold sequences and graphs} \label{subsec: conclusion}
Having obtained expressions for $G(x)$ and $H(x)$, we can now substitute them into equations~\eqref{eq: S(x)} and~\eqref{eq: W(x)} to obtain $S(x)$ and $W(x)$.
\begin{thm} \label{thm: gen fcns} The generating function for the weakly threshold sequences, indexed by the number of terms, is \[S(x) = \frac{x-x^2-x^3}{1-3x+x^2+x^3} = -1 + \frac{1}{2(1-x)}+\frac{1-x}{2(1-2x-x^2)}.\] The generating function for the weakly threshold graphs, indexed by the number of vertices, is \[W(x) = \frac{x-2x^2-x^3-x^5}{1-4x+3x^2+x^3+x^5} = -1 + \frac{2}{3(1-x-x^2)} + \frac{1-2x}{3(1-3x+x^2-x^3)}.\] \end{thm}
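A short computation expands these rational functions as
\[ S(x) = x + 2x^2 + 4x^3 + 9x^4 + 21x^5 + 50x^6 + \cdots \qquad \text{and} \qquad W(x) = x + 2x^2 + 4x^3 + 9x^4 + 21x^5 + 52x^6 + \cdots, \]
so the numbers of weakly threshold sequences and weakly threshold graphs first differ at $n = 6$.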
Using standard techniques, we obtain formulae for the numbers $s_n$ and $w_n$ of weakly threshold sequences with $n$ terms and weakly threshold graphs on $n$ vertices.
\begin{thm} \label{thm: closed forms} For integers $n \geq 1$, \[s_n = \frac{2+\left(1+\sqrt{2}\right)^n + \left(1-\sqrt{2}\right)^n}{4},\] and \begin{align*} w_n = &c_1\left(\frac{1+\sqrt{5}}{2}\right)^n + c_2\left(\frac{1-\sqrt{5}}{2}\right)^n\\ &+ c_3\left(\frac{6-(1+i \sqrt{3}) (27-3 \sqrt{57})^{1/3}-(1-i \sqrt{3}) (27+3\sqrt{57})^{1/3}}{6}\right)^n\\ &+ c_4\left(\frac{6-(1-i \sqrt{3}) (27-3 \sqrt{57})^{1/3}-(1+i \sqrt{3}) (27+3\sqrt{57})^{1/3}}{6}\right)^n\\ &+ c_5\left( \frac{3+(27-3 \sqrt{57})^{1/3}+(27+3\sqrt{57})^{1/3}}{3}\right)^n, \end{align*}
where
\begin{align*} c_1 &= \frac{\sqrt{5}+1}{3\sqrt{5}}, \qquad c_2 = \frac{\sqrt{5}-1}{3\sqrt{5}},\\ c_3 &= \frac{1}{9}\left(1 - \frac{(1+i \sqrt{3}) (3 \sqrt{57}-19)^{1/3}}{4\cdot 19^{2/3}}+\frac{1-i \sqrt{3}}{2\cdot (19 (3 \sqrt{57}-19))^{1/3}}\right),\\ c_4 &= \frac{1}{9}\left(1 - \frac{(1-i \sqrt{3}) (3 \sqrt{57}-19)^{1/3}}{4\cdot 19^{2/3}}+\frac{1+i \sqrt{3}}{2\cdot (19 (3 \sqrt{57}-19))^{1/3}}\right), \text{ and}\\ c_5 &= \frac{1}{9}\left(1+\frac{(3 \sqrt{57}-19)^{1/3}}{2 \cdot 19^{2/3}}- \frac{1}{(19 (3 \sqrt{57}-19))^{1/3}}\right). \end{align*} \end{thm}
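As a quick check of the first formula, $(1+\sqrt{2})^4 = 17 + 12\sqrt{2}$, so $s_4 = \frac{2 + 34}{4} = 9$, matching the coefficient of $x^4$ in the expansion of $S(x)$ above.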
We close this section with a few remarks. First, the sequence $(s_n)$ has previously appeared in the Online Encyclopedia of Integer Sequences, matching sequences A171842 and A024537~\cite{OEIS}. The sequence is reported to count several other sets of combinatorial objects; it may be interesting to find correspondences between weakly threshold sequences and these objects.
Next, we recall that the number of threshold graphs on $n$ vertices is precisely $2^{n-1}$, and that threshold graphs are the unique realizations of their degree sequences. In comparison, Theorem \ref{thm: closed forms} indicates that \begin{equation} s_n \;\sim\; \frac{1}{4}(1+\sqrt{2})^n \qquad \text{ and } \qquad w_n \;\sim\; c_5\left( \frac{3+(27-3 \sqrt{57})^{1/3}+(27+3\sqrt{57})^{1/3}}{3}\right)^n. \label{eq: asymptotics} \end{equation} That there are more weakly threshold graphs of a given order than weakly threshold sequences is not surprising, since adding a weakly isolated vertex (or weakly dominating vertex) to a weakly threshold graph can often involve a choice of the neighborhood of the added vertex, though the resulting degree sequence is the same no matter which choice is made. Approximating in \eqref{eq: asymptotics}, we see that for large $n$, $s_n \geq \frac{1}{4} \cdot 2.4^n$ and $w_n \geq c_5 \cdot 2.7^n$.
\end{document} |
\begin{document}
\title{Higher localization and higher branching laws}
\begin{abstract}
For a connected reductive group $G$ and an affine smooth $G$-variety $X$ over the complex numbers, the localization functor takes $\mathfrak{g}$-modules to $D_X$-modules. We extend this construction to an equivariant and derived setting using the formalism of h-complexes due to Beilinson--Ginzburg, and show that the localizations of Harish-Chandra $(\mathfrak{g}, K)$-modules onto $X = H \backslash G$ have regular holonomic cohomologies when $H, K \subset G$ are both spherical reductive subgroups. The relative Lie algebra homologies and $\operatorname{Ext}$-branching spaces for $(\mathfrak{g}, K)$-modules are interpreted geometrically in terms of equivariant derived localizations. As direct consequences, we show that they are finite-dimensional under the same assumptions, and relate Euler--Poincaré characteristics to a local index theorem; this recovers parts of the recent results of M.\ Kitagawa. Examples and discussions on the relation to Schwartz homologies are also included. \end{abstract}
\tableofcontents
\section{Introduction}\label{sec:intro} \subsection{Backgrounds} We work over the complex numbers. Let $G$ be a connected reductive group with Lie algebra $\mathfrak{g}$. The main theme of this work begins with the functor \[ \mathscr{D}_X \dotimes{U(\mathfrak{g})} (\cdot): \mathfrak{g}\dcate{Mod} \to \mathscr{D}_X\dcate{Mod} \] where $X$ is a smooth variety with right $G$-action (we say that $X$ is a $G$-variety), with sheaf of algebraic differential operators $\mathscr{D}_X$, and $\mathfrak{g}\dcate{Mod}$ (resp.\ $\mathscr{D}_X\dcate{Mod}$) is the abelian category of $\mathfrak{g}$-modules (resp.\ left $\mathscr{D}_X$-modules that are $\mathscr{O}_X$-quasi-coherent). The tensor product is taken using the homomorphism $j: U(\mathfrak{g}) \to D_X := \Gamma(X, \mathscr{D}_X)$ induced by $G$-action.
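Concretely, and up to a sign convention that we do not fix in this introduction, $j$ is determined by sending $\xi \in \mathfrak{g}$ to the vector field
\[ (j(\xi) f)(x) = \frac{\mathop{}\!\mathrm{d}}{\mathop{}\!\mathrm{d} t}\bigg|_{t=0} f\bigl(x \cdot \exp(t\xi)\bigr), \qquad f \in \mathscr{O}_X, \]
which, for a right action, is a homomorphism (rather than an anti-homomorphism) of Lie algebras.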
This functor is called \emph{localization}. Its significance can be partly explained by the fact that when $X$ is a homogeneous $G$-space, the fiber of $\mathscr{D}_X \dotimes{U(\mathfrak{g})} V$ at $x \in X$ is isomorphic to $V/\mathfrak{h} V$, where $H := \ensuremath{\mathrm{Stab}}_G(x)$ and $\mathfrak{h}$ denotes its Lie algebra; therefore, localization organizes \emph{various} spaces of co-invariants into a geometric object over $X$.
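A sketch of the standard computation behind this fact: for homogeneous $X$ one has an identification $\ensuremath{\mathbb{C}}_x \otimes_{\mathscr{O}_X} \mathscr{D}_X \simeq \ensuremath{\mathbb{C}} \otimes_{U(\mathfrak{h})} U(\mathfrak{g})$ of right $U(\mathfrak{g})$-modules, where $\ensuremath{\mathbb{C}}_x$ is the residue field at $x$, and hence
\[ \ensuremath{\mathbb{C}}_x \otimes_{\mathscr{O}_X} \bigl( \mathscr{D}_X \dotimes{U(\mathfrak{g})} V \bigr) \simeq \bigl( \ensuremath{\mathbb{C}} \otimes_{U(\mathfrak{h})} U(\mathfrak{g}) \bigr) \otimes_{U(\mathfrak{g})} V \simeq \ensuremath{\mathbb{C}} \otimes_{U(\mathfrak{h})} V = V/\mathfrak{h}V. \]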
The celebrated Beilinson--Bernstein localization is the special case when $X$ is the flag variety of $G$, but allowing some twists on $\mathscr{D}_X$. It has tremendous consequences in the study of $\mathfrak{g}\dcate{Mod}$ and related structures. In this work we try to use localization in another way. Namely, we start with a homogeneous $G$-space $X$ and study aspects of representation theory of $G$ or its real form that are ``relative'' to $X$, by means of the localization functors.
Localization has been utilized in a similar flavor in the ``group case'' $G = H \times H$ and $X = H$ by D.\ Ben-Zvi and I.\ Ganev \cite{BZG19}, who applied it to study the asymptotics of matrix coefficients of admissible representations. They also related this construction to Beilinson--Bernstein localization by specialization at infinity. We remark that V.\ Ginzburg's work \cite{Gin89} played an important role there; specifically, it ensures that the localized $D_H$-modules are regular holonomic and puts some control on their characteristic varieties --- in fact, their irreducible constituents are character $D_H$-modules.
A similar result of regularity is contained in \cite{Li22} for localizations of Harish-Chandra $(\mathfrak{g}, K)$-modules to $X := H \backslash G$, where $H, K \subset G$ are reductive spherical subgroups. By a \emph{Harish-Chandra $(\mathfrak{g}, K)$-module}, we mean a $(\mathfrak{g}, K)$-module that is finitely generated over $\mathfrak{g}$ and locally $\mathcal{Z}(\mathfrak{g})$-finite, where $\mathcal{Z}(\mathfrak{g})$ is the center of $U(\mathfrak{g})$. By sphericity of $H$ (ditto for $K$), we mean that the homogeneous $G$-space $H \backslash G$ has an open Borel orbit. The proof is based on a variant of Ginzburg's criterion of regularity in \cite{Gin89}. Note that reductivity implies $X$ is affine, so we may work with $D_X\dcate{Mod}$ instead of $\mathscr{D}_X\dcate{Mod}$.
Nonetheless, the usage of localization is made complicated by the following two facts, at least. \begin{itemize}
\item Given an equivariant morphism $f: Y_1 \to Y_2$ between smooth $G$-varieties, there is no obvious way to relate the corresponding localizations via inverse or direct images;
\item Localization is a right exact functor, but it is non-exact in many cases of interest. This will be shown in Remark \ref{rem:non-exactness}. \end{itemize}
\subsection{Higher localization} In the first part of this work, we generalize these ideas to the equivariant and derived setting, i.e.\ to higher localizations.
First off, take a reductive subgroup $K \subset G$ and let $(\mathfrak{g}, K)\dcate{Mod}$ be the abelian category of $(\mathfrak{g}, K)$-modules. In representation theory, one is especially interested in its subcategory of Harish-Chandra $(\mathfrak{g}, K)$-modules.
Take an affine homogeneous space $X = H \backslash G$ and let $(D_X, K)\dcate{Mod}$ be the abelian category of $K$-equivariant $D_X$-modules. Since $j: U(\mathfrak{g}) \to D_X$ is equivariant, it is easily seen that the localization lifts to the equivariant level: \[ D_X \dotimes{U(\mathfrak{g})} (\cdot): (\mathfrak{g}, K)\dcate{Mod} \to (D_X, K)\dcate{Mod}. \] We wish to left-derive this right exact functor in order to obtain ``higher localizations'', and apply them to Harish-Chandra modules. Two issues arise immediately. \begin{enumerate}
\item Homological algebra for $(\mathfrak{g}, K)$-modules is done in the classical way in \cite{KV95}. On the other hand, the correct $K$-equivariant derived category of $D_X$-modules requires less naive techniques when $K \neq \{1\}$, such as that of Bernstein--Lunts \cite{BL94} which replaces $X$ by various resolutions $P \to X$, on which $K$ acts freely (see also \cite{Ac21}).
This kind of ``type mismatch'' makes it non-trivial to derive equivariant localization. Nor is there any obvious way to define it as a compatible family on resolutions.
\item Another option is to consider $(\mathfrak{g}, K)\dcate{Mod} \to D_X\dcate{Mod}$ instead, in order to avoid equivariant derived categories. This discards too much structure and thus impedes applications to higher branching laws. Moreover, since nonzero projectives in $(\mathfrak{g}, K)\dcate{Mod}$ are never Harish-Chandra, it seems difficult to prove regularity or holonomicity in cohomologies, unless one addresses the first issue simultaneously.
For similar reasons, it seems unfeasible to define the derived localization through realization functor \cite[Theorem A.7.16]{Ac21}. \end{enumerate}
Our approach to these problems is based on the formalism of \emph{h-complexes}, reviewed in \S\ref{sec:dg}. This theory is presented in \cite{BL95}, attributed to Beilinson--Ginzburg and the earlier work of Duflo--Vergne \cite{DV87}; for subsequent developments, see \cite{Pan95, Pan05, Pan07, Ki12} and \cite[Chapter 7]{BD}. Let $A \in \{U(\mathfrak{g}), D_X\}$, so there is a natural homomorphism $j: \mathfrak{k} \to A$ of Lie algebras (the Lie brackets in $A$ are commutators). A weak $(A, K)$-module is a vector space $M$ with \begin{itemize}
\item a left $A$-module structure, say through $\alpha: A \to \operatorname{End}_{\ensuremath{\mathbb{C}}}(M)$
\item an algebraic $K$-action, say through $\rho: K \to \operatorname{Aut}_{\ensuremath{\mathbb{C}}}(M)$, compatibly with $A$, \end{itemize} but the $\mathfrak{k}$-actions $\mathop{}\!\mathrm{d}\rho$ and $\alpha j$ on $M$ are not required to agree; if they agree, we obtain an $(A, K)$-module. An h-complex over $(A, K)$ is a complex $(C, d)$ of weak $(A, K)$-modules together with a family of maps $i_\xi \in \operatorname{End}^{-1}(C) := \operatorname{End}^{-1}_{\ensuremath{\mathbb{C}}}(C)$, linear in $\xi \in \mathfrak{k}$, such that \begin{enumerate}[(i)]
\item $k i_\xi k^{-1} = i_{\operatorname{Ad}(k)\xi}$ for all $k \in K$;
\item $i_\xi$ is $A$-linear for all $\xi \in \mathfrak{k}$;
\item $i_\xi i_\eta + i_\eta i_\xi = 0$ for all $\xi, \eta \in \mathfrak{k}$;
\item $d i_\xi + i_\xi d = (\mathop{}\!\mathrm{d}\rho - \alpha j)(\xi)$. \end{enumerate} The last homotopy condition implies that $\operatorname{H}^n(C)$ is an $(A, K)$-module for all $n$. There are also translation functors $C \mapsto C[1]$, mapping cones and $\operatorname{Hom}$-complexes for h-complexes. This leads to the h-derived category ${}^{\mathrm{h}} \cate{D}(A, K)$: it is triangulated and endowed with a $t$-structure whose heart is $(A, K)\dcate{Mod}$. For every subgroup $T \subset K$ there are functors of oblivion fitting into commutative diagrams up to isomorphism: \[\begin{tikzcd}
{}^{\mathrm{h}} \cate{D}(A, K) \arrow[r, "{\operatorname{H}^n}"] \arrow[d] & (A, K)\dcate{Mod} \arrow[d] \\
{}^{\mathrm{h}} \cate{D}(A, T) \arrow[r, "{\operatorname{H}^n}"] & (A, T)\dcate{Mod}. \end{tikzcd}\]
Therefore we obtain the bounded h-derived categories \[ {}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(\mathfrak{g}, K), \quad {}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(D_X, K). \] In \cite{BL95, Pan05}, these are shown to be equivalent to the usual bounded versions \[ \cate{D}^{\mathrm{b}}(\mathfrak{g}, K), \quad \cate{D}^{\mathrm{b}}_K(X) \] of the derived category of $(\mathfrak{g}, K)$-modules and the $K$-equivariant derived category of $D_X$-modules, respectively; in particular, the $\operatorname{Ext}$'s are the same. They proved the second case for non-affine $X$ and non-reductive $K$ as well.
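A first example may help: any complex of genuine $(A, K)$-modules becomes an h-complex by setting $i_\xi = 0$ for all $\xi \in \mathfrak{k}$, since both sides of condition (iv) then vanish; general h-complexes are those in which the discrepancy between $\mathop{}\!\mathrm{d}\rho$ and $\alpha j$ is corrected only up to the homotopies $i_\xi$.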
The best way to understand the h-construction is to do homological algebra over \emph{Harish-Chandra dg-algebras}; this is just an equivariant version of the theory of dg-modules over dg-algebras (``dg'' = differential graded), which is nowadays standard; see \cite{BL94} or \cite{Yek20}. Consequently, left and right h-derived functors are defined, upon replacing projective (resp.\ injective) resolutions by K-projective (resp.\ K-injective) resolutions. As we assumed $X$ is affine and $K$ is reductive, there are enough K-projective bounded-above h-complexes over $(D_X, K)$.
We adopt this toolbox to deduce the following main theorem. Let \[ \mathbf{Loc}_X = \mathbf{Loc}_{X, K}: {}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(\mathfrak{g}, K) \to {}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(D_X, K) \] be the equivariant derived localization functor so obtained; see \S\ref{sec:Loc-functor}. An advantage of the h-construction is that for all reductive subgroups $T$ of $K$, one easily obtains commutative diagrams up to isomorphisms (Proposition \ref{prop:Loc-oblv}): \[\begin{tikzcd}[column sep=large]
{}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(\mathfrak{g}, K) \arrow[r, "{\mathbf{Loc}_{X, K}}"] \arrow[d] & {}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(D_X, K) \arrow[d] \\
{}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(\mathfrak{g}, T) \arrow[r, "{\mathbf{Loc}_{X, T}}"'] & {}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(D_X, T). \end{tikzcd}\]
A closely related approach to $\mathbf{Loc}_{X, K}$ can be found in \cite[\S 7.8]{BD}, as a part of the ``Hecke patterns''.
\begin{Thm}
Let $H$ and $K$ be spherical reductive subgroups of $G$, and let $V$ be a Harish-Chandra $(\mathfrak{g}, K)$-module. Then the cohomologies of $\mathbf{Loc}_X(V)$ are all regular holonomic as $D_X$-modules, and their characteristic varieties are contained in the nilpotent locus. \end{Thm}
We refer to Theorem \ref{prop:regularity} and the results in that section for complete statements. The proof is based on a criterion (Theorem \ref{prop:reg-criterion}) from \cite{Li22}. One must show that the cohomologies of $\mathbf{Loc}_X(V)$ are (R1) finitely generated over $D_X$, (R2) equipped with $K$-equivariant structures, and (R3) locally $\mathcal{Z}(\mathfrak{g})$-finite through \[\mathcal{Z}(\mathfrak{g}) \subset U(\mathfrak{g}) \xrightarrow{j} D_X. \]
Note that (R2) is immediate in our setting. For the remaining conditions, one passes to the case $K = \{1\}$ by commuting $\mathbf{Loc}_X$ and $\operatorname{H}^n$ with oblivion. The hardest part is (R3): to prove it, we need deep results of F.\ Knop \cite{Kn94} on the structure of the algebra $D_X^G$ of invariant differential operators in the spherical case. This is the content of Proposition \ref{prop:local-Zg-finiteness}.
We remark that $D_X^G$ also acts on the right of the functor $D_X \dotimes{U(\mathfrak{g})} (\cdot)$, by letting $z \in D_X^G$ send $D \otimes w$ to $D z\otimes w$. This induces a right $D_X^G$-action on $\mathbf{Loc}_X$. As a by-product, Proposition \ref{prop:ZX-locally-finite} shows that $D_X^G$ acts locally finitely on the cohomologies of $\mathbf{Loc}_X(V)$, for all Harish-Chandra $(\mathfrak{g}, K)$-modules $V$.
\subsection{Higher branching} We hope that the results on $\mathbf{Loc}_X$ will have some use in geometric representation theory. However, this work is originally motivated by \emph{branching laws} in harmonic analysis.
In the setting of $p$-adic groups $H \subset I$, the branching law studies the spaces $\operatorname{Hom}_H(V|_H, W)$ where $V$ (resp.\ $W$) is an admissible representation of $I$ (resp.\ $H$). In particular, one is interested in the dimension of the $\operatorname{Hom}$-space\footnote{We do not consider the case of $\operatorname{Hom}_H(V, W|_H)$ in this work.}. The $\operatorname{Ext}$-analogue or \emph{higher branching law}, proposed by D.\ Prasad \cite{Pra18} for $p$-adic groups, considers the corresponding problem for $\operatorname{Ext}^n_H(V|_H, W)$ for general $n \geq 0$. One is also interested in the Euler--Poincaré characteristic $\sum_n (-1)^n \dim \operatorname{Ext}^n_H(V|_H, W)$, well-defined as long as $\dim \operatorname{Ext}^n_H(V|_H, W)$ is finite and vanishes for $n \gg 0$. By replacing $I$ by $H \times I$ in which $H$ embeds diagonally, the problem can be reduced to the case $W = \ensuremath{\mathbb{C}}$, the trivial representation.
This work is motivated by higher branching laws in the Archimedean case. We formulate it in the algebraic framework, namely by considering subgroups \begin{equation*}\begin{tikzcd}
H \arrow[phantom, r, "\subset" description] & G \\
K^H \arrow[phantom, u, "\subset" description, sloped] \arrow[phantom, r, "\subset" description] & K \arrow[phantom, u, "\subset" description, sloped]. \end{tikzcd}\end{equation*} where all groups are assumed to be complex reductive. We are led to study
\[ \operatorname{Ext}^n_{\mathfrak{h}, K^H}(V|_H, \ensuremath{\mathbb{C}}), \quad n \in \ensuremath{\mathbb{Z}}_{\geq 0} \]
where $V$ is a $(\mathfrak{g}, K)$-module and $V|_H$ denotes its restriction to $(\mathfrak{h}, K^H)$. The $\operatorname{Ext}$ can be computed either in the h-derived category or in the classical one.
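Note that the case $n = 0$ recovers the usual branching space $\operatorname{Hom}_{\mathfrak{h}, K^H}(V|_H, \ensuremath{\mathbb{C}})$.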
Let $X = H \backslash G$ and let $x$ be the point $H \cdot 1$. The inclusion map $i_x: \mathrm{pt} \to X$ is a morphism between $K^H$-varieties. By using the relation between localization and co-invariants, and working systematically in the h-derived categories, one obtains the following canonical isomorphisms. For all $(\mathfrak{g}, K)$-modules $V$ and all $n \in \ensuremath{\mathbb{Z}}$, we have \begin{align}
\label{eqn:Ext-interpretation}
\operatorname{Ext}^n_{\mathfrak{h}, K^H}(V|_H, \ensuremath{\mathbb{C}}) & \simeq \operatorname{Ext}^n_{D_{\mathrm{pt}}, K^H}\left( i_x^\bullet( \mathbf{Loc}_X(V) ), \ensuremath{\mathbb{C}} \right), \\
\label{eqn:Hm-interpretation}
\operatorname{H}_n(\mathfrak{h}, K^H; V|_H) & \simeq \operatorname{H}^{-n}\operatorname{L}\!\left( \mathrm{coInv}^{\ensuremath{\mathbb{C}}, K^H}_{\ensuremath{\mathbb{C}}, \{1\}} \right) \left(i_x^\bullet \mathbf{Loc}_X(V)\right). \end{align} For the complete statements, see Propositions \ref{prop:RHom-RHom} and \ref{prop:H-coInv}. Here: \begin{itemize}
\item $\operatorname{H}_n(\mathfrak{h}, K^H; \cdot)$ are the relative Lie algebra homologies of $(\mathfrak{h}, K^H)$-modules (see \cite[p.157]{KV95}), and note that
\[ \operatorname{Ext}^n_{\mathfrak{h}, K^H}(V|_H, \ensuremath{\mathbb{C}}) \simeq \operatorname{H}_n(\mathfrak{h}, K^H; V|_H)^* ; \]
\item $\mathbf{Loc}_X(V) := \mathbf{Loc}_{X, K^H}(V)$, but it is also isomorphic to the oblivion of $\mathbf{Loc}_{X, K}(V)$ via $K^H \subset K$;
\item $i_x^\bullet: {}^{\mathrm{h}} \cate{D}^-(D_X, K^H) \to {}^{\mathrm{h}} \cate{D}^-(D_{\mathrm{pt}}, K^H)$ is the h-version of the inverse image functor for $D$-modules;
\item $\operatorname{L}\!\left( \mathrm{coInv}^{\ensuremath{\mathbb{C}}, K^H}_{\ensuremath{\mathbb{C}}, \{1\}}\right): {}^{\mathrm{h}} \cate{D}^-(\ensuremath{\mathbb{C}}, K^H) \to \cate{D}^-(\ensuremath{\mathbb{C}})$ is the left h-derived functor of taking co-invariants of h-complexes over $(\ensuremath{\mathbb{C}}, K^H)$, see \S\ref{sec:inv-coinv}, and $\cate{D}(\ensuremath{\mathbb{C}})$ is the derived category of $\ensuremath{\mathbb{C}} = D_{\mathrm{pt}}$. \end{itemize}
Note that $i_x^\bullet$ coincides with the inverse image functor under the equivalence ${}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(D_X, K^H) \simeq \cate{D}^{\mathrm{b}}_{K^H}(X)$; see Proposition \ref{prop:inverse-image-compatibility}. The same can also be said for $\operatorname{L}\!\left( \mathrm{coInv}^{\ensuremath{\mathbb{C}}, K^H}_{\ensuremath{\mathbb{C}}, \{1\}}\right)$, but truncation is needed since this functor does not land in $\cate{D}^{\mathrm{b}}(\ensuremath{\mathbb{C}})$ in general.
Actually, the deductions of \eqref{eqn:Ext-interpretation} and \eqref{eqn:Hm-interpretation} are routine once the basic formalism of \S\S\ref{sec:review-dg}---\ref{sec:Loc-coinv} is set up. The h-formalism is needed only when $K^H \neq \{1\}$.
The result below is a direct consequence of the earlier theorem on regularity.
\begin{Thm}
Assume that $H, K \subset G$ are both spherical reductive subgroups. For all Harish-Chandra $(\mathfrak{g}, K)$-modules $V$ and all $n \in \ensuremath{\mathbb{Z}}$, there are canonical isomorphisms
\begin{align*}
\operatorname{Ext}^n_{\mathfrak{h}, K^H}(V|_H, \ensuremath{\mathbb{C}}) & \simeq \operatorname{Ext}^n_{D_{\mathrm{pt}}, K^H}\left( i_x^! \mathbf{Loc}_X(V)[\dim X], \ensuremath{\mathbb{C}} \right), \\
\operatorname{H}_n(\mathfrak{h}, K^H; V|_H) & \simeq \operatorname{H}^{- n + \dim X}\operatorname{L}\!\left( \mathrm{coInv}^{\ensuremath{\mathbb{C}}, K^H}_{\ensuremath{\mathbb{C}}, \{1\}} \right) \left(i_x^! \mathbf{Loc}_X(V)\right).
\end{align*}
When $K^H = \{1\}$ we also have
\begin{align*}
\operatorname{Ext}^n_{\mathfrak{h}}(V|_H, \ensuremath{\mathbb{C}}) & \simeq \operatorname{Ext}^n_{D_X}\left( \mathbf{Loc}_X(V), i_{x, *}(\ensuremath{\mathbb{C}})[\dim X] \right), \\
\operatorname{H}_n(\mathfrak{h}; V|_H) & \simeq \operatorname{H}^{-n - \dim X} \left(i_x^* \mathbf{Loc}_X(V)\right).
\end{align*}
Here $i_x^!$, $i_x^*$ and $i_{x, *}$ are defined to match the synonymous functors between the constructible equivariant derived categories via Riemann--Hilbert correspondence.
All the complexes of $D$-modules above are bounded with regular holonomic cohomologies. Consequently, all these vector spaces are finite-dimensional. \end{Thm}
We refer to Theorem \ref{prop:RHom-Loc} and Proposition \ref{prop:H-Loc} for the complete statements.
The finiteness of $\operatorname{H}_n(\mathfrak{h}, K^H; V|_H)$ is covered by a recent work of M.\ Kitagawa \cite{Ki21}, who considers a broader setting of branching laws and obtains uniform bounds. His approach is to interpret relative Lie algebra cohomologies in terms of Zuckerman functors, and then pass to homologies by Poincaré duality. The approach in this work relies on standard results about constructible or regular holonomic equivariant derived categories; although this is conceptually straightforward, it involves more machinery and the resulting bounds are less effective.
In the special case $K^H = \{1\}$, we can connect the Euler--Poincaré characteristic
\[ \mathrm{EP}_{\mathfrak{h}}(V|_H, \ensuremath{\mathbb{C}}) := \sum_n (-1)^n \dim \operatorname{Ext}^n_{\mathfrak{h}}(V|_H, \ensuremath{\mathbb{C}}) \] to a celebrated topological counterpart, namely the local Euler--Poincaré characteristic at $x$ of the \emph{solution complex} of $\mathbf{Loc}_{X, \{1\}}(V)$. See Theorem \ref{prop:local-index} for the complete statement.
\begin{Thm}
Retain the previous assumptions on $H, K \subset G$ and assume $K^H = \{1\}$. Let $V$ be a Harish-Chandra $(\mathfrak{g}, K)$-module, and set $\mathcal{L} := \mathbf{Loc}_{X, \{1\}}(V)$. Then $\mathrm{EP}_{\mathfrak{h}}(V|_H, \ensuremath{\mathbb{C}})$ equals the local Euler--Poincaré characteristic
\[ \chi_x\left( \mathrm{Sol}_X(\mathcal{L}) \right) \]
of the solution complex of $\mathcal{L}$ at $x$, which is expressible in terms of characteristic cycles of $\mathcal{L}$ and Euler obstructions by Kashiwara's local index theorem \cite[Theorem 4.6.7]{HTT08}. \end{Thm}
We remark that the reductivity of $H$ and $K$ makes homological algebra over $(D_X, K)$ and $(\mathfrak{g}, K)$ easier; it also enters the criterion of regularity and the results of Knop in \S\S\ref{sec:regularity-criterion}--\ref{sec:end-of-regularity}. It is still unclear whether our approach extends to non-reductive subgroups.
It should be emphasized that the finiteness of $\operatorname{Ext}^n$ in the spherical case is just a first example of the usage of higher localization, and the computation of $\chi_x(\mathrm{Sol}_X(\mathcal{L}))$ requires knowledge about characteristic cycles. To gain more applications in higher branching laws, one needs a deeper understanding of $\mathbf{Loc}_X(V)$. Regularity is merely the first step.
\subsection{Twist by characters} In arithmetic applications, one often has to study branching laws in which the trivial module $\ensuremath{\mathbb{C}}$ of $H$ is replaced by some character $\chi$, say for $p$-adic groups. In the setting of $(\mathfrak{g}, K)$-modules, one should consider a $1$-dimensional $(\mathfrak{h}, K^H)$-module; by abusing notation, we identify it with the underlying character $\chi: \mathfrak{h} \to \ensuremath{\mathbb{C}}$. Denote by $\chi^\vee$ its contragredient $(\mathfrak{h}, K^H)$-module\footnote{In the study of branching laws of real groups, many authors allow $\chi$ to be a finite-dimensional module. It seems hard to incorporate this case into our formalism.}.
Since $H$ is assumed to be reductive, $\chi$ factors through the Lie algebra of some torus $S = H/\underline{H}$ and can be identified with an element of $\mathfrak{s}^*$. The spaces
\[ \operatorname{Ext}^n_{\mathfrak{h}, K^H}(V|_H, \chi), \quad \operatorname{H}_n(\mathfrak{h}, K^H; V|_H \otimes \chi^\vee) \] can still be interpreted via localization to $X = H \backslash G$, provided that we work with twisted differential operators (TDO's) on $X$.
Specifically, we have an $S$-torsor (on the left) \[ \pi: \tilde{X} := \underline{H} \backslash G \to H \backslash G = X \] that is also $G$-equivariant (on the right). This situation is already considered by \cite{BL95}, where it is called an $S$-monodromic $G$-variety.
Let $\mathfrak{m}_\chi \subset \operatorname{Sym}(\mathfrak{s})$ be the maximal ideal corresponding to $\chi$. The sheaf of $\chi$-twisted differential operators on $X$ is \[ \mathscr{D}_{X, \chi} := (\pi_* \mathscr{D}_{\tilde{X}})^S \big/ \mathfrak{m}_\chi (\pi_* \mathscr{D}_{\tilde{X}})^S. \]
In the affine setting, $D_{X, \chi} = \Gamma(X, \mathscr{D}_{X, \chi})$ equals $D_{\tilde{X}}^S / \mathfrak{m}_\chi D_{\tilde{X}}^S$. There is again a $G$-equivariant homomorphism \[ j: U(\mathfrak{g}) \to D_{X, \chi}, \] by which we define the $\chi$-twisted localization \[ \mathbf{Loc}_{X, \chi} = \mathbf{Loc}_{X, K, \chi}: {}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(\mathfrak{g}, K) \to {}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(D_{X, \chi}, K). \]
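For orientation: when $\chi = 0$, the quotient above recovers $D_X$ (a standard fact for $S$-torsors), and $\mathbf{Loc}_{X, 0}$ may be identified with the untwisted functor $\mathbf{Loc}_{X}$ considered earlier.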
All the earlier results have monodromic counterparts in this context recorded in \S\ref{sec:monodromic}. For example, Proposition \ref{prop:coinv-Loc-monodromic} relates the fibers of $D_{X, \chi} \dotimes{U(\mathfrak{g})} V$ to $(\mathfrak{h}, \chi)$-co-invariants of $V$, i.e.\ the quotient of $V$ by the span of all $\eta v - \chi(\eta)v$ where $\eta \in \mathfrak{h}$ and $v \in V$. On the other hand, the $K^H$-equivariant sheaf $\ensuremath{\mathbb{C}}$ on $\mathrm{pt}$ must be replaced by the monodromic variant $\ensuremath{\mathbb{C}}_\chi$, and this gives rise to monodromic versions of \eqref{eqn:Ext-interpretation} and \eqref{eqn:Hm-interpretation}.
The key ingredient here is a monodromic version of the regularity Theorem \ref{prop:regularity-monodromic} in the spherical case. One has to extend Knop's results \cite{Kn94} to the twisted setting of $D_{X, \chi}$. This is done in Lemma \ref{prop:Knop-monodromic}, by reducing to the spherical homogeneous $S \times G$-space $\tilde{X}$.
We remark that the Riemann--Hilbert correspondence also holds in the twisted (i.e.\ monodromic) and equivariant setting; one has to consider twisted constructible sheaves, though. We refer to \cite{Ka08}.
\subsection{On the analytic picture}
The discussions thus far focus on the algebraic setting. In the representation theory of real groups, one often takes $G$ and $H$ to be real, and takes $K$ (resp.\ $K^H$) to correspond to a maximal compact subgroup of $G$ (resp.\ $H$). In studying $\operatorname{Hom}_{\mathfrak{h}, K^H}(V|_H, \ensuremath{\mathbb{C}})$, one is more interested in those elements which extend continuously to the Casselman--Wallach globalization $E$ of $V$. This is the same as the continuous linear functionals on the space of co-invariants $E_H := E \big/ \sum_h (h-1)E$ endowed with the quotient topology.
For higher branching laws in the analytic setting, it is thus natural to replace relative Lie algebra homologies by the \emph{Schwartz homologies}
\[ \operatorname{H}^{\mathcal{S}}_n(H; E|_H), \quad n \in \ensuremath{\mathbb{Z}} \] defined by Y.\ Chen and B.\ Sun \cite{BC21}; they are locally convex topological spaces, not necessarily Hausdorff, and equal to $E_H$ when $n=0$. In fact, they agree with the \emph{smooth homologies} constructed earlier by P.\ Blanc and D.\ Wigner \cite{BW83}.
In \S\ref{sec:comparison} we will define the comparison maps \begin{equation*}
c_n(E): \operatorname{H}_n\left(\mathfrak{h}, K^H; E^{K\text{-fini}}\right) \to \operatorname{H}^{\mathcal{S}}_n(H; E|_H), \quad n \in \ensuremath{\mathbb{Z}}_{\geq 0} \end{equation*} from the algebraic to the analytic picture, for every Casselman--Wallach representation $E$ of $G$, where $E^{K\text{-fini}}$ is the $(\mathfrak{g}, K)$-module of $K$-finite vectors in $E$.
It is natural to ask if $c_n(E)$ is an isomorphism, at least when $H$ is a reductive spherical subgroup of $G$ after extension of scalars to $\ensuremath{\mathbb{C}}$. If this is indeed the case, $\operatorname{H}^{\mathcal{S}}_n(H; E|_H)$ will be finite-dimensional since $\operatorname{H}_n(\mathfrak{h}, K^H; E^{K\text{-fini}})$ is; in turn, this will imply $\operatorname{H}^{\mathcal{S}}_n(H; E|_H)$ is Hausdorff.
It seems too early to pass a verdict on the question of comparison. Its validity for $n=0$ is equivalent to the automatic continuity theorem, which is open except when $H$ is a symmetric subgroup \cite{BD88} or $H=G$; another (non-reductive) example is when $H$ is maximal unipotent \cite{HT98, LLY21}. Nonetheless, we will settle two cases in the affirmative. \begin{itemize}
\item In Example \ref{eg:admissible-restriction}, we show that $c_n(E)$ is always an isomorphism when $E$ is $K^H$-admissible in the sense of T.\ Kobayashi. This includes the case when $H$ is symmetric, $H/K^H \to G/K$ is a holomorphic embedding of Hermitian symmetric domains and $E$ is a unitary highest weight module. We refer to \cite{Ko15} for an overview of admissible restrictions.
\item In Examples \ref{eg:SL2}, \ref{eg:SL2-more}, we prove that $c_n(E)$ is always an isomorphism when $G = \operatorname{SL}(2)$ and $H$ is a reductive spherical subgroup. This is done by an explicit computation with principal series, which suffices by a general argument of Hecht--Taylor (Proposition \ref{prop:HT-reduction}). In fact, these computations show that $\operatorname{H}_1\left(\mathfrak{h}, K^H; E^{K\text{-fini}}\right)$ can indeed be nonzero in this case. \end{itemize}
In general, neither the finiteness nor Hausdorffness of $\operatorname{H}^{\mathcal{S}}_n(H; E|_H)$ is known for $n > 0$, even when $H$ is reductive and spherical. See \cite{BC21} for related discussions.
\subsection{Structure of this article} In \S\ref{sec:dg} we collect the necessary definitions and background on Harish-Chandra dg-algebras, explain derived categories and derived functors in this generality, and then proceed to the case of h-complexes and h-derived categories. Our references on Harish-Chandra dg-algebras are \cite{BL95, Pan95, Pan05, Pan07}.
In \S\ref{sec:gK-mod}, the formalism of h-complexes is applied to a pair $(\mathfrak{g}, K)$, and a generalized notion of Harish-Chandra modules is presented in Definition \ref{def:HC-module}. The results are either taken from \cite{BL95, Pan05}, or are straightforward analogues of the classical picture, cf.\ \cite{KV95}.
\S\ref{sec:D-basic} concerns the geometric counterpart. We review the definition of the h-derived category of $D$-modules, focusing on the affine case. Most of the material is from \cite{BL95}. In addition, we show that Beilinson's equivalence is compatible with various operations on equivariant derived categories and their h-analogues, especially for the case of inverse images.
In \S\ref{sec:Loc}, we define the localization functor $\mathbf{Loc}_X$ in the equivariant h-derived setting, relate its fibers to co-invariants, and prove the main Theorem \ref{prop:regularity} about regularity.
In \S\ref{sec:Ext-application}, we begin with the general algebraic framework of $\operatorname{Ext}$-branching laws, relate both the $\operatorname{Ext}$ spaces and relative Lie algebra homologies to localizations, and draw some consequences from regularity in the spherical case (Theorems \ref{prop:RHom-Loc}, \ref{prop:local-index} and Corollaries \ref{prop:Ext-consequence-1}, \ref{prop:Ext-consequence-2}). The monodromic setting discussed in \S\ref{sec:monodromic} is similar.
Finally, in \S\ref{sec:analytic} we review the Schwartz homologies and connect them to the algebraic version via a family of maps $c_n(E)$. After a few auxiliary results, we present three cases in which the comparison maps $c_n(E)$ are isomorphisms: admissible restriction (Example \ref{eg:admissible-restriction}), the diagonal torus in $\operatorname{SL}(2)$ (Example \ref{eg:SL2}), and more general reductive spherical subgroups of $\operatorname{SL}(2)$ (Example \ref{eg:SL2-more}). This section is largely independent of the previous ones.
\subsection{Conventions} Unless otherwise specified (such as in \S\ref{sec:analytic}), all vector spaces, varieties and algebraic groups are defined over $\ensuremath{\mathbb{C}}$. Points of a variety are assumed to be closed, and we write $\mathrm{pt} := \operatorname{Spec} \ensuremath{\mathbb{C}}$. By default, groups act on the right of varieties.
We use the abbreviation $\otimes = \otimes_{\ensuremath{\mathbb{C}}}$. The dual of a vector space $V$ is denoted by $V^*$; the invariants under a group $\Gamma$ are denoted by $V^\Gamma$; the symmetric algebra is denoted by $\operatorname{Sym}(V)$.
Differential operators on a smooth variety $X$ are assumed to be algebraic. The structure sheaf of $X$ is denoted by $\mathscr{O}_X$. The algebra (resp.\ sheaf of algebras) of differential operators on $X$ is denoted by $D_X$ (resp.\ $\mathscr{D}_X$).
The identity connected component of a Lie group or an algebraic group $G$ is denoted by $G^\circ$; the opposite of $G$ is denoted by $G^{\mathrm{op}}$. Subgroups are always assumed to be closed.
An affine group $K$ is said to be \emph{reductive} if $K^\circ$ is a connected reductive group. Since we are in characteristic zero, this is equivalent to the linear reductivity of $K$.
Lie algebras of Lie groups or algebraic groups are denoted by Gothic letters, such as $\mathfrak{g} = \operatorname{Lie} G$. The universal enveloping algebra of $\mathfrak{g}$ is denoted as $U(\mathfrak{g})$, and the center of $U(\mathfrak{g})$ is denoted as $\mathcal{Z}(\mathfrak{g})$. The adjoint action of a group is denoted by $\operatorname{Ad}$.
Categories and functors are all $\ensuremath{\mathbb{C}}$-linear. The opposite of a category $\mathcal{C}$ is denoted by $\mathcal{C}^{\mathrm{op}}$. We neglect all set-theoretic issues and employ only $1$-categories in this work, although dg-enrichment still plays a vital role.
Complexes are assumed to be cochain complexes. Given such a complex $(C, d)$ or simply $C$, we have the corresponding chain complex given by $C_n := C^{-n}$. We denote by $\cate{C}(\ensuremath{\mathbb{C}})$ (resp.\ $\cate{K}(\ensuremath{\mathbb{C}})$, $\cate{D}(\ensuremath{\mathbb{C}})$) the category of complexes of $\ensuremath{\mathbb{C}}$-vector spaces (resp.\ its homotopy category, its derived category), and put the superscripts $+, -, \mathrm{b}$ to denote the full subcategories with boundedness conditions. More generally, for an abelian category $\mathcal{A}$, we have the categories $\cate{C}(\mathcal{A})$, $\cate{K}(\mathcal{A})$, $\cate{D}(\mathcal{A})$ and so forth.
For an algebra $A$ (resp.\ Lie algebra $\mathfrak{g}$), we denote by $A\dcate{Mod}$ (resp.\ $\mathfrak{g}\dcate{Mod}$) the abelian category of left $A$-modules (resp.\ $\mathfrak{g}$-modules); the opposite algebra of $A$ is denoted by $A^{\mathrm{op}}$. These notations will be generalized to dg-modules over Harish-Chandra dg-algebras in \S\ref{sec:HC-dga}.
\section{Homological algebra over Harish-Chandra pairs}\label{sec:dg} \subsection{Review of dg-algebras and dg-modules}\label{sec:review-dg} Below is a short review of dg-modules over dg-algebras. This material is standard, and detailed expositions can be found in \cite[Part II]{BL94} or \cite{Yek20}.
First of all, we make $\cate{C}(\ensuremath{\mathbb{C}})$ into a symmetric monoidal category by using Koszul's sign rule, namely: the tensor product of complexes $M$ and $N$ is given by $(M \otimes N)^n = \bigoplus_{i+j=n} M^i \otimes N^j$, with $d(m \otimes n) = dm \otimes n + (-1)^i m \otimes dn$ for $m \in M^i$, and the braiding $M \otimes N \rightiso N \otimes M$ is $m \otimes n \mapsto (-1)^{ij} n \otimes m$ on $M^i \otimes N^j$.
\begin{definition}
If a complex $A$ carries the structure of an algebra\footnote{Also known as a \emph{monoid} in some textbooks.} in the symmetric monoidal category $\cate{C}(\ensuremath{\mathbb{C}})$, then $A$ is said to be a \emph{dg-algebra}. \end{definition}
Multiplication in a dg-algebra is given by a morphism $A \otimes A \to A$ in $\cate{C}(\ensuremath{\mathbb{C}})$. In concrete terms, this means that there are linear maps $A^i \otimes A^j \to A^{i+j}$ for all $i, j \in \ensuremath{\mathbb{Z}}$, satisfying \[ d(xy) = dx \cdot y + (-1)^i x \cdot dy, \quad x \in A^i, \] which are associative and unital with respect to a unit element in $A^0$.
It is customary to identify the complex $A$ with $\bigoplus_n A^n$ and view $d$ as an endomorphism of that vector space. Note that dg-algebras in degree zero are simply algebras.
\begin{definition}
By the abstract formalism of algebras in symmetric monoidal categories, it makes sense to define left (resp.\ right) $A$-modules $M$, with scalar multiplication given by morphisms $A \otimes M \to M$ (resp.\ $M \otimes A \to M$), which we call left (resp. right) \emph{dg-modules} over $A$. Homomorphisms between dg-modules are defined as morphisms between complexes that respect scalar multiplications. \end{definition}
In concrete terms, a left dg-module over $A$ is a complex $M$ together with linear maps $A^i \otimes M^j \to M^{i+j}$ for all $i, j \in \ensuremath{\mathbb{Z}}$, satisfying \[ d(am) = da \cdot m + (-1)^i a \cdot dm, \quad a \in A^i, \] which are associative and unital. Again, it is customary to identify $M$ with the vector space $\bigoplus_n M^n$, so that $M$ is also a left $A$-module in the non-dg sense. Right dg-modules over $A$ are described similarly, and one can also define bimodules.
When $A = \ensuremath{\mathbb{C}}$ (in degree zero), left and right dg-modules are nothing but complexes.
We now review the tensor product of dg-modules; see \cite[Definition 3.3.23]{Yek20} for details.
\begin{definition}\label{def:tensor-dg}
Let $M$ be a right dg-module and $N$ be a left dg-module over a dg-algebra $A$. Let us form the vector space $M \dotimes{A} N$ in the non-dg sense; the kernel of the natural linear map
\[ \bigoplus_n (M \otimes N)^n \simeq (\bigoplus_i M^i) \otimes (\bigoplus_j N^j) \to M \dotimes{A} N \]
is readily seen to be a subcomplex of $M \otimes N$. In this way, we see that $M \dotimes{A} N$ comes from a complex, which will also be denoted as $M \dotimes{A} N$.
If $M$ (resp.\ $N$) is a dg-bimodule over $(R, A)$ (resp.\ over $(A, S)$), where $R$ and $S$ are dg-algebras, then $M \dotimes{A} N$ inherits a natural $(R, S)$-bimodule structure. \end{definition}
The \emph{opposite dg-algebra} $A^{\text{op}}$ of $A$ is the same complex with the multiplication $x \overset{\mathrm{op}}{\cdot} y := (-1)^{ij} yx$ for all $x \in A^i$ and $y \in A^j$. One passes between left dg-modules over $A$ and right dg-modules over $A^{\mathrm{op}}$ by the rule $ma := (-1)^{ij} am$, for $a \in A^i$ and $m \in M^j$. Henceforth, we will mainly consider left dg-modules.
\begin{definition}
A \emph{dg-category} is a category $\mathcal{C}$ enriched over the symmetric monoidal category $\cate{C}(\ensuremath{\mathbb{C}})$. In other words, to each pair $(X, Y)$ of objects is assigned the $\operatorname{Hom}$-complex $\operatorname{Hom}^\bullet_{\mathcal{C}}(X, Y) \in \cate{C}(\ensuremath{\mathbb{C}})$, and there are morphisms of composition in $\cate{C}(\ensuremath{\mathbb{C}})$ of the form
\[ \operatorname{Hom}^\bullet_{\mathcal{C}}(Y, Z) \otimes \operatorname{Hom}^\bullet_{\mathcal{C}}(X, Y) \to \operatorname{Hom}^\bullet_{\mathcal{C}}(X, Z), \]
subject to strict associativity and unit laws. The non-dg version $\operatorname{Hom}_{\mathcal{C}}(X, Y)$ can be recovered by taking $0$-cocycles in $\operatorname{Hom}^\bullet_{\mathcal{C}}(X, Y)$. We also write $\operatorname{End}^\bullet_{\mathcal{C}}(X) := \operatorname{Hom}^\bullet_{\mathcal{C}}(X, X)$.
A \emph{dg-functor} $\mathcal{C} \to \mathcal{D}$ between dg-categories is a functor whose action on $\operatorname{Hom}$ sets upgrades to $\operatorname{Hom}$-complexes. \end{definition}
For example, for any abelian category $\mathcal{A}$, the category $\cate{C}(\mathcal{A})$ is naturally a dg-category. In the case of $\cate{C}(\ensuremath{\mathbb{C}})$, the $\operatorname{Hom}$-complexes will be denoted as $\operatorname{Hom}^\bullet$. If $M$ and $N$ are complexes, typical elements of $\operatorname{Hom}^n(M, N)$ take the form $(f^k)_{k \in \ensuremath{\mathbb{Z}}}$ with $f^k: M^k \to N^{k+n}$.
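Concretely, and with the usual Koszul sign convention, the differential of such an $f \in \operatorname{Hom}^n(M, N)$ is $d(f) = d_N \circ f - (-1)^n f \circ d_M$; the $0$-cocycles of $\operatorname{Hom}^\bullet(M, N)$ are then exactly the morphisms of complexes $M \to N$.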
\begin{definition}
Let $\mathcal{C}$ be a dg-category. Its opposite $\mathcal{C}^{\mathrm{op}}$ is the dg-category with the same objects, and $\operatorname{Hom}_{\mathcal{C}^{\mathrm{op}}}^\bullet(X, Y) := \operatorname{Hom}_{\mathcal{C}}^\bullet(Y, X)$, with morphisms of composition
\[ \operatorname{Hom}_{\mathcal{C}^{\mathrm{op}}}^p(Y, Z) \otimes \operatorname{Hom}_{\mathcal{C}^{\mathrm{op}}}^q(X, Y) \to \operatorname{Hom}_{\mathcal{C}^{\mathrm{op}}}^{p+q}(X, Z) \]
defined as $(-1)^{pq}$ times the reversed composition in $\mathcal{C}$. \end{definition}
We now extend the definition of $\operatorname{Hom}$-complexes to dg-modules.
\begin{definition}\label{def:Hom-dg}
Let $M$ and $N$ be left dg-modules over a dg-algebra $A$. Define $\operatorname{Hom}^\bullet_A(M, N)$ as the subcomplex of $\operatorname{Hom}^\bullet(M, N)$ given by
\[ \operatorname{Hom}^n_A(M, N) := \left\{\begin{array}{r|l}
(f^k)_{k \in \ensuremath{\mathbb{Z}}} \in \operatorname{Hom}^n(M, N)& \forall i, j, \; \forall a \in A^i, \; m \in M^j \\
& f^{i+j} (am) = (-1)^{ni} a f^j(m)
\end{array}\right\}. \]
If $M, N, L$ are left dg-modules over $A$, we have the morphism of composition
\[ \operatorname{Hom}^\bullet_A(N, L) \otimes \operatorname{Hom}^\bullet_A(M, N) \to \operatorname{Hom}^\bullet_A(M, L) \]
in $\cate{C}(\ensuremath{\mathbb{C}})$. We write $\operatorname{End}^\bullet_A(M) := \operatorname{Hom}^\bullet_A(M, M)$. \end{definition}
\begin{remark}
For every complex $M$, composition makes $\operatorname{End}^\bullet(M)$ into a dg-algebra. It is routine to check (see e.g.\ \cite[Proposition 3.3.17]{Yek20}) that, given a dg-algebra $A$, promoting $M$ to a left dg-module over $A$ amounts to prescribing a homomorphism of dg-algebras
\[ A \to \operatorname{End}^\bullet(M). \] \end{remark}
\begin{definition}
For every dg-algebra $A$, we denote the abelian category of left dg-modules over $A$ by $A\dcate{dgMod}$ in order to emphasize that its objects are complexes. \end{definition}
\begin{proposition}
The construction of $\operatorname{Hom}^\bullet_A$ upgrades $A\dcate{dgMod}$ to a dg-category. \end{proposition} \begin{proof}
The $0$-cocycles in $\operatorname{Hom}^\bullet_A(M, N)$ are nothing but morphisms between dg-modules. \end{proof}
For any complex $M$, its translate $M[1]$ is given by $M[1]^n = M^{n+1}$ and $d_{M[1]}^n = -d_M^{n+1}$. The translation functor on $A\dcate{dgMod}$ is given as follows (see \cite[Definition 4.1.9]{Yek20}).
\begin{definition}\label{def:translation-dg}
Let $M$ be a left dg-module over a dg-algebra $A$. We turn $M[1]$ into a left dg-module over $A$ by setting the scalar multiplication $\cdot$ of $A$ on $M[1]$ to be
\[ a \cdot m = (-1)^i am, \quad a \in A^i, \quad m \in M[1]^j = M^{j+1}. \] \end{definition}
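One checks that this sign is exactly what the Leibniz rule requires with respect to $d_{M[1]} = -d_M$: for $a \in A^i$ and $m \in M[1]^j = M^{j+1}$,
\[ d_{M[1]}(a \cdot m) = -d_M\left( (-1)^i am \right) = (-1)^{i+1} (da) m - a \, d_M m = (da) \cdot m + (-1)^i a \cdot d_{M[1]}(m). \]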
\subsection{Harish-Chandra dg-algebras}\label{sec:HC-dga} To begin with, let $K$ be an affine algebraic group. Set $O_K := \Gamma(K, \mathscr{O}_K)$. An abstract representation $V$ of $K$, possibly infinite-dimensional, is said to be \emph{algebraic} if it arises from an $O_K$-comodule structure on $V$; in other words, $V$ is the union of finite-dimensional subrepresentations $V_i$ on which $K$ acts through a homomorphism $\sigma_i: K \to \operatorname{GL}(V_i)$ of algebraic groups. In general, we say $v \in V$ is $K$-algebraic if it belongs to some algebraic subrepresentation, and define \[ V^{K\text{-alg}} := \{v \in V: v \;\text{is $K$-algebraic} \}. \] This is the maximal algebraic subrepresentation of $V$. The assignment $V \mapsto V^{K\text{-alg}}$ is functorial, and one readily checks that \[ V^{K\text{-alg}} = V^{K^\circ \text{-alg}}. \]
The class of algebraic representations is closed under subquotients, direct sums and tensor products. If $\sigma: K \to \operatorname{Aut}_{\ensuremath{\mathbb{C}}}(V)$ gives rise to an algebraic representation, then its derivative at the identity \[ \mathop{}\!\mathrm{d}\sigma: \mathfrak{k} \to \operatorname{End}_{\ensuremath{\mathbb{C}}}(V) \] makes sense: everything reduces to the finite-dimensional case.
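For example, the adjoint representation of $K$ on $U(\mathfrak{k})$ is algebraic, being the union of the finite-dimensional subrepresentations furnished by the standard filtration of $U(\mathfrak{k})$; the same remark applies to the adjoint action on $U(\mathfrak{g})$ appearing in \S\ref{sec:gK-basic}.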
\begin{definition}
We say that $K$ acts algebraically on a complex $M$, if $M^n$ is an algebraic representation of $K$ and $d^n: M^n \to M^{n+1}$ is $K$-equivariant, for each $n$. In particular, this induces a homomorphism $K \to \operatorname{Aut}_{\cate{C}(\ensuremath{\mathbb{C}})}(M)$ of abstract groups. \end{definition}
\begin{definition}[{\cite[1.9.1]{BL95}}]\label{def:HC-dga}
By a \emph{Harish-Chandra dg-algebra} we mean a quadruplet
\[ (A, K, \sigma, j), \]
or more succinctly a pair $(A, K)$, where:
\begin{itemize}
\item $A$ is a dg-algebra and $K$ is as above,
\item $K$ acts algebraically on $A$, the action being given by a homomorphism $\sigma: K \to \operatorname{Aut}_{\cate{dga}}(A)$, where $\cate{dga}$ is the category of dg-algebras;
\item $j: \mathfrak{k} \to A^0 \subset A$ is a $K$-equivariant linear map satisfying
\begin{gather*}
d \circ j = 0, \\
j([\xi_1, \xi_2]) = [j(\xi_1), j(\xi_2)] := j(\xi_1) j(\xi_2) - j(\xi_2) j(\xi_1)
\end{gather*}
for all $\xi_1, \xi_2 \in \mathfrak{k}$;
\item furthermore, we impose the condition that for all $\xi \in \mathfrak{k}$,
\begin{equation*}
(\mathop{}\!\mathrm{d}\sigma)(\xi) = [j(\xi), \cdot] \;\in \operatorname{End}_{\ensuremath{\mathbb{C}}}(A).
\end{equation*}
\end{itemize} \end{definition}
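\begin{example}
The prototypical example, taken up in \S\ref{sec:gK-mod}, is $A = U(\mathfrak{g})$ concentrated in degree zero, for a finite-dimensional Lie algebra $\mathfrak{g}$ containing $\mathfrak{k}$ and endowed with a compatible adjoint action of $K$: here $\sigma$ is the adjoint action of $K$ on $U(\mathfrak{g})$ and $j$ is the inclusion $\mathfrak{k} \hookrightarrow \mathfrak{g} \subset U(\mathfrak{g})$. Another example, which underlies the h-construction of \S\ref{sec:h-cplx}, is $B = U(\overline{\mathfrak{k}}) \otimes A$; see Theorem \ref{prop:h-vs-dg}.
\end{example}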
A morphism $(A, K) \to (A', K')$ between Harish-Chandra dg-algebras consists of a homomorphism $\varphi: K \to K'$ between algebraic groups together with a $\varphi$-equivariant homomorphism $\psi: A \to A'$ between dg-algebras, such that \[\begin{tikzcd}
\mathfrak{k} \arrow[r, "{j}"] \arrow[d, "{\operatorname{Lie} \varphi}"'] & A \arrow[d, "\psi"] \\
\mathfrak{k}' \arrow[r, "{j'}"'] & A' \end{tikzcd}\] commutes.
\begin{definition}\label{def:HC-dg-module}
Let $(A, K)$ be a Harish-Chandra dg-algebra. A (left) dg-module over $(A, K)$ is a left dg-module $M$ over $A$, say given by a homomorphism $\alpha: A \to \operatorname{End}^\bullet(M)$ between dg-algebras, together with a group homomorphism $\rho: K \to \operatorname{Aut}_{\cate{C}(\ensuremath{\mathbb{C}})}(M)$ making $K$ act algebraically on $M$, subject to the following compatibilities:
\begin{itemize}
\item $\alpha(\sigma(k)a) = \rho(k) \alpha(a) \rho(k)^{-1}$ in $\operatorname{End}^\bullet(M)$, for all $k \in K$ and $a \in A$;
\item $\alpha \circ j = \mathop{}\!\mathrm{d}\rho$ as linear maps $\mathfrak{k} \to \operatorname{End}_{\cate{C}(\ensuremath{\mathbb{C}})}(M)$.
\end{itemize}
Homomorphisms $M \to N$ between dg-modules over $(A, K)$ are $K$-equivariant homomorphisms in $\operatorname{Hom}_A(M, N)$. The abelian category so obtained is denoted by $(A, K)\dcate{dgMod}$. \end{definition}
When $K = \{1\}$, this reverts to the dg-category $A\dcate{dgMod}$ reviewed in \S\ref{sec:review-dg}.
Every morphism $(\varphi, \psi): (A, K) \to (A', K')$ between Harish-Chandra dg-algebras induces a pullback functor \begin{equation}\label{eqn:HC-pullback}
(\varphi, \psi)^*: (A', K')\dcate{dgMod} \to (A, K)\dcate{dgMod}. \end{equation} This can be naturally upgraded to a dg-functor between dg-categories.
\begin{remark}\label{rem:AK-module}
When $A$ is simply a $\ensuremath{\mathbb{C}}$-algebra, we can define $(A, K)$-modules as dg-modules over $(A, K)$ in degree zero, and the resulting abelian category will be denoted by $(A, K)\dcate{Mod}$. \end{remark}
\subsection{Derived categories and functors}\label{sec:derived-categories} The basic reference for what follows is \cite[1.9]{BL95}. Let $(A, K)$ be a Harish-Chandra dg-algebra. Many operations on $A\dcate{dgMod}$ carry over to $(A, K)\dcate{dgMod}$ by imposing $K$-equivariance. Below are some key examples.
\begin{description}
\item[Translation functor] Let $M$ be a dg-module over $(A, K)$. We make $M[1]$ into a dg-module over $(A, K)$ using the dg-module structure over $A$ from Definition \ref{def:translation-dg}, and leaving the $K$-action intact.
\item[The $\operatorname{Hom}$-complex] Let $M$, $N$ be dg-modules over $(A, K)$. Define $\operatorname{Hom}^\bullet_{A, K}(M, N)$ to be the subcomplex of $\operatorname{Hom}^\bullet_A(M, N)$ (see Definition \ref{def:Hom-dg}) given by
\[ \operatorname{Hom}^n_{A, K}(M, N) = \left\{ (f^k)_{k \in \ensuremath{\mathbb{Z}}} \in \operatorname{Hom}^n_A(M, N): \forall k, \; f^k \;\text{is $K$-equivariant} \right\}. \]
Morphisms between dg-modules over $(A, K)$ are nothing but $0$-cocycles in $\operatorname{Hom}^\bullet_{A, K}$. This turns $(A, K)\dcate{dgMod}$ into a dg-category.
\item[Homotopy category] Two morphisms $f, g \in \operatorname{Hom}_{A, K}(M, N)$ are said to be homotopic if $f-g$ is a coboundary in $\operatorname{Hom}^0_{A, K}(M, N)$. The homotopy category $\cate{K}(A, K)$ of $(A, K)\dcate{dgMod}$ has the same objects as $(A, K)\dcate{dgMod}$, whilst
\[ \operatorname{Hom}_{\cate{K}(A, K)}(M, N) := \operatorname{H}^0 \operatorname{Hom}^\bullet_{A, K}(M, N) \]
for all objects $M$ and $N$. The translation functor $M \mapsto M[1]$ passes to $\cate{K}(A, K)$, and we have $\operatorname{Hom}_{\cate{K}(A, K)}(M, N[n]) \simeq \operatorname{H}^n \operatorname{Hom}^\bullet_{A, K}(M, N)$.
\item[Acyclic objects] We say $M$ is acyclic if it is acyclic as an object of $\cate{C}(\ensuremath{\mathbb{C}})$.
\item[Quasi-isomorphisms] If $f \in \operatorname{Hom}_{A, K}(M, N)$ induces isomorphisms $\operatorname{H}^n(M) \to \operatorname{H}^n(N)$ for every $n$, we say $f$ is a quasi-isomorphism. This property depends only on the image of $f$ in the homotopy category.
\item[Mapping cones] Let $f \in \operatorname{Hom}_{A, K}(M, N)$. The mapping cone $\mathrm{Cone}(f)$ taken in $\cate{C}(\ensuremath{\mathbb{C}})$ is made into a dg-module over $(A, K)$ by recalling that $\mathrm{Cone}(f)$ is $M[1] \oplus N$ as graded vector spaces, and we let $A$ and $K$ act on $M[1]$ and $N$ in the way prescribed before. The natural morphisms
\[ N \to \mathrm{Cone}(f) \to M[1] \]
are morphisms in $(A, K)\dcate{dgMod}$. Note that this is a $K$-equivariant version of \cite[Definition 4.2.1]{Yek20}. \end{description}
The constructions above make $\cate{K}(A, K)$ into a triangulated category: the distinguished triangles are the ones isomorphic to \[ M \xrightarrow{f} N \to \mathrm{Cone}(f) \xrightarrow{+1} \] given before, where $f: M \to N$ is any morphism in $(A, K)\dcate{dgMod}$.
\begin{definition}
Let $\cate{D}(A, K)$ be the Verdier quotient of $\cate{K}(A, K)$ by acyclic objects; equivalently, it is obtained from $\cate{K}(A, K)$ by inverting quasi-isomorphisms. We call $\cate{D}(A, K)$ the \emph{derived category} of $(A, K)$. It inherits the triangulated structure from $\cate{K}(A, K)$. \end{definition}
When $K=\{1\}$, the above reduces to the well-known derived category of dg-modules: see \cite[\S 10]{BL94} or \cite[Chapter 7]{Yek20}.
In order to better understand $\cate{D}(A, K)$, one has to adapt the notions of K-projective and K-injective complexes to our context. Cf.\ \cite[\S\S 10.1--10.2]{Yek20}.
\begin{definition}
Let $M$ be a dg-module over $(A, K)$.
\begin{itemize}
\item We say $M$ is \emph{K-injective} if
\[ Q\;\text{is acyclic} \implies \operatorname{Hom}^\bullet_{A, K}(Q, M)\;\text{is acyclic}; \]
equivalently,
\[ Q\;\text{is acyclic} \implies \operatorname{Hom}_{\cate{K}(A, K)}(Q, M) = 0. \]
\item We say $M$ is \emph{K-projective} if
\[ Q\;\text{is acyclic} \implies \operatorname{Hom}^\bullet_{A, K}(M, Q) \;\text{is acyclic}; \]
equivalently,
\[ Q\;\text{is acyclic} \implies \operatorname{Hom}_{\cate{K}(A, K)}(M, Q) = 0. \]
\end{itemize} \end{definition}
Being K-injective (resp.\ K-projective) is thus a property within $\cate{K}(A, K)$. If $M$ is K-injective (resp.\ K-projective), then \begin{gather*}
\operatorname{Hom}_{\cate{K}(A, K)}(X, M) \simeq \operatorname{Hom}_{\cate{D}(A, K)}(X, M) \\
(\text{resp.}\; \operatorname{Hom}_{\cate{K}(A, K)}(M, X) \simeq \operatorname{Hom}_{\cate{D}(A, K)}(M, X)) \end{gather*} for every dg-module $X$ over $(A, K)$. Cf.\ \cite[Theorems 10.1.13 and 10.2.9]{Yek20}.
We now move on to $t$-structures. We say $A$ is \emph{non-positively graded} if $A^n = 0$ for all $n > 0$.
\begin{proposition}
Suppose that $A$ is non-positively graded. For every dg-module $M$ over $(A, K)$, the ``smart truncation'' $\tau^{< 0} M$ in $\cate{C}(\ensuremath{\mathbb{C}})$ is actually a dg-submodule of $M$ over $(A, K)$, thus so is $\tilde{\tau}^{\geq 0} M := M/\tau^{< 0} M$. We have the canonical distinguished triangle
\[ \tau^{< 0} M \to M \to \tilde{\tau}^{\geq 0} M \xrightarrow{+1} \]
in $\cate{K}(A, K)$ as well as in $\cate{D}(A, K)$. \end{proposition} \begin{proof}
Routine, see \cite[1.9.4 Lemma]{BL95}. \end{proof}
Using the fact above, we equip $\cate{D}(A, K)$ with a $t$-structure and define its full triangulated subcategories $\cate{D}^{\star}(A, K)$ where $\star \in \{+, -, \mathrm{b}\}$. They are characterized by $\operatorname{H}^n = 0$ for $n \ll 0$, $n \gg 0$ and $|n| \gg 0$ respectively.
We also denote by $\cate{K}^{\star}(A, K)$ (where $\star \in \{+, -, \mathrm{b}\}$) the full triangulated subcategories of $\cate{K}(A, K)$ consisting of bounded below, bounded above, and bounded complexes respectively, so that $\cate{D}^{\star}(A, K)$ is equivalent to the corresponding Verdier quotients.
Now comes the definition of derived functors in an abstract context. More general formulations exist and can be found in \cite[Chapter 8]{Yek20}, for example.
\begin{definition}\label{def:derived-functor}
Let $(A, K)$ (resp.\ $(A', K')$) be Harish-Chandra dg-algebras, and let $\cate{K}$ (resp.\ $\cate{K}'$) be a full triangulated subcategory of $\cate{K}(A, K)$ (resp.\ $\cate{K}(A', K')$). Denote the functors to the Verdier quotients modulo acyclic objects by $Q: \cate{K} \to \cate{D}$ and $Q': \cate{K}' \to \cate{D}'$, respectively.
Consider a triangulated functor $F: \cate{K} \to \cate{K}'$. A left (resp.\ right) \emph{derived functor} of $F$ is defined to be a triangulated functor $\operatorname{L}\! F$ (resp.\ $\operatorname{R}\! F$) from $\cate{D}$ to $\cate{D}'$ which is the right (resp.\ left) Kan extension of $Q' F$ along $Q: \cate{K} \to \cate{D}$. \end{definition}
Whenever they exist, $\operatorname{L}\! F$ and $\operatorname{R}\! F$ fit into 2-cells: \begin{equation}\label{eqn:2-cells-der}\begin{tikzcd}
\cate{K} \arrow[r, "F"] \arrow[d, "Q"'] & \cate{K}' \arrow[d, "{Q'}"] \\
\cate{D} \arrow[r, "{\operatorname{L}\! F}"'] \arrow[Rightarrow, ru] & \cate{D}' \end{tikzcd}\quad\begin{tikzcd}
\cate{K} \arrow[r, "F"] \arrow[d, "Q"'] & \cate{K}' \arrow[d, "{Q'}"] \arrow[Rightarrow, ld] \\
\cate{D} \arrow[r, "{\operatorname{R}\! F}"'] & \cate{D}' \end{tikzcd}\end{equation} The morphisms $\Rightarrow$ between functors are part of the data of derived functors. The universal property of Kan extensions amounts to asserting that every 2-cell of the left (resp.\ right) form as in \eqref{eqn:2-cells-der} uniquely ``retracts to'' (resp.\ is uniquely ``inflated from'') the 2-cell of $\operatorname{L}\! F$ (resp.\ $\operatorname{R}\! F$).
\begin{example}
The easiest example is the case when $F$ is \emph{exact}, i.e.\ when $F$ preserves acyclicity (equivalently, preserves quasi-isomorphisms). For such functors, $\operatorname{L}\! F = \operatorname{R}\! F$ exists and is simply induced from the universal property of Verdier quotients; furthermore, the $\Rightarrow$ in \eqref{eqn:2-cells-der} are isomorphisms. \end{example}
The pull-back functors $(\varphi, \psi)^*$ from \eqref{eqn:HC-pullback} are exact, since they preserve the underlying complexes.
For general $F$, the left (resp.\ right) derived functor can be accessed from K-projective (resp.\ K-injective) resolutions as in the usual setting, if they exist.
\begin{definition}
Let $M$ be a dg-module over $(A, K)$. A K-injective (resp.\ K-projective) \emph{resolution} of $M$ is a quasi-isomorphism $M \to I$ (resp.\ $P \to M$) in $(A, K)\dcate{dgMod}$ where $I$ is K-injective (resp.\ $P$ is K-projective).
Let $\cate{K}$ be a full triangulated subcategory of $\cate{K}(A, K)$. If every object $M$ of $\cate{K}$ admits a K-injective (resp.\ K-projective) resolution within $\cate{K}$, then $\cate{K}$ is said to have enough K-injectives (resp.\ K-projectives). \end{definition}
\begin{theorem}\label{prop:enough-K-general}
Assume that $A$ is non-positively graded. Then $\cate{K}^+(A, K)$ has enough K-injectives; if we assume moreover that $K$ is reductive, then $\cate{K}^-(A, K)$ has enough K-projectives. \end{theorem} \begin{proof}
The first part follows from \cite[1.15.3]{BL95}. The second part follows from \cite[\S 5.6]{Pan95}. \end{proof}
The following is a special instance of \cite[Theorems 10.1.20 and 10.2.15]{Yek20}.
\begin{proposition}\label{prop:derived-functor-resolution}
Consider the situation of Definition \ref{def:derived-functor}.
\begin{itemize}
\item If $\cate{K}$ has enough K-injectives, then $\operatorname{R}\! F$ exists, and $(\operatorname{R}\! F)(M) \simeq Q' F(I)$ if $M \to I$ is a K-injective resolution within $\cate{K}$.
\item If $\cate{K}$ has enough K-projectives, then $\operatorname{L}\! F$ exists, and $(\operatorname{L}\! F)(M) \simeq Q' F(P)$ if $P \to M$ is a K-projective resolution within $\cate{K}$.
\end{itemize} \end{proposition}
As a result, one can define $\operatorname{RHom}_{A, K}(X, Y) \in \cate{D}(\ensuremath{\mathbb{C}})$ for $X$ bounded above and $Y$ bounded below by the familiar recipe: \begin{itemize}
\item either as the derived functor of $\operatorname{Hom}^\bullet_{A, K}(X, \cdot)$ from $(A, K)\dcate{dgMod}^+$ to $\cate{C}(\ensuremath{\mathbb{C}})$,
\item or as the derived functor of $\operatorname{Hom}^\bullet_{A, K}(\cdot, Y)$ from $(A, K)\dcate{dgMod}^-$ to $\cate{C}(\ensuremath{\mathbb{C}})^{\mathrm{op}}$. \end{itemize} The boundedness conditions are removable if the relevant K-injective or K-projective resolutions exist. In any case, for all $n \in \ensuremath{\mathbb{Z}}$ we have \begin{equation}
\operatorname{H}^n \operatorname{RHom}_{A, K}(X, Y) \simeq \operatorname{Hom}_{\cate{D}(A, K)}(X, Y[n]) =: \operatorname{Ext}^n_{A, K}(X, Y). \end{equation}
Given functors $\cate{K} \xrightarrow{F} \cate{K}' \xrightarrow{G} \cate{K}''$, the universal property furnishes canonical morphisms \[ \operatorname{R}\! (GF) \to (\operatorname{R}\! G)(\operatorname{R}\! F), \quad (\operatorname{L}\! G)(\operatorname{L}\! F) \to \operatorname{L}\! (GF), \] provided that all these derived functors exist. We record the following standard result (see e.g.\ \cite[Remark 8.4.31]{Yek20}) which is also immediate from Proposition \ref{prop:derived-functor-resolution}. Recall that a functor is said to be exact if it preserves acyclicity.
\begin{corollary}\label{prop:derived-functor-composite}
Keep the notations of Definition \ref{def:derived-functor} and consider functors $\cate{K} \xrightarrow{F} \cate{K}' \xrightarrow{G} \cate{K}''$.
\begin{itemize}
\item Suppose $\cate{K}$ and $\cate{K}'$ have enough K-injectives. If either $F$ preserves K-injectives or $G$ is exact, then $\operatorname{R}\! (GF) \rightiso (\operatorname{R}\! G)(\operatorname{R}\! F)$.
\item Suppose $\cate{K}$ and $\cate{K}'$ have enough K-projectives. If either $F$ preserves K-projectives or $G$ is exact, then $(\operatorname{L}\! G)(\operatorname{L}\! F) \rightiso \operatorname{L}\! (GF)$.
\end{itemize} \end{corollary}
The standard tool to ensure the preservation of K-injectives or K-projectives is adjunction.
\begin{proposition}\label{prop:K-injectives-adjunction}
Let $\mathcal{C}$ and $\mathcal{C}'$ be dg-categories, each of the form $(A, K)\dcate{dgMod}$ for some $(A, K)$. Suppose that
\[\begin{tikzcd}
F: \mathcal{C} \arrow[shift left, r] & \mathcal{C}': G \arrow[shift left, l]
\end{tikzcd}\]
is a pair of adjoint functors, and both $F$ and $G$ upgrade to dg-functors (see the discussions after \eqref{eqn:HC-pullback}). Then
\begin{itemize}
\item $F$ preserves K-projectives if $G$ is exact;
\item $G$ preserves K-injectives if $F$ is exact.
\end{itemize} \end{proposition} \begin{proof}
Adjunction is described by morphisms $\eta: \ensuremath{\mathrm{id}}_{\mathcal{C}} \to GF$ and $\epsilon: FG \to \ensuremath{\mathrm{id}}_{\mathcal{C}'}$ satisfying the triangle identities. As $F$ and $G$ upgrade to dg-functors, $(\eta, \epsilon)$ induces not only functorial bijections
\[ \operatorname{Hom}_{\mathcal{C}'}(FX, Y) \simeq \operatorname{Hom}_{\mathcal{C}}(X, GY), \]
but also functorial isomorphisms of complexes
\[ \operatorname{Hom}^\bullet_{\mathcal{C}'}(FX, Y) \simeq \operatorname{Hom}^\bullet_{\mathcal{C}}(X, GY), \]
i.e.\ a dg-adjunction. The assertions follow at once. \end{proof}
\subsection{h-construction}\label{sec:h-cplx} Throughout this subsection, we consider a Harish-Chandra dg-algebra $(A, K)$ with $A$ concentrated in degree zero, i.e.\ $A$ is an algebra equipped with compatible homomorphisms \[ j: \mathfrak{k} \to A, \quad \sigma: K \to \operatorname{Aut}_{\cate{alg}}(A). \]
Recall from Remark \ref{rem:AK-module} that the $(A, K)$-modules are simply left $A$-modules equipped with compatible $K$-actions. The dg-category of complexes of $(A, K)$-modules is denoted by $\cate{C}(A, K)$.
\begin{definition}\label{def:weak-module}
With the conventions above, a \emph{weak $(A, K)$-module} is a $\ensuremath{\mathbb{C}}$-vector space $M$ equipped with homomorphisms
\[ \alpha: A \to \operatorname{End}_{\ensuremath{\mathbb{C}}}(M), \quad \rho: K \to \operatorname{Aut}_{\ensuremath{\mathbb{C}}}(M), \]
making $M$ into a left $A$-module and an algebraic representation of $K$ respectively, such that
\[ \alpha(\sigma(k) a) = \rho(k) \alpha(a) \rho(k)^{-1} \]
holds in $\operatorname{End}_{\ensuremath{\mathbb{C}}}(M)$ for all $k \in K$ and $a \in A$, i.e.\ $\alpha$ is $K$-equivariant. \end{definition}
Weak $(A, K)$-modules form an abelian category. Given a weak $(A, K)$-module $M$, define \begin{equation}\label{eqn:weak-AK-w}
w := \mathop{}\!\mathrm{d}\rho - \alpha \circ j: \mathfrak{k} \to \operatorname{End}_{\ensuremath{\mathbb{C}}}(M). \end{equation} This map is clearly $K$-equivariant. It is also $A$-linear: indeed, \begin{align*}
[\alpha (j(\xi)), \alpha(a)] & = \alpha\left( [j(\xi), a] \right) \\
& = \alpha\left( \mathop{}\!\mathrm{d}\sigma(\xi) \cdot a \right) = [\mathop{}\!\mathrm{d}\rho(\xi), \alpha(a)] \end{align*} for all $\xi \in \mathfrak{k}$ and $a \in A$; the last equality follows from the $K$-equivariance of $\alpha$. This entails $[w(\xi), \alpha(a)] = 0$.
Therefore, a weak $(A, K)$-module is an $(A, K)$-module if and only if $w = 0$.
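To illustrate the difference, suppose $K$ is connected and of positive dimension, and take $A = U(\mathfrak{k})$ with $j$ the canonical inclusion (the case $\mathfrak{g} = \mathfrak{k}$ of \S\ref{sec:gK-mod}). Any non-trivial algebraic representation $(\rho, M)$ of $K$, with $A$ acting through the augmentation $U(\mathfrak{k}) \to \ensuremath{\mathbb{C}}$, is then a weak $(A, K)$-module with $w = \mathop{}\!\mathrm{d}\rho \neq 0$, hence not an $(A, K)$-module.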
\begin{definition}
Denote by ${}^{\mathrm{w}} \cate{C}(A, K)$ the category of complexes of weak $(A, K)$-modules. The $\operatorname{Hom}$-complex in this dg-category is denoted by ${}^{\mathrm{w}} \operatorname{Hom}^\bullet$. \end{definition}
Our main reference for the h-construction below is \cite{BL95}; the idea is attributed to Duflo--Vergne \cite{DV87} and Beilinson--Ginzburg in \textit{loc.\ cit.}
\begin{definition}[{\cite[1.5]{BL95}}]\label{def:h-cplx}
An \emph{h-complex} over $(A, K)$ is a complex $C = (C, d)$ of weak $(A, K)$-modules together with a linear map
\[ i: \mathfrak{k} \to \operatorname{End}^{-1}(C) := \operatorname{End}^{-1}_{\ensuremath{\mathbb{C}}}(C), \quad \xi \mapsto i_\xi, \]
such that
\begin{enumerate}[(i)]
\item $k i_\xi k^{-1} = i_{\operatorname{Ad}(k)\xi}$ for all $k \in K$;
\item $i_\xi$ is $A$-linear;
\item $i_\xi i_\eta + i_\eta i_\xi = 0$ for all $\xi, \eta \in \mathfrak{k}$;
\item $d i_\xi + i_\xi d = w(\xi)$ (recall \eqref{eqn:weak-AK-w}).
\end{enumerate}
The morphisms between h-complexes are morphisms between complexes of weak $(A, K)$-modules that commute with all $i_\xi$. \end{definition}
The homotopy condition (iv) implies that the cohomologies of an h-complex are actually $(A, K)$-modules.
We remark that h-complexes are called \emph{equivariant complexes} in \cite{Pan95, Pan05, Pan07, Ki12}.
\begin{definition}
Denote by ${}^{\mathrm{h}} \cate{C}(A, K)$ the category of h-complexes over $(A, K)$. \end{definition}
Every h-complex is a complex of weak $(A, K)$-modules, whence the functor ${}^{\mathrm{h}} \cate{C}(A, K) \to {}^{\mathrm{w}} \cate{C}(A, K)$. We upgrade ${}^{\mathrm{h}} \cate{C}(A, K)$ into a dg-category by defining ${}^{\mathrm{h}} \operatorname{Hom}^\bullet(C_1, C_2)$ as the subcomplex of ${}^{\mathrm{w}} \operatorname{Hom}^\bullet(C_1, C_2)$: \[ {}^{\mathrm{h}}\operatorname{Hom}^n(C_1, C_2) := \left\{ (f^l)_{l \in \ensuremath{\mathbb{Z}}} \in {}^{\mathrm{w}} \operatorname{Hom}^n(C_1, C_2) : \forall l, \xi, \; f^{l-1} i_\xi = (-1)^n i_\xi f^l \right\}. \]
As usual, we say a morphism (resp.\ an object) of ${}^{\mathrm{h}} \cate{C}(A, K)$ is a quasi-isomorphism (resp.\ acyclic) if it is so as a complex. The following definition thus makes sense.
\begin{definition}
Using ${}^{\mathrm{h}} \operatorname{Hom}^\bullet$, we define the homotopy category ${}^{\mathrm{h}} \cate{K}(A, K)$ of ${}^{\mathrm{h}} \cate{C}(A, K)$, and the \emph{h-derived category} ${}^{\mathrm{h}} \cate{D}(A, K)$ is defined as its Verdier quotient by acyclic complexes, or equivalently by inverting quasi-isomorphisms.
The K-injective and K-projective h-complexes are also defined in this way. The derived functors in this setting are also called \emph{h-derived functors}. \end{definition}
For an h-complex $C$, we make $C[1]$ into an h-complex by setting \[ i': \mathfrak{k} \to \operatorname{End}^{-1}(C[1]), \quad \xi \mapsto i'_\xi := -i_\xi. \] Using this, mapping cones can be defined for any morphism $f: C_1 \to C_2$ in ${}^{\mathrm{h}} \cate{C}(A, K)$. In this way, ${}^{\mathrm{h}} \cate{K}(A, K)$ and ${}^{\mathrm{h}} \cate{D}(A, K)$ become triangulated categories.
For every h-complex $C$, the ``smart truncation'' $\tau^{< 0} C$ as a subcomplex of weak $(A, K)$-modules is an h-complex, and so is $\tilde{\tau}^{\geq 0} C := C/\tau^{< 0} C$. Consequently, ${}^{\mathrm{h}}\cate{D}(A, K)$ is endowed with a $t$-structure, whose heart is exactly the category $(A, K)\dcate{Mod}$ of $(A, K)$-modules.
We wish to compare the h-derived category with the naive one. There is an evident dg-functor \begin{equation}\label{eqn:h-comparison-C}
\cate{C}((A, K)\dcate{Mod}) \to {}^{\mathrm{h}} \cate{C}(A, K) \end{equation} by turning complexes over $(A, K)\dcate{Mod}$ into h-complexes with $i_\xi = 0$. It induces a functor between homotopy categories and preserves acyclicity.
Recall that the derived category $\cate{D}((A, K)\dcate{Mod})$ of $(A, K)\dcate{Mod}$ is a triangulated category with $t$-structure whose heart is $(A, K)\dcate{Mod}$. For $\star \in \{\; , +, -, \mathrm{b}\}$, from \eqref{eqn:h-comparison-C} we obtain a functor \begin{equation}\label{eqn:h-comparison}
\alpha: \cate{D}^{\star}((A, K)\dcate{Mod}) \to {}^{\mathrm{h}} \cate{D}^{\star}(A, K). \end{equation} It is obviously $t$-exact.
All the assertions above can be checked by hand. Nonetheless, the formalism of Harish-Chandra dg-algebras provides a conceptually more satisfactory approach to the h-construction. We present a summary below after some preparations.
First, suppose that $A_i$ are dg-algebras for $i=1,2$. By the general theory of algebras in a symmetric monoidal category, the tensor product $A_1 \otimes A_2$ of complexes underlies a dg-algebra.
Secondly, consider the dg Lie algebra \[ \overline{\mathfrak{k}} := \left[ \mathfrak{k} \xrightarrow{\ensuremath{\mathrm{id}}} \mathfrak{k} \right], \quad \text{degrees:}\; -1, 0, \] whose Lie bracket on $\overline{\mathfrak{k}}^0 \otimes \overline{\mathfrak{k}}^0$ and $\overline{\mathfrak{k}}^0 \otimes \overline{\mathfrak{k}}^{-1}$ equals the bracket $[\cdot, \cdot]$ of $\mathfrak{k}$, and is zero otherwise. Its universal enveloping dg-algebra is \[ U(\overline{\mathfrak{k}}) = \underbracket{\bigwedge^\bullet \mathfrak{k}}_{\deg \leq 0} \otimes \underbracket{U(\mathfrak{k})}_{\deg = 0}. \] The differential $d$ is induced from the complex $\overline{\mathfrak{k}}$; explicit formulas will be given in \S\ref{sec:std-resolution}. We let $K$ act on $U(\overline{\mathfrak{k}})$ by adjoint actions on both $\otimes$-slots.
\begin{theorem}\label{prop:h-vs-dg}
We have the following equivalences of dg-categories.
\begin{enumerate}[(i)]
\item Take $B = A$. Then
\[ (B, K)\dcate{dgMod} \simeq \cate{C}((A, K)\dcate{Mod}). \]
\item Take $B = U(\mathfrak{k}) \otimes A$ with diagonal $K$-action, where $U(\mathfrak{k})$ is viewed as a dg-algebra concentrated in degree zero. Define $j_B: \mathfrak{k} \to B$ by
\[ j_B(\xi) = \xi \otimes 1 + 1 \otimes j(\xi). \]
This makes $(B, K)$ into a Harish-Chandra dg-algebra, and
\[ (B, K)\dcate{dgMod} \simeq {}^{\mathrm{w}} \cate{C}(A, K). \]
Specifically, the action of $\xi \in \mathfrak{k} \subset U(\mathfrak{k})$ corresponds to $w(\xi)$; see \eqref{eqn:weak-AK-w}.
\item Take $B = U(\overline{\mathfrak{k}}) \otimes A$ with diagonal $K$-action. Define $j_B: \mathfrak{k} \to B$ by
\[ j_B(\xi) = j^\natural(\xi) \otimes 1 + 1 \otimes j(\xi) \]
where $j^\natural$ is the embedding $\mathfrak{k} \hookrightarrow 1 \otimes U(\mathfrak{k}) \subset U(\overline{\mathfrak{k}})$. This makes $(B, K)$ into a Harish-Chandra dg-algebra, and
\[ (B, K)\dcate{dgMod} \simeq {}^{\mathrm{h}} \cate{C}(A, K). \]
Specifically, the action of $\xi \in \mathfrak{k} \subset \bigwedge^\bullet \mathfrak{k}$ corresponds to $i_\xi$.
\end{enumerate}
All these equivalences are the identity on the underlying complexes; they preserve acyclicity, quasi-isomorphisms and homotopies. \end{theorem} \begin{proof}
The case (i) is trivial. Case (iii) is \cite[1.11.1]{BL95}, and (ii) is explained in \cite[p.2201]{Pan07}. \end{proof}
\begin{remark}
The dg-algebra $B$ above is always non-positively graded. Hence Theorem \ref{prop:enough-K-general} provides enough K-injectives (resp.\ K-projectives, assuming $K$ reductive) for the bounded below (resp.\ bounded above) homotopy categories. \end{remark}
\begin{remark}\label{rem:h-Ext}
The h-version of $\operatorname{Ext}$ functors, denoted by ${}^{\mathrm{h}} \operatorname{Ext}_{A, K}$, can be defined through the equivalence in Theorem \ref{prop:h-vs-dg} (iii). As the familiar $\operatorname{Ext}$ functor, it can be computed in terms of ${}^{\mathrm{h}} \operatorname{Hom}^\bullet$ and K-injective or K-projective resolutions of h-complexes. \end{remark}
In view of these identifications, the dg-functor \eqref{eqn:h-comparison-C} corresponds to the pullback induced by the $K$-equivariant homomorphism \[ \epsilon \otimes \ensuremath{\mathrm{id}}: U(\overline{\mathfrak{k}}) \otimes A \to \ensuremath{\mathbb{C}} \otimes A \simeq A \] of dg-algebras, where $\epsilon: U(\overline{\mathfrak{k}}) \to \ensuremath{\mathbb{C}}$ is the augmentation homomorphism. On the other hand, pull-back along the inclusion $U(\mathfrak{k}) \otimes A \to U(\overline{\mathfrak{k}}) \otimes A$ amounts to forgetting the datum $i$ in h-complexes.
\subsection{Adjoint functors of oblivion}\label{sec:adjoint-oblv} Consider Harish-Chandra dg-algebras $(A, K, \sigma, j)$, $(A', K', \sigma', j')$ and a morphism $(A, K) \to (A', K')$. In most of the scenarios, the maps $A \to A'$ and $K \to K'$ will be inclusions. For this reason, the corresponding exact functor from \eqref{eqn:HC-pullback} will be named as \emph{oblivion} instead of pullback. Denote it by \[ \mathrm{oblv}: (A', K')\dcate{dgMod} \to (A, K)\dcate{dgMod}. \]
We begin with the change of dg-algebras, i.e.\ the case $K' = K$. Consider a $K$-equivariant homomorphism $\varphi: A \to A'$ between dg-algebras such that $\varphi j = j'$. Two constructions on a dg-module $M$ over $(A, K)$ will be needed.
\begin{itemize}
\item Definition \ref{def:tensor-dg} affords the dg-module $A' \dotimes{A} M$ over $A'$. Let $K$ act diagonally on it; this action is algebraic.
\item Since $A'$ is a dg-bimodule over $(A, A')$, the $\operatorname{Hom}$-complex $\operatorname{Hom}^\bullet_A(A', M)$ is actually a left dg-module over $A'$. It carries the standard $K$-action
\begin{equation}\label{eqn:K-adjoint-action}
f \xmapsto{k \in K} \underbracket{k}_{M} \circ f \circ \underbracket{k^{-1}}_{A'},
\end{equation}
which respects differentials and $A'$-action. Now take $\operatorname{Hom}^\bullet_A(A', M)^{K\text{-alg}}$. \end{itemize}
We contend that they yield dg-modules over $(A', K)$.
\begin{lemma}\label{prop:oblv-adjunction-prepr}
Both constructions above yield dg-functors
\[ (A, K)\dcate{dgMod} \to (A', K)\dcate{dgMod}. \] \end{lemma} \begin{proof}
We first check that $A' \dotimes{A} M$ is a dg-module over $(A', K)$. Writing the actions by $K$, $A$ and $A'$ as left multiplication, we have
\begin{align*}
(k a'_1) \cdot (a'_2 \otimes m) & = (k a'_1)a'_2 \otimes m \\
& = k \cdot \left( (a'_1 \cdot k^{-1} a'_2) \otimes k^{-1} m \right) \\
& = k \cdot a'_1 \cdot k^{-1} \cdot (a'_2 \otimes m)
\end{align*}
for all $a'_1, a'_2 \in A'$, $k \in K$ and $m \in M$. This is the first condition in Definition \ref{def:HC-dg-module}.
As for the second condition, taking derivative of the diagonal $K$-action yields
\begin{align*}
\mathop{}\!\mathrm{d} \rho(\xi) (a' \otimes m) & = (\mathop{}\!\mathrm{d}\sigma'(\xi) a') \otimes m + a' \otimes (\mathop{}\!\mathrm{d}\rho(\xi) m) \\
& = [j'(\xi), a'] \otimes m + a' \otimes (j(\xi) m) \\
& = \left( [j'(\xi), a'] + a' j'(\xi) \right) \otimes m \\
& = (j'(\xi)a') \otimes m = j'(\xi) \cdot (a' \otimes m)
\end{align*}
for all $\xi \in \mathfrak{k}$ since $j' = \varphi j$, and we are done.
Similarly, one readily checks that $\operatorname{Hom}^\bullet_A(A', M)^{K\text{-alg}}$ satisfies these two conditions.
Standard facts from the theory of dg-modules (see e.g.\ \cite[\S 9.1]{Yek20}) show that they are dg-functors if we forget $K$-actions. Our case follows by imposing $K$-equivariance. \end{proof}
\begin{proposition}\label{prop:oblv-adjunction}
The dg-functors from Lemma \ref{prop:oblv-adjunction-prepr} fit into adjunctions
\begin{equation*}\begin{gathered}
\begin{tikzcd}
A' \dotimes{A} (\cdot): (A, K)\dcate{dgMod} \arrow[shift left, r] & (A', K)\dcate{dgMod} : \mathrm{oblv}, \arrow[shift left, l]
\end{tikzcd} \\
\begin{tikzcd}
\mathrm{oblv}: (A', K)\dcate{dgMod} \arrow[shift left, r] & (A, K)\dcate{dgMod}: \operatorname{Hom}^\bullet_A(A', \cdot)^{K\text{-alg}}. \arrow[shift left, l]
\end{tikzcd}
\end{gathered}\end{equation*} \end{proposition} \begin{proof}
Consider the first adjunction. It is a standard fact that
\[ \operatorname{Hom}_{A'}(A' \dotimes{A} M, M') \simeq \operatorname{Hom}_A(M, \mathrm{oblv}(M')) \;\;\text{canonically.} \]
Adding $K$-equivariance yields the desired adjunction for $\operatorname{Hom}_{A', K}$. The second adjunction can be deduced in a similar way, by using standard adjunctions for dg-modules. \end{proof}
\begin{corollary}\label{prop:oblv-adjoint-K}
The functor $A' \dotimes{A} (\cdot)$ preserves K-projectives and $\operatorname{Hom}^\bullet_A(A', \cdot)^{K\text{-alg}}$ preserves K-injectives. \end{corollary} \begin{proof}
Oblivion is exact, so the assertions follow from Proposition \ref{prop:K-injectives-adjunction}. \end{proof}
Next, we describe the adjoint functors of oblivion in h-construction (Definition \ref{def:h-cplx}).
\begin{proposition}\label{prop:adjoint-h-oblv}
Suppose that, in the situation of Proposition \ref{prop:oblv-adjunction}, both $A$ and $A'$ are concentrated in degree zero. In what follows, $M$ stands for an arbitrary h-complex over $(A, K)$, say with $i^M_\xi \in \operatorname{End}^{-1}(M)$ in its data ($\xi \in \mathfrak{k}$).
\begin{enumerate}[(i)]
\item The left adjoint of
\[ \mathrm{oblv}: {}^{\mathrm{h}}\cate{C}(A', K) \to {}^{\mathrm{h}}\cate{C}(A, K) \]
is the dg-functor
\[ A' \dotimes{A} (\cdot): {}^{\mathrm{h}}\cate{C}(A, K) \to {}^{\mathrm{h}}\cate{C}(A', K), \]
where $K$ acts diagonally on $A' \dotimes{A} M$, and the degree $-1$ endomorphisms $i^{\otimes}_\xi$ of $A' \dotimes{A} M$ are given by
\[ i^{\otimes}_\xi(a' \otimes m) = a' \otimes i^M_\xi(m). \]
\item The right adjoint of $\mathrm{oblv}$ is the dg-functor
\[ \operatorname{Hom}^\bullet_A(A', \cdot)^{K\text{-alg}}: {}^{\mathrm{h}}\cate{C}(A, K) \to {}^{\mathrm{h}}\cate{C}(A', K), \]
where $K$ acts à la \eqref{eqn:K-adjoint-action} on $\operatorname{Hom}^\bullet_A(A', M)$ and the degree $-1$ endomorphisms $i^{\operatorname{Hom}}_\xi$ of $\operatorname{Hom}^\bullet_A(A', M)$ are given by
\[ (i_\xi^{\operatorname{Hom}} f)(a') = i^M_\xi f(a'), \quad f \in \operatorname{Hom}^n_A(A', M). \]
\end{enumerate} \end{proposition} \begin{proof}
This is essentially a combination of Theorem \ref{prop:h-vs-dg} and Proposition \ref{prop:oblv-adjunction}, since
\begin{align*}
(U(\overline{\mathfrak{k}}) \otimes A') \dotimes{U(\overline{\mathfrak{k}}) \otimes A} M & \leftiso A' \dotimes{A} M, \\
\operatorname{Hom}^\bullet_{U(\overline{\mathfrak{k}}) \otimes A}(U(\overline{\mathfrak{k}}) \otimes A', M) & \rightiso \operatorname{Hom}^\bullet_A(A', M)
\end{align*}
as dg-modules over $A'$, and these isomorphisms are $K$-equivariant. It remains to identify $i_\xi^{\operatorname{Hom}}$ and $i_\xi^{\otimes}$, and this is routine. \end{proof}
\begin{remark}
The functor $\operatorname{Hom}^\bullet_A(A', \cdot)^{K\text{-alg}}$ specializes to the co-induction in \cite[1.12]{BL95} in the special case $A = \ensuremath{\mathbb{C}}$. \end{remark}
Finally, we consider change of group, i.e.\ the case $A = A'$. For the sake of simplicity, we only treat the right adjoint of oblivion in the case of h-construction.
\begin{theorem}[P.\ Pandžić]\label{prop:equivariant-Zuckerman}
Suppose that $A$ is concentrated in degree zero. Let $T \to K$ be a homomorphism of affine groups and assume $T$ is reductive. Then there is a pair of adjoint functors
\[\begin{tikzcd}
\mathrm{oblv}: {}^{\mathrm{h}} \cate{C}(A, K) \arrow[shift left, r] & {}^{\mathrm{h}} \cate{C}(A, T): \Gamma^{\mathrm{eq}}_{K, T}. \arrow[shift left, l]
\end{tikzcd}\]
Moreover, $\Gamma^{\mathrm{eq}}_{K, T}$ upgrades to a dg-functor and induces a $t$-exact functor between homotopy categories. The functor $\Gamma^{\mathrm{eq}}_{K, T}$ is exact (i.e.\ preserves acyclicity). \end{theorem} \begin{proof}
This is \cite[Theorems 3.1.2--3.1.5]{Pan07}. The assertion that $\Gamma^{\mathrm{eq}}_{K, T}$ upgrades to a dg-functor is contained in the cited proofs. \end{proof}
In \cite{Pan07}, the functors $\Gamma^{\mathrm{eq}}_{K, T}$ are called \emph{equivariant Zuckerman functors}. A comparison with the classical Zuckerman functors is given in \cite[Theorem 3.3.2]{Pan07}.
\begin{corollary}\label{prop:change-group-K-proj}
If $T$ is reductive, then $\mathrm{oblv}: {}^{\mathrm{h}} \cate{C}(A, K) \to {}^{\mathrm{h}} \cate{C}(A, T)$ preserves K-projectives. \end{corollary} \begin{proof}
Apply Proposition \ref{prop:K-injectives-adjunction}. \end{proof}
To conclude, we record the easy observation that $\mathrm{oblv}$ induces functors between h-derived categories for all $T \to K$ (by exactness), and the diagrams \begin{equation}\label{eqn:oblv-comm}
\begin{tikzcd}
{}^{\mathrm{h}} \cate{C}(A, K) \arrow[r, "{\operatorname{H}^n}"] \arrow[d, "{\mathrm{oblv}}"'] & (A, K)\dcate{Mod} \arrow[d] \\
{}^{\mathrm{h}} \cate{C}(A, T) \arrow[r, "{\operatorname{H}^n}"] & (A, T)\dcate{Mod}
\end{tikzcd}
\quad
\begin{tikzcd}
{}^{\mathrm{h}} \cate{D}(A, K) \arrow[r, "{\operatorname{H}^n}"] \arrow[d, "{\mathrm{oblv}}"'] & (A, K)\dcate{Mod} \arrow[d] \\
{}^{\mathrm{h}} \cate{D}(A, T) \arrow[r, "{\operatorname{H}^n}"] & (A, T)\dcate{Mod}
\end{tikzcd} \end{equation} commute for all $n \in \ensuremath{\mathbb{Z}}$.
\subsection{Invariants and co-invariants}\label{sec:inv-coinv} Consider a Harish-Chandra dg-algebra $(A, K)$ together with \begin{itemize}
\item $\mathfrak{a} \subsetneq A$: a $K$-invariant dg-ideal;
\item $N \lhd K$: a subgroup acting trivially on $A/\mathfrak{a}$. \end{itemize} From this we obtain a morphism $(A, K) \to (A/\mathfrak{a}, K/N)$ between Harish-Chandra dg-algebras. The pull-back dg-functor in this case is called \emph{inflation} \[ \mathrm{Infl}^{A, K}_{A/\mathfrak{a}, K/N}: (A/\mathfrak{a}, K/N)\dcate{dgMod} \to (A, K)\dcate{dgMod}. \]
\begin{proposition}\label{prop:Inv-coInv}
There exists a left (resp.\ right) adjoint dg-functor $\mathrm{coInv}^{A, K}_{A/\mathfrak{a}, K/N}$ (resp.\ $\mathrm{Inv}^{A, K}_{A/\mathfrak{a}, K/N}$) of $\mathrm{Infl}^{A, K}_{A/\mathfrak{a}, K/N}$. \end{proposition} \begin{proof}
The construction is standard: $\mathrm{Inv}^{A, K}_{A/\mathfrak{a}, K/N}$ (resp.\ $\mathrm{coInv}^{A, K}_{A/\mathfrak{a}, K/N}$) takes the maximal dg-submodule (resp.\ the maximal quotient dg-module) on which $\mathfrak{a}$ and $N$ act trivially. \end{proof}
The transitivity $\mathrm{Infl}^{A, K}_{A/\mathfrak{a}, K/N} = \mathrm{Infl}^{A, K}_{A/\mathfrak{a}, K} \mathrm{Infl}^{A/\mathfrak{a}, K}_{A/\mathfrak{a}, K/N}$ implies \begin{equation}\label{eqn:Inv-coInv-transitive}\begin{aligned}
\mathrm{Inv}^{A, K}_{A/\mathfrak{a}, K/N} & \simeq \mathrm{Inv}^{A/\mathfrak{a}, K}_{A/\mathfrak{a}, K/N} \mathrm{Inv}^{A, K}_{A/\mathfrak{a}, K}, \\
\mathrm{coInv}^{A, K}_{A/\mathfrak{a}, K/N} & \simeq \mathrm{coInv}^{A/\mathfrak{a}, K}_{A/\mathfrak{a}, K/N} \mathrm{coInv}^{A, K}_{A/\mathfrak{a}, K}. \end{aligned}\end{equation}
Note that the left and right adjoint of $\mathrm{Infl}^{A, K}_{A/\mathfrak{a}, K}$ have been given in \S\ref{sec:adjoint-oblv}. The right adjoint of $\mathrm{Infl}^{A, K}_{A, K/N}$ is given in Theorem \ref{prop:equivariant-Zuckerman} when $K$ is reductive.
\begin{proposition}\label{prop:Inv-coInv-K}
The functor $\mathrm{coInv}^{A, K}_{A/\mathfrak{a}, K/N}$ (resp.\ $\mathrm{Inv}^{A, K}_{A/\mathfrak{a}, K/N}$) preserves K-projectives (resp.\ K-injectives). \end{proposition} \begin{proof}
This follows from the exactness of inflation and Proposition \ref{prop:K-injectives-adjunction}. \end{proof}
In general, $\mathrm{Inv}$ (resp.\ $\mathrm{coInv}$) is not exact, and one has to consider its right (resp.\ left) h-derived functor, provided that $(A, K)\dcate{dgMod}$ or some suitable subcategory has enough K-injectives (resp.\ K-projectives). For these h-derived functors, the adjunction to $\mathrm{Infl}^{A, K}_{A/\mathfrak{a}, K/N}$ still holds on the level of derived categories.
\begin{example}\label{eg:h-inflation}
Let us specialize to the h-construction. Assume $A$ is in degree zero, realized as the quotient of $B := U(\overline{\mathfrak{k}}) \otimes A$ modulo the augmentation ideal (Theorem \ref{prop:h-vs-dg}). The inflation
\[ \cate{C}(A, K) = (A, K)\dcate{dgMod} \to (B, K)\dcate{dgMod} \simeq {}^{\mathrm{h}} \cate{C}(A, K) \]
is the obvious inclusion. The functor of invariants (resp.\ co-invariants) extracts $\bigcap_\xi (\operatorname{ker}(i_\xi) \cap \operatorname{ker}(w(\xi)))$ (resp.\ takes the quotient modulo $\sum_\xi (\operatorname{im}(i_\xi) + \operatorname{im}(w(\xi))$).
The left h-derived functor $\operatorname{L}\!(\mathrm{coInv})$ in this setting played a major role in \cite{Pan05}. \end{example}
\section{\texorpdfstring{$(\mathfrak{g}, K)$}{(g, K)}-modules}\label{sec:gK-mod} \subsection{Basic definitions}\label{sec:gK-basic} Following \cite{BL95}, we consider the following data \begin{itemize}
\item $\mathfrak{g}$: finite-dimensional Lie algebra,
\item $K$: affine algebraic group,
\item $\operatorname{Ad}: K \to \operatorname{Aut}_{\text{Lie alg.}}(\mathfrak{g})$: a homomorphism of algebraic groups,
\item $\iota: \mathfrak{k} \to \mathfrak{g}$: inclusion of Lie algebras, \end{itemize} subject to the conditions below \begin{itemize}
\item $\iota$ is $K$-equivariant,
\item $(\mathop{}\!\mathrm{d}\operatorname{Ad}(\xi))(x) = [\iota(\xi), x]$ for all $\xi \in \mathfrak{k}$ and $x \in \mathfrak{g}$. \end{itemize}
Note that no grading is put on $U(\mathfrak{g})$. As $\iota$ extends to an inclusion $U(\mathfrak{k}) \to U(\mathfrak{g})$ and $\operatorname{Ad}$ extends to an algebraic action on $U(\mathfrak{g})$, the following is obvious.
\begin{proposition}
The quadruplet $(U(\mathfrak{g}), K, \operatorname{Ad}, \iota)$ is a Harish-Chandra dg-algebra in the sense of Definition \ref{def:HC-dga}, concentrated in degree zero. \end{proposition}
We shall write $(\mathfrak{g}, K)$ instead of $(U(\mathfrak{g}), K)$. Thus we obtain the notions of $(\mathfrak{g}, K)$-modules, weak $(\mathfrak{g}, K)$-modules, and h-complexes over $(\mathfrak{g}, K)$ by the general formalism in \S\ref{sec:h-cplx}.
The constructions below for tensor products and internal $\operatorname{Hom}$'s are taken from \cite[p.80]{Pan05}. Let $M$ and $N$ be objects of ${}^{\mathrm{h}} \cate{C}(\mathfrak{g}, K)$, say with data $i^M_\xi$ and $i^N_\xi$ for all $\xi \in \mathfrak{k}$. We make $M \otimes N$ into an h-complex by letting \begin{align*}
\eta (x \otimes y) & = \eta x \otimes y + x \otimes \eta y, \quad \eta \in \mathfrak{g}, \\
k (x \otimes y) & = kx \otimes ky, \quad k \in K, \\
i^{M \otimes N}_\xi(x \otimes y) & = i^M_\xi(x) \otimes y + (-1)^p x \otimes i^N_\xi(y), \quad \xi \in \mathfrak{k}, \end{align*} where $x \in M^p$ and $y \in N^q$, for all $(p, q) \in \ensuremath{\mathbb{Z}}^2$.
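A direct computation, in which the cross terms cancel by the Koszul sign rule, verifies condition (iv) of Definition \ref{def:h-cplx} for $M \otimes N$:
\[ \left( d\, i^{M \otimes N}_\xi + i^{M \otimes N}_\xi\, d \right)(x \otimes y) = \left( w^M(\xi) x \right) \otimes y + x \otimes \left( w^N(\xi) y \right) = w^{M \otimes N}(\xi)(x \otimes y), \]
where the superscripts indicate the weak $(\mathfrak{g}, K)$-module to which \eqref{eqn:weak-AK-w} is applied.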
Let $\mathfrak{g}$ and $K$ act on the $\operatorname{Hom}$-complex $\operatorname{Hom}^\bullet_{\ensuremath{\mathbb{C}}}(M, N)$ via \begin{gather*}
(\eta f)(m) = \eta (f(m)) - f(\eta m), \quad \eta \in \mathfrak{g}, \\
(kf)(m) = k(f(k^{-1} m)), \quad k \in K, \end{gather*} where $f \in \operatorname{Hom}^n_{\ensuremath{\mathbb{C}}}(M, N)$, $n \in \ensuremath{\mathbb{Z}}$. We take $\operatorname{Hom}^\bullet_{\ensuremath{\mathbb{C}}}(M, N)^{K\text{-alg}}$ and define \[ (i^{\operatorname{Hom}}_\xi f)(m) = i^N_\xi f(m) - (-1)^n f(i^M_\xi m), \quad f \in \operatorname{Hom}^n_{\ensuremath{\mathbb{C}}}(M, N)^{K\text{-alg}}, \quad \xi \in \mathfrak{k}. \]
\begin{definition}\label{def:internal-Hom}
The h-complex constructed above is called the \emph{internal $\operatorname{Hom}$} of $M$ and $N$. By taking $N = \ensuremath{\mathbb{C}}$, one arrives at the notion of \emph{contragredient} h-complexes $M^\vee$ over $(\mathfrak{g}, K)$. \end{definition}
The next notion applies only to $(\mathfrak{g}, K)$-modules. Recall that a $\mathcal{Z}(\mathfrak{g})$-module is called \emph{locally $\mathcal{Z}(\mathfrak{g})$-finite} if it is a union of finite-dimensional submodules.
\begin{definition}\label{def:HC-module}
A $(\mathfrak{g}, K)$-module $M$ is said to be a \emph{Harish-Chandra module} if the following two conditions hold.
\begin{enumerate}[(i)]
\item $M$ is finitely generated as a $\mathfrak{g}$-module;
\item $M$ is locally $\mathcal{Z}(\mathfrak{g})$-finite.
\end{enumerate} \end{definition}
Harish-Chandra modules form a Serre subcategory of $(\mathfrak{g}, K)\dcate{Mod}$. Cf.\ Lemma \ref{prop:Zg-finite-ses}.
\begin{remark}
In the applications to Lie groups, $K$ is often taken to be a compact Lie group instead, and the algebraicity of the $K$-action on $M$ translates into local $K$-finiteness; see e.g.\ \cite[p.45 and (1.64)]{KV95}. \end{remark}
From \S\ref{sec:Loc} onward, we will specialize to the case where $\mathfrak{g} = \operatorname{Lie} G$ for some connected reductive group $G$ and $K$ is a reductive subgroup of $G$; the maps $\iota$ and $\operatorname{Ad}$ will then be the obvious ones. If $K$ is a symmetric subgroup, then a $(\mathfrak{g}, K)$-module $M$ is Harish-Chandra if and only if it is finitely generated over $\mathfrak{g}$ and admissible; see \cite[Corollary 7.223]{KV95} and \cite[3.4.1 Theorem]{Wa88} after taking suitable real forms.
\subsection{Standard resolutions}\label{sec:std-resolution} Let $\mathfrak{g}$ be a finite-dimensional Lie algebra. The standard complex $N\mathfrak{g}$ is given by \[ \cdots \to U(\mathfrak{g}) \otimes \bigwedge^2 \mathfrak{g} \to U(\mathfrak{g}) \otimes \bigwedge^1 \mathfrak{g} \to U(\mathfrak{g}) \otimes \bigwedge^0 \mathfrak{g} \] in degrees $\ldots, -2, -1, 0$ and zero elsewhere. The differentials \[ \partial_n: U(\mathfrak{g}) \otimes \bigwedge^n \mathfrak{g} \to U(\mathfrak{g}) \otimes \bigwedge^{n-1} \mathfrak{g} \] for $n \geq 1$ are \begin{align*}
u \otimes (\xi_1 \wedge \cdots \wedge \xi_n) \mapsto \sum_{i=1}^n (-1)^{i+1} u\xi_i \otimes (\xi_1 \wedge \cdots \widehat{\xi_i} \cdots \wedge \xi_n ) \\
+ \sum_{p < q} (-1)^{p+q} u \otimes ( [\xi_p, \xi_q] \wedge \xi_1 \wedge \cdots \widehat{\xi_p} \cdots \widehat{\xi_q} \cdots \wedge \xi_n ) \end{align*} where $\widehat{(\cdot)}$ indicates an omitted factor.
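For orientation, in the lowest degrees the differentials read
\begin{align*}
\partial_1(u \otimes \xi) & = u\xi, \\
\partial_2\bigl(u \otimes (\xi_1 \wedge \xi_2)\bigr) & = u\xi_1 \otimes \xi_2 - u\xi_2 \otimes \xi_1 - u \otimes [\xi_1, \xi_2];
\end{align*}
in particular, when $\mathfrak{g}$ is abelian the second sum vanishes and $N\mathfrak{g}$ is the usual Koszul complex of $\operatorname{Sym}(\mathfrak{g})$.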
Let $U(\mathfrak{g})$ act on each $U(\mathfrak{g}) \otimes \bigwedge^n \mathfrak{g}$ by left multiplication, and take the augmentation homomorphism \[ \epsilon: U(\mathfrak{g}) = U(\mathfrak{g}) \otimes \bigwedge^0 \mathfrak{g}\to \ensuremath{\mathbb{C}}. \]
It is well known \cite[Theorem 2.122]{KV95} that $\epsilon: N\mathfrak{g} \to \ensuremath{\mathbb{C}}$ furnishes a K-projective resolution of the trivial $\mathfrak{g}$-module $\ensuremath{\mathbb{C}}$.
We now consider the data $(\mathfrak{g}, K)$ of \S\ref{sec:gK-basic}. View $\ensuremath{\mathbb{C}}$ as the trivial $(\mathfrak{g}, K)$-module.
\begin{theorem}[P.\ Pandžić {\cite[Theorem 3.2.6]{Pan07}}]\label{prop:N-resolution}
Let $K$ act diagonally on each term of $N\mathfrak{g}$, and define
\begin{align*}
i_\xi: U(\mathfrak{g}) \otimes \bigwedge^n \mathfrak{g} & \to U(\mathfrak{g}) \otimes \bigwedge^{n+1} \mathfrak{g} \\
u \otimes \lambda & \mapsto -u \otimes (\xi \wedge \lambda)
\end{align*}
for all $\xi \in \mathfrak{k}$, $u \in U(\mathfrak{g})$, $\lambda \in \bigwedge^n \mathfrak{g}$ and $n \geq 0$. Then $N\mathfrak{g}$ is an h-complex over $(\mathfrak{g}, K)$.
If $K$ is reductive, then $\epsilon: N\mathfrak{g} \to \ensuremath{\mathbb{C}}$ is a K-projective resolution in the sense of h-complexes. \end{theorem}
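As a sanity check on the sign in the definition of $i_\xi$ (included only for convenience), these operators anticommute:
\[ i_\xi i_\zeta (u \otimes \lambda) = i_\xi\bigl( -u \otimes (\zeta \wedge \lambda) \bigr) = u \otimes (\xi \wedge \zeta \wedge \lambda) = - i_\zeta i_\xi (u \otimes \lambda). \]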
The following result is essentially a formal consequence of Theorem \ref{prop:N-resolution}.
\begin{theorem}[P.\ Pandžić {\cite[Theorem 3.5]{Pan05}}]\label{prop:N-resolution-gen}
Assume that $K$ is reductive. For every h-complex $M$ over $(\mathfrak{g}, K)$, the morphism $\ensuremath{\mathrm{id}} \otimes \epsilon: M \otimes N\mathfrak{g} \to M$ is a K-projective resolution in the h-sense. \end{theorem}
Therefore, the homotopy category ${}^{\mathrm{h}} \cate{K}^{\star}(\mathfrak{g}, K)$ of h-complexes has enough K-projectives for $\star \in \{\; , +, -, \mathrm{b}\}$, as long as $K$ is reductive.
The upshot is that there are canonical K-projective resolutions $M \otimes N\mathfrak{g} \to M$ for all $M$ in ${}^{\mathrm{h}} \cate{C}(\mathfrak{g}, K)$ when $K$ is reductive. Moreover, these are also K-projective resolutions in $\cate{C}(\mathfrak{g}\dcate{Mod})$; this agrees with the assertion of Corollary \ref{prop:change-group-K-proj} (take $T = \{1\}$).
\subsection{Bernstein--Lunts equivalence} Let $(\mathfrak{g}, K)$ be as in \S\ref{sec:gK-basic}. Let \[ \cate{C}(\mathfrak{g}, K), \quad \cate{K}(\mathfrak{g}, K), \quad \cate{D}(\mathfrak{g}, K) \] be the category of complexes of $(\mathfrak{g}, K)$-modules, its homotopy category and derived category, respectively. Recall that they have the h-avatars \[ {}^{\mathrm{h}} \cate{C}(\mathfrak{g}, K), \quad {}^{\mathrm{h}} \cate{K}(\mathfrak{g}, K), \quad {}^{\mathrm{h}} \cate{D}(\mathfrak{g}, K) \] and so on for the variants with superscripts $+, -, \mathrm{b}$ to denote full subcategories with boundedness conditions. The comparison functor \eqref{eqn:h-comparison} becomes \begin{equation}\label{eqn:h-comparison-gK}
\alpha: \cate{D}^{\star}(\mathfrak{g}, K) \to {}^{\mathrm{h}} \cate{D}^{\star}(\mathfrak{g}, K), \quad \star \in \{\; , +, -, \mathrm{b}\}. \end{equation}
The following equivalences are due to Bernstein--Lunts \cite{BL95} and P.\ Pandžić \cite{Pan05}.
\begin{theorem}\label{prop:BL-equiv}
The functor $\alpha: \cate{D}^{\mathrm{b}}(\mathfrak{g}, K) \to {}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(\mathfrak{g}, K)$ is an equivalence. If $K$ is reductive, then so is $\alpha: \cate{D}(\mathfrak{g}, K) \to {}^{\mathrm{h}} \cate{D}(\mathfrak{g}, K)$. In both cases, the functors are triangulated and preserve $t$-structures. \end{theorem} \begin{proof}
The first part is \cite[Theorem 1.3]{BL95} and the second is \cite[Theorem 1.1]{Pan05}. Moreover, we have noted after \eqref{eqn:h-comparison} that $\alpha$ is triangulated and preserves $t$-structures. \end{proof}
Denote the $\operatorname{Ext}$ functors for $\cate{C}(\mathfrak{g}, K)$ and ${}^{\mathrm{h}} \cate{C}(\mathfrak{g}, K)$ by $\operatorname{Ext}^\bullet_{\mathfrak{g}, K}$ and ${}^{\mathrm{h}} \operatorname{Ext}^\bullet_{\mathfrak{g}, K}$, respectively. See Remark \ref{rem:h-Ext}.
\begin{corollary}\label{prop:BL-equiv-Ext}
There are canonical isomorphisms
\[ \operatorname{Ext}^n_{\mathfrak{g}, K}(M, N) \rightiso {}^{\mathrm{h}} \operatorname{Ext}^n_{\mathfrak{g}, K}(\alpha M, \alpha N) \]
for all objects $M, N$ of $\cate{D}^{\mathrm{b}}(\mathfrak{g}, K)$ and all $n \in \ensuremath{\mathbb{Z}}$, compatibly with long exact sequences for $\operatorname{Ext}$. If $K$ is reductive, the same holds for all objects of $\cate{D}(\mathfrak{g}, K)$. \end{corollary} \begin{proof}
For the left-hand side we have $\operatorname{Ext}^n_{\mathfrak{g}, K}(M, N) \simeq \operatorname{Hom}_{\cate{D}(\mathfrak{g}, K)}(M, N[n])$, and similarly for the right-hand side; the assertion then follows from the equivalence in Theorem \ref{prop:BL-equiv}. \end{proof}
In view of these results, we will often omit $\alpha$ from the formulas, and denote the $\operatorname{Ext}$ functor for both $\cate{D}^{\mathrm{b}}(\mathfrak{g}, K)$ and ${}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(\mathfrak{g}, K)$ by $\operatorname{Ext}^\bullet_{\mathfrak{g}, K}$.
\subsection{A lemma on restriction}\label{sec:restriction-lemma} Here we consider two pairs $(\mathfrak{g}, K)$ and $(\mathfrak{h}, T)$ as in \S\ref{sec:gK-basic}, assuming: \begin{itemize}
\item $\mathfrak{h}$ is a Lie subalgebra of $\mathfrak{g}$;
\item $T$ is a subgroup of $K$;
\item the maps $K \to \operatorname{Aut}(\mathfrak{g})$, $T \to \operatorname{Aut}(\mathfrak{h})$, $\mathfrak{k} \to \mathfrak{g}$ and $\mathfrak{t} \to \mathfrak{h}$ are compatible with the inclusions above. \end{itemize}
Therefore we obtain a morphism of pairs $(U(\mathfrak{h}), T) \to (U(\mathfrak{g}), K)$ in the sense of \S\ref{sec:HC-dga}, and similarly for the Harish-Chandra dg-algebras obtained by h-construction.
Below is the h-counterpart of a standard result \cite[Proposition 2.57 (c)]{KV95} about restriction (or oblivion); the proof is also similar to the one in \textit{loc.\ cit.}
\begin{lemma}\label{prop:restriction-lemma}
Suppose that $T$ is reductive. Then the functor ${}^{\mathrm{h}}\cate{C}(\mathfrak{g}, K) \to {}^{\mathrm{h}}\cate{C}(\mathfrak{h}, T)$ is exact and preserves K-projectives. \end{lemma} \begin{proof}
Exactness is known. As for the preservation of K-projectives, we break the restriction into
\[ {}^{\mathrm{h}}\cate{C}(\mathfrak{g}, K) \to {}^{\mathrm{h}}\cate{C}(\mathfrak{g}, T) \to {}^{\mathrm{h}}\cate{C}(\mathfrak{h}, T). \]
The first step preserves K-projectives by Corollary \ref{prop:change-group-K-proj}. In view of Proposition \ref{prop:K-injectives-adjunction}, it suffices to show that ${}^{\mathrm{h}}\cate{C}(\mathfrak{g}, T) \to {}^{\mathrm{h}}\cate{C}(\mathfrak{h}, T)$ has a right adjoint which is exact and upgrades to a dg-functor.
The desired right adjoint dg-functor is given by Proposition \ref{prop:adjoint-h-oblv} (ii), namely
\[ \operatorname{Hom}^\bullet_{U(\mathfrak{h})}(U(\mathfrak{g}), \cdot)^{T\text{-alg}}: {}^{\mathrm{h}}\cate{C}(\mathfrak{h}, T) \to {}^{\mathrm{h}}\cate{C}(\mathfrak{g}, T). \]
It remains to show that $\operatorname{Hom}^\bullet_{U(\mathfrak{h})}(U(\mathfrak{g}), M)^{T\text{-alg}}$ is acyclic for every acyclic h-complex $M$ over $(\mathfrak{h}, T)$.
By reductivity, we may choose a subspace $\mathfrak{q} \subset \mathfrak{g}$, invariant under $\operatorname{Ad}(T^\circ)$, such that $\mathfrak{g} = \mathfrak{h} \oplus \mathfrak{q}$. Next, apply \cite[Lemma 2.56]{KV95} (with $L = \{1\}$ and swapping left and right) to obtain an isomorphism
\[ U(\mathfrak{h}) \otimes \operatorname{Sym}(\mathfrak{q}) \rightiso U(\mathfrak{g}) \quad \text{as $\mathfrak{h}$-modules.} \]
In \textit{loc.\ cit.} the map is $u \otimes v \mapsto u \sigma(v)$ where $\sigma: \operatorname{Sym}(\mathfrak{q}) \to U(\mathfrak{g})$ is symmetrization. Hence it is also $T^\circ$-equivariant.
Hence there is a $T^\circ$-equivariant (relative to \eqref{eqn:K-adjoint-action}) isomorphism in $\cate{C}(\ensuremath{\mathbb{C}})$:
\begin{align*}
\operatorname{Hom}^\bullet_{U(\mathfrak{h})}(U(\mathfrak{g}), M)^{T^\circ\text{-alg}} & \rightiso \operatorname{Hom}^\bullet_{\ensuremath{\mathbb{C}}}(\operatorname{Sym}(\mathfrak{q}), M)^{T^\circ\text{-alg}} \\
\varphi & \mapsto \varphi|_{1 \otimes \operatorname{Sym}(\mathfrak{q})}.
\end{align*}
Consider any algebraic representation $W$ of $T^\circ$ and a short exact sequence $0 \to V' \to V \to V'' \to 0$ of algebraic representations of $T^\circ$. Taking a compact real form of $T^\circ$ and passing from algebraic to locally $T^\circ(\ensuremath{\mathbb{R}})$-finite modules, by \cite[Proposition 1.18b]{KV95} the short exact sequence splits. As a consequence,
\[ 0 \to \operatorname{Hom}_{\ensuremath{\mathbb{C}}}(W, V')^{T^\circ\text{-alg}} \to \operatorname{Hom}_{\ensuremath{\mathbb{C}}}(W, V)^{T^\circ\text{-alg}} \to \operatorname{Hom}_{\ensuremath{\mathbb{C}}}(W, V'')^{T^\circ\text{-alg}} \to 0 \]
is exact.
From this fact, $\operatorname{Hom}^\bullet_{\ensuremath{\mathbb{C}}}(\operatorname{Sym}(\mathfrak{q}), M)^{T^\circ\text{-alg}}$ and $\operatorname{Hom}^\bullet_{U(\mathfrak{h})}(U(\mathfrak{g}), M)^{T^\circ\text{-alg}}$ are seen to be acyclic. It remains to recall that $(\cdots)^{T\text{-alg}} \simeq (\cdots)^{T^\circ\text{-alg}}$. \end{proof}
\section{\texorpdfstring{$D$}{D}-modules}\label{sec:Dmod} \subsection{Basic definitions}\label{sec:D-basic} Let $K$ be an affine algebraic group. A smooth variety $X$ is said to be a \emph{$K$-variety} if $K$ acts algebraically on the right of $X$. Morphisms of $K$-varieties are defined to be $K$-equivariant morphisms of varieties.
Let $X$ be a smooth variety. Unless otherwise specified, $D_X$-modules (resp.\ $\mathscr{D}_X$-modules) will mean left modules (resp.\ $\mathscr{O}_X$-quasi-coherent left modules). We denote by \[ \cate{D}^{\star}(X), \quad \star \in \{\;, +, -, \mathrm{b}\} \] the derived category of $\mathscr{D}_X$-modules with boundedness condition $\star$; for affine $X$, they are just $\cate{D}^{\star}(D_X\dcate{Mod})$.
If $X$ is a smooth affine $K$-variety, then we obtain the homomorphism \[ j: U(\mathfrak{k}) \to D_X \] of algebras, mapping $\xi \in \mathfrak{k}$ to the vector field on $X$ generated by $\xi$. On the other hand, $K$ acts on $D_X$ in the following way. The right $K$-action on $X$ induces a left $K$-action on regular functions; for $P \in D_X$ and $k \in K$, let ${}^k P$ be the operator \[ \varphi \mapsto k (P (k^{-1} \varphi)) \] on regular functions $\varphi$. It belongs to $D_X$: indeed, ${}^k P$ is simply $P$ transported by the automorphism of $X$ given by $k$. Denote by $\sigma: K \to \operatorname{Aut}(D_X)$ the resulting homomorphism.
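As a toy example to fix conventions, take $X = \mathbb{A}^1$ with coordinate $x$ and $K = \mathbb{G}_{\mathrm{m}}$ acting on the right by $x \cdot t = xt$; we normalize the induced action on functions as $(t\varphi)(x) = \varphi(xt)$ (this normalization is an assumption made here only for concreteness). Then $D_X = \ensuremath{\mathbb{C}}[x, \partial_x]$, the map $j$ sends the standard generator of $\mathfrak{k} \simeq \ensuremath{\mathbb{C}}$ to the Euler vector field $x\partial_x$, and
\[ {}^t x = t x, \qquad {}^t \partial_x = t^{-1} \partial_x, \qquad t \in \mathbb{G}_{\mathrm{m}}. \]
Differentiating at $t = 1$ recovers $[x\partial_x, x] = x$ and $[x\partial_x, \partial_x] = -\partial_x$, as expected from the analogue for $(D_X, K)$ of the last condition in \S\ref{sec:gK-basic}.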
\begin{proposition}
Given a smooth affine $K$-variety $X$, the quadruplet $(D_X, K, \sigma, j)$ is a Harish-Chandra dg-algebra in the sense of Definition \ref{def:HC-dga}, concentrated in degree zero. \end{proposition} \begin{proof}
This is stated in \cite[2.1]{BL95} (see the references therein). See also \cite{Li22}. \end{proof}
Fix a smooth affine $K$-variety $X$ hereafter. We obtain the notions of $(D_X, K)$-modules, weak $(D_X, K)$-modules, and h-complexes over $(D_X, K)$ by the general formalism in \S\ref{sec:h-cplx}. In particular, we obtain the categories \[ {}^{\mathrm{h}}\cate{C}(D_X, K), \quad {}^{\mathrm{h}}\cate{K}(D_X, K), \quad {}^{\mathrm{h}}\cate{D}(D_X, K) \] as well as the versions with boundedness conditions.
In comparison with the standard terminologies, we have: \begin{itemize}
\item $(D_X, K)$-modules are exactly the (strongly) \emph{$K$-equivariant $D_X$-modules};
\item weak $(D_X, K)$-modules are exactly the \emph{weakly $K$-equivariant $D_X$-modules}. \end{itemize} See also the explanations after \cite[Definition 2.2]{Li22}.
For every subgroup $T$ of $K$ and every $n \in \ensuremath{\mathbb{Z}}$, it is clear that \[\begin{tikzcd}
{}^{\mathrm{h}} \cate{D}(D_X, K) \arrow[r] \arrow[d, "{\operatorname{H}^n}"'] & {}^{\mathrm{h}}\cate{D}(D_X, T) \arrow[d, "{\operatorname{H}^n}"] \\
(D_X, K)\dcate{Mod} \arrow[r] & (D_X, T)\dcate{Mod} \end{tikzcd}\] commutes, where the horizontal arrows are the evident ones, and the upper one is $t$-exact triangulated. Taking $T = \{1\}$, the second column becomes $\cate{D}(X) \xrightarrow{\operatorname{H}^n} D_X\dcate{Mod}$.
\begin{remark}
If $x \in X$ is fixed by $K$, we can also talk about $(D_{X, x}, K)$-modules, etc., where $D_{X, x}$ is the stalk of $D_X$ at $x$. Taking stalks yields the dg-functors
\[ {}^{\mathrm{h}}\cate{C}(D_X, K) \to {}^{\mathrm{h}}\cate{C}(D_{X, x}, K) \]
and so forth. In fact it is $D_{X, x} \dotimes{D_X} (\cdot)$; it preserves K-projectives by Corollary \ref{prop:oblv-adjoint-K}. \end{remark}
\begin{remark}
The assumption that $X$ is affine is artificial here. It is clear that the whole formalism can be sheafified. For any smooth $K$-variety $X$, there is a theory of $(\mathscr{D}_X, K)$-modules, weak $(\mathscr{D}_X, K)$-modules, h-complexes over $(\mathscr{D}_X, K)$ and the corresponding derived categories. See \cite[2.16.1]{BL95}, or \cite{Ki12} for partial flag varieties. Nevertheless, this level of generality is not needed in this work.
Several constructions from the theory of $D$-modules generalize to the h-setting. For example, the same ideas from \S\ref{sec:std-resolution} upgrade the \emph{Spencer complex} \cite[(1.5.6)]{HTT08}
\[ \cdots \to \mathscr{D}_X \dotimes{\mathscr{O}_X} \bigwedge^2 \mathscr{T}_X \to \mathscr{D}_X \dotimes{\mathscr{O}_X} \bigwedge^1 \mathscr{T}_X \to \mathscr{D}_X \dotimes{\mathscr{O}_X} \bigwedge^0 \mathscr{T}_X \]
into an h-complex over $(\mathscr{D}_X, K)$, where $\mathscr{T}_X$ stands for the tangent sheaf of $X$. Denote this h-complex by $NX$; then the morphism $\mathscr{D}_X \to \mathscr{O}_X$ given by $D \mapsto D(1)$ yields a quasi-isomorphism $NX \to \mathscr{O}_X$ in ${}^{\mathrm{h}} \cate{C}(\mathscr{D}_X, K)$. \end{remark}
\subsection{Inverse images}\label{sec:inverse-image} There are two basic operations on derived categories of $D$-modules: inverse image (suitably shifted) and de Rham push-forward. The goal here is to define inverse images for h-complexes on smooth affine $K$-varieties. We do not consider de Rham push-forward in this work.
For every morphism $f: X \to Y$ between smooth varieties, we set \[ \mathrm{rd}(f) := \dim X - \dim Y. \]
Assuming that there are enough flat $\mathscr{D}_Y$-modules (e.g.\ when $Y$ is quasi-projective), the basic theory of $D$-modules provides a functor \[ f^\bullet = f^![-\mathrm{rd}(f)] : \cate{D}^-(Y) \to \cate{D}^-(X). \] \begin{itemize}
\item The $f^\bullet$ above is the inverse image functor denoted by $\operatorname{L}\! f^*$ in \cite[p.33]{HTT08}, the left derived functor of $\mathscr{D}_{X \to Y} \dotimes{f^{-1} \mathscr{D}_Y} f^{-1}(\cdot)$;
\item The $f^!$ is denoted by $f^\dagger$ in \textit{loc.\ cit.}, and $f^!$ corresponds to the $!$-pullback under Riemann--Hilbert correspondence when applied to regular holonomic complexes. \end{itemize}
The effect of $\mathscr{D}_{X \to Y} \dotimes{f^{-1} \mathscr{D}_Y} f^{-1}(\cdot)$ is to take the pullback of a $\mathscr{D}_Y$-module as a quasi-coherent sheaf, and then equip it with a natural $\mathscr{D}_X$-module structure.
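For orientation, if $Y = \mathrm{pt}$, so that $\mathscr{D}_Y = \ensuremath{\mathbb{C}}$ and a $\mathscr{D}_Y$-module is simply a vector space $V$, then $\mathscr{D}_{X \to Y} = \mathscr{O}_X$ and the recipe above gives
\[ f^\bullet V = \mathscr{O}_X \dotimes{\ensuremath{\mathbb{C}}} V, \qquad f^! V = \left( \mathscr{O}_X \dotimes{\ensuremath{\mathbb{C}}} V \right)[\dim X], \]
with vector fields acting through the first factor; no derived functor is needed here, since every vector space is flat over $\ensuremath{\mathbb{C}}$.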
If $f$ is a morphism between $K$-varieties, then $\mathscr{D}_{X \to Y}$ is $K$-equivariant, and this functor can be lifted to the level of h-complexes, cf.\ Proposition \ref{prop:adjoint-h-oblv} (i). The point is to take its left h-derived functor. This splits into two cases.
\begin{description}
\item[Smooth inverse image] Suppose $f$ is smooth; then $\mathscr{D}_{X \to Y} \dotimes{f^{-1} \mathscr{D}_Y} f^{-1}(\cdot)$ preserves acyclic objects, hence it induces a $t$-exact functor
\[ f^\bullet: {}^{\mathrm{h}} \cate{D}(D_Y, K) \to {}^{\mathrm{h}} \cate{D}(D_X, K) \]
as well as $f^! := f^\bullet[\mathrm{rd}(f)]$.
\item[General inverse image] For general $f$, we shall assume $K$ is reductive. Theorem \ref{prop:enough-K-general} ensures that ${}^{\mathrm{h}} \cate{C}(D_Y, K)$ has enough K-projectives. Hence we may define the left h-derived functor
\[ f^\bullet: {}^{\mathrm{h}} \cate{D}^-(D_Y, K) \to {}^{\mathrm{h}} \cate{D}^-(D_X, K) \]
and $f^! := f^\bullet[\mathrm{rd}(f)]$. Note that $f^\bullet$ can be computed from K-flat resolutions, and K-projective implies K-flat (cf.\ \cite[Proposition 10.3.4]{Yek20}). \end{description}
Clearly, these two definitions agree in overlapping cases. The following results are trivial for smooth inverse images, thus we state them only for the general ones.
\begin{lemma}\label{prop:pullback-composite}
Suppose $K$ is reductive. Consider morphisms $X \xrightarrow{f} Y \xrightarrow{g} Z$ of smooth affine $K$-varieties. There is a canonical isomorphism $(gf)^\bullet \simeq f^\bullet g^\bullet$. \end{lemma} \begin{proof}
Analogous to the non-equivariant version; see the proof of \cite[Proposition 1.5.11]{HTT08}. It boils down to standard properties of derived tensor products, but for our setting of h-complexes, the dg-counterparts are needed (cf.\ \cite[\S 12.3]{Yek20}). \end{proof}
\begin{lemma}\label{prop:pullback-amplitude}
Suppose $K$ is reductive. Let $f: X \to Y$ be a morphism between smooth affine $K$-varieties. Then $f^\bullet$ restricts to ${}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(D_Y, K) \to {}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(D_X, K)$. \end{lemma} \begin{proof}
It suffices to show that $f^\bullet$ has bounded amplitude. The functor of forgetting $K$-equivariance
\[ {}^{\mathrm{h}} \cate{C}(D_Y, K) \to {}^{\mathrm{h}} \cate{C}(D_Y, \{1\}) = \cate{C}(D_Y\dcate{Mod}) \]
is exact and preserves K-projectives by Corollary \ref{prop:change-group-K-proj}. Ditto for $Y$ replaced by $X$. Therefore the left square of the diagram
\[\begin{tikzcd}
{}^{\mathrm{h}} \cate{D}^-(D_Y, K) \arrow[d] \arrow[r, "{f^\bullet}"] & {}^{\mathrm{h}} \cate{D}^-(D_X, K) \arrow[r, "{\operatorname{H}^n}"] \arrow[d] & (D_X, K)\dcate{Mod} \arrow[d] \\
\cate{D}^-(Y) \arrow[r, "{f^\bullet}"'] & \cate{D}^-(X) \arrow[r, "{\operatorname{H}^n}"'] & D_X\dcate{Mod}
\end{tikzcd}\]
commutes up to a canonical isomorphism --- to see this, compute left h-derived functors via Corollary \ref{prop:derived-functor-composite}. So does the right square by \eqref{eqn:oblv-comm}. The amplitude of the upper $f^\bullet$ can thus be bounded by that of the lower one, which is finite by \cite[Corollary 1.4.20]{HTT08}. \end{proof}
The non-affine case is not needed in this work, and is left to the interested reader.
\subsection{Comparison with the equivariant derived category} To begin with, suppose $X$ is a smooth $K$-variety.
\begin{definition}
Let $\cate{D}^{\mathrm{b}}_K(X)$ be the \emph{bounded equivariant derived category} of $\mathscr{D}_X$-modules under $K$-action defined by Bernstein and Lunts \cite[4.2]{BL94}. It is a triangulated category endowed with a $t$-structure, whose heart is the category of $K$-equivariant $\mathscr{D}_X$-modules. \end{definition}
For details about the definition of $\cate{D}^{\mathrm{b}}_K(X)$, we refer to \cite{BL94}, \cite[2.11]{BL95}, or to the more systematic \cite[Chapter 6]{Ac21} in the setting of constructible sheaves. Below is only a sketch.
\begin{definition}
A \emph{resolution} of a $K$-variety $X$ is a pair $(P, p)$ where $P$ is a $K$-torsor and $p: P \to X$ is a smooth affine $K$-equivariant morphism. We also set $\overline{P} := P/K$. Morphisms $f: (Q, q) \to (P, p)$ are defined as commutative diagrams
\[\begin{tikzcd}
Q \arrow[r, "f"] \arrow[rd, "q"'] & P \arrow[d, "p"] \\
& X
\end{tikzcd}\]
where $f$ is a smooth $K$-equivariant morphism. From $f$ we obtain a smooth morphism $\overline{f}: \overline{Q} \to \overline{P}$. \end{definition}
\begin{example}
The \emph{trivial resolution} of $X$ is $P := X \times K$ and $p := \mathrm{pr}_1$, where $K$ acts on $P$ by $(x, h)k = (xk, k^{-1}h)$. Then $X \rightiso \overline{P}$ by mapping $x \in X$ to the orbit of $(x, 1)$. \end{example}
For the construction of sufficiently many resolutions of $X$ (namely, $n$-acyclic ones \cite[Definition 6.1.18]{Ac21} for every $n \in \ensuremath{\mathbb{Z}}_{\geq 0}$), we refer to \cite[\S 6.1]{Ac21}.
\begin{itemize}
\item The objects in $\cate{D}^{\mathrm{b}}_K(X)$ are collections $M$ of objects $M_P$ in $\cate{D}^{\mathrm{b}}(\overline{P})$ for every resolution $(P, p)$, together with isomorphisms
\[ \alpha_f: \overline{f}^\bullet M_P \rightiso M_Q \]
for each morphism $f: (Q, q) \to (P, p)$, compatibly with compositions in the sense that
\[ \alpha_g \circ \overline{g}^\bullet(\alpha_f) = \alpha_{fg}. \]
\item The morphisms $M \to N$ in $\cate{D}^{\mathrm{b}}_K(X)$ are collections of morphisms $\varphi_P: M_P \to N_P$ compatibly with various $\alpha_f$. \end{itemize}
For the description of the following structures of $\cate{D}^{\mathrm{b}}_K(X)$, see also \cite[pp.286--287]{Ac21}; note that one works with constructible sheaves in \textit{loc.\ cit.} and the smooth inverse images differ by a shift by $\mathrm{rd}(f)$.
\begin{description}
\item[Forgetting equivariance] The functor $\mathbf{oblv}: \cate{D}^{\mathrm{b}}_K(X) \to \cate{D}^{\mathrm{b}}(X)$ sends an object $M$ to $M_{X \times K}$, by taking the trivial resolution.
\item[Translation functor] Define $M[1]$ to be the collection $(M_P[1])_{(P, p)}$.
\item[Distinguished triangles] A triangle $M_1 \to M_2 \to M_3 \xrightarrow{+1}$ is said to be distinguished if $M_{1, P} \to M_{2, P} \to M_{3, P} \xrightarrow{+1}$ is, for every resolution $(P, p)$.
\item[Bounded $t$-structure] For any bounded interval $I$, an object $M$ lies in $\cate{D}^I_K(X)$ if and only if $\mathbf{oblv}(M)$ lies in $\cate{D}^I(X)$. \end{description}
More generally, let $T \subset K$ be a subgroup. There is a $t$-exact functor \[ \mathbf{oblv}^K_T: \cate{D}^{\mathrm{b}}_K(X) \to \cate{D}^{\mathrm{b}}_T(X) \] whose definition à la \cite[Definition 6.5.2]{Ac21} is sketched below.
Any resolution $(P, p)$ can be viewed as a resolution of $X$ as a $T$-variety. Let $\cate{D}^{\mathrm{b}}_{T \subset K}(X)$ be the category defined in the same way as $\cate{D}^{\mathrm{b}}_T(X)$, but allowing only the resolutions so obtained. This gives rise to a functor \[ \mathbf{oblv}^K_{T \subset K}: \cate{D}^{\mathrm{b}}_K(X) \to \cate{D}^{\mathrm{b}}_{T \subset K}(X). \] On the other hand, \cite[Lemmas 6.4.7, 6.4.8]{Ac21} realizes $\cate{D}^{\mathrm{b}}_T(X)$ as a full subcategory of $\cate{D}^{\mathrm{b}}_{T \subset K}(X)$. All these constructions being triangulated and $t$-exact, one can verify that $\mathbf{oblv}^K_{T \subset K}$ lands in $\cate{D}^{\mathrm{b}}_T(X)$ by checking on hearts, as in \cite[p.292]{Ac21}. This yields $\mathbf{oblv}^K_T$.
Moreover, it is proved in \textit{loc.\ cit.} that $\cate{D}^{\mathrm{b}}_{\{1\}}(X) \simeq \cate{D}^{\mathrm{b}}(X)$ and $\mathbf{oblv}^K_{\{1\}} \simeq \mathbf{oblv}$.
\begin{theorem}[A.\ Beilinson, see {\cite[Theorem 2.13]{BL95}}]\label{prop:Beilinson-equiv}
Let $X$ be a smooth affine $K$-variety. There is an equivalence between triangulated categories
\[ \varepsilon: {}^{\mathrm{h}}\cate{D}^{\mathrm{b}}(D_X, K) \to \cate{D}^{\mathrm{b}}_K(X) \]
with the following properties.
\begin{enumerate}[(i)]
\item For every subgroup $T \subset K$, the diagram
\[\begin{tikzcd}
{}^{\mathrm{h}}\cate{D}^{\mathrm{b}}(D_X, K) \arrow[d] \arrow[r, "\varepsilon"] & \cate{D}^{\mathrm{b}}_K(X) \arrow[d, "{\mathbf{oblv}^K_T}"] \\
{}^{\mathrm{h}}\cate{D}^{\mathrm{b}}(D_X, T) \arrow[r, "\varepsilon"'] & \cate{D}^{\mathrm{b}}_T(X)
\end{tikzcd}\]
commutes up to isomorphism; in particular, $\varepsilon$ commutes with ${}^{\mathrm{h}}\cate{D}^{\mathrm{b}}(D_X, K) \to \cate{D}^{\mathrm{b}}(X)$ and $\mathbf{oblv}$.
\item It is $t$-exact.
\item It induces identity on hearts, which are the category of equivariant $D_X$-modules on both sides.
\end{enumerate} \end{theorem} \begin{proof}
The equivalence is stated in \textit{loc.\ cit.}; the case here is simpler since $X$ is affine and we do not consider monodromic structures yet. Let us sketch the definition of $\varepsilon$ now. Given a resolution $(P, p)$, consider the functors
\begin{equation}\label{eqn:epsilon-construction}
\begin{tikzcd}
{}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(D_X, K) \arrow[r, "{p^\bullet}"] & {}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(D_P, K) & \cate{D}^{\mathrm{b}}((D_P, K)\dcate{Mod}) \arrow[l, "\sim"'] & \cate{D}^{\mathrm{b}}(\overline{P}) \arrow[l, "\sim"'] .
\end{tikzcd}
\end{equation}
The middle equivalence is the functor $\alpha$ in step 2 of the proof of \cite[Theorem 2.13]{BL95}. The rightmost equivalence comes from $D_{\overline{P}}\dcate{Mod} \simeq (D_P, K)\dcate{Mod}$, as $P \to \overline{P}$ is a $K$-torsor; it is also the functor in step 1 of the cited proof.
Suppose that an object $M$ of ${}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(D_X, K)$ is given. When $(P, p)$ varies, we obtain a compatible collection of objects of $\cate{D}^{\mathrm{b}}(\overline{P})$. Hence \eqref{eqn:epsilon-construction} yields the functor $\varepsilon$. It is triangulated since all the functors in \eqref{eqn:epsilon-construction} are.
Consider the property (i). Given $T \subset K$, define $\varepsilon_{T \subset K}: {}^{\mathrm{h}}\cate{D}^{\mathrm{b}}(D_X, T) \to \cate{D}^{\mathrm{b}}_{T \subset K}(X)$ by the same recipe \eqref{eqn:epsilon-construction}, but allowing only resolutions restricted from $K$. By the earlier review on $\mathbf{oblv}^K_T$, it suffices to show that the diagram
\[\begin{tikzcd}
{}^{\mathrm{h}}\cate{D}^{\mathrm{b}}(D_X, K) \arrow[d] \arrow[r, "\varepsilon"] & \cate{D}^{\mathrm{b}}_K(X) \arrow[d, "{\mathbf{oblv}^K_{T \subset K}}"] \\
{}^{\mathrm{h}}\cate{D}^{\mathrm{b}}(D_X, T) \arrow[r, "{\varepsilon_{T \subset K}}"'] & \cate{D}^{\mathrm{b}}_{T \subset K}(X)
\end{tikzcd}\]
commutes up to isomorphism. But this is immediate by construction.
As $\varepsilon$ is seen to commute with ${}^{\mathrm{h}}\cate{D}^{\mathrm{b}}(D_X, K) \to \cate{D}^{\mathrm{b}}(X)$ and $\mathbf{oblv}$, it is also $t$-exact, whence (ii).
Consider (iii). If $M$ is an equivariant $D_X$-module, then its image under \eqref{eqn:epsilon-construction} is the descent to $\overline{P}$ of its inverse image in $(D_P, K)\dcate{Mod}$. When $(P, p)$ varies, such a collection also gives rise to an object in the heart of $\cate{D}^{\mathrm{b}}_K(X)$ that corresponds to $M$; for details, see \cite[Theorem 6.4.10]{Ac21} and its proof. Beware of the shifts in the constructible setting considered in \cite{Ac21}. \end{proof}
\begin{corollary}\label{prop:Beilinson-equiv-Ext}
Let $X$ be a smooth affine $K$-variety. There are canonical isomorphisms
\[ {}^{\mathrm{h}} \operatorname{Ext}^n_{D_X, K}(M, N) \rightiso \operatorname{Ext}^n_{\cate{D}^{\mathrm{b}}_K(X)}(\varepsilon M, \varepsilon N) \]
for all objects $M, N$ of ${}^{\mathrm{h}}\cate{D}^{\mathrm{b}}(D_X, K)$ and all $n \in \ensuremath{\mathbb{Z}}$, compatibly with long exact sequences for $\operatorname{Ext}$. \end{corollary} \begin{proof}
Analogous to Corollary \ref{prop:BL-equiv-Ext}. \end{proof}
The inverse image functors are also defined on equivariant derived categories, and are denoted by the same symbol $f^\bullet$. They match their h-counterparts under $\varepsilon$, by the next result.
\begin{proposition}\label{prop:inverse-image-compatibility}
Assume that $K$ is reductive. Let $f: X \to Y$ be a morphism between smooth affine $K$-varieties. The following diagram commutes up to a canonical isomorphism:
\[\begin{tikzcd}
{}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(D_Y, K) \arrow[r, "\varepsilon"] \arrow[d, "{f^\bullet}"'] & \cate{D}^{\mathrm{b}}_K(Y) \arrow[d, "{f^\bullet}"] \\
{}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(D_X, K) \arrow[r, "\varepsilon"'] & \cate{D}^{\mathrm{b}}_K(X) .
\end{tikzcd}\] \end{proposition} \begin{proof}
Given a resolution $q: Q \to Y$, we form the Cartesian square
\[\begin{tikzcd}
P \arrow[r, "{\tilde{f}}"] \arrow[d, "p"'] & Q \arrow[d, "q"] \\
X \arrow[r, "f"'] & Y
\end{tikzcd}\]
then $p$ is also a resolution. Let $\overline{f}: \overline{P} \to \overline{Q}$ denote the induced morphism. Then
\begin{equation*}
\mathrm{rd}(p) = \mathrm{rd}(q), \quad \mathrm{rd}(\overline{f}) = \mathrm{rd}(f) = \mathrm{rd}(\tilde{f}).
\end{equation*}
Recall the construction \eqref{eqn:epsilon-construction} of $\varepsilon$, and consider the following diagram
\[\begin{tikzcd}
{}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(D_Y, K) \arrow[d, "{f^\bullet}"'] \arrow[r, "{q^\bullet}"] & {}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(D_Q, K) \arrow[d, "{\tilde{f}^\bullet}"] & \cate{D}^{\mathrm{b}}((D_Q, K)\dcate{Mod}) \arrow[l, "\sim"'] & \cate{D}^{\mathrm{b}}(\overline{Q}) \arrow[l, "\sim"'] \arrow[d, "{\overline{f}^\bullet}"] \\
{}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(D_X, K) \arrow[r, "{p^\bullet}"'] & {}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(D_P, K) & \cate{D}^{\mathrm{b}}((D_P, K)\dcate{Mod}) \arrow[l, "\sim"] & \cate{D}^{\mathrm{b}}(\overline{P}) \arrow[l, "\sim"] .
\end{tikzcd}\]
The left square commutes by Lemma \ref{prop:pullback-composite}. As for the right rectangle, consider a bounded complex $\overline{M}$ of $D_{\overline{Q}}$-modules. We may and do take a quasi-isomorphism $\overline{F} \to \overline{M}$ such that $\overline{F}$ is K-flat when viewed over $O_{\overline{Q}} := \Gamma(\overline{Q}, \mathscr{O}_{\overline{Q}})$. Then the induced morphism $F \to M$ between their inverse images, as complexes of $(D_Q, K)$-modules, is still a quasi-isomorphism, and $F$ is K-flat over $O_Q$ since
\[ F \dotimes{O_Q} (\cdot) \simeq \overline{F} \dotimes{O_{\overline{Q}}} O_Q \dotimes{O_Q} (\cdot) \simeq \overline{F} \dotimes{O_{\overline{Q}}} (\cdot). \]
The image of $F$ in ${}^{\mathrm{h}} \cate{C}(D_Q, K)$ can be used to compute $\tilde{f}^\bullet$, whilst $\overline{F}$ can be used to compute $\overline{f}^\bullet$. This concludes the commutativity since
\[\begin{tikzcd}
Q \arrow[r] & \overline{Q} \\
P \arrow[r] \arrow[u, "{\tilde{f}}"] & \overline{P} \arrow[u, "{\overline{f}}"']
\end{tikzcd} \quad \text{commutes.} \]
When $(Q, q)$ varies, the resulting $(P, p)$ determine objects of $\cate{D}^{\mathrm{b}}_K(X)$ by \cite[Lemma 6.4.8]{Ac21}. The assignment
\[ \left( M_Q, \ldots \right)_{(Q, q)} \mapsto \left( \overline{f}^\bullet M_Q, \ldots\right)_{(P, p)}, \]
where $M_Q$ are objects of $\cate{D}^{\mathrm{b}}(\overline{Q})$, is exactly the recipe of Bernstein--Lunts \cite{BL94} for defining $f^{\bullet}$ or its shift $f^!$ on equivariant derived categories; see \cite[\S 6.5]{Ac21}. This completes the proof. \end{proof}
Finally, suppose that $N \lhd K$ and $X$ is a smooth affine $K/N$-variety. There is an inflation functor \[ \mathrm{Infl}^K_{K/N}: \cate{D}^{\mathrm{b}}_{K/N}(X) \to \cate{D}^{\mathrm{b}}_K(X). \] The construction is based on the fact that if $p: P \to X$ is a resolution as $K$-varieties, then so is its quotient $p': P' := P/N \to X$ as $K/N$-varieties, and $\overline{P} \rightiso \overline{P'}$. It is $t$-exact.
\begin{proposition}\label{prop:Infl-epsilon}
The diagram
\[\begin{tikzcd}
{}^{\mathrm{h}}\cate{D}^{\mathrm{b}}(D_X, K/N) \arrow[d, "{\mathrm{Infl}^K_{K/N}}"'] \arrow[r, "\varepsilon"] & \cate{D}^{\mathrm{b}}_{K/N}(X) \arrow[d, "{\mathrm{Infl}^K_{K/N}}"] \\
{}^{\mathrm{h}}\cate{D}^{\mathrm{b}}(D_X, K) \arrow[r, "\varepsilon"'] & \cate{D}^{\mathrm{b}}_K(X)
\end{tikzcd}\]
commutes up to isomorphism, where the $\mathrm{Infl}^K_{K/N} := \mathrm{Infl}^{D_X, K}_{D_X, K/N}$ on the left is defined in \S\ref{sec:inv-coinv}. \end{proposition} \begin{proof}
Given a resolution $(P, p)$ as $K$-varieties, define $(P', p')$ as above. By the construction \eqref{eqn:epsilon-construction} of $\varepsilon$, it suffices to check the commutativity of
\begin{equation*}
\begin{tikzcd}
{}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(D_X, K/N) \arrow[r, "{(p')^\bullet}"] \arrow[d, "{\mathrm{Infl}^K_{K/N}}"'] & {}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(D_{P'}, K/N) \arrow[d] & \cate{D}^{\mathrm{b}}((D_{P'}, K/N)\dcate{Mod}) \arrow[l, "\sim"'] \arrow[d] & \cate{D}^{\mathrm{b}}(\overline{P'}) \arrow[l, "\sim"'] \arrow[d] \\
{}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(D_X, K) \arrow[r, "{p^\bullet}"] & {}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(D_P, K) & \cate{D}^{\mathrm{b}}((D_P, K)\dcate{Mod}) \arrow[l, "\sim"'] & \cate{D}^{\mathrm{b}}(\overline{P}) \arrow[l, "\sim"']
\end{tikzcd}
\end{equation*}
up to isomorphism, where the vertical arrows except the first one are inverse images via $P \to P'$ and $\overline{P} \rightiso \overline{P'}$. This is clear. \end{proof}
\section{Localization and higher regularity}\label{sec:Loc} \subsection{Derived localization functor}\label{sec:Loc-functor} Let $G$ be a connected reductive group, and let $K \subset G$ be a reductive subgroup. Let $X$ be an affine smooth $G$-variety.
From this we obtain a pair $(\mathfrak{g}, K)$ as in \S\ref{sec:gK-basic}: the homomorphism $\operatorname{Ad}: K \to \operatorname{Aut}(\mathfrak{g})$ is just the adjoint action, and $\iota: \mathfrak{k} \to \mathfrak{g}$ is the inclusion. Recall that we write $(\mathfrak{g}, K)$ instead of $(U(\mathfrak{g}), K)$ for the pairs.
On the other hand, we also obtain $(D_X, G)$ and its subpair $(D_X, K)$. The constructions in \S\ref{sec:D-basic} are applicable.
\begin{proposition}\label{prop:j-pair}
The map $j: U(\mathfrak{g}) \to D_X$ induced from the $G$-action on $X$ induces a morphism $(\mathfrak{g}, K) \to (D_X, K)$ of pairs in the sense of \S\ref{sec:HC-dga}. \end{proposition} \begin{proof}
The map $j$ is $K$-equivariant since $K \subset G$. The requirement that the composite $\mathfrak{k} \to U(\mathfrak{g}) \xrightarrow{j} D_X$ equals the structural map $\mathfrak{k} \to D_X$ determined by the $K$-action is immediate: both send $\xi \in \mathfrak{k}$ to the vector field generated by $\xi$. \end{proof}
Therefore we have the change-of-algebra dg-functor in Proposition \ref{prop:adjoint-h-oblv} (i): \[ D_X \dotimes{U(\mathfrak{g})} (\cdot): {}^{\mathrm{h}} \cate{C}(\mathfrak{g}, K) \to {}^{\mathrm{h}} \cate{C}(D_X, K). \] Being left adjoint to oblivion, it preserves K-projectives.
\begin{definition}
Denote by $\mathbf{Loc}_X = \mathbf{Loc}_{X, K}: {}^{\mathrm{h}} \cate{D}(\mathfrak{g}, K) \to {}^{\mathrm{h}} \cate{D}(D_X, K)$ the h-derived functor of $D_X \dotimes{U(\mathfrak{g})} (\cdot)$, called the \emph{h-derived localization functor}. \end{definition}
Since $K$ is reductive, the definition makes sense: ${}^{\mathrm{h}} \cate{K}(\mathfrak{g}, K)$ has enough K-projectives, namely the ones from standard resolutions. The functor $\mathbf{Loc}_X$ is right $t$-exact.
\begin{remark}
For general smooth $G$-variety, one can also define $\mathbf{Loc}_X$ with the sheafified pair $(\mathscr{D}_X, K)$. The Beilinson--Bernstein localization corresponds to the case when $X = \mathcal{B}$ is the flag variety, allowing twisted differential operators, and considering only $\operatorname{H}^0 \mathbf{Loc}_{\mathcal{B}}$. \end{remark}
\begin{proposition}
The triangulated functor $\mathbf{Loc}_X$ has amplitude in $[-\dim G, 0]$. In particular, it restricts to
\[ \mathbf{Loc}_X: {}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(\mathfrak{g}, K) \to {}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(D_X, K). \] \end{proposition} \begin{proof}
Use standard resolutions (Theorem \ref{prop:N-resolution-gen}). \end{proof}
\begin{proposition}\label{prop:Loc-oblv}
Let $T$ be a reductive subgroup of $K$. The diagram below commutes up to canonical isomorphism:
\[\begin{tikzcd}[column sep=large]
{}^{\mathrm{h}} \cate{D}(\mathfrak{g}, K) \arrow[r, "{\mathbf{Loc}_{X, K}}"] \arrow[d] & {}^{\mathrm{h}} \cate{D}(D_X, K) \arrow[d] \\
{}^{\mathrm{h}} \cate{D}(\mathfrak{g}, T) \arrow[r, "{\mathbf{Loc}_{X, T}}"'] & {}^{\mathrm{h}} \cate{D}(D_X, T).
\end{tikzcd}\]
In particular, the same holds for
\[\begin{tikzcd}[column sep=large]
{}^{\mathrm{h}} \cate{D}(\mathfrak{g}, K) \arrow[r, "{\mathbf{Loc}_{X, K}}"] \arrow[d] & {}^{\mathrm{h}} \cate{D}(D_X, K) \arrow[d] \\
\cate{D}(\mathfrak{g}\dcate{Mod}) \arrow[r, "{D_X \otimesL[U(\mathfrak{g})] (\cdot)}"'] & \cate{D}(X) .
\end{tikzcd}\] \end{proposition} \begin{proof}
The starting point is the commutativity of
\[\begin{tikzcd}
{}^{\mathrm{h}} \cate{C}(\mathfrak{g}, K) \arrow[r] \arrow[d] & {}^{\mathrm{h}} \cate{C}(D_X, K) \arrow[d] \\
{}^{\mathrm{h}} \cate{C}(\mathfrak{g}, T) \arrow[r] & {}^{\mathrm{h}} \cate{C}(D_X, T).
\end{tikzcd}\]
The vertical arrows are exact and preserve K-projectives by Corollary \ref{prop:change-group-K-proj}. Taking left h-derived functors via Corollary \ref{prop:derived-functor-composite} gives the desired result. \end{proof}
\begin{proposition}
Let $V$ be a $(\mathfrak{g}, K)$-module. There is a canonical isomorphism of $K$-equivariant $D_X$-modules
\[ \operatorname{H}^0 \mathbf{Loc}_X(V) \simeq D_X \dotimes{U(\mathfrak{g})} V; \]
on the right-hand side, $D_X$ acts by left multiplication and $K$ acts diagonally. \end{proposition} \begin{proof}
As $K$ is reductive, one can take the standard resolution $P := V \otimes N\mathfrak{g} \to V$. Now take the h-complex $D_X \dotimes{U(\mathfrak{g})} P$. Its $\operatorname{H}^0$ yields $D_X \dotimes{U(\mathfrak{g})} V$. \end{proof}
In view of this result, $\mathbf{Loc}_X$ or its cohomologies can be seen as ``higher localization''.
The localization functor has the following symmetries. Let $D_X^G$ be the algebra of $G$-invariant differential operators on $X$. For every $z \in D_X^G$ and every h-complex $P$ over $(D_X, K)$, we obtain an endomorphism $\mathcal{R}_z$ of the h-complex $D_X \dotimes{U(\mathfrak{g})} P$ given by \begin{align*}
\mathcal{R}_z^n: D_X \dotimes{U(\mathfrak{g})} P^n & \to D_X \dotimes{U(\mathfrak{g})} P^n \\
D \otimes p & \mapsto Dz \otimes p. \end{align*} This is functorial in $P$ and passes to the h-derived category, giving rise to a homomorphism of algebras \begin{equation}\label{eqn:Z-action}
\mathcal{R}: (D_X^G)^{\mathrm{op}} \to \operatorname{End}_{\mathrm{functors}}(\mathbf{Loc}_X). \end{equation} In fact, the construction works over any $G$-variety $X$ in the sheafified context. It is also compatible with oblivion, relative to the diagram in Proposition \ref{prop:Loc-oblv}.
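Two small verifications, recorded for convenience. First, $\mathcal{R}_z^n$ is well defined on the tensor product over $U(\mathfrak{g})$: differentiating the $G$-invariance ${}^g z = z$ gives $[j(\theta), z] = 0$ for all $\theta \in \mathfrak{g}$ (by the analogue for the pair $(D_X, G)$ of the last condition in \S\ref{sec:gK-basic}), so $z$ commutes with the image of $U(\mathfrak{g})$ and
\[ D j(u) z \otimes p = D z j(u) \otimes p = Dz \otimes up, \quad u \in U(\mathfrak{g}). \]
Second, $\mathcal{R}_{z_1 z_2} = \mathcal{R}_{z_2} \circ \mathcal{R}_{z_1}$, since right multiplications compose in the reversed order; this explains the appearance of $(D_X^G)^{\mathrm{op}}$ in \eqref{eqn:Z-action}.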
\subsection{Localization and co-invariants}\label{sec:Loc-coinv} Consider a homogeneous $G$-space $X = H \backslash G$, and let $x$ be the point $H \cdot 1$ of $X$.
The arguments in \S\ref{sec:Ext-application} will rely crucially on the following fact relating localizations and co-invariants. We do not assume $G$ is reductive or $X$ is affine here.
\begin{proposition}\label{prop:Loc-coinv}
For every $\mathfrak{g}$-module $V$, we have the canonical isomorphism
\begin{align*}
V/\mathfrak{h}V & \rightiso \ensuremath{\mathbb{C}} \dotimes{\mathscr{O}_x} \left( \mathscr{D}_X \dotimes{U(\mathfrak{g})} V \right)_x \\
v + \mathfrak{h}V & \mapsto 1 \otimes (1 \otimes v),
\end{align*}
where $\mathscr{O}_x$ is the local ring at $x$ and the $\otimes$ is relative to the evaluation map at $x$. \end{proposition} \begin{proof}
See \cite[Lemma 2.2]{BZG19}. \end{proof}
Let $K^H$ be a subgroup of $H$, acting trivially on $\ensuremath{\mathbb{C}}$. Several observations are in order. \begin{itemize}
\item If $V$ is a weak $(\mathfrak{g}, K^H)$-module, the isomorphism in Proposition \ref{prop:Loc-coinv} is $K^H$-equivariant. Thus we obtain an isomorphism between two dg-functors from ${}^{\mathrm{w}} \cate{C}(\mathfrak{g}, K^H)$ to ${}^{\mathrm{w}} \cate{C}(\ensuremath{\mathbb{C}}, K^H)$.
\item Furthermore, the functors $V \mapsto V/\mathfrak{h}V$ and $V \mapsto \ensuremath{\mathbb{C}} \dotimes{\mathscr{O}_x} \left( \mathscr{D}_X \dotimes{U(\mathfrak{g})} V \right)_x$ both lift to the level of h-complexes
\[ {}^{\mathrm{h}} \cate{C}(\mathfrak{g}, K^H) \to {}^{\mathrm{h}} \cate{C}(\ensuremath{\mathbb{C}}, K^H). \]
In fact, the dg-functor induced by $V \mapsto V/\mathfrak{h}V$ equals the $\mathrm{coInv}^{U(\mathfrak{h}), K^H}_{\ensuremath{\mathbb{C}}, K^H}$ from Proposition \ref{prop:Inv-coInv} (in the h-setting), where $\ensuremath{\mathbb{C}}$ is viewed as the quotient of $U(\mathfrak{h})$ by augmentation ideal.
\item Since the data $i_\xi$ in an h-complex over $(\mathfrak{g}, K^H)$ are $\mathfrak{g}$-linear, and the isomorphism in Proposition \ref{prop:Loc-coinv} is natural in the $\mathfrak{g}$-module $V$, it also yields an isomorphism between these functors on the level of h-complexes. \end{itemize}
Observe that $\ensuremath{\mathbb{C}} = D_{\mathrm{pt}}$, and $\ensuremath{\mathbb{C}} \dotimes{\mathscr{O}_x} (\cdot)_x$ equals the non-derived inverse image of $\mathscr{D}_X$-modules via the inclusion $i_x: \mathrm{pt} \to X$ of the point $x$. The next result is immediate.
\begin{lemma}\label{prop:Loc-coInv-prep}
Assume that $X$ is affine. Let $K^H$ be a subgroup of $H$. The following diagram commutes up to a canonical isomorphism.
\[\begin{tikzcd}[column sep=large]
{}^{\mathrm{h}} \cate{C}(\mathfrak{g}, K^H) \arrow[d] \arrow[r, "{D_X \dotimes{U(\mathfrak{g})} (\cdot)}"] & {}^{\mathrm{h}} \cate{C}(D_X, K^H) \arrow[d, "{\ensuremath{\mathbb{C}} \dotimes{\mathscr{O}_x} (\cdot)_x}"] \\
{}^{\mathrm{h}} \cate{C}(\mathfrak{h}, K^H) \arrow[r, "{\mathrm{coInv}^{U(\mathfrak{h}), K^H}_{\ensuremath{\mathbb{C}}, K^H}}"' inner sep=0.6em] & {}^{\mathrm{h}} \cate{C}(D_{\mathrm{pt}}, K^H)
\end{tikzcd}\] \end{lemma}
When $K^H$ is reductive, one can take the left h-derived functor $\operatorname{L}\! \left(\mathrm{coInv}^{U(\mathfrak{h}), K^H}_{\ensuremath{\mathbb{C}}, K^H}\right)$ on the bounded-above h-derived categories.
\begin{proposition}\label{prop:Loc-coInv}
Assume that $X$ is affine. Let $K$ (resp.\ $K^H$) be a reductive subgroup of $G$ (resp.\ $H$), and denote $\mathrm{oblv}^K_{K^H}$ the oblivion ${}^{\mathrm{h}} \cate{D}(D_X, K) \to {}^{\mathrm{h}} \cate{D}(D_X, K^H)$. There are canonical isomorphisms
\begin{align*}
i_x^\bullet \mathbf{Loc}_{X, K^H} & \simeq \operatorname{L}\! \left(\mathrm{coInv}^{U(\mathfrak{h}), K^H}_{\ensuremath{\mathbb{C}}, K^H}\right), \\
i_x^\bullet \left( \mathrm{oblv}^K_{K^H} \mathbf{Loc}_{X, K}(M)\right) & \simeq \operatorname{L}\! \left(\mathrm{coInv}^{U(\mathfrak{h}), K^H}_{\ensuremath{\mathbb{C}}, K^H}\right)(M|_H),
\end{align*}
where $M$ stands for an object of ${}^{\mathrm{h}} \cate{D}^-(\mathfrak{g}, K)$ and $M|_H$ denotes its image in ${}^{\mathrm{h}} \cate{D}^-(\mathfrak{h}, K^H)$. \end{proposition} \begin{proof}
It suffices to prove the first isomorphism, since the second follows by commuting $\mathbf{Loc}_X$ and oblivion using Proposition \ref{prop:Loc-oblv}.
Note that the functors emitting from ${}^{\mathrm{h}} \cate{C}(\mathfrak{g}, K^H)$ in the diagram of Lemma \ref{prop:Loc-coInv-prep} preserve K-projectives, by Lemma \ref{prop:restriction-lemma}. It remains to take left h-derived functors in two ways, which give isomorphic results. \end{proof}
Finally, $D_X^G$ acts on the right of $i_x^\bullet \mathbf{Loc}_{X, K^H}$ through its action \eqref{eqn:Z-action} on $\mathbf{Loc}_{X, K^H}$. Let us relate it to co-invariants.
\begin{proposition}
Let $z \in D_X^G$. Via Proposition \ref{prop:Loc-coInv}, the $z$-action on $i_x^\bullet \mathbf{Loc}_{X, K^H}$ arises from the following endomorphism of the functor $\mathrm{coInv}^{U(\mathfrak{h}), K^H}_{\ensuremath{\mathbb{C}}, K^H}$. Near $x$, we can express $z$ as the image of some $f \otimes u$ under the homomorphism
\[ \mathscr{O}_X \otimes U(\mathfrak{g}) \to \mathscr{D}_X, \quad f \otimes u \mapsto f j(u) \]
where $j: U(\mathfrak{g}) \to D_X$ is the natural map. For every h-complex $P$ over $(\mathfrak{h}, K^H)$, we let $z$ act in each degree $n$ by
\begin{align*}
P^n / \mathfrak{h} P^n & \to P^n / \mathfrak{h}P^n \\
p + \mathfrak{h}P^n & \mapsto f(x) \cdot up + \mathfrak{h} P^n.
\end{align*} \end{proposition} \begin{proof}
The action $\ensuremath{\mathrm{id}} \otimes \mathcal{R}_z^n$ on $\ensuremath{\mathbb{C}} \dotimes{\mathscr{O}_x} D_X \dotimes{U(\mathfrak{g})} P^n$ depends only on the behavior of $z$ near $x$. Therefore it maps
\[ 1 \otimes 1 \otimes p \mapsto 1 \otimes f j(u) \otimes p = f(x) \otimes 1 \otimes up. \]
It remains to compare this with Proposition \ref{prop:Loc-coinv}. \end{proof}
\subsection{Statement of higher regularity} The assumptions on $(\mathfrak{g}, K)$ and $X$ from \S\ref{sec:Loc-functor} remain in force.
\begin{definition}
An object of ${}^{\mathrm{h}} \cate{D}(D_X, K)$ is said to be \emph{holonomic} (resp.\ \emph{regular holonomic}) if all its cohomologies are holonomic (resp.\ regular holonomic) as $D_X$-modules. This gives rise to a full triangulated subcategory ${}^{\mathrm{h}} \cate{D}_{\mathrm{h}}(D_X, K)$ (resp.\ ${}^{\mathrm{h}} \cate{D}_{\mathrm{rh}}(D_X, K)$) of ${}^{\mathrm{h}} \cate{D}(D_X, K)$, and one may impose boundedness conditions as well. \end{definition}
Inside the equivariant derived category $\cate{D}^{\mathrm{b}}_K(X)$, we also have $\cate{D}^{\mathrm{b}}_{K, \mathrm{h}}(X)$ (resp.\ $\cate{D}^{\mathrm{b}}_{K, \mathrm{rh}}(X)$), the full triangulated subcategory of objects with holonomic (resp.\ regular holonomic) cohomologies. It matches ${}^{\mathrm{h}} \cate{D}^{\mathrm{b}}_{\mathrm{h}}(D_X, K)$ (resp.\ ${}^{\mathrm{h}} \cate{D}^{\mathrm{b}}_{\mathrm{rh}}(D_X, K)$) under Beilinson's equivalence (Theorem \ref{prop:Beilinson-equiv}).
For every $D_X$-module $L$, denote by $\mathrm{Ch}(L) \subset T^* X$ its characteristic variety. We have the \emph{moment map} \[ \bm{\mu}: T^* X \to \mathfrak{g}^*. \]
By choosing a base point, every homogeneous $G$-space takes the form $H \backslash G$, and $T^*(H \backslash G) \simeq \mathfrak{h}^\perp \utimes{H} G$. Then $\bm{\mu}$ maps $[\lambda, g]$ to $\operatorname{Ad}^*(g^{-1}) \lambda$ (co-adjoint action) for all $\lambda \in \mathfrak{h}^\perp$ and $g \in G$.
Let $\mathcal{N} \subset \mathfrak{g}^*$ denote the nilpotent cone.
\begin{definition}\label{def:rh-plus}
Let ${}^{\mathrm{h}} \cate{D}^{\mathrm{b}}_{\mathrm{rh}+}(D_X, K)$ be the full subcategory of ${}^{\mathrm{h}} \cate{D}^{\mathrm{b}}_{\mathrm{rh}}(D_X, K)$ consisting of objects $L$ such that
\[ \mathrm{Ch}\left( \operatorname{H}^n(L) \right) \subset \bm{\mu}^{-1}\left( \mathcal{N} \cap \mathfrak{k}^\perp \right) \]
for all $n \in \ensuremath{\mathbb{Z}}$, where $\operatorname{H}^n(L)$ is viewed merely as a $D_X$-module. \end{definition}
This is a full triangulated subcategory by standard properties of characteristic varieties \cite[\S 2.2]{HTT08}. Again, it has the counterpart $\cate{D}^{\mathrm{b}}_{K, \mathrm{rh}+}(X)$ inside $\cate{D}^{\mathrm{b}}_{K, \mathrm{rh}}(X)$.
\begin{definition}
A normal $G$-variety $X$ is said to be \emph{spherical} if there is an open $B$-orbit in $X$ for some (equivalently, any) Borel subgroup $B \subset G$. A subgroup $H \subset G$ is said to be \emph{spherical} if the homogeneous $G$-space $H \backslash G$ is spherical.
We are ready to state the higher regularity of localizations.
\begin{theorem}\label{prop:regularity}
Let $G$ be a connected reductive group. Suppose that
\begin{itemize}
\item $X$ is an affine spherical homogeneous $G$-space,
\item $K$ is a reductive spherical subgroup of $G$.
\end{itemize}
Let $V$ be a Harish-Chandra module over $(\mathfrak{g}, K)$ (Definition \ref{def:HC-module}), then $\mathbf{Loc}_X(V)$ lies in ${}^{\mathrm{h}} \cate{D}^{\mathrm{b}}_{\mathrm{rh}+}(D_X, K)$. \end{theorem}
The proof will be split into three chunks in \S\ref{sec:regularity-criterion}, the hardest of which will be settled in \S\ref{sec:end-of-regularity}.
\begin{corollary}\label{prop:regularity-gen}
Under the assumptions of Theorem \ref{prop:regularity}, let ${}^{\mathrm{h}}\cate{D}^{\mathrm{b}}_{\mathrm{HC}}(\mathfrak{g}, K)$ be the full triangulated subcategory of ${}^{\mathrm{h}}\cate{D}^{\mathrm{b}}(\mathfrak{g}, K)$ consisting of objects whose cohomologies are Harish-Chandra $(\mathfrak{g}, K)$-modules. Then $\mathbf{Loc}_X$ restricts to
\[ {}^{\mathrm{h}}\cate{D}^{\mathrm{b}}_{\mathrm{HC}}(\mathfrak{g}, K) \to {}^{\mathrm{h}}\cate{D}^{\mathrm{b}}_{\mathrm{rh}+}(D_X, K). \] \end{corollary} \begin{proof}
Since $\mathbf{Loc}_X$ is triangulated, one can truncate and shift to reduce to the case of Harish-Chandra $(\mathfrak{g}, K)$-modules. \end{proof}
We remark that in view of Theorems \ref{prop:BL-equiv} and \ref{prop:Beilinson-equiv}, the result above can be rephrased in terms of ``usual'' derived categories as \[ \mathbf{Loc}_X: \cate{D}^{\mathrm{b}}_{\mathrm{HC}}(\mathfrak{g}, K) \to \cate{D}^{\mathrm{b}}_{K, \mathrm{rh}+}(X). \] However, the construction of $\mathbf{Loc}_X$ passes through h-derived categories.
\begin{remark}
Take $X = H \backslash G$ for some subgroup $H \subset G$. By Matsushima's criterion \cite[Theorem 3.8]{Ti11}, $H \backslash G$ is affine if and only if $H$ is reductive. The conditions in Theorem \ref{prop:regularity} are thus symmetric in $H$ and $K$: both are required to be spherical and reductive. One can also view $\mathbf{Loc}_{H \backslash G}(V)$ as an object of the derived category of the stack $H \backslash G / K$. \end{remark}
\begin{example}
In order to appreciate the property of characteristic varieties in Definition \ref{def:rh-plus}, let us take
\begin{itemize}
\item $H$: a connected reductive group, embedded diagonally in $G := H \times H$,
\item $K := H$ as a subgroup of $G$,
\item $X := H$ with $G$ acting by $x(h_1, h_2) = h_2^{-1} x h_1$.
\end{itemize}
Note that $X \simeq H \backslash G$ by choosing $1$ as the base point, and $H \backslash G / K$ becomes the adjoint quotient stack $\frac{H}{H}$.
The $(\mathfrak{g}, K)$-modules in this case give rise to \textit{Harish-Chandra bimodules}. They are closely related to harmonic analysis on $H(\ensuremath{\mathbb{C}})$.
All conditions in Theorem \ref{prop:regularity} are met in this case. Given a Harish-Chandra $(\mathfrak{g}, K)$-module $V$ and $n \in \ensuremath{\mathbb{Z}}$, the irreducible constituents of the $\operatorname{Ad}$-equivariant $D_H$-module $\operatorname{H}^n \mathbf{Loc}_H(V)$ are actually \emph{character $D_H$-modules}, i.e.\ regular holonomic $D_H$-modules corresponding to \emph{character sheaves} via Riemann--Hilbert (see \cite[\S 2.1]{MV88}, and also \cite{Gin89}). Indeed, this follows from the condition on $\mathrm{Ch}\left(\operatorname{H}^n \mathbf{Loc}_H(V)\right)$ and \cite[Theorem 4.4]{MV88}. \end{example}
\subsection{Criterion of regularity}\label{sec:regularity-criterion} Let $K \subset G$ and $X$ be as in Theorem \ref{prop:regularity}. Our proof of Theorem \ref{prop:regularity} is based on the result below from \cite{Li22}, which is a variant of Ginzburg's \cite[Corollary 8.9.1]{Gin89}.
\begin{theorem}\label{prop:reg-criterion}
Suppose that $M$ is a $D_X$-module with the following properties:
\begin{enumerate}[(R1)]
\item $M$ is finitely generated over $D_X$;
\item $M$ carries a structure of $K$-equivariant $D_X$-module;
\item $M$ is locally $\mathcal{Z}(\mathfrak{g})$-finite, where $\mathcal{Z}(\mathfrak{g})$ acts through the homomorphism $j: U(\mathfrak{g}) \to D_X$ of algebras.
\end{enumerate}
Then $M$ is regular holonomic, and $\mathrm{Ch}(M) \subset \bm{\mu}^{-1}(\mathcal{N} \cap \mathfrak{k}^\perp)$. \end{theorem} \begin{proof}
This is an instance of \cite[Proposition 3.4, Corollary 5.7]{Li22}. \end{proof}
We can now describe the strategy for proving Theorem \ref{prop:regularity}.
\begin{proof}[Proof of Theorem \ref{prop:regularity}]
Let $V$ be a Harish-Chandra $(\mathfrak{g}, K)$-module. Set $\mathcal{L} := \mathbf{Loc}_{X, \{1\}}(V)$, which is also the image of $\mathbf{Loc}_{X, K}(V)$ under ${}^{\mathrm{h}} \cate{D}(D_X, K) \to \cate{D}(X)$ by Proposition \ref{prop:Loc-oblv}. We shall verify (R1) --- (R3) for the cohomologies of $\mathcal{L}$.
First, we identify $\mathcal{L}$ with $D_X \otimesL[U(\mathfrak{g})] V$. Since $V$ is finitely generated over $\mathfrak{g}$ and $U(\mathfrak{g})$ is left (and right) Noetherian, there is a free resolution over $\mathfrak{g}$:
\[ \cdots \to U(\mathfrak{g})^{\oplus n_1} \to U(\mathfrak{g})^{\oplus n_0} \to V \to 0 , \quad n_i \in \ensuremath{\mathbb{Z}}_{\geq 0}. \]
Hence $D_X \otimesL[U(\mathfrak{g})] V$ is represented by the complex
\[ \cdots \to D_X^{\oplus n_{i+1}} \to D_X^{\oplus n_i} \to \cdots . \]
Since $D_X$ is left (and right) Noetherian by \cite[Proposition 1.4.6]{HTT08}, the cohomologies of the complex above are finitely generated $D_X$-modules. This verifies (R1).
As for (R2), from \eqref{eqn:oblv-comm} we see that $\operatorname{H}^n(\mathcal{L})$ is the image of $\operatorname{H}^n(\mathbf{Loc}_{X, K}(V))$, for all $n$.
What remains is (R3); this will be settled by Proposition \ref{prop:local-Zg-finiteness}. \end{proof}
Note that the case of zeroth cohomology, i.e.\ the non-derived $D_X \dotimes{U(\mathfrak{g})} V$, has been addressed in \cite[Example 5.5 (iii)]{Li22}.
\subsection{End of the proof}\label{sec:end-of-regularity} Hereafter, $G$ is a connected reductive group, and $X$ is a smooth affine $G$-variety.
We say a left module $M$ over a commutative algebra $A$ is \emph{locally $A$-finite} if it is the union of finite-dimensional $A$-submodules.
\begin{lemma}\label{prop:Zg-finite-ses}
Suppose $A$ is a finitely generated commutative algebra. For every short exact sequence $0 \to N' \to N \to N'' \to 0$ of $A$-modules, we have: $N$ is locally finite if and only if $N'$ and $N''$ are both locally finite. \end{lemma} \begin{proof}
It suffices to explain the ``if'' part. Let $x \in N$. Its image in $N''$ is annihilated by an ideal $I$ of finite codimension (as a vector subspace), hence $Ix \subset N'$. Since $A$ is Noetherian, $I$ is finitely generated, hence there exist ideals $I_1, \ldots, I_k$ of finite codimension such that $I_1 \cdots I_k I x = 0$. However $I_1 \cdots I_k I$ is also of finite codimension: if ideals $I', I'' \subset A$ have finite codimension, then $I'/I'I''$ is a finitely generated module (as $I'$ is finitely generated) over the finite-dimensional algebra $A/I''$, hence finite-dimensional, and so is $A/I'I''$; now iterate. Therefore $Ax \simeq A/\operatorname{Ann}_A(x)$ is finite-dimensional, and $N$ is locally finite. \end{proof}
\begin{lemma}\label{prop:alpha-surj}
Let $M$ be a $D_X$-module, thus also a $U(\mathfrak{g})$-module via the homomorphism $j: U(\mathfrak{g}) \to D_X$. Let $M^\natural \subset M$ be a $\mathfrak{g}$-submodule. Then the map
\[\begin{tikzcd}[row sep=tiny]
\alpha: D_X \otimes M^\natural \arrow[r] & M \\
P \otimes m \arrow[r, mapsto] & Pm
\end{tikzcd}\]
is a homomorphism of $\mathfrak{g}$-modules, if we let $\theta \in \mathfrak{g}$ act on $P \in D_X$ by $\theta \odot P := [j(\theta), P]$. \end{lemma} \begin{proof}
Omit $j$ to simplify notation. Given $\theta \in \mathfrak{g}$, we have
\begin{align*}
\theta \left(\alpha(P \otimes m)\right) & = \theta(Pm) = (\theta P) m - (P\theta) m + P(\theta m) \\
& = [\theta, P] m + P(\theta m) \\
& = \alpha\left( (\theta \odot P) \otimes m \right) + \alpha(P \otimes \theta m) \\
& = \alpha\left( \theta (P \otimes m) \right)
\end{align*}
by the standard definition of the $\mathfrak{g}$-module structure on $D_X \otimes M$. \end{proof}
\begin{lemma}\label{prop:tensor-Kostant}
Let $M$ be a $D_X$-module as before, and let $M^\flat \subset M$ be a $\mathcal{Z}(\mathfrak{g})$-submodule that is locally finite, and generates $M$ over $D_X$. Then $M$ is locally $\mathcal{Z}(\mathfrak{g})$-finite. \end{lemma} \begin{proof}
Take the $\mathfrak{g}$-submodule $M^\natural := U(\mathfrak{g}) M^\flat$ of $M$. Note that $M^\natural$ is locally $\mathcal{Z}(\mathfrak{g})$-finite, and generates $M$ over $D_X$. Note that $G$ acts algebraically on $D_X$ by $P \xmapsto{g} gPg^{-1}$; this is indeed clear, and is included in Proposition \ref{prop:j-pair}. Its derivative is the action $\odot$ in Lemma \ref{prop:alpha-surj}, hence $D_X$ is a union of finite-dimensional $\mathfrak{g}$-submodules.
Consider now the $\mathfrak{g}$-linear surjection $\alpha: D_X \otimes M^\natural \twoheadrightarrow M$ from Lemma \ref{prop:alpha-surj}. By a theorem of Kostant \cite[Theorem 7.133]{KV95} (and taking $\varinjlim$), the $\mathfrak{g}$-module $D_X \otimes M^\natural$ is seen to be locally $\mathcal{Z}(\mathfrak{g})$-finite. Hence so is $M$. \end{proof}
Note that in Lemma \ref{prop:tensor-Kostant}, if $M^\flat$ has infinitesimal character $\chi$, then $M$ as a $\mathcal{Z}(\mathfrak{g})$-module is supported on $\chi + \{\text{weights}\}$. Here we describe $\operatorname{Spec}\mathcal{Z}(\mathfrak{g})$ via Harish-Chandra's isomorphism.
For every homomorphism of algebras $\chi: \mathcal{Z}(\mathfrak{g}) \to \ensuremath{\mathbb{C}}$, we have the ideal \[ \mathfrak{m}_\chi := \operatorname{ker}(\chi) \subset \mathcal{Z}(\mathfrak{g}), \] and we say a $\mathfrak{g}$-module has \emph{infinitesimal character} $\chi$ if $\mathcal{Z}(\mathfrak{g})$ acts through $\chi$. For example, the $\mathfrak{g}$-module below has infinitesimal character $\chi$: \begin{equation}\label{eqn:Mchi}
M_\chi := U(\mathfrak{g}) / \mathfrak{m}_\chi U(\mathfrak{g}). \end{equation}
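To illustrate the construction in the smallest case (purely for orientation): for $\mathfrak{g} = \mathfrak{sl}_2$ one has $\mathcal{Z}(\mathfrak{g}) = \ensuremath{\mathbb{C}}[\Omega]$ with $\Omega$ the Casimir element, a homomorphism $\chi$ is determined by the scalar $c := \chi(\Omega)$, and
\[ \mathfrak{m}_\chi = (\Omega - c), \qquad M_\chi = U(\mathfrak{sl}_2) \big/ (\Omega - c)\, U(\mathfrak{sl}_2). \]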
For every $\mathfrak{g}$-module $V$, define the $D_X$-modules \[ \operatorname{Tor}^{U(\mathfrak{g})}_n\left( D_X, V \right) := \operatorname{H}^{-n}\left( D_X \otimesL[U(\mathfrak{g})] V \right), \quad n \in \ensuremath{\mathbb{Z}}. \]
\begin{definition}\label{def:ZX}
Following \cite[pp.254--255]{Kn94}, we define $\mathcal{Z}(X)$ to be the center of $D_X^G$. It is known to coincide with $D_X^G$ when $X$ is spherical; see \textit{loc.\ cit.} \end{definition}
Note that $j: U(\mathfrak{g}) \to D_X$ restricts to $\mathcal{Z}(\mathfrak{g}) \to \mathcal{Z}(X)$.
\begin{lemma}\label{prop:Mchi-Zg-finiteness}
Given $\chi$, the $D_X$-modules $\operatorname{Tor}^{U(\mathfrak{g})}_n\left( D_X, M_\chi \right)$ are locally $\mathcal{Z}(\mathfrak{g})$-finite for all $n \in \ensuremath{\mathbb{Z}}$, where $M_\chi$ is as in \eqref{eqn:Mchi}. \end{lemma} \begin{proof}
This relies critically on some results of B.\ Kostant and F.\ Knop. We will perform change-of-rings through the commutative diagram
\[\begin{tikzcd}[column sep=small, row sep=small]
& D_X & \\
\mathcal{Z}(X) \arrow[hookrightarrow, ru, "\text{free}"] & & U(\mathfrak{g}) \arrow[lu, "j"'] \\
& \mathcal{Z}(\mathfrak{g}) \arrow[lu, "j"] \arrow[hookrightarrow, ru, "\text{free}"'] &
\end{tikzcd}\]
of algebras; here ``free'' means free as left and right modules. Indeed,
\begin{itemize}
\item the freeness of $U(\mathfrak{g})$ over $\mathcal{Z}(\mathfrak{g})$ is due to Kostant --- see \cite[Theorem 7.114]{KV95},
\item the freeness of $D_X$ over $\mathcal{Z}(X)$ is due to Knop \cite[Theorem 9.5 (c)]{Kn94}.
\end{itemize}
Using the $\mathcal{Z}(\mathfrak{g})$-flatness of $U(\mathfrak{g})$, inside $\cate{D}(\mathfrak{g}\dcate{Mod})$ we have
\begin{equation*}
M_\chi \simeq U(\mathfrak{g}) \dotimes{\mathcal{Z}(\mathfrak{g})} \frac{\mathcal{Z}(\mathfrak{g})}{\mathfrak{m}_\chi} \simeq U(\mathfrak{g}) \otimesL[\mathcal{Z}(\mathfrak{g})] \frac{\mathcal{Z}(\mathfrak{g})}{\mathfrak{m}_\chi}.
\end{equation*}
Set $N_\chi := \mathcal{Z}(\mathfrak{g}) / \mathfrak{m}_\chi$. Since change-of-ring preserves K-projectives, performing $\otimesL$ in stages leads to
\begin{align*}
D_X \otimesL[U(\mathfrak{g})] M_\chi & \simeq D_X \otimesL[U(\mathfrak{g})] \left( U(\mathfrak{g}) \otimesL[\mathcal{Z}(\mathfrak{g})] N_\chi \right) \\
& \simeq D_X \otimesL[\mathcal{Z}(\mathfrak{g})] N_\chi \\
& \simeq D_X \otimesL[\mathcal{Z}(X)] \left( \mathcal{Z}(X) \otimesL[\mathcal{Z}(\mathfrak{g})] N_\chi \right) \;\quad \text{in}\; \cate{D}(D_X\dcate{Mod}).
\end{align*}
Hence by the $\mathcal{Z}(X)$-flatness of $D_X$, for all $n \geq 0$ we have in $D_X\dcate{Mod}$
\begin{equation}\label{eqn:Mchi-Zg-finiteness-aux0}
\operatorname{Tor}^{U(\mathfrak{g})}_n\left( D_X, M_\chi \right) \simeq D_X \dotimes{\mathcal{Z}(X)} \operatorname{Tor}^{\mathcal{Z}(\mathfrak{g})}_n\left( \mathcal{Z}(X), N_\chi \right).
\end{equation}
We contend that for all $n \geq 0$,
\begin{equation}\label{eqn:Mchi-Zg-finiteness-aux1}
\mathcal{Z}(\mathfrak{g}) \;\text{acts on}\; \operatorname{Tor}^{\mathcal{Z}(\mathfrak{g})}_n \left( \mathcal{Z}(X), N_\chi\right) \;\text{through}\; \chi.
\end{equation}
Here $\mathcal{Z}(\mathfrak{g})$ acts through $j: \mathcal{Z}(\mathfrak{g}) \to \mathcal{Z}(X)$. The action of $z \in \mathcal{Z}(\mathfrak{g})$ is given as follows. Take a projective resolution $\cdots \to Q_1 \to Q_0 \to N_\chi \to 0$; then there exist $f_i \in \operatorname{End}_{\mathcal{Z}(\mathfrak{g})}(Q_i)$ making
\[\begin{tikzcd}
\cdots \arrow[r] & Q_1 \arrow[r] \arrow[d, "f_1"] & Q_0 \arrow[r] \arrow[d, "f_0"] & N_\chi \arrow[d, "z"] \arrow[r] & 0 \\
\cdots \arrow[r] & Q_1 \arrow[r] & Q_0 \arrow[r] & N_\chi \arrow[r] & 0
\end{tikzcd}\]
commutative; different choices of $(f_i)_{i \geq 0}$ are related by $\mathcal{Z}(\mathfrak{g})$-linear homotopies. Apply $\mathcal{Z}(X) \dotimes{\mathcal{Z}(\mathfrak{g})} (\cdot)$ to the whole diagram; the induced action on $\operatorname{H}_n$ is then the desired one, and the commutativity of $\mathcal{Z}(X)$ is crucial here.
\begin{itemize}
\item Naturally, one can take $f_i = z$.
\item On the other hand, one can also take $f_i = \chi(z) \ensuremath{\mathrm{id}}$. This proves \eqref{eqn:Mchi-Zg-finiteness-aux1}.
\end{itemize}
By \eqref{eqn:Mchi-Zg-finiteness-aux0}, $\operatorname{Tor}^{U(\mathfrak{g})}_n\left( D_X, M_\chi \right)$ is generated by $M^\flat := 1 \otimes \operatorname{Tor}^{\mathcal{Z}(\mathfrak{g})}_n\left( \mathcal{Z}(X), N_\chi \right)$ as a $D_X$-module. By combining \eqref{eqn:Mchi-Zg-finiteness-aux1} and Lemma \ref{prop:tensor-Kostant}, the local $\mathcal{Z}(\mathfrak{g})$-finiteness follows. \end{proof}
\begin{proposition}\label{prop:local-Zg-finiteness}
Let $V$ be a $\mathfrak{g}$-module that is finitely generated and locally $\mathcal{Z}(\mathfrak{g})$-finite. Then $\operatorname{Tor}^{U(\mathfrak{g})}_n( D_X, V)$ is locally $\mathcal{Z}(\mathfrak{g})$-finite for all $n \in \ensuremath{\mathbb{Z}}$. \end{proposition} \begin{proof}
Using the assumptions on $V$, one produces a finite filtration $0 = V_0 \subset \cdots \subset V_r = V$ such that each subquotient has an infinitesimal character. By the long exact sequence (of $D_X$-modules) for $\operatorname{Tor}^{U(\mathfrak{g})}_n(D_X, \cdot)$ and Lemma \ref{prop:Zg-finite-ses}, we are thus reduced to the case that $V$ has infinitesimal character $\chi$.
We argue by dimension shifting. The case $n < 0$ is trivial. By taking generators of $V$ and using \eqref{eqn:Mchi}, there is a short exact sequence of $\mathfrak{g}$-modules
\[ 0 \to W \to M_\chi^{\oplus I} \to V \to 0 \]
where $I$ is some (small) set; all modules have infinitesimal character $\chi$. In the long exact sequence we have the piece
\[ \operatorname{Tor}^{U(\mathfrak{g})}_n( D_X, M_\chi^{\oplus I}) \to \operatorname{Tor}^{U(\mathfrak{g})}_n( D_X, V) \to \operatorname{Tor}^{U(\mathfrak{g})}_{n-1}( D_X, W). \]
The third term is locally $\mathcal{Z}(\mathfrak{g})$-finite by induction on $n$; so is the first term by Lemma \ref{prop:Mchi-Zg-finiteness} and the fact that $\operatorname{Tor}^{U(\mathfrak{g})}_n(D_X, \cdot)$ commutes with direct sums. This completes the proof. \end{proof}
\begin{remark}
When $V$ has infinitesimal character $\chi$, the support of $\operatorname{Tor}^{U(\mathfrak{g})}_n(D_X, V)$ as a $\mathcal{Z}(\mathfrak{g})$-module can be loosely controlled as discussed after Lemma \ref{prop:tensor-Kostant}. \end{remark}
We record a by-product of the proofs above, which implies that $D_X \dotimes{U(\mathfrak{g})} (\cdot)$ is usually non-exact.
\begin{proposition}\label{prop:non-exactness}
Define $M_\chi$ by \eqref{eqn:Mchi} for all $\chi$. We have
\[ \left[ \forall \chi, \; \operatorname{Tor}^{U(\mathfrak{g})}_1(D_X, M_\chi) = 0 \right] \iff \mathcal{Z}(X) \;\text{is flat over}\; \mathcal{Z}(\mathfrak{g}). \] \end{proposition} \begin{proof}
Let $N_\chi := \mathcal{Z}(\mathfrak{g})/\mathfrak{m}_\chi$.
By the local criterion of flatness (recall that $\mathcal{Z}(X)$ is a finite $\mathcal{Z}(\mathfrak{g})$-module, and that the residue fields of the finitely generated algebra $\mathcal{Z}(\mathfrak{g})$ at maximal ideals all equal $\ensuremath{\mathbb{C}}$), $\mathcal{Z}(X)$ is flat over $\mathcal{Z}(\mathfrak{g})$ if and only if $\operatorname{Tor}_1^{\mathcal{Z}(\mathfrak{g})}(\mathcal{Z}(X), N_\chi) = 0$ for all $\chi$. In turn, for each $\chi$ this is equivalent to $\operatorname{Tor}^{U(\mathfrak{g})}_1(D_X, M_\chi) = 0$ by \eqref{eqn:Mchi-Zg-finiteness-aux0} together with the freeness (hence faithful flatness) of $D_X$ over $\mathcal{Z}(X)$. \end{proof}
\begin{remark}\label{rem:non-exactness}
In view of the structure theorem in \cite{Kn94} and the notation therein, $\mathcal{Z}(X)$ is flat over $\mathcal{Z}(\mathfrak{g})$ if and only if the natural map between categorical quotients
\[ (\rho + \mathfrak{a}_X^*) \sslash W_X \to \mathfrak{t}^* \sslash W \]
is flat, which rarely holds; a necessary condition is that $\mathrm{rank}(X) = \mathrm{rank}(G)$. Therefore $D_X$ is rarely flat over $U(\mathfrak{g})$, by Proposition \ref{prop:non-exactness}.
However, flatness of $\mathcal{Z}(X)$ does hold when $X$ is the affine closure of $U \backslash G$, where $U$ is a maximal unipotent subgroup: this is a combination of \cite[Lemma 6.4]{Kn94} and the Pittie--Steinberg theorem. Note that $\mathcal{Z}(Y)$ is defined for all normal $G$-varieties $Y$ in \textit{loc.\ cit.}, and it is an equivariant-birational invariant of $Y$. \end{remark}
The same arguments also lead to the following fact about the right $\mathcal{Z}(X)$-action on $\mathbf{Loc}_X$, which might be of independent interest.
\begin{proposition}\label{prop:ZX-locally-finite}
Under the assumptions of Proposition \ref{prop:local-Zg-finiteness}, $\operatorname{Tor}_n^{U(\mathfrak{g})}(D_X, V)$ is locally $\mathcal{Z}(X)$-finite under the action \eqref{eqn:Z-action}, for all $n \in \ensuremath{\mathbb{Z}}$.
Furthermore, if $V$ has infinitesimal character $\chi$, then the support of $\operatorname{Tor}_n^{U(\mathfrak{g})}(D_X, V)$ as a $\mathcal{Z}(X)$-module is a subset of the fiber of $\chi$ under $\operatorname{Spec}(\mathcal{Z}(X)) \to \operatorname{Spec}(\mathcal{Z}(\mathfrak{g}))$, which is finite. \end{proposition} \begin{proof}
First, Lemma \ref{prop:Zg-finite-ses} applies to $\mathcal{Z}(X)$-modules, since $\mathcal{Z}(X)$ is a finitely generated commutative algebra by Knop's structure theorem.
As before, one reduces to the case when $V$ has infinitesimal character $\chi$, and then to the case $V = M_\chi$. Let $\mathcal{Z}(\mathfrak{g})$ act on $\operatorname{Tor}^{U(\mathfrak{g})}_n(D_X, M_\chi)$ via
\[ \mathcal{Z}(\mathfrak{g}) \to \mathcal{Z}(X) \xrightarrow{\eqref{eqn:Z-action}} \operatorname{End}_{D_X}\left(\operatorname{Tor}^{U(\mathfrak{g})}_n(D_X, M_\chi)\right). \]
By inspecting the proof of Lemma \ref{prop:Mchi-Zg-finiteness}, we see that the action above is simply $\chi$. We conclude by the fact that $\mathcal{Z}(X)$ is a finite $\mathcal{Z}(\mathfrak{g})$-module; see \cite[p.254]{Kn94}. \end{proof}
\section{Application to \texorpdfstring{$\operatorname{Ext}$}{Ext}-branching}\label{sec:Ext-application} \subsection{General setting of branching laws}\label{sec:general-branching} Suppose we are given a pair $(\mathfrak{i}, K^I)$ (see \S\ref{sec:gK-basic}) and its subpair $(\mathfrak{h}, K^H)$, where $\mathfrak{h} \subset \mathfrak{i}$ and $K^H \subset K^I$; see \S\ref{sec:restriction-lemma} for the precise conditions on subpairs. This induces the diagonal embedding of pairs \[ (\mathfrak{h}, K^H) \to (\mathfrak{i} \times \mathfrak{h}, K^I \times K^H). \] On the first component it is $U(\mathfrak{h}) \to U(\mathfrak{i}) \otimes U(\mathfrak{h})$, given by $\theta \mapsto \theta \otimes 1 + 1 \otimes \theta$ for all $\theta \in \mathfrak{h}$. On the second component it is $k \mapsto (k, k)$ for all $k \in K^H$.
Consider h-complexes $M$ over $(\mathfrak{i}, K^I)$ and $N$ over $(\mathfrak{h}, K^H)$. Taking tensor product yields an h-complex over $(\mathfrak{i} \times \mathfrak{h}, K^I \times K^H)$, which we denote as $M \boxtimes N$.
\begin{definition}\label{def:M-H}
Denote by $M|_H$ the restriction of $M$ to an h-complex over $(\mathfrak{h}, K^H)$. Define $(M \boxtimes N)|_H$ similarly via the diagonal embedding. The same notation will be used for the induced functors on derived categories. \end{definition}
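Concretely, on the underlying graded modules (the differentials and homotopies being untouched), the restriction $(M \boxtimes N)|_H$ is given by
\[ \theta (m \otimes n) = (\theta m) \otimes n + m \otimes (\theta n), \qquad k (m \otimes n) = (km) \otimes (kn), \]
for $\theta \in \mathfrak{h}$ and $k \in K^H$, where $\theta$ (resp.\ $k$) acts on $M$ through $\mathfrak{h} \subset \mathfrak{i}$ (resp.\ $K^H \subset K^I$).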
By the general formalism of \S\ref{sec:derived-categories}, we can define $\operatorname{RHom}$ between h-complexes over these pairs; they will be denoted as $\operatorname{RHom}_{\mathfrak{h}, K^H}$ and so on.
\begin{proposition}
Assume $K^H$ and $K^I$ are reductive. Let $M$ (resp.\ $N$) be an h-complex over $(\mathfrak{i}, K^I)$ (resp.\ $(\mathfrak{h}, K^H)$). Denote by $N^\vee$ the contragredient of $N$, as in Definition \ref{def:internal-Hom}. There is a canonical isomorphism in $\cate{D}(\ensuremath{\mathbb{C}})$
\[ \operatorname{RHom}_{\mathfrak{h}, K^H}\left( M|_H, N^\vee \right) \simeq \operatorname{RHom}_{\mathfrak{h}, K^H}\left((M \boxtimes N)|_H, \ensuremath{\mathbb{C}} \right). \]
The same isomorphism also holds for complexes of $(\mathfrak{i}, K^I)$-modules and $(\mathfrak{h}, K^H)$-modules, with the corresponding $\operatorname{RHom}$. \end{proposition} \begin{proof}
We will only sketch the case of h-complexes; the other case is similar and well-known.
Take a K-projective resolution $P \to M$ in ${}^{\mathrm{h}} \cate{C}(\mathfrak{i}, K^I)$. Then so is $P|_H \to M|_H$ in ${}^{\mathrm{h}} \cate{C}(\mathfrak{h}, K^H)$ by Lemma \ref{prop:restriction-lemma}. We claim that $(P \boxtimes N)|_H \to (M \boxtimes N)|_H$ is a K-projective resolution as well.
Since $((\cdot) \boxtimes N)|_H \simeq (\cdot)|_H \otimes N$ is exact, the above is indeed a quasi-isomorphism. By \cite[Lemma 3.2]{Pan05}, $(\cdot)|_H \otimes N$ has a right adjoint dg-functor given by internal $\operatorname{Hom}$ (see \S\ref{sec:gK-basic})
\[ \operatorname{Hom}^\bullet_{\ensuremath{\mathbb{C}}}(N, (\cdot)|_H)^{K^H\text{-alg}}. \]
As seen in the proof of Lemma \ref{prop:restriction-lemma}, this right adjoint is exact, thus the claim follows.
All in all, $\operatorname{RHom}_{\mathfrak{h}, K^H}\left( M|_H, N^\vee \right)$ is represented by the complex
\[ {}^{\mathrm{h}} \operatorname{Hom}^\bullet_{\mathfrak{h}, K^H}\left(P|_H , N^\vee \right) \simeq {}^{\mathrm{h}} \operatorname{Hom}^\bullet_{\mathfrak{h}, K^H}\left(P|_H \otimes N, \ensuremath{\mathbb{C}} \right) \]
where the aforementioned dg-adjunction is applied once again. By the claim, the right-hand side represents $\operatorname{RHom}_{\mathfrak{h}, K^H}\left((M \boxtimes N)|_H, \ensuremath{\mathbb{C}} \right)$. \end{proof}
Cf.\ \cite[Proposition 2.6]{Pra18} for the case of $p$-adic groups.
Under the assumptions of reductivity, the study of general $\operatorname{Ext}$-branching
\[ {}^{\mathrm{h}} \operatorname{Ext}^n_{\mathfrak{h}, K^H}(M|_H, N^\vee) \]
for various $M$, $N$ thus reduces to the special case ${}^{\mathrm{h}} \operatorname{Ext}^n_{\mathfrak{h}, K^H}(M|_H, \ensuremath{\mathbb{C}})$, once we replace $\mathfrak{i}$ (resp.\ $K^I$) by $\mathfrak{g} := \mathfrak{i} \times \mathfrak{h}$ (resp.\ $K := K^I \times K^H$). This recipe is certainly well-known, at least in degree $n=0$ and for complexes of $(\mathfrak{g}, K)$-modules instead of h-complexes.
We record another easy fact about $\operatorname{Ext}^n_{\mathfrak{h}, K^H}(\cdot, \ensuremath{\mathbb{C}})$.
\begin{proposition}[{\cite[Corollary 3.2]{KV95}}]\label{prop:Ext-H}
Consider a subpair $(\mathfrak{h}, K^H)$ of $(\mathfrak{g}, K)$ and assume $K^H$ and $K$ are reductive. Let $V$ be a $(\mathfrak{g}, K)$-module. There are canonical isomorphisms
\[ \operatorname{Ext}^n_{\mathfrak{h}, K^H}(V|_H, \ensuremath{\mathbb{C}}) \simeq \operatorname{H}_n\left(\mathfrak{h}, K^H; V|_H \right)^* \]
for each $n \in \ensuremath{\mathbb{Z}}$, where $\operatorname{H}_n\left(\mathfrak{h}, K^H; \cdot \right)$ is the relative Lie algebra homology. \end{proposition}
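For orientation, the case $n = 0$ of this isomorphism reads
\[ \operatorname{Hom}_{\mathfrak{h}, K^H}\left(V|_H, \ensuremath{\mathbb{C}}\right) \simeq \operatorname{H}_0\left(\mathfrak{h}, K^H; V|_H\right)^*, \]
i.e.\ invariant functionals on $V|_H$ are the same as linear functionals on the space of co-invariants.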
Note that ${}^{\mathrm{h}} \operatorname{Ext}_{\mathfrak{h}, K^H} \simeq \operatorname{Ext}_{\mathfrak{h}, K^H}$ by Corollary \ref{prop:BL-equiv-Ext}. The upcoming results about $\operatorname{Ext}^n$ can all be viewed as assertions about $\operatorname{H}_n\left(\mathfrak{h}, K^H; V|_H \right)^*$.
\subsection{Branching and localization} Consider a connected reductive group $G$ and its subgroups \begin{equation}\label{eqn:four-groups}\begin{tikzcd}
H \arrow[phantom, r, "\subset" description] & G \\
K^H \arrow[phantom, u, "\subset" description, sloped] \arrow[phantom, r, "\subset" description] & K \arrow[phantom, u, "\subset" description, sloped]. \end{tikzcd}\end{equation} We assume that $H$, $K$ and $K^H$ are all reductive.
\begin{lemma}\label{prop:Hom12}
The following diagram commutes up to canonical isomorphism
\[\begin{tikzcd}[column sep=large]
{}^{\mathrm{h}} \cate{C}(\mathfrak{h}, K^H) \arrow[r, "{\mathrm{coInv}^{\mathfrak{h}, K^H}_{\ensuremath{\mathbb{C}}, K^H}}"] \arrow[rd, "{\operatorname{Hom}_1}"'] & {}^{\mathrm{h}} \cate{C}(\ensuremath{\mathbb{C}}, K^H) \arrow[d, "{\operatorname{Hom}_2}"] \\
& \cate{C}(\ensuremath{\mathbb{C}})^{\mathrm{op}}
\end{tikzcd}\]
where
\begin{align*}
\operatorname{Hom}_1 & := {}^{\mathrm{h}} \operatorname{Hom}^\bullet_{\mathfrak{h}, K^H}(\cdot, \ensuremath{\mathbb{C}}), \\
\operatorname{Hom}_2 & := {}^{\mathrm{h}} \operatorname{Hom}^\bullet_{\ensuremath{\mathbb{C}}, K^H}(\cdot, \ensuremath{\mathbb{C}}),
\end{align*}
and all actions upon $\ensuremath{\mathbb{C}}$ are defined to be trivial. \end{lemma} \begin{proof}
All functors in view are dg-functors, and it amounts to establishing a canonical isomorphism in $\cate{C}(\ensuremath{\mathbb{C}})$
\[ {}^{\mathrm{h}} \operatorname{Hom}^\bullet_{\ensuremath{\mathbb{C}}, K^H}\left( \mathrm{coInv}^{\mathfrak{h}, K^H}_{\ensuremath{\mathbb{C}}, K^H}(N), \ensuremath{\mathbb{C}}\right) \simeq {}^{\mathrm{h}} \operatorname{Hom}^\bullet_{\mathfrak{h}, K^H}(N, \ensuremath{\mathbb{C}}) \]
where $N$ is any h-complex over $(\mathfrak{h}, K^H)$. Indeed, this follows from the adjunction between co-invariants and inflation (Proposition \ref{prop:Inv-coInv}); the adjunction extends to the dg-level, cf.\ the proof of Proposition \ref{prop:K-injectives-adjunction}. \end{proof}
Consider the affine homogeneous $G$-space \[ X := H \backslash G, \quad x := H \cdot 1. \] Thus $x$ is a $K^H$-fixed point in $X$, and we have the corresponding $K^H$-equivariant morphism \[ i_x: \mathrm{pt} \to X. \] Note that $D_{\mathrm{pt}} = \ensuremath{\mathbb{C}}$, with trivial $K^H$-action.
To save space, we will suppress the oblivion functor ${}^{\mathrm{h}}\cate{D}(D_X, K) \to {}^{\mathrm{h}}\cate{D}(D_X, K^H)$ from the statements below, and adopt the notation $M \mapsto M|_H$ of Definition \ref{def:M-H} on the level of h-derived categories.
\begin{proposition}\label{prop:RHom-RHom}
For all objects $M$ of ${}^{\mathrm{h}} \cate{D}^-(\mathfrak{g}, K)$, there are canonical isomorphisms in $\cate{D}^+(\ensuremath{\mathbb{C}})$:
\[ \operatorname{RHom}_{\mathfrak{h}, K^H}(M|_H, \ensuremath{\mathbb{C}}) \simeq \operatorname{RHom}_{D_{\mathrm{pt}}, K^H}\left( i_x^\bullet( \mathbf{Loc}_X(M) ), \ensuremath{\mathbb{C}} \right). \]
Here $i_x^\bullet: {}^{\mathrm{h}} \cate{D}^-(D_X, K^H) \to {}^{\mathrm{h}} \cate{D}^-(D_{\mathrm{pt}}, K^H)$ is the inverse image functor introduced in \S\ref{sec:inverse-image}. \end{proposition} \begin{proof}
The functor ${}^{\mathrm{h}} \cate{C}(\mathfrak{g}, K) \to {}^{\mathrm{h}} \cate{C}(\mathfrak{h}, K^H)$ preserves K-projectives by Lemma \ref{prop:restriction-lemma}, hence
the left-hand side is the left h-derived functor of the composition of
\[ {}^{\mathrm{h}} \cate{C}(\mathfrak{g}, K) \to {}^{\mathrm{h}} \cate{C}(\mathfrak{h}, K^H) \xrightarrow{\mathrm{coInv}^{\mathfrak{h}, K^H}_{\ensuremath{\mathbb{C}}, K^H}} {}^{\mathrm{h}} \cate{C}(\ensuremath{\mathbb{C}}, K^H) \xrightarrow{\operatorname{Hom}_2} \cate{C}(\ensuremath{\mathbb{C}})^{\mathrm{op}}, \]
in the notation of Lemma \ref{prop:Hom12}.
Since $\mathrm{coInv}^{\mathfrak{h}, K^H}_{\ensuremath{\mathbb{C}}, K^H}$ also preserves K-projectives by Proposition \ref{prop:Inv-coInv-K}, one can take left h-derived functors in stages. Using Proposition \ref{prop:Loc-coInv}, the result is isomorphic to the composition of
\[ i_x^\bullet \left( \mathbf{Loc}_X \right): {}^{\mathrm{h}} \cate{D}^-(\mathfrak{g}, K) \to {}^{\mathrm{h}} \cate{D}^-(\ensuremath{\mathbb{C}}, K^H) \]
with $\operatorname{L}\!(\operatorname{Hom}_2): {}^{\mathrm{h}} \cate{D}^-(\ensuremath{\mathbb{C}}, K^H) \to \cate{D}^+(\ensuremath{\mathbb{C}})^{\mathrm{op}}$. However, $\operatorname{L}\!(\operatorname{Hom}_2)$ is just $\operatorname{RHom}_{D_{\mathrm{pt}}, K^H}(\cdot, \ensuremath{\mathbb{C}})$. \end{proof}
\begin{corollary}
For all objects $M$ of $\cate{D}^{\mathrm{b}}(\mathfrak{g}, K)$ and all $n \in \ensuremath{\mathbb{Z}}$, there are canonical isomorphisms:
\[ \operatorname{Ext}^n_{\mathfrak{h}, K^H}(M|_H, \ensuremath{\mathbb{C}}) \simeq \operatorname{Ext}^n_{\cate{D}^{\mathrm{b}}_{K^H}(\mathrm{pt})}\left( i_x^\bullet( \varepsilon \mathbf{Loc}_X(M) ), \ensuremath{\mathbb{C}} \right). \]
Here $\varepsilon$ is the equivalence in Theorem \ref{prop:Beilinson-equiv}. \end{corollary} \begin{proof}
Combine the Bernstein--Lunts equivalence (Corollary \ref{prop:BL-equiv-Ext}) with Beilinson's equivalence $\varepsilon$ (Corollary \ref{prop:Beilinson-equiv-Ext}), noting that $\varepsilon$ is compatible with inverse images (Proposition \ref{prop:inverse-image-compatibility}). \end{proof}
One can also interpret the relative Lie algebra homologies in terms of localization.
\begin{proposition}\label{prop:H-coInv}
Let $V$ be a $(\mathfrak{g}, K)$-module. There are canonical isomorphisms
\[ \operatorname{H}_n(\mathfrak{h}, K^H; V|_H) \simeq \operatorname{H}^{-n}\operatorname{L}\!\left( \mathrm{coInv}^{\ensuremath{\mathbb{C}}, K^H}_{\ensuremath{\mathbb{C}}, \{1\}} \right) \left(i_x^\bullet \mathbf{Loc}_X(V)\right), \quad n \in \ensuremath{\mathbb{Z}}. \] \end{proposition} \begin{proof}
The argument from Proposition \ref{prop:RHom-RHom} shows that the right-hand side is the $\operatorname{H}^{-n}$ of the composition
\begin{equation*}
{}^{\mathrm{h}} \cate{D}^-(\mathfrak{g}, K) \to {}^{\mathrm{h}} \cate{D}^-(\mathfrak{h}, K^H) \xrightarrow{\operatorname{L}\!\left(\mathrm{coInv}^{\mathfrak{h}, K^H}_{\ensuremath{\mathbb{C}}, K^H}\right)}
{}^{\mathrm{h}} \cate{D}^-(\ensuremath{\mathbb{C}}, K^H) \xrightarrow{\operatorname{L}\!\left(\mathrm{coInv}^{\ensuremath{\mathbb{C}}, K^H}_{\ensuremath{\mathbb{C}}, \{1\}}\right)} \cate{D}^-(\ensuremath{\mathbb{C}}).
\end{equation*}
Taking co-invariants preserves K-projectives. By the transitivity \eqref{eqn:Inv-coInv-transitive}, the composition above folds into
\[ {}^{\mathrm{h}} \cate{D}^-(\mathfrak{g}, K) \to {}^{\mathrm{h}} \cate{D}^-(\mathfrak{h}, K^H) \xrightarrow{\operatorname{L}\!\left(\mathrm{coInv}^{\mathfrak{h}, K^H}_{\ensuremath{\mathbb{C}}, \{1\}}\right)}
\cate{D}^-(\ensuremath{\mathbb{C}}). \]
It remains to show that after taking $\operatorname{H}^{-n}$, the second arrow gives $\operatorname{H}_n(\mathfrak{h}, K^H; W)$ when applied to any $(\mathfrak{h}, K^H)$-module $W$.
Use transitivity to break $\mathrm{coInv}^{\mathfrak{h}, K^H}_{\ensuremath{\mathbb{C}}, \{1\}}$ into two stages
\[ {}^{\mathrm{h}} \cate{C}(\mathfrak{h}, K^H) \xrightarrow{\mathrm{coInv}_1} \cate{C}(\mathfrak{h}, K^H) \xrightarrow{\mathrm{coInv}_2} \cate{C}(\ensuremath{\mathbb{C}}), \]
cf.\ Example \ref{eg:h-inflation} for $\mathrm{coInv}_1$. The left derived functors are decomposed accordingly. Now take the standard resolution $W \otimes N\mathfrak{h} \to W$ in ${}^{\mathrm{h}} \cate{C}(\mathfrak{h}, K^H)$ (see \S\ref{sec:std-resolution}). Unwinding the construction of $\mathrm{coInv}_1$, one obtains
\[ \mathrm{coInv}_1(W \otimes N\mathfrak{h}) \simeq W \otimes \mathrm{coInv}_1(N\mathfrak{h}) \]
in $\cate{C}(\mathfrak{h}, K^H)$. It is shown in \cite[Proposition 3.2.7]{Pan07} that $\mathrm{coInv}_1(N\mathfrak{h})$ is the standard resolution of $\ensuremath{\mathbb{C}}$ in $\cate{C}(\mathfrak{h}, K^H)$, i.e.\ the relative standard complex. Hence $\mathrm{coInv}_2 (W \otimes \mathrm{coInv}_1(N\mathfrak{h}))$ represents the relative Lie algebra homologies of $W$, as desired. \end{proof}
\subsection{Some consequences of regularity}\label{sec:consequence-regularity} Throughout this section, we let $G$ be a connected reductive group, $X$ be a spherical affine homogeneous $G$-space, and $K$ be a spherical reductive subgroup of $G$. We consider a point $x$ of $X$ and a reductive subgroup $K^H$ of $K$ that stabilizes $x$. In particular, $i_x: \mathrm{pt} \hookrightarrow X$ is $K^H$-equivariant.
By taking $H := \ensuremath{\mathrm{Stab}}_G(x)$ to identify $X$ with $H \backslash G$, we are in the situation of \eqref{eqn:four-groups}; the extra assumption here is that $H$ and $K$ are both spherical.
For every smooth $K^H$-variety $Y$, let $\cate{D}^{\mathrm{b}}_{K^H, \mathrm{cons}}(Y)$ denote the bounded $K^H$-equivariant derived category of constructible sheaves defined by Bernstein--Lunts \cite{BL94}, equipped with perverse $t$-structure. Assuming $Y$ is affine, we have equivalences \[ {}^{\mathrm{h}} \cate{D}^{\mathrm{b}}_{\mathrm{rh}}(D_Y, K^H) \simeq \cate{D}^{\mathrm{b}}_{K^H, \mathrm{rh}}(Y) \simeq \cate{D}^{\mathrm{b}}_{K^H, \mathrm{cons}}(Y). \] \begin{itemize}
\item The first one is Beilinson's equivalence (Theorem \ref{prop:Beilinson-equiv}).
\item The second one is the equivariant Riemann--Hilbert correspondence; see \cite[4.2]{BL94} or \cite[Theorem 4.6.2]{Ka08}. \end{itemize}
\begin{definition}
Let
$\begin{tikzcd}
{}^{\mathrm{h}} \cate{D}^{\mathrm{b}}_{\mathrm{rh}}(D_X, K^H) \arrow[r, shift left, "{i_x^*, i_x^!}"] &
{}^{\mathrm{h}} \cate{D}^{\mathrm{b}}_{\mathrm{rh}}(D_{\mathrm{pt}}, K^H) \arrow[shift left, l, "{i_{x, *}}"]
\end{tikzcd}$
be the functors that correspond to the synonymous functors between $\cate{D}^{\mathrm{b}}_{K^H, \mathrm{cons}}(X)$ and $\cate{D}^{\mathrm{b}}_{K^H, \mathrm{cons}}(\mathrm{pt})$. \end{definition}
Therefore we have \begin{align*}
i_x^! & = i_x^\bullet[-\dim X], \\
i_x^* & = i_x^![2\dim X] = i_x^\bullet [\dim X] \quad \text{when}\; K^H = \{1\}, \end{align*} and $i_x^*$ is left adjoint to $i_{x, *}$. For the relation between $i_x^!$ and $i_x^\bullet$, see e.g.\ \cite[Theorem 7.1.1]{HTT08}. For the relation between $i_x^*$ and $i_x^!$ in the non-equivariant case, see e.g.\ \cite[p.8]{BL94}.
\begin{theorem}\label{prop:RHom-Loc}
Let $M$ be an object of the category ${}^{\mathrm{h}}\cate{D}^{\mathrm{b}}_{\mathrm{HC}}(\mathfrak{g}, K)$ introduced in Corollary \ref{prop:regularity-gen}. There is a canonical isomorphism
\begin{equation*}
\operatorname{RHom}_{\mathfrak{h}, K^H}(M|_H, \ensuremath{\mathbb{C}}) \simeq \operatorname{RHom}_{D_{\mathrm{pt}}, K^H}\left( i_x^! \mathbf{Loc}_X(M)[\dim X], \ensuremath{\mathbb{C}} \right).
\end{equation*}
When $K^H = \{1\}$, this is also isomorphic to
\begin{equation*}
\operatorname{RHom}_{D_{\mathrm{pt}}}\left( i_x^* \mathbf{Loc}_X(M)[-\dim X], \ensuremath{\mathbb{C}} \right)
\simeq \operatorname{RHom}_{D_X}\left( \mathbf{Loc}_X(M), i_{x, *}(\ensuremath{\mathbb{C}})[\dim X] \right).
\end{equation*}
All these complexes of $D$-modules are bounded with regular holonomic cohomologies. \end{theorem} \begin{proof}
By Corollary \ref{prop:regularity-gen}, $\mathbf{Loc}_X(M)$ is in ${}^{\mathrm{h}} \cate{D}^{\mathrm{b}}_{\mathrm{rh}}(D_X, K)$. It remains to apply Proposition \ref{prop:RHom-RHom} and the relations recalled above. \end{proof}
Note that by \cite[Examples 1.5.23 and 1.6.4]{HTT08}, $i_{x, *}(\ensuremath{\mathbb{C}})$ is the $D_X$-module generated by the Dirac measure at $x$.
In order to apply Theorem \ref{prop:RHom-Loc} to concrete problems, one needs a deeper, quantitative understanding of $\mathbf{Loc}_X(M)$. We only give some crude applications below, which bypass this issue.
\begin{corollary}\label{prop:Ext-consequence-1}
Let $M$ be as in Theorem \ref{prop:RHom-Loc}. Then ${}^{\mathrm{h}} \operatorname{Ext}^n_{\mathfrak{h}, K^H}(M|_H, \ensuremath{\mathbb{C}})$ is finite-dimensional for all $n \in \ensuremath{\mathbb{Z}}$. It vanishes for $|n| \gg 0$. \end{corollary} \begin{proof}
To prove the vanishing for $|n| \gg 0$, simply take the standard resolution of $M$.
To prove the finiteness of ${}^{\mathrm{h}}\operatorname{Ext}^n_{\mathfrak{h}, K^H}(M|_H, \ensuremath{\mathbb{C}})$, it suffices to show that for every object $N$ of ${}^{\mathrm{h}}\cate{D}^{\mathrm{b}}_{\mathrm{rh}}(D_{\mathrm{pt}}, K^H)$, we have
\[ \dim {}^{\mathrm{h}}\operatorname{Ext}^n_{D_{\mathrm{pt}}, K^H}\left(N, \ensuremath{\mathbb{C}}\right) < +\infty. \]
There are at least two ways to see this. (i) Working in $\cate{D}^{\mathrm{b}}_{K^H, \mathrm{cons}}(\mathrm{pt})$, use the constructibility of $\mathrm{R}\mathscr{H}\mathit{om}$ (see \cite[Desideratum 6.4.1]{Ac21}) together with the functor $\mathrm{Inv}^{\leq m}_{K^H, *}$ of truncated invariants introduced in \cite[Proposition 6.6.7]{Ac21} ($m \gg 0$) to reach $\operatorname{Ext}^n$. (ii) Use $m$-acyclic resolutions and holonomicity to access $\operatorname{Ext}^n$: see \cite[Proposition 2.13.1]{BL95} together with the proof of \cite[Theorem 2.13]{BL95}. \end{proof}
In particular, the Euler--Poincaré characteristic for branching laws \begin{equation}\label{eqn:EP-branching}
\mathrm{EP}_{\mathfrak{h}, K^H}(M|_H, \ensuremath{\mathbb{C}}) := \sum_n (-1)^n \dim {}^{\mathrm{h}}\operatorname{Ext}^n_{\mathfrak{h}, K^H}(M|_H, \ensuremath{\mathbb{C}}) \end{equation} is well-defined for every object $M$ of ${}^{\mathrm{h}}\cate{D}^{\mathrm{b}}_{\mathrm{HC}}(\mathfrak{g}, K)$.
\begin{theorem}\label{prop:local-index}
Take $K^H = \{1\}$ in the formalism above. Let $M$ be in ${}^{\mathrm{h}}\cate{D}^{\mathrm{b}}_{\mathrm{HC}}(\mathfrak{g}, K)$ and set $\mathcal{L} := \mathbf{Loc}_{X, \{1\}}(M)$. Define the solution complex $\mathrm{Sol}_X(\mathcal{L}) \in \cate{D}^{\mathrm{b}}_{\mathrm{cons}}(X)$ of $\mathcal{L}$ by \cite[p.118]{HTT08}. Then
\[ \operatorname{RHom}_{\mathfrak{h}}(M|_H, \ensuremath{\mathbb{C}}) \simeq i_x^* \mathrm{Sol}_X(\mathcal{L}). \]
Consequently, the $\mathrm{EP}_{\mathfrak{h}, \{1\}}(M|_H, \ensuremath{\mathbb{C}})$ in \eqref{eqn:EP-branching} equals the local Euler--Poincaré characteristic
\[ \chi_x\left( \mathrm{Sol}_X(\mathcal{L}) \right) \]
of $\mathrm{Sol}_X(\mathcal{L})$ at $x$, which can be expressed in terms of characteristic cycles and Euler obstructions by Kashiwara's local index theorem \cite[Theorem 4.6.7]{HTT08}. \end{theorem} \begin{proof}
By \cite[Proposition 4.7.4]{HTT08} we have
\[ \mathrm{Sol}_X(\mathcal{L})[\dim X] \simeq \mathrm{DR}_X\left( \mathbb{D}_X \mathcal{L} \right) \]
canonically, where $\mathrm{DR}_X$ (resp.\ $\mathbb{D}_X$) denotes the non-equivariant de Rham functor (resp.\ duality endo-functor) for $X$. Since $\mathrm{DR}_{\mathrm{pt}} = \ensuremath{\mathrm{id}}$, the Riemann--Hilbert correspondence leads to
\[ i_x^* \mathrm{Sol}_X(\mathcal{L}) \simeq \left( \mathbb{D}_{\mathrm{pt}} i_x^! \mathcal{L} \right)[-\dim X] \simeq \mathbb{D}_{\mathrm{pt}} \left( i_x^! \mathcal{L}[\dim X] \right). \]
Now put $K^H = \{1\}$ in Theorem \ref{prop:RHom-Loc} to infer that $i_x^* \mathrm{Sol}_X(\mathcal{L}) \simeq \operatorname{RHom}_{\mathfrak{h}}(M|_H, \ensuremath{\mathbb{C}})$. The remaining assertions follow at once. \end{proof}
The isomorphism in Theorem \ref{prop:local-index} generalizes \cite[Proposition 10.2]{Li22}.
We now specialize to the case when $M$ is concentrated in degree zero.
\begin{corollary}\label{prop:Ext-consequence-2}
For every Harish-Chandra $(\mathfrak{g}, K)$-module $V$ and $n \geq 0$, the dimension of
\[ {}^{\mathrm{h}} \operatorname{Ext}^n_{\mathfrak{h}, K^H}(V|_H, \ensuremath{\mathbb{C}}) \simeq \operatorname{Ext}^n_{\mathfrak{h}, K^H}(V|_H, \ensuremath{\mathbb{C}}) \]
is finite. It also equals $\dim \operatorname{H}_n(\mathfrak{h}, K^H; V|_H)$. \end{corollary} \begin{proof}
The isomorphism in the first part is Corollary \ref{prop:BL-equiv-Ext}. The second part follows from Proposition \ref{prop:Ext-H}. \end{proof}
The finiteness in Corollary \ref{prop:Ext-consequence-2} is also a consequence of a more general result of M.\ Kitagawa \cite[Fact 4.7]{Ki21}; a uniform bound is given in \cite[Corollary 7.17]{Ki21}. In the case $K^H = \{1\}$, the finiteness of $\dim \operatorname{H}_n(\mathfrak{h}; V|_H)$ is shown in \cite[Proposition 4.2.2]{AGKL16} when $K$ is a symmetric subgroup in good position relative to $H$.
Likewise, Proposition \ref{prop:H-coInv} and Corollary \ref{prop:regularity-gen} lead to the following statement for relative Lie algebra homologies.
\begin{proposition}\label{prop:H-Loc}
Let $V$ be a Harish-Chandra $(\mathfrak{g}, K)$-module. There are canonical isomorphisms
\begin{equation*}
\operatorname{H}_n(\mathfrak{h}, K^H; V|_H) \simeq \operatorname{H}^{- n + \dim X}\operatorname{L}\!\left( \mathrm{coInv}^{\ensuremath{\mathbb{C}}, K^H}_{\ensuremath{\mathbb{C}}, \{1\}} \right) \left(i_x^! \mathbf{Loc}_X(V)\right)
\end{equation*}
for all $n \in \ensuremath{\mathbb{Z}}$; when $K^H = \{1\}$, it is also isomorphic to
\begin{equation*}
\operatorname{H}^{-n - \dim X} \left(i_x^* \mathbf{Loc}_X(V)\right).
\end{equation*} \end{proposition}
To better understand the effect of $\operatorname{H}^{-n + \dim X} \operatorname{L}\!\left( \mathrm{coInv}^{\ensuremath{\mathbb{C}}, K^H}_{\ensuremath{\mathbb{C}}, \{1\}} \right)$, we remark that on the constructible side, it matches the composition \[ \cate{D}^{\mathrm{b}}_{K^H, \mathrm{cons}}(\mathrm{pt}) \xrightarrow{\mathrm{Inv}^{\geq m}_{K^H, !}} {}^{\mathrm{p}} \cate{D}^-_{\mathrm{cons}}(\mathrm{pt})^{\geq m} \xrightarrow{{}^{\mathrm{p}} \operatorname{H}^{-n \pm \dim X}} \left\{\text{f.d.}\; \ensuremath{\mathbb{C}} \text{-vector spaces} \right\} \] for $m \leq -n + \dim X$, where $\mathrm{Inv}^{\geq m}_{K^H, !}$ stands for the functor of truncated co-invariants in \cite[Proposition 6.6.7]{Ac21}. This can be deduced from Proposition \ref{prop:Infl-epsilon} and adjunction. For a detailed discussion, see \cite[\S 6.6]{Ac21}.
Combining this with Proposition \ref{prop:H-Loc}, one can deduce the finiteness of $\dim \operatorname{H}_n(\mathfrak{h}, K^H; V|_H)$ directly.
\subsection{The monodromic setting}\label{sec:monodromic} Let $G$ and $X = H \backslash G$ be as in \S\ref{sec:consequence-regularity}, so $H$ is reductive. In addition, we fix a character of the Lie algebra $\mathfrak{h}$, \[ \chi: \mathfrak{h} \to \ensuremath{\mathbb{C}}. \] It must factor through the Lie algebra of a torus quotient, say $S = H/\underline{H}$ where $\underline{H} \lhd H$. Accordingly, there is a morphism of $G$-varieties \[ \pi: \tilde{X} := \underline{H} \backslash G \to H \backslash G = X. \] Note that $S$ acts on the left of $\tilde{X}$, commuting with the $G$-action, and makes $\pi$ into an $S$-torsor. Since $S$ is a torus, $\pi$ is even Zariski-locally trivial. Also note that $\tilde{X}$ is affine since $X$ is. In \cite[2.2]{BL95}, $\pi$ is said to make $X$ into an \emph{$S$-monodromic $G$-variety}.
The character $\chi$ corresponds to an element of $\mathfrak{s}^*$. Let $\mathfrak{m}_\chi \subset \operatorname{Sym}(\mathfrak{s})$ be the corresponding maximal ideal; its image in $\widetilde{\mathscr{D}}_X := (\pi_* \mathscr{D}_{\tilde{X}})^S$ is central. Recall the following notion from \cite[p.24]{BB93} (see also \cite{Li22}).
\begin{definition}
Set $\mathscr{D}_{X, \chi} := \widetilde{\mathscr{D}}_X / \mathfrak{m}_\chi \widetilde{\mathscr{D}}_X$; this is the sheaf of algebras of twisted differential operators (TDO's) attached to $\chi$. Put $D_{X, \chi} := \Gamma(X, \mathscr{D}_{X, \chi})$. \end{definition}
By trivializing $\pi$ locally over $X$, we see that $\mathscr{D}_{X, \chi}$ is a locally trivial sheaf of TDO's. Taking $\chi=0$ reverts to the untwisted version $\mathscr{D}_X$.
\begin{remark}\label{rem:monodromic-inverse-image}
For later use, we remark that taking inverse images realizes an equivalence from $\mathscr{D}_{X, \chi}\dcate{Mod}$ to the category of $(S, \chi)$-monodromic $\mathscr{D}_{\tilde{X}}$-modules, the latter being a twisted version of $S$-equivariance; see \cite[(2.3) and Proposition 2.8]{Li22} or \cite[1.8.10 Lemma]{BB93} for details. A further inverse image via $G \to \underline{H} \backslash G = \tilde{X}$ brings us to $(H^{\mathrm{op}}, \chi)$-monodromic $\mathscr{D}_G$-modules. \end{remark}
The actions of $G$ and $S$ commute, hence we obtain $G$-equivariant homomorphisms $U(\mathfrak{g}) \to \widetilde{\mathscr{D}}_X \to \mathscr{D}_{X, \chi}$. Since $X$ is affine, we may and will work with $D_{X, \chi}$-modules instead. The previous constructions now become \[ D_{X, \chi} \simeq D_{\tilde{X}}^S / \mathfrak{m}_\chi D_{\tilde{X}}^S. \] We obtain a natural $G$-equivariant homomorphism $j: U(\mathfrak{g}) \to D_{X, \chi}$.
Given a reductive subgroup $K \subset G$, there is also a monodromic version of Beilinson's equivalence (Theorem \ref{prop:BL-equiv}), identifying ${}^{\mathrm{h}} \cate{D}^{\mathrm{b}}(D_{X, \chi}, K)$ with the monodromic equivariant derived category $\cate{D}^{\mathrm{b}}_{K, \chi}(X)$ of $D$-modules; see \cite[2.14]{BL95}.
\begin{remark}
Since we fixed $\chi$, there is no need to consider $(\tilde{\mathscr{D}}_X, F|K)$-modules as in \cite[2.5]{BL95} where $F := K \times S$. This corresponds to taking $I = \mathfrak{m}_\chi$ in the formalism of \cite[2.3]{BL95}. \end{remark}
In the context of TDO's arising from monodromic structures, the notions of holonomicity and regularity can be found in \cite[\S 7.14]{Ka08}\footnote{The author is indebted to Masatoshi Kitagawa for clarifications on this point.}. They reduce to the untwisted theory by taking inverse images via $\pi$. We define ${}^{\mathrm{h}} \cate{D}^{\mathrm{b}}_{\mathrm{rh}+}(D_{X, \chi}, K)$ accordingly, cf.\ Definition \ref{def:rh-plus}.
The derived monodromic localization functor \begin{equation}
\mathbf{Loc}_{X, \chi} = \mathbf{Loc}_{X, K, \chi}: {}^{\mathrm{h}} \cate{D}(\mathfrak{g}, K) \to {}^{\mathrm{h}} \cate{D}(D_{X, \chi}, K) \end{equation} can still be defined, with amplitude in $[-\dim G, 0]$.
The following construction is needed for the monodromic counterpart of Theorem \ref{prop:regularity}. Define the commutative algebra $\mathcal{Z}(\tilde{X})$ for the $S \times G$-variety $\tilde{X}$ by applying Definition \ref{def:ZX}. There is a homomorphism \[ \operatorname{Sym}(\mathfrak{s}) \otimes \mathcal{Z}(\mathfrak{g}) \simeq \mathcal{Z}(\mathfrak{s} \times \mathfrak{g}) \to \mathcal{Z}(\tilde{X}) \subset D_{\tilde{X}}^{S \times G} \subset D_{\tilde{X}}^S \] of algebras. Define \begin{equation*}
\mathcal{Z}_\chi(X) := \mathcal{Z}(\tilde{X}) / \mathfrak{m}_\chi \mathcal{Z}(\tilde{X}). \end{equation*} We obtain homomorphisms \[ \mathcal{Z}(\mathfrak{g}) \to \mathcal{Z}_\chi(X) \to D_{X, \chi}^G \subset D_{X, \chi}. \]
Thus $\mathcal{Z}_\chi(X)$ acts on the right of $\mathbf{Loc}_{X, \chi}$ through $\mathcal{Z}_\chi(X) \to D_{X, \chi}^G$, as in the non-twisted case \eqref{eqn:Z-action}.
\begin{lemma}\label{prop:Knop-monodromic}
The algebra $\mathcal{Z}_\chi(X)$ is commutative. Moreover, $D_{X, \chi}$ is projective over $\mathcal{Z}_\chi(X)$ and $\mathcal{Z}_\chi(X)$ is finitely generated over $\mathcal{Z}(\mathfrak{g})$, both as left and right modules. \end{lemma} \begin{proof}
We know that $\mathcal{Z}(\tilde{X})$ is commutative, hence so is $\mathcal{Z}_\chi(X)$.
Next, the projectivity and finite generation as left and right modules hold on the level of
\[ \mathcal{Z}(\mathfrak{s} \times \mathfrak{g}) \to \mathcal{Z}(\tilde{X}) \hookrightarrow D_{\tilde{X}} \]
by \cite{Kn94}. Note that $\mathcal{Z}(\tilde{X})$ is actually included in $D_{\tilde{X}}^S$. We claim that $D_{\tilde{X}}^S$ is also projective as left and right $\mathcal{Z}(\tilde{X})$-modules.
Since $S$ is reductive, acting algebraically on $D_{\tilde{X}}$ and trivially on $\mathcal{Z}(\tilde{X})$, one can realize $D_{\tilde{X}}^S$ canonically as a direct $\mathcal{Z}(\tilde{X})$-summand of $D_{\tilde{X}}$, thus the claim follows at once. This can also be seen by identifying the $S$-action on $D_{\tilde{X}}$ with a grading by the character lattice of $S$.
By the claim, we may tensor the homomorphisms
\[ \mathcal{Z}(\mathfrak{s} \times \mathfrak{g}) \to \mathcal{Z}(\tilde{X}) \hookrightarrow D_{\tilde{X}}^S \]
with $\operatorname{Sym}(\mathfrak{s}) / \mathfrak{m}_\chi$ over $\operatorname{Sym}(\mathfrak{s})$ on the left or right. The required projectivity and finite generation persist. \end{proof}
\begin{theorem}\label{prop:regularity-monodromic}
Suppose that $X$ is affine and spherical, $K$ is a reductive spherical subgroup of $G$, and $V$ is a Harish-Chandra $(\mathfrak{g}, K)$-module. Then $\mathbf{Loc}_{X, \chi}(V)$ lies in ${}^{\mathrm{h}} \cate{D}^{\mathrm{b}}_{\mathrm{rh}+}(D_{X, \chi}, K)$. \end{theorem} \begin{proof}
Recall the strategy in the non-monodromic case in \S\S\ref{sec:regularity-criterion}--\ref{sec:end-of-regularity}. The key criterion from \cite{Li22} applies in the monodromic setting. In fact, after ascending to $G$ by Remark \ref{rem:monodromic-inverse-image}, it boils down to showing that a finitely generated, $(H^{\mathrm{op}} \times K, \chi \otimes \mathrm{triv})$-monodromic and locally $\mathcal{Z}(\mathfrak{g} \times \mathfrak{g})$-finite $D_G$-module is regular holonomic, and this is indeed covered by \cite[Theorem 5.6]{Li22}. Note that $\chi$ is required to be trivial on the nilpotent radical of $\mathfrak{h}$ in \textit{loc.\ cit.}, but $H$ is reductive here.
The other ingredients in the proof carry over, by using Lemma \ref{prop:Knop-monodromic}. The bound on the characteristic varieties is obtained in the same way as in \cite[Proposition 3.4]{Li22}. \end{proof}
Likewise, Corollary \ref{prop:regularity-gen} and Proposition \ref{prop:ZX-locally-finite} also admit monodromic versions.
Given the base-point $x$ of $X$, the inclusion map $i_x$ gives rise to an inclusion of $S$-monodromic $K^H$-varieties, given by the Cartesian square \[\begin{tikzcd}
\pi^{-1}(x) \arrow[r] \arrow[d] & \tilde{X} \arrow[d, "\pi"] \\
\mathrm{pt} \arrow[r, "{i_x}"'] & X. \end{tikzcd}\]
Next, suppose that \begin{itemize}
\item $K^H$ is a reductive subgroup of $K$ that fixes $x$,
\item a character $K^H \to \ensuremath{\mathbb{G}_\mathrm{m}}$ is given, whose derivative coincides with $\chi|_{\mathfrak{k}^H}$. \end{itemize} In this way, $\chi$ can be viewed as a one-dimensional $(\mathfrak{h}, K^H)$-module.
Note that $D_{\mathrm{pt}, \chi} \simeq \operatorname{Sym}(\mathfrak{s}) / \mathfrak{m}_\chi$. Define the $K^H$-equivariant $D_{\mathrm{pt}, \chi}$-module $\ensuremath{\mathbb{C}}_\chi$ by letting $\mathfrak{s}$ (resp.\ $K^H$) act on $\ensuremath{\mathbb{C}}$ through $\chi$ (resp.\ the given $K^H \to \ensuremath{\mathbb{G}_\mathrm{m}}$). Below is the monodromic counterpart of Proposition \ref{prop:RHom-RHom}.
\begin{proposition}
For all objects $M$ of ${}^{\mathrm{h}} \cate{D}^-(\mathfrak{g}, K)$, there are canonical isomorphisms in $\cate{D}^+(\ensuremath{\mathbb{C}})$:
\[ \operatorname{RHom}_{\mathfrak{h}, K^H}(M|_H, \chi) \simeq \operatorname{RHom}_{D_{\mathrm{pt}, \chi}, K^H}\left( i_x^\bullet( \mathbf{Loc}_{X, \chi}(M) ), \ensuremath{\mathbb{C}}_\chi \right) \]
where $i_x^\bullet$ is the h-derived inverse image for $K^H$-equivariant monodromic $D$-modules. \end{proposition} \begin{proof}
Same as the non-monodromic case. It suffices to replace $\ensuremath{\mathbb{C}}$ by $\ensuremath{\mathbb{C}}_\chi$, then apply the following analog of Proposition \ref{prop:Loc-coinv}. \end{proof}
\begin{proposition}\label{prop:coinv-Loc-monodromic}
For every $\mathfrak{g}$-module $V$, its space of $(\mathfrak{h}, \chi)$-co-invariants is isomorphic to $\ensuremath{\mathbb{C}} \dotimes{\mathscr{O}_{X, x}} \left( \mathscr{D}_{X, \chi} \dotimes{U(\mathfrak{g})} V\right)$, by sending the image of $v \in V$ to $1 \otimes (1 \otimes v)$. \end{proposition} \begin{proof}
Same as the one given in \cite[Lemma 2.2]{BZG19}. It suffices to replace the map $\mathscr{O}_X \otimes \mathfrak{g} \to \mathscr{D}_X$ by $\mathscr{O}_X \otimes \mathfrak{g} \to \mathscr{D}_{X, \chi}$. \end{proof}
Below is the monodromic counterpart of Theorem \ref{prop:RHom-Loc}. Define the functors $i_x^!$, $i_x^*$ and $i_{x, *}$ in the same manner as in \S\ref{sec:consequence-regularity}.
\begin{theorem}
Let $M$ be an object of the category ${}^{\mathrm{h}}\cate{D}^{\mathrm{b}}_{\mathrm{HC}}(\mathfrak{g}, K)$. There are canonical isomorphisms
\begin{equation*}
\operatorname{RHom}_{\mathfrak{h}, K^H}(M|_H, \chi) \simeq \operatorname{RHom}_{D_{\mathrm{pt}, \chi}, K^H}\left( i_x^! \mathbf{Loc}_{X, \chi}(M)[\dim X], \ensuremath{\mathbb{C}}_\chi \right).
\end{equation*}
When $K^H = \{1\}$, this is also isomorphic to
\begin{multline*}
\operatorname{RHom}_{D_{\mathrm{pt}, \chi}}\left( i_x^* \mathbf{Loc}_{X, \chi}(M)[-\dim X], \ensuremath{\mathbb{C}}_\chi \right) \\
\simeq \operatorname{RHom}_{D_{X, \chi}}\left( \mathbf{Loc}_{X, \chi}(M), i_{x, *}(\ensuremath{\mathbb{C}}_\chi)[\dim X] \right).
\end{multline*}
All these complexes of $D$-modules are bounded with regular holonomic cohomologies. \end{theorem} \begin{proof}
Since the Riemann--Hilbert correspondence has a monodromic version \cite[Theorem 3.16.2]{Ka89}, and similarly in the equivariant case, one can repeat the proof of Theorem \ref{prop:RHom-Loc}. Caution: the monodromic Riemann--Hilbert correspondence lands in the derived category of ``twisted sheaves''. \end{proof}
Accordingly, we deduce the consequences about: \begin{itemize}
\item ${}^{\mathrm{h}} \operatorname{Ext}^n_{\mathfrak{h}, K^H}(M|_H, \chi)$ (cf.\ Corollary \ref{prop:Ext-consequence-1}),
\item $\operatorname{Ext}^n_{\mathfrak{h}, K^H}(V|_H, \chi)$, $\operatorname{H}_n(\mathfrak{h}, K^H; V|_H \otimes \chi^\vee)$ (cf.\ Corollary \ref{prop:Ext-consequence-2}), where $V$ is a Harish-Chandra $(\mathfrak{g}, K)$-module and $\chi^\vee$ means the contragredient of $\chi$. \end{itemize}
The finiteness of $\dim \operatorname{H}_n(\mathfrak{h}, K^H; V|_H \otimes \chi^\vee)$ is still covered by the work of M.\ Kitagawa \cite[Fact 4.7]{Ki21}.
Finally, Proposition \ref{prop:H-Loc} also has a monodromic analogue: \begin{equation*}
\operatorname{H}_n(\mathfrak{h}, K^H; V|_H \otimes \chi^\vee) \simeq \operatorname{H}^{- n + \dim X}\operatorname{L}\!\left( \mathrm{coInv}^{\ensuremath{\mathbb{C}}, K^H}_{\ensuremath{\mathbb{C}}, \{1\}} \right) \left(i_x^! \mathbf{Loc}_{X, \chi}(V)\right). \end{equation*} When $K^H = \{1\}$, it is isomorphic to $\operatorname{H}^{-n - \dim X}\left(i_x^* \mathbf{Loc}_{X, \chi}(V)\right)$.
\section{Comparison with the analytic picture}\label{sec:analytic} \subsection{Schwartz homologies} In this subsection, we work on the analytic side of representation theory. Therefore, we take \begin{itemize}
\item $G$: an almost linear Nash group,
\item $K$: a maximal compact subgroup of $G$. \end{itemize} They are both Lie groups, and we will consider continuous representations of them.
Let $E$ be a smooth Fréchet representation of $G$ of moderate growth. In \cite{CS21}, Y.\ Chen and B.\ Sun defined the \emph{Schwartz homologies} $\operatorname{H}^{\mathcal{S}}_n(G; E)$ for $n \in \ensuremath{\mathbb{Z}}_{\geq 0}$. These are locally convex topological vector spaces which are possibly non-Hausdorff, functorial in $E$, and satisfy various desirable properties such as long exact sequences and Shapiro's lemma. In particular, \[ \operatorname{H}^{\mathcal{S}}_0(G; E) = E_G := E \big/ \sum_{g \in G} (g - \ensuremath{\mathrm{id}})(E), \quad \text{endowed with the quotient topology}. \] We refer to \cite{CS21} for all the details and terminologies.
Let $\mathfrak{g}$ be the complexified Lie algebra of $G$. Every smooth representation $E$ of $G$ gives rise to a $(\mathfrak{g}, K)$-module in a generalized sense: it is a vector space equipped with compatible structures of $\mathfrak{g}$-module and $K$-module; unlike \S\ref{sec:gK-basic}, the $K$-action here is not assumed to be algebraic or locally finite, but only smooth.
Let $\mathfrak{p} := \mathfrak{g}/\mathfrak{k}$, on which $K$ acts; we may and do choose a decomposition $\mathfrak{g} = \mathfrak{k} \oplus \mathfrak{p}$ as $K$-modules, and denote by $\mathcal{P}: \mathfrak{g} \to \mathfrak{p}$ the corresponding projection.
\begin{definition}
For a $(\mathfrak{g}, K)$-module $E$, the relative Lie algebra homologies of $E$ are
\[ \operatorname{H}_n(\mathfrak{g}, K; E) := \operatorname{H}_n\left( C(\mathfrak{g}, K; E) \right) \]
where the \emph{standard complex} $C(\mathfrak{g}, K; E)$ for $E$ is defined by
\[
C_n(\mathfrak{g}, K; E) := (\bigwedge^n \mathfrak{p}) \dotimes{K} E, \quad n \in \ensuremath{\mathbb{Z}}_{\geq 0}.
\]
Here we make the left $K$-module $\bigwedge^n \mathfrak{p}$ into a right one by $u k := k^{-1} u$, so $\otimes_{K}$ is simply the tensor product over the abstract group algebra $\ensuremath{\mathbb{C}}[K]$. Alternatively, $\bigwedge^n \mathfrak{p} \dotimes{K} E$ is also the space of co-invariants (algebraically) of $\bigwedge^n \mathfrak{p} \otimes E$ under the diagonal $K$-action.
The boundary maps in the complex
\[ \partial_n: C_n(\mathfrak{g}, K; E) \to C_{n-1}(\mathfrak{g}, K; E) \]
for $n \geq 1$ are the usual ones (cf.\ \cite[(2.126b)]{KV95}):
\begin{multline*}
\partial_n((\xi_1 \wedge \cdots \wedge \xi_n) \otimes v) = \sum_{i=1}^n (-1)^i (\cdots \wedge \widehat{\xi_i} \wedge \cdots) \otimes \xi_i v \\
+ \sum_{p<q} (-1)^{p+q} (\mathcal{P}[\xi_p, \xi_q] \wedge \cdots \widehat{\xi_p} \cdots \widehat{\xi_q} \cdots ) \otimes v.
\end{multline*} \end{definition}
Now let $E$ be a smooth Fréchet representation of $G$ of moderate growth. Then each $C_n(\mathfrak{g}, K; E)$ gets topologized, each $\partial_n$ is continuous, and the homologies are endowed with quotient topologies. In \cite[Theorem 7.7]{CS21} it is proved that \begin{equation}\label{eqn:HS-Lie}
\operatorname{H}^{\mathcal{S}}_n(G; E) \simeq \operatorname{H}_n(\mathfrak{g}, K; E), \quad n \in \ensuremath{\mathbb{Z}}_{\geq 0} \end{equation} canonically and topologically.
\begin{definition}
Let $E$ be a smooth Fréchet representation of $G$ of moderate growth. Denote by $E^{K\text{-fini}}$ the subspace of $K$-finite vectors in $E$. \end{definition}
Therefore $E^{K\text{-fini}}$ is a $(\mathfrak{g}, K)$-module in the usual algebraic sense. By functoriality and \eqref{eqn:HS-Lie}, we obtain canonical linear maps \[ \operatorname{H}_n\left(\mathfrak{g}, K; E^{K\text{-fini}}\right) \to \operatorname{H}^{\mathcal{S}}_n(G; E), \quad n \in \ensuremath{\mathbb{Z}}_{\geq 0}. \]
\begin{proposition}\label{prop:HS-int}
Let $E$ be a smooth Fréchet representation of $G$ of moderate growth. For every $n$, we have
\[ \operatorname{H}_n\left(\mathfrak{g}, K; E^{K\text{-fini}}\right) \rightiso \operatorname{H}^{\mathcal{S}}_n(G; E) \]
as vector spaces. \end{proposition} \begin{proof}
Let $\mathrm{Irr}(K)$ be the set of isomorphism classes of finite-dimensional irreducible representations of $K$. Let $\mathcal{M}(K)$ be the unital algebra of bounded complex Borel measures on $K$ under convolution. Let $R(K)$ be the non-unital subalgebra consisting of left $K$-finite (equivalently, right $K$-finite) elements, which is also spanned by the matrix coefficients from $\mathrm{Irr}(K)$ once we fix a Haar measure. See \cite[I.2]{KV95}.
By continuity and completeness, $\mathcal{M}(K)$ acts on $E$, thus so does $R(K)$.
For each $\gamma \in \mathrm{Irr}(K)$, define
\[ \chi_\gamma := \dim\gamma \cdot \Theta_{\gamma^\vee} \cdot \frac{\mathop{}\!\mathrm{d} k}{\mathrm{mes}(K)} \; \in R(K), \]
where $\Theta_{\gamma^\vee}$ is the character of the contragredient $\gamma^\vee$; see \cite[(1.23)]{KV95}. This is an idempotent in $R(K)$ which is $K$-invariant under conjugation.
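To illustrate the projector in the simplest case (purely for orientation), take $K = \mathrm{U}(1)$ and $\gamma: e^{i\theta} \mapsto e^{in\theta}$; then $\chi_\gamma = e^{-in\theta} \, \frac{\mathop{}\!\mathrm{d}\theta}{2\pi}$, and for a vector $v$ on which $K$ acts through the character $e^{i\theta} \mapsto e^{im\theta}$ one finds
\[ \chi_\gamma v = \int_0^{2\pi} e^{i(m - n)\theta} v \, \frac{\mathop{}\!\mathrm{d}\theta}{2\pi} = \delta_{m, n}\, v, \]
so that $\chi_\gamma$ indeed projects onto the $\gamma$-isotypic component.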
For every finite subset $\Gamma$ of $\mathrm{Irr}(K)$, we define $\chi_\Gamma := \sum_{\gamma \in \Gamma} \chi_\gamma$. This is still a $K$-invariant idempotent. When applied to locally $K$-finite representations, its effect is to project to the $\Gamma$-isotypic part.
The evident maps $C_n(\mathfrak{g}, K; E^{K\text{-fini}}) \to C_n(\mathfrak{g}, K; E)$ give a morphism between complexes. It suffices to show it is an isomorphism in each degree $n$.
First, we prove that $C_n(\mathfrak{g}, K; E^{K\text{-fini}}) \to C_n(\mathfrak{g}, K; E)$ is surjective. Take $\Gamma$ that contains all irreducible constituents of $\bigwedge^n \mathfrak{p}$. Let $r \mapsto r'$ be the anti-involution of $R(K)$ induced by $k \mapsto k^{-1}$. For every $\omega \otimes v \in \bigwedge^n \mathfrak{p} \dotimes{K} E$, we claim that
\[ \omega \otimes v = \chi_\Gamma \omega \otimes v = \omega \otimes \chi'_\Gamma v. \]
Indeed, the first equality follows from the choice of $\Gamma$. As for the second one, in the proof of \cite[Theorem 7.7]{CS21}, a ``strong projective resolution'' (45) of $E$ is shown to be topologically isomorphic to $C(\mathfrak{g}, K; E)$ after taking $(\cdot)_G$. It is part of the theory of Schwartz homologies, especially \cite[Theorem 5.9]{CS21}, that taking $(\cdot)_G$ of such a resolution produces Fréchet spaces in each degree. Hence the algebraic co-invariants in the formation of $\bigwedge^n \mathfrak{p} \dotimes{K} E$ are actually topological, i.e.
\[ \bigwedge^n \mathfrak{p} \dotimes{K} E = \left(\bigwedge^n \mathfrak{p} \otimes E\right) \bigg/ \overline{\lrangle{k\omega \otimes kv - \omega \otimes v : k \in K,\ \omega \in \bigwedge^n \mathfrak{p},\ v \in E}}. \]
This implies $\chi_\Gamma \omega \otimes v = \omega \otimes \chi'_\Gamma v$ in $\bigwedge^n \mathfrak{p} \dotimes{K} E$, by approximating $\chi_{\Gamma}$ by Dirac measures. Surjectivity follows since $\chi_{\Gamma} E = E^\Gamma$, the $\Gamma$-isotypic part of $E^{K\text{-fini}}$.
Let us show injectivity. By general properties of $\otimes$ over $\ensuremath{\mathbb{C}}[K]$, we have
\[ \bigwedge^n \mathfrak{p} \dotimes{K} (E^{K\text{-fini}}) = \bigwedge^n \mathfrak{p} \dotimes{K} \varinjlim_{\Gamma} E^\Gamma \leftiso \varinjlim_{\Gamma} \left( \bigwedge^n \mathfrak{p} \dotimes{K} E^\Gamma \right) \]
where the $\varinjlim$ over finite subsets $\Gamma \subset \mathrm{Irr}(K)$ is filtered. The map from the leftmost term to $\bigwedge^n \mathfrak{p} \dotimes{K} E$ is determined by the compatible family of evident maps
\[ \bigwedge^n \mathfrak{p} \dotimes{K} E^\Gamma \to \bigwedge^n \mathfrak{p} \dotimes{K} E. \]
Fixing $\Gamma$, the map above is a split injection since $\chi_\Gamma$ is a $K$-invariant idempotent, so
\[ E = \chi_\Gamma E \oplus (\ensuremath{\mathrm{id}} - \chi_\Gamma) E = E^\Gamma \oplus (\ensuremath{\mathrm{id}} - \chi_\Gamma) E \]
as $\ensuremath{\mathbb{C}}[K]$-modules. As the filtered $\varinjlim$ of injections is still injective, this shows the injectivity of $\bigwedge^n \mathfrak{p} \dotimes{K} (E^{K\text{-fini}}) \to \bigwedge^n \mathfrak{p} \dotimes{K} E$.
Note that the same method shows that $\bigwedge^n \mathfrak{p} \dotimes{K} E^\Gamma \to \bigwedge^n \mathfrak{p} \dotimes{K} E^{\Gamma'}$ is split injective whenever $\Gamma \subset \Gamma'$. \end{proof}
\subsection{Comparison map}\label{sec:comparison} We now consider almost linear Nash groups \begin{equation}\label{eqn:four-groups-an}\begin{tikzcd}
H \arrow[phantom, r, "\subset" description] & G \\
K^H \arrow[phantom, u, "\subset" description, sloped] \arrow[phantom, r, "\subset" description] & K \arrow[phantom, u, "\subset" description, sloped] \end{tikzcd}\end{equation} analogously to the algebraic setting of \eqref{eqn:four-groups}, the assumptions now being: \begin{itemize}
\item $G$ is reductive,
\item $K$ (resp.\ $K^H$) is a maximal compact subgroup of $G$ (resp.\ $H$); this implies $K^H = H \cap K$. \end{itemize}
We consider \emph{Casselman--Wallach representations} of $G$, i.e.\ smooth admissible Fréchet representations of moderate growth; taking $K$-finite vectors establishes an equivalence between this category and the category of Harish-Chandra $(\mathfrak{g}, K)$-modules. See \cite{BK14}.
For a smooth Fréchet representation $E$ of $G$ of moderate growth, its restriction $E|_H$ to $H$ is still a smooth Fréchet representation of moderate growth, as one sees by unraveling the definitions. However, admissibility is usually lost.
In view of \eqref{eqn:HS-Lie} applied to $H$ and $K^H$, the inclusion $E^{K\text{-fini}} \subset E$ induces natural linear maps \begin{equation}\label{eqn:H-vs-HS}
c_n(E): \operatorname{H}_n\left(\mathfrak{h}, K^H; E^{K\text{-fini}}\right) \to \operatorname{H}^{\mathcal{S}}_n(H; E|_H), \quad n \in \ensuremath{\mathbb{Z}}_{\geq 0} \end{equation} between vector spaces.
\begin{definition}
We call \eqref{eqn:H-vs-HS} the \emph{comparison maps} for the Casselman--Wallach representation $E$ relative to the data \eqref{eqn:four-groups-an}. \end{definition}
The following question is thus natural. \begin{center}\fbox{\begin{minipage}{0.8\textwidth}
For what data \eqref{eqn:four-groups-an}, representations $E$ and $n$ is $c_n(E)$ an isomorphism? \end{minipage}}\end{center}
\begin{remark}\label{rem:comparison-K}
In view of Proposition \ref{prop:HS-int}, the map $c_n(E)$ is the same as the map
\[ \operatorname{H}_n\left(\mathfrak{h}, K^H; E^{K\text{-fini}}\right) \to \operatorname{H}_n\left(\mathfrak{h}, K^H; E^{K^H\text{-fini}}\right) \]
induced by $E^{K\text{-fini}} \subset E^{K^H\text{-fini}}$. \end{remark}
For such questions, it is usual to focus on the case when $H$ is a real spherical subgroup of $G$. An affirmative answer for all $E$ would have strong consequences. For example, in degree $n=0$ it amounts to \emph{automatic continuity} for invariant linear functionals (cf.\ \cite[Theorem A]{AGKL16}); such results are deep and currently available only in sporadic cases, for example (i) when $n=0$ and $H \subset G$ is a symmetric subgroup \cite{BD88}, or (ii) when $n$ is arbitrary and $H \subset G$ is a maximal unipotent subgroup \cite{HT98, LLY21}.
In order to apply the earlier results, let us assume further that \begin{itemize}
\item all the groups $G$, $K$, $H$, $K^H$ arise from the algebraic setting \eqref{eqn:four-groups}, namely they arise from $\ensuremath{\mathbb{R}}$-points of affine groups;
\item $H$ is a reductive spherical subgroup of $G$, when viewed as complex groups. \end{itemize}
Since $V := E^{K\text{-fini}}$ is a Harish-Chandra $(\mathfrak{g}, K)$-module, by Corollary \ref{prop:Ext-consequence-2} or results of \cite{Ki21}, the source of $c_n(E)$ is finite-dimensional. Under these assumptions, the question of whether $c_n(E)$ is an isomorphism remains wide open in general. In \S\ref{sec:example-comparison}, we will consider some cases in which $c_n(E)$ turns out to be an isomorphism for all $n$.
\begin{remark}
A smooth Fréchet representation $F$ of $H$ of moderate growth
is said to be \emph{homologically finite} (resp.\ \emph{homologically separated}) if $\operatorname{H}^{\mathcal{S}}_n(H; F)$ is finite-dimensional (resp.\ Hausdorff) for all $n$; homological finiteness implies homological separation. See \cite{BC21} for an overview. With all the previous assumptions (algebraicity, sphericity), it is still unknown whether $E|_H$ is homologically finite or not, except for specific choices of $H$ or $E$, or for $n=0$. If $c_n(E)$ is surjective for all $n$, then $E|_H$ is homologically finite. In particular, homological finiteness will hold in all the examples of \S\ref{sec:example-comparison}. \end{remark}
\begin{remark}
In parallel with the monodromic setting considered in \S\ref{sec:monodromic}, one should also consider $\operatorname{H}^{\mathcal{S}}_n(H; E|_H \otimes \chi^{-1})$ when $\chi$ is a smooth character of $H$ factoring through the unipotent radical of $H$, and modify \eqref{eqn:H-vs-HS} accordingly. \end{remark}
\subsection{Examples of comparison isomorphisms}\label{sec:example-comparison} Consider the groups in \eqref{eqn:four-groups-an}; we assume that they are all affine groups in what follows.
\begin{proposition}\label{prop:disc-decomp}
Let $E$ be a Casselman--Wallach representation of $G$. If $E^{K^H\text{-fini}} = E^{K\text{-fini}}$, then the comparison map $c_n(E)$ is an isomorphism for all $n$. \end{proposition} \begin{proof}
Immediate from Remark \ref{rem:comparison-K}. \end{proof}
\begin{example}\label{eg:admissible-restriction}
Suppose $E$ is of the form $E = B^\infty$, where $B$ is an irreducible unitary representation of $G$ and $B^\infty$ denotes its smooth part. We say $B$ is \emph{$K^H$-admissible} if $B|_{K^H}$ decomposes discretely into a Hilbert direct sum of irreducibles with finite multiplicities. By \cite[Proposition 1.6]{Ko98}, this implies that $B^{K\text{-fini}}$ is \emph{discretely decomposable} as an $(\mathfrak{h}, K^H)$-module. The notions of admissibility and discrete decomposability under restriction have been extensively studied by T.\ Kobayashi and his collaborators; see \cite{Ko98} or \cite[\S 4.1]{Ko15} for an overview.
We claim that when $B$ is $K^H$-admissible, $c_n(E)$ is an isomorphism for all $n$.
Indeed, by \cite[Proposition 1.6]{Ko98}, the $K^H$-admissibility implies
\[ B^{K\text{-fini}} = (B|_H)^{\infty, K^H\text{-fini}}; \]
the left-hand side is $E^{K\text{-fini}}$, whilst the right-hand side contains $B^{\infty, K^H\text{-fini}} = E^{K^H\text{-fini}}$; since the inclusion $E^{K\text{-fini}} \subseteq E^{K^H\text{-fini}}$ always holds, the two spaces coincide. Hence Proposition \ref{prop:disc-decomp} can be applied.
Discrete decomposability is a rare phenomenon, so the scope of the condition above is limited. Nonetheless, it covers some interesting families: if $H \subset G$ is a symmetric subgroup and $H/K^H \to G/K$ is a holomorphic embedding of Hermitian symmetric domains, then every unitary highest weight module (e.g.\ a holomorphic discrete series) is $K^H$-admissible, hence discretely decomposable over $(\mathfrak{h}, K^H)$. See \cite[Fact 5.4]{Ko98}. \end{example}
\begin{example}\label{eg:HK-admissibility}
The $K^H$-admissibility mentioned above always holds if $H = K$. \end{example}
Before stating the next example, we record a standard reduction due to Hecht--Taylor.
\begin{proposition}\label{prop:HT-reduction}
If $c_n(E)$ is an isomorphism for all $n$ and all principal series representations $E$, then the same is true for all Casselman--Wallach representations $E$. \end{proposition} \begin{proof}
In view of the isomorphisms of abstract vector spaces
\[ \operatorname{H}_n\left(\mathfrak{h}, K^H; E^{K^H\text{-fini}} \right) \simeq \operatorname{H}_n\left(\mathfrak{h}, K^H; E\right) \simeq \operatorname{H}^{\mathcal{S}}_n(H; E|_H) \]
of Proposition \ref{prop:HS-int}, one can reiterate the proof of \cite[Proposition 3]{HT98}, which proceeds by resolving the $(\mathfrak{g}, K)$-module $E^{K\text{-fini}}$ into a complex of principal series via Casselman's Subrepresentation Theorem, and analyzing the corresponding map between two double complexes, each computing $(\mathfrak{h}, K^H)$-homologies in the vertical direction.
The point here is that $(\bigwedge^p \mathfrak{p}) \dotimes{K^H} (\cdot)$ is an exact functor from locally $K^H$-finite representations to vector spaces, for every $p \in \ensuremath{\mathbb{Z}}$. Indeed, taking $K^H$-co-invariants is an exact functor on such representations, as follows from \cite[Proposition 1.18]{KV95}. In \cite{HT98} one took $\otimes$ instead of $\otimes_{K^H}$, but the exactness is all one needs to analyze the spectral sequence $E_2^{q, -p}$ therein. \end{proof}
\begin{example}\label{eg:SL2}
Take $G = \operatorname{SL}(2)$, $K = \operatorname{SO}(2)$, $H \simeq \ensuremath{\mathbb{G}_\mathrm{m}}$ being the diagonal torus, and $K^H = K \cap H$; note that $H$ is a symmetric subgroup of $G$, and $K^H = \mu_2 := \{\pm 1\}$. We also take $U$ to be the upper-triangular unipotent subgroup, and identify all these groups with their $\ensuremath{\mathbb{R}}$-points. By identifying $\ensuremath{\mathbb{R}}^2$ with row vectors and $U \backslash G$ with $\ensuremath{\mathbb{R}}^2 \smallsetminus \{0\}$, the principal series can be described as follows, on the level of Casselman--Wallach representations.
Let $\lambda \in \ensuremath{\mathbb{C}}$ and $\epsilon \in \{0, 1\}$. Define
\[ V^\epsilon_\lambda := \left\{\begin{array}{r|l}
f: \ensuremath{\mathbb{R}}^2 \smallsetminus \{0\} \to \ensuremath{\mathbb{C}} & \forall t \in \ensuremath{\mathbb{R}}^\times, \; x \in \ensuremath{\mathbb{R}}^2 \smallsetminus \{0\} \\
\text{smooth} & f(tx) = \ensuremath{\mathrm{sgn}}(t)^\epsilon |t|^\lambda f(x)
\end{array}\right\}. \]
It is topologized in the standard way, and $G$ acts by $(gf)(x) = f(xg)$.
In particular, $\bigl(\begin{smallmatrix} a & \\ & a^{-1} \end{smallmatrix}\bigr)$ maps $f$ to $(x_1, x_2) \mapsto f(ax_1, a^{-1}x_2)$, and $-1 \in K^H$ maps $f$ to $(-1)^\epsilon f$. We also remark that $V^\epsilon_\lambda$ is the normalized parabolic induction of
\[ \begin{pmatrix} a & * \\ & a^{-1} \end{pmatrix} \mapsto \ensuremath{\mathrm{sgn}}(a)^\epsilon |a|^{-\lambda - 1}, \quad a \in \ensuremath{\mathbb{R}}^\times. \]
Let $\theta := \bigl(\begin{smallmatrix} 1 & \\ & -1\end{smallmatrix}\bigr)$, which is $K^H$-invariant and generates $\mathfrak{h}$. The $\theta$-invariants in $V^\epsilon_\lambda$ are precisely the functions $f$ which are locally constant on the real curve $x_1 x_2 = c$, for each $c \in \ensuremath{\mathbb{R}}$.
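Concretely, with the conventions above, differentiating the action of $\exp(t\theta) = \bigl(\begin{smallmatrix} e^t & \\ & e^{-t}\end{smallmatrix}\bigr)$ at $t=0$ gives
\[ (\theta f)(x_1, x_2) = \left.\frac{\mathrm{d}}{\mathrm{d}t}\right|_{t=0} f(e^t x_1, e^{-t} x_2) = x_1 \frac{\partial f}{\partial x_1}(x_1, x_2) - x_2 \frac{\partial f}{\partial x_2}(x_1, x_2), \]
so $\theta f = 0$ says exactly that $f$ is constant along the hyperbolas $x_1 x_2 = c$ within each open quadrant and along the open half-axes.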
Clearly, $(V^\epsilon_\lambda)^{K^H\text{-fini}} = V^\epsilon_\lambda$ is strictly larger than $(V^\epsilon_\lambda)^{K\text{-fini}}$.
For $L \in \{K^H, K\}$, the complex computing $\operatorname{H}_\bullet(\mathfrak{h}, K^H; (V^\epsilon_\lambda)^{L\text{-fini}})$ is
\begin{equation}\label{eqn:SL2-ps}\begin{aligned}
\ensuremath{\mathbb{C}}\theta \dotimes{K^H} (V^\epsilon_\lambda)^{L\text{-fini}} & \to \ensuremath{\mathbb{C}} \dotimes{K^H} (V^\epsilon_\lambda)^{L\text{-fini}}, \quad \deg = -1, 0, \\
\theta \otimes v & \mapsto 1 \otimes \theta v.
\end{aligned}\end{equation}
When $\epsilon = 0$, $\operatorname{H}_1$ is $\{f \in (V^0_\lambda)^{L\text{-fini}} : \theta f = 0 \}$. On each open quadrant, such an $f$ takes the form $c |x_1 x_2|^{\lambda /2}$ for some $c \in \ensuremath{\mathbb{C}}$. In order that these glue to a smooth function on $\ensuremath{\mathbb{R}}^2 \smallsetminus \{0\}$, the constants $c$ must coincide and we must have $\lambda \in 4\ensuremath{\mathbb{Z}}_{\geq 0}$, in which case $f(x_1, x_2) = c (x_1 x_2)^{\lambda /2}$ is polynomial, hence $K$-finite. Therefore
\[ \operatorname{H}_1\left(\mathfrak{h}, K^H; (V^0_\lambda)^{L\text{-fini}} \right) = \begin{cases}
0, & \text{if}\; \lambda \notin 4\ensuremath{\mathbb{Z}}_{\geq 0} \\
\ensuremath{\mathbb{C}} (x_1 x_2)^{\lambda/2}, & \text{if}\; \lambda \in 4\ensuremath{\mathbb{Z}}_{\geq 0}.
\end{cases}\]
When $\epsilon = 1$, we have $\ensuremath{\mathbb{C}}\theta \dotimes{K^H} (V^1_\lambda)^{L\text{-fini}} = \ensuremath{\mathbb{C}} \dotimes{K^H} (V^1_\lambda)^{L\text{-fini}} = 0$, since $-1 \in K^H$ acts by $-1$ on $V^1_\lambda$ but trivially on $\ensuremath{\mathbb{C}}\theta$ and $\ensuremath{\mathbb{C}}$; thus
\[ \operatorname{H}_n\left(\mathfrak{h}, K^H; (V^1_\lambda)^{L\text{-fini}} \right) = 0, \quad n \in \ensuremath{\mathbb{Z}}. \]
These computations show that $c_1(V_\lambda^\epsilon)$ is always an isomorphism. The same also holds for $c_0(V_\lambda^\epsilon)$, either by invoking the automatic continuity theorem for $H \subset G$ or by a direct computation as before. By Proposition \ref{prop:HT-reduction}, it follows that $c_n(E)$ is an isomorphism for all Casselman--Wallach representations $E$ and all $n$. \end{example}
The example above shows that, even in the simplest case, it may happen simultaneously that \begin{itemize}
\item $E^{K^H\text{-fini}} \supset E^{K\text{-fini}}$ strictly;
\item $c_n(E)$ is an isomorphism for all $n$;
\item some higher homologies of $E|_H$ are nonzero. \end{itemize}
In particular, the condition in Proposition \ref{prop:disc-decomp} is sufficient but not necessary in general.
\begin{example}\label{eg:SL2-more}
The proper reductive spherical subgroups $H$ of $G = \operatorname{SL}(2)$, identified with their $\ensuremath{\mathbb{R}}$-points, are
\[ \begin{pmatrix} * & \\ & * \end{pmatrix}, \quad \left\langle \begin{pmatrix} * & \\ & * \end{pmatrix}, \begin{pmatrix} & 1 \\ -1 & \end{pmatrix}\right\rangle, \quad \operatorname{SO}(2) \]
up to conjugacy. Take $K = \operatorname{SO}(2)$, so that $K^H := K \cap H$ is maximal compact in $H$ in each case.
In Example \ref{eg:SL2} it is shown that $c_n(E)$ is an isomorphism in the first case, for all $E$ and $n$. We will settle the remaining cases below.
For the second case (the normalizer of the diagonal torus), retain the notations from Example \ref{eg:SL2} and consider the principal series $V_\lambda^\epsilon$, which suffices by Proposition \ref{prop:HT-reduction}. One has $K^H \simeq \ensuremath{\mathbb{Z}}/4\ensuremath{\mathbb{Z}}$, whose generators act on $\theta \in \mathfrak{h}$ by $-1$. For $L \in \{K^H, K\}$ we may decompose $(V_\lambda^\epsilon)^{L\text{-fini}}$ into $K^H$-isotypic components. The complex \eqref{eqn:SL2-ps} becomes
\[ (V_\lambda^\epsilon)^{L\text{-fini}}[\rho] \to (V_\lambda^\epsilon)^{L\text{-fini}}[\mathrm{triv}], \quad v \mapsto \theta v, \]
where $\rho$ (resp.\ $\mathrm{triv}$) denotes the character of $K^H$ mapping the generators to $-1$ (resp.\ $1$), and $[\cdots]$ are the isotypic components. As in Example \ref{eg:SL2}, the homologies turn out to be independent of $L$. Hence $c_n(E)$ is an isomorphism for all $E$ and $n$.
Finally, the third case $H = \operatorname{SO}(2)$ is covered by Example \ref{eg:HK-admissibility}. \end{example}
\begin{flushleft} \small
Beijing International Center for Mathematical Research / School of Mathematical Sciences, Peking University. No.\ 5 Yiheyuan Road, Beijing 100871, People's Republic of China. \\
E-mail address: \href{mailto:[email protected]}{\texttt{[email protected]}} \end{flushleft}
\end{document} |
\begin{document}
\title{Category theoretic semantics for theorem proving in logic programming: embracing the laxness}
\begin{abstract} A propositional logic program $P$ may be identified with a $P_fP_f$-coalgebra on the set of atomic propositions in the program. The corresponding $C(P_fP_f)$-coalgebra, where $C(P_fP_f)$ is the cofree comonad on $P_fP_f$, describes derivations by resolution. Using lax semantics, that correspondence may be extended to a class of first-order logic programs without existential variables. The resulting extension captures the proofs by term-matching resolution in logic programming. Refining the lax approach, we further extend it to arbitrary logic programs. We also exhibit a refinement of Bonchi and Zanasi's saturation semantics for logic programming that complements lax semantics.
\keywordname{ Logic programming,
coalgebra, term-matching resolution, coinductive derivation tree,
Lawvere theories, lax
transformations, Kan extensions.}
\end{abstract}
\section{Introduction}
Consider the following two logic programs.
\begin{example} \label{ex:listnat}
ListNat (for lists of natural numbers) denotes the logic program\\
$1.\ \mathtt{nat(0)} \gets $\\
$2.\ \mathtt{nat(s(x))} \gets \mathtt{nat(x)}$\\ $3.\ \mathtt{list(nil)} \gets $ \\ $4.\ \mathtt{list(cons (x, y))} \gets \mathtt{nat(x), list(y)}$\\
\end{example}
\begin{example} \label{ex:lp}
GC (for graph connectivity) denotes the logic program\\
$0.\ \mathtt{connected(x,x)} \gets $\\
$1.\ \mathtt{connected(x,y)} \gets \mathtt{edge(x,z)}, \mathtt{connected(z,y)}$\\
\end{example} A critical difference between ListNat and GC is that in the latter, which is a leading example in Sterling and Shapiro's book~\cite{SS}, there is a variable $z$ in the tail of the second clause that does not appear in its head. The category theoretic consequences of that fact are the central concern of this paper.
It has long been observed, e.g., in~\cite{BM,CLM}, that logic programs induce coalgebras, allowing coalgebraic modelling of
their operational semantics.
In~\cite{KMP}, we developed the idea for variable-free logic
programs as follows. Using the definition of a logic program~\cite{Llo}, given a set of atoms $At$, one can identify a variable-free logic program
$P$ built over $At$ with a $P_fP_f$-coalgebra structure
on $At$, where $P_f$ is the finite powerset functor on $Set$: each atom is the
head of finitely many clauses in $P$, and the body of each
clause contains finitely many atoms. Our main result was that if
$C(P_fP_f)$ is the cofree comonad on $P_fP_f$, then, given a logic
program $P$ qua $P_fP_f$-coalgebra, the corresponding
$C(P_fP_f)$-coalgebra structure characterises the and-or
derivation trees generated by $P$, cf~\cite{GC}.
This result has proved to be stable, not only having been further developed by us~\cite{KoPS,KSH14,JohannKK15,FK15,FKSP16}, but also forming the basis for Bonchi and Zanasi's saturation semantics for logic programming (LP)~\cite{BZ,BonchiZ15}.
In Sections~\ref{sec:backr}, \ref{sec:parallel}, we give an updated account of the work, with updated definitions, proofs and detailed examples, to start our semantic analysis of derivations and proofs in LP.
In~\cite{KoP}, we extended our analysis from variable-free logic programs to arbitrary logic programs. Following~\cite{ALM,BM,BMR,KP96},
given a signature $\Sigma$ of function symbols, we let
$\mathcal{L}_{\Sigma}$ denote the Lawvere theory generated by
$\Sigma$, and, given a logic program $P$ with function symbols in $\Sigma$, we considered the functor category
$[\mathcal{L}_{\Sigma}^{op},Set]$, extending the set $At$ of atoms in
a variable-free logic program to the functor from
$\mathcal{L}_{\Sigma}^{op}$ to $Set$ sending a natural number $n$ to
the set $At(n)$ of atomic formulae with at most $n$ variables
generated by the function symbols in $\Sigma$ and the predicate symbols in $P$. We sought
to model $P$ by a $[\mathcal{L}_{\Sigma}^{op},P_fP_f]$-coalgebra
$p:At\longrightarrow P_fP_fAt$ that, at $n$, takes an atomic formula
$A(x_1,\ldots ,x_n)$ with at most $n$ variables, considers all
substitutions of clauses in $P$ into clauses with variables among $x_1,\ldots ,x_n$ whose head agrees with $A(x_1,\ldots
,x_n)$, and gives the set of sets of atomic formulae in antecedents,
mimicking the construction for variable-free logic
programs.
Unfortunately, that idea was too simple.
Consider the logic program ListNat, i.e., Example~\ref{ex:listnat}. There
is a map in $\mathcal{L}_{\Sigma}$ of the form $0\rightarrow 1$ that
models the nullary function symbol $0$. So, naturality of the map
$p:At\longrightarrow P_fP_fAt$ in $[\mathcal{L}_{\Sigma}^{op},Set]$
would yield commutativity of the diagram \begin{diagram} At(1) & \rTo^{p_1} & P_fP_fAt(1) \\ \dTo<{At(\mathsf{0})} & & \dTo>{P_fP_fAt(\mathsf{0})} \\ At(0) & \rTo_{p_0} & P_fP_fAt(0) \end{diagram} But consider $\mathtt{nat(x)}\in At(1)$: there is no clause of the form $\mathtt{nat(x)}\gets \, $ in
ListNat, so commutativity of the diagram would imply that there
cannot be a clause in ListNat of the form $\mathtt{nat(0)}\gets \, $
either, but in fact there is one.
At that point, proposed resolutions diverged: at CALCO in 2011, we proposed one approach using lax transformations~\cite{KoP}, then at CALCO 2013, Bonchi and Zanasi proposed another, using saturation semantics~\cite{BZ}, an example of the positive interaction generated by CALCO! In fact, as we explain in Section~\ref{sec:sat}, the two approaches may be seen as complementary rather than as alternatives. First we shall describe our approach.
We followed the standard category theoretic technique of relaxing the naturality condition on $p$ to a
subset condition, e.g., as in~\cite{Ben,BKP,HH,Kelly,KP1}, so that, in general, given a map in $\mathcal{L}_{\Sigma}$ of the form $f:n \rightarrow m$, the diagram \begin{diagram} At(m) & \rTo^{p_m} & P_fP_fAt(m) \\ \dTo<{At(f)} & & \dTo>{P_fP_fAt(f)} \\ At(n) & \rTo_{p_n} & P_fP_fAt(n) \end{diagram}
need not commute, but rather the composite via $P_fP_fAt(m)$ need only
yield a subset of that via $At(n)$. So, for example,
$p_1(\mathtt{nat(x)})$ could be the empty set while $p_0(\mathtt{nat(0)})$ could be
non-empty in the semantics for ListNat as required. We extended $Set$ to $Poset$ in order to express the laxness, and we adopted established category theoretic research on laxness, notably that of~\cite{Kelly}, in order to prove that a cofree comonad exists and behaves as we wished.
For a representative class of logic programs, the above semantics describes derivations arising from restricting the usual SLD-resolution used in LP to \emph{term-matching resolution}, cf.~\cite{KoP2,KoPS}. As transpired in further studies~\cite{FK15,JohannKK15}, this particular restriction of the resolution rule captures the theorem-proving aspect of LP as opposed to the problem-solving aspect captured by SLD-resolution with unification. We explain this idea in Section~\ref{sec:backr}. Derivation trees arising from proofs by term-matching resolution were called \emph{coinductive trees} in~\cite{KoP2,KoPS} to mark their connection to the coalgebraic semantics.
The categorical semantics introduced in~\cite{KoP} worked well for ListNat, allowing us to model its coinductive trees, as we show in Section~\ref{sec:recall} (this was not shown explicitly in~\cite{KoP}). However, it does not work well for GC, the key difference being that, in ListNat, no variable appears in the tail of a clause that does not also appear in its head, i.e., clauses in ListNat contain no \emph{existential} variables.
In contrast, although not expressed in these terms in~\cite{KoP}, we were unable to model the coinductive trees generated by GC because it is an \emph{existential} program, i.e. a program containing clauses with existential variables. We worked around the problems in~\cite{KoP}, but only inelegantly.
We give an updated account of~\cite{KoP} in Section~\ref{sec:recall}, going beyond~\cite{KoP} to explain how coinductive trees for logic programs without existential variables are modelled, and explaining the difficulty in modelling coinductive semantics for arbitrary logic programs. We then devote Section~\ref{sec:derivation} of the paper to the resolution of this difficulty, providing lax semantics for coinductive trees generated by arbitrary logic programs.
In contrast to this, Bonchi and Zanasi, concerned by the complexity involved with laxness, proposed the use of saturation, following~\cite{BM}, to provide an alternative category theoretic semantics~\cite{BZ,BonchiZ15}. Saturation is indeed an established and useful construct, as Bonchi and Zanasi emphasised~\cite{BZ,BonchiZ15}, with a venerable tradition, and, as they say, laxness requires careful calculation, albeit much less so in the setting of posets than that of categories. On the other hand, laxness is a standard part of category theory, one that has been accepted by computer scientists as the need has arisen, e.g., by He Jifeng and Tony Hoare to model data refinement~\cite{HH,HH1,KP1,P}. More fundamentally, saturation can be seen as complementary to the use of laxness rather than as an alternative to it, as we shall explain in Section~\ref{sec:sat}.
This reflects the important connection between the theorem proving and problem solving aspects of proof search in LP, as Section~\ref{sec:backr} further explains.
So we would suggest that both approaches are of value, with the interaction between them meriting serious consideration.
Saturation inherently yields a particular kind of compositionality, but one loses the tightness of the relationship between semantic model and operational behaviour. The latter is illustrated by the finiteness of branching in a coinductive tree, in contrast to the infinity of possible substitutions, which are inherent in saturation. To the extent that it is possible, we would like to recover operational semantics from the semantic model, along the lines of~\cite{PP}, requiring maintenance of intensionality where possible. We regard the distinction between ListNat and GC as a positive feature of lax semantics, as a goal of semantics is to shed light on the critical issues of programming, relation of existential programs to theorem-proving in LP being one such~\cite{FK15}. So we regard Section~\ref{sec:sat} as supporting both lax and saturation semantics, the interaction between them shedding light on logic programming.
\section{Background: theorem proving in LP}\label{sec:backr}
A \emph{signature} $\Sigma$ consists of a set $\mathcal{F}$ of function
symbols $f,g, \ldots$ each equipped with an arity. Nullary (0-ary) function symbols are
constants. For any set $\mathit{Var}$ of variables,
the set $Ter(\Sigma)$ of terms over $\Sigma$ is defined
inductively as usual:
\begin{itemize}
\item $x \in Ter(\Sigma)$ for every $x \in \mathit{Var}$.
\item If $f$ is an n-ary function symbol ($n\geq 0$) and $t_1,\ldots
,t_n \in Ter(\Sigma) $, then $f(t_1,\ldots
,t_n) \in Ter(\Sigma)$.
\end{itemize}
A \emph{substitution} over $\Sigma$ is a total function $\sigma: \mathit{Var} \to Ter(\Sigma)$.
Substitutions are extended from variables to terms as usual: if $t\in Ter(\Sigma)$ and $\sigma$ is a substitution, then the {\em application} $\sigma(t)$ is the result of applying $\sigma$ to all variables in $t$. A substitution $\sigma$ is a \emph{unifier} for $t, u$ if $\sigma(t) = \sigma(u)$, and is a \emph{matcher} for $t$ against $u$ if $\sigma(t) = u$. A substitution $\sigma$ is a {\em most general unifier} ({\em mgu}) for $t$ and $u$ if it is a unifier for $t$ and $u$ and is more general than any other such unifier. A {\em most
general matcher} ({\em mgm}) $\sigma$ for $t$ against $u$ is defined analogously.
In line with LP tradition~\cite{Llo}, we also take a set $\mathcal{P}$ of predicate symbols each equipped with an arity. It is possible to define logic programs over terms only,
in line with term-rewriting (TRS) tradition~\cite{Terese}, as we do in~\cite{JohannKK15}, but we will follow the usual LP tradition here. This gives us the following inductive definitions of the sets of atomic formulae, Horn clauses and logic programs (we also include the definition of terms here for convenience):
\begin{definition}\label{df:syntax}
\
Terms $Ter \ ::= \ Var \ | \ \mathcal{F}(Ter,..., Ter)$\\
Atomic formulae (or atoms) $At \ ::= \ \mathcal{P}(Ter,...,Ter)$\\
(Horn) clauses $HC \ ::= \ At \gets At,..., At$\\
Logic programs $Prog \ ::= \ HC, ... , HC$ \end{definition}
In what follows, we will use letters $A,B,C,D$, possibly with subscripts, to refer to elements of $At$.
Given a logic program $P$, we may ask whether a certain atom is logically entailed by $P$. E.g., given the program ListNat we may ask whether $\mathtt{list(cons(0,nil))}$ is entailed by ListNat. The following rule, which is a restricted form of the famous SLD-resolution, provides a semi-decision procedure to derive the entailment:
\begin{definition}[Term-matching (TM) Resolution]\label{def:resolution}
{\small \[\begin{array}{c}
\infer[]
{P \vdash [\ ] }
{ } \ \ \ \ \ \ \ \
\infer[\text{if}~( A \gets A_1, \ldots, A_n) \in P]
{P \vdash \sigma A }
{ P \vdash \sigma A_1 \quad \cdots \quad P \vdash \sigma A_n }
\end{array} \]} \end{definition}
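To make Definitions~\ref{df:syntax} and~\ref{def:resolution} concrete, the following Haskell sketch (our own illustration: the encoding, the function names and the depth bound are ours and are not part of the formal development) implements term matching and a depth-bounded search for proofs by TM-resolution.
\begin{verbatim}
import qualified Data.Map as M
import Control.Monad (foldM)

data Term   = Var String | Fun String [Term] deriving (Eq, Show)
data Atom   = Pred String [Term]             deriving (Eq, Show)
data Clause = Atom :<- [Atom]                -- head :<- body
type Prog   = [Clause]
type Subst  = M.Map String Term

appT :: Subst -> Term -> Term                -- apply a substitution to a term
appT s (Var x)    = M.findWithDefault (Var x) x s
appT s (Fun f ts) = Fun f (map (appT s) ts)

appA :: Subst -> Atom -> Atom
appA s (Pred p ts) = Pred p (map (appT s) ts)

-- matchT t u extends a substitution so that it sends t to u, if possible.
matchT :: Term -> Term -> Subst -> Maybe Subst
matchT (Var x) u s = case M.lookup x s of
  Nothing         -> Just (M.insert x u s)
  Just t | t == u -> Just s
  _               -> Nothing
matchT (Fun f ts) (Fun g us) s
  | f == g && length ts == length us =
      foldM (\acc (t, u) -> matchT t u acc) s (zip ts us)
matchT _ _ _ = Nothing

matchA :: Atom -> Atom -> Maybe Subst        -- match a clause head against a goal
matchA (Pred p ts) (Pred q us)
  | p == q && length ts == length us =
      foldM (\acc (t, u) -> matchT t u acc) M.empty (zip ts us)
  | otherwise = Nothing

-- tmProve prog d a: is there a TM-resolution proof of a of depth at most d?
-- (Clause variables are not renamed apart; this suffices for these examples.)
tmProve :: Prog -> Int -> Atom -> Bool
tmProve _    0 _ = False
tmProve prog d a = or
  [ all (tmProve prog (d - 1) . appA s) body
  | (h :<- body) <- prog, Just s <- [matchA h a] ]

listNat :: Prog                              -- the program ListNat
listNat =
  [ Pred "nat"  [Fun "0" []]                    :<- []
  , Pred "nat"  [Fun "s" [Var "x"]]             :<- [Pred "nat" [Var "x"]]
  , Pred "list" [Fun "nil" []]                  :<- []
  , Pred "list" [Fun "cons" [Var "x", Var "y"]] :<- [ Pred "nat" [Var "x"]
                                                    , Pred "list" [Var "y"] ]
  ]
\end{verbatim}
For instance, \texttt{tmProve listNat 5} succeeds on the encoding of $\mathtt{list(cons(0,nil))}$ but fails on that of $\mathtt{list(cons(x,y))}$, in line with the discussion below; the depth bound is there only to force termination, reflecting the fact that TM-resolution gives a semi-decision procedure.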
In contrast, the SLD-resolution rule could be presented in the following form:
$$B_1, \ldots , B_j, \ldots , B_n \leadsto_P \sigma B_1, \ldots, \sigma A_1, \ldots, \sigma A_n, \ldots , \sigma B_n ,$$ if $(A \gets A_1, \ldots, A_n) \in P$, and $\sigma$ is the mgu of $A$ and $B_{j}$. The derivation for $A$ succeeds when $A \leadsto^*_P [\ ]$, where $\leadsto_P^*$ denotes several steps of SLD-resolution.
At first sight, the difference between TM-resolution and SLD-resolution seems to be merely a matter of notation. Indeed, both $ListNat \vdash \mathtt{list(cons(0,nil))}$ and\\ $ \mathtt{list(cons(0,nil))} \leadsto^*_{ListNat} [\ ]$ hold by the above rules (see also Figure~\ref{pic:tree}). However, $ListNat \nvdash \mathtt{list(cons(x,y))}$ whereas $ \mathtt{list(cons(x,y))} \leadsto^*_{ListNat} [\ ]$. And, even more mysteriously, $GC \nvdash \mathtt{connected(x,y)}$ while $\mathtt{connected(x,y)} \leadsto_{GC} [\ ]$.
As it turns out, TM-resolution
reflects the \emph{theorem proving} side of LP: rules of Definition~\ref{def:resolution} can be used to semi-decide whether a given term $t$ is entailed by $P$. In contrast, SLD-resolution reflects the \emph{problem solving} aspect of LP: using the SLD-resolution rule, one asks whether, for a given $t$, a substitution $\sigma$ can be found such that $P \vdash \sigma(t)$. There is a subtle but important difference between these two aspects of proof search.
For example, when considering the successful derivation $ \mathtt{list(cons(x,y))}$ $ \leadsto^*_{ListNat} [\ ]$, we assume that $\mathtt{list(cons(x,y))}$ holds only relative to a computed substitution, e.g. $\mathtt{x \mapsto 0, \ y \mapsto nil}$. Of course this distinction is natural from the point of view of theorem proving: $\mathtt{list(cons(x,y))}$ is not a ``theorem'' in this generality, but its special case, $\mathtt{list(cons(0,nil))}$, is. Thus, $ListNat \vdash \mathtt{list(cons(0,nil))}$ but $ListNat \nvdash \mathtt{list(cons(x,y))}$ (see also Figure~\ref{pic:tree}). Similarly, $\mathtt{connected(x,y)} \leadsto_{GC} [\ ]$ should be read as: $\mathtt{connected(x,y)}$ holds relative to the computed substitution $\mathtt{y\mapsto x}$.
According to the soundness and completeness theorems for SLD-resolution~\cite{Llo}, the derivation $\leadsto$ has \emph{existential} meaning, i.e. when $\mathtt{list(cons(x,y))} \leadsto^*_{ListNat} [\ ]$, the successful goal $\mathtt{list(cons(x,y))}$ is not meant to be read as universally quantified over $\mathtt{x}$ and $\mathtt{y}$. On the contrary, TM-resolution proves a universal statement. That is, $GC \vdash \mathtt{connected(x,x)}$ reads as: $\mathtt{connected(x,x)}$ is entailed by GC for any $\mathtt{x}$.
Much of our recent work has been devoted to a formal understanding of the relation between the theorem proving and problem solving aspects of LP~\cite{JohannKK15,FK15}. The type-theoretic semantics of TM-resolution, given by ``Horn clauses as types, $\lambda$-terms as proofs'', is given in~\cite{FK15,FKSP16}.
Definition~\ref{def:resolution} gives rise to derivation trees. E.g. the derivation (or, equivalently, the proof) for $ListNat \vdash \mathtt{list(cons(0,nil))}$ can be represented by the following derivation tree:
\begin{tikzpicture}[level 1/.style={sibling distance=18mm}, level 2/.style={sibling distance=18mm}, level 3/.style={sibling distance=18mm},scale=.8,font=\footnotesize,baseline=(current bounding box.north),grow=down,level distance=10mm] \node (root) {$\mathtt{list(cons(0,nil))}$}
child { node {$\mathtt{nat(0)}$}
child { node {$[\ ]$}
}}
child { node {$\mathtt{list(nil)}$}
child { node {$[\ ]$}}};
\end{tikzpicture}
In general, given a term $t$ and a program $P$, more than one derivation for $P \vdash t$ is possible.
For example, if we add a fifth clause to program $ListNat$:\\ $5. \ \mathtt{list(cons(0,x)) \gets list(x)}$\\ then yet another, alternative, proof is possible for the extended program: $ListNat^+ \vdash \mathtt{list(cons(0,nil))}$ via the clause $5$:
\begin{tikzpicture}[level 1/.style={sibling distance=18mm}, level 2/.style={sibling distance=18mm}, level 3/.style={sibling distance=18mm},scale=.8,font=\footnotesize,baseline=(current bounding box.north),grow=down,level distance=10mm] \node (root) {$\mathtt{list(cons(0,nil))}$}
child { node {$\mathtt{list(nil)}$}
child { node {$[\ ]$}}};
\end{tikzpicture}
To reflect the choice of derivation strategies at every stage of the derivation, we can introduce a new kind of nodes, \emph{or-nodes}.
For our example, this gives the tree shown in Figure~\ref{pic:tree}; note the $\bullet$-nodes.
\begin{figure}
\caption{\textbf{Left:} a coinductive tree for $\mathtt{list(cons(0,nil))}$ and the extended program $ListNat^+$. \textbf{Right:} a coinductive tree for $\mathtt{list(cons(x,y))}$ and $ListNat^+$. The $\bullet$-nodes mark different clauses applicable to every atom in the tree.}
\label{pic:tree}
\end{figure}
This intuition is made precise in the following definition of a \emph{coinductive tree}, which first appeared in~\cite{KoP,KoPS} and was later refined in~\cite{JohannKK15} under the name of a rewriting tree. Note the use of mgms (rather than mgus) in the last item.
\begin{definition}[Coinductive tree]\label{def:cointree} Let P be a logic program and $A$ be an atomic formula. The \emph{coinductive tree} for $A$ is the possibly infinite tree T satisfying the following properties. \begin{itemize} \item $A$ is the root of $T$ \item Each node in $T$ is either an and-node or an or-node \item Each or-node is given by $\bullet$ \item Each and-node is an atom \item For every and-node $A'$ occurring in $T$, if there exist exactly $m > 0$ distinct clauses $C_1, \ldots ,C_m$ in P (a clause $C_i$ has the form $B_i \gets B^i_1,\ldots ,B^i_{n_i}$ for some $n_i$), such that $A' = B_1\theta_1 = \ldots = B_m\theta_m$, for mgms $\theta_1,\ldots ,\theta_m$, then $A'$ has exactly $m$ children given by or-nodes, such that, for every $i \in m$, the $i$-th or-node has $n_i$ children given by and-nodes $B^i_1\theta_i, \ldots ,B^i_{n_i}\theta_i$. \end{itemize} \end{definition}
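In Haskell-like notation (a sketch of our own, not part of the formal development), coinductive trees are naturally captured by a lazy datatype, with laziness accounting for the fact that such trees may be infinite:
\begin{verbatim}
-- An and-node is an atom together with its or-node children; each or-node
-- groups the and-nodes contributed by one clause whose head matches the atom.
data CTree atom  = AndNode atom [OrNode atom]
data OrNode atom = OrNode [CTree atom]
\end{verbatim}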
Coinductive trees provide a convenient model for proofs by TM-resolution.
Let us make one final observation on TM-resolution.
Generally, given a program $P$ and an atom $t$, one can prove that
\begin{center} $ t \leadsto^*_P [\ ]$ with computed substitution $\sigma$ iff $P \vdash \sigma t$. \end{center}
This simple fact may give the impression that proofs (and corresponding coinductive trees) for TM-resolution are in some sense fragments of reductions by SLD-resolution. Compare, e.g., the right-hand tree of Figure~\ref{pic:tree} before substitution with the larger left-hand tree obtained after the substitution.
In this case, we could emulate the problem-solving aspect of SLD-resolution by using coinductive trees and allowing substitutions to be applied within coinductive trees, as was proposed in~\cite{KoP2,JohannKK15,FK15}. Such intuition would hold perfectly for, e.g., ListNat, but would not hold for existential programs: although there is a one-step SLD-derivation $\mathtt{connected(x,y)} \leadsto_{GC} [\ ]$ (with $\mathtt{y \mapsto x}$), the TM-resolution proof for $\mathtt{connected(x,y)}$ diverges and gives rise to the following infinite coinductive tree:
\vspace*{-0.1in}
\begin{tikzpicture}[level 1/.style={sibling distance=30mm}, level 2/.style={sibling distance=25mm}, level 3/.style={sibling distance=25mm},scale=.8,font=\footnotesize,baseline=(current bounding box.north),grow=down,level distance=8mm]
\node {$\mathtt{connected(x,y)}$}
child {[fill] circle (2pt)
child { node {$\mathtt{edge(x,z)}$}}
child { node {$\mathtt{connected(z,y)}$}
child {[fill] circle (2pt)
child { node{$\mathtt{edge(z,z_1)}$}}
child { node{$\mathtt{connected(z_1,y)}$}
child {node{$\vdots$}}}}
}};
\end{tikzpicture}
Not only is the proof for $GC \vdash \mathtt{connected(x,y)}$ not in any sense a fragment of the derivation $\mathtt{connected(x,y)} \leadsto_{GC} [\ ]$, but it also requires a larger (indeed infinite) signature. Thus, the operational semantics of TM-resolution and SLD-resolution can be very different for existential programs, both in terms of termination and of signature size.
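In a Haskell-like sketch (again our own illustration; fresh variables are generated from an integer counter and renaming issues are elided), the infinite coinductive tree just drawn can be written down corecursively, laziness accounting for its infinite depth:
\begin{verbatim}
data GAtom = Edge String String | Connected String String
data GTree = AndNode GAtom [[GTree]]   -- the inner lists play the role of or-nodes

-- gcTree i x y: the coinductive tree for connected(x,y) in GC, introducing the
-- fresh variables z<i>, z<i+1>, ... as one descends.
gcTree :: Int -> String -> String -> GTree
gcTree i x y =
  AndNode (Connected x y)
    [ [ AndNode (Edge x z) []          -- edge(x,z): no clause head matches it
      , gcTree (i + 1) z y ] ]         -- connected(z,y): the tree never ends
  where z = "z" ++ show i

-- gcTree 0 "x" "y" is the tree drawn above; any finite part can be explored.
\end{verbatim}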
This problem is orthogonal to non-termination. Consider the non-terminating (but not existential) program Bad:
$\mathtt{bad(x)} \gets \mathtt{bad(x)}$\\ For Bad, the operational behaviour of TM-resolution and SLD-resolution is similar: derivations with both do not terminate, and both require only a finite signature. Once again, such programs can be analysed using similar coinductive methods in TM- and SLD-resolution~\cite{FKSP16,SimonBMG07}.
The problems caused by existential variables are known in the literature on theorem proving and term-rewriting~\cite{Terese}. In TRS~\cite{Terese}, existential variables are not allowed to appear in rewriting rules, and in type inference based on term rewriting or TM-resolution, the restriction to non-existential programs is common~\cite{Jones97}.
So theorem-proving, in contrast to problem-solving, is modelled by term-matching; term-matching gives rise to coinductive trees; and as explained in the introduction and, in more detail, later, coinductive trees give rise to laxness. So in this paper, we use laxness to model coinductive trees, and thereby theorem-proving in LP, and relate our semantics to Bonchi and Zanasi's work, which we believe primarily models the problem-solving aspect of logic programming.
Categorical semantics for existential programs, which are known to be challenging for theorem proving, is the main contribution of Section~\ref{sec:derivation} and this paper.
\section{Modelling coinductive trees for variable-free logic programs}\label{sec:parallel}
In this section, we recall and develop the work of~\cite{KMP} and in particular we
restrict our semantics to variable-free logic programs, i.e. we take $Var = \emptyset$ in Definition~\ref{df:syntax}. Variable-free logic programs are operationally equivalent to propositional logic programs, as substitutions play no role in derivations. In this (propositional) setting, coinductive trees coincide with the and-or derivation trees known in the LP literature~\cite{GC}.
\begin{proposition}\label{const:coal}
For any set $\mathrm{\textrm{At}}$, there is a bijection between the set of
variable-free logic programs over the set of atoms $\mathrm{\textrm{At}}$ and the set
of $P_fP_f$-coalgebra structures on $\mathrm{\textrm{At}}$, where $P_f$ is the finite
powerset functor on $Set$.
\end{proposition}
\begin{theorem}\label{constr:Gcoalg}
Let $C(P_fP_f)$ denote the cofree comonad on $P_fP_f$. Then, for $p:
\mathrm{\textrm{At}} \longrightarrow P_f P_f(\mathrm{\textrm{At}})$, the corresponding
$C(P_fP_f)$-coalgebra $\overline{p}: \mathrm{\textrm{At}} \longrightarrow C(P_fP_f)(\mathrm{\textrm{At}})$ sends an atom $A$ to the coinductive tree for $A$. \end{theorem}
\begin{proof} Applying the work of~\cite{W} to this setting, the cofree comonad is in general determined as follows: $C(P_fP_f)(\mathrm{\textrm{At}})$ is the
limit of the diagram
$$\ldots \longrightarrow \mathrm{\textrm{At}} \times P_fP_f(\mathrm{\textrm{At}}
\times P_fP_f(\mathrm{\textrm{At}})) \longrightarrow \mathrm{\textrm{At}} \times P_fP_f(\mathrm{\textrm{At}})
\longrightarrow \mathrm{\textrm{At}},$$ with maps determined by the projection $\pi_0:At\times P_fP_f(At)\longrightarrow At$ and by applications of the functor $At \times P_fP_f(-)$ to it.
Putting $\mathrm{\textrm{At}}_0 = \mathrm{\textrm{At}}$ and $\mathrm{\textrm{At}}_{n+1} = \mathrm{\textrm{At}} \times
P_fP_f\mathrm{\textrm{At}}_n$, and defining the cone
\begin{eqnarray*}
p_0 & = & id: \mathrm{\textrm{At}} \longrightarrow \mathrm{\textrm{At}} ( = \mathrm{\textrm{At}}_0)\\
p_{n+1} & = & \langle id, P_fP_f(p_n) \circ p \rangle : \mathrm{\textrm{At}}
\longrightarrow \mathrm{\textrm{At}} \times P_fP_f \mathrm{\textrm{At}}_n ( = \mathrm{\textrm{At}}_{n+1})
\end{eqnarray*}
the limiting property of the diagram determines the coalgebra
$\overline{p}: \mathrm{\textrm{At}} \longrightarrow C(P_fP_f)(\mathrm{\textrm{At}})$. The image $\overline{p}(A)$ of an atom $A$ is given by an element of the limit, equivalently a map from $1$ into the limit, equivalently a cone of the diagram over $1$.
To give the latter is equivalent to giving an element $A_0$ of $At$, specifically $p_0(A) = A$, together with an element $A_1$ of $At\times P_fP_f(At)$, specifically $p_1(A) = (A,P_fP_f(p_0)(p(A))) = (A,p(A))$, together with an element $A_2$ of $At\times P_fP_f(At\times P_fP_f(At))$, etcetera. The definition of the coinductive tree for $A$ is inherently coinductive, matching the definition of the limit, and with the first step agreeing with the definition of $p$. Thus it follows by coinduction that $\overline{p}(A)$ can be identified with the coinductive tree for $A$.
\end{proof}
\begin{example} \label{ex:free} Let $At$ consist of atoms $\mathtt{A,B,C}$ and $\mathtt{D}$. Let $P$ denote the logic program
\begin{eqnarray*}
\mathtt{A} & \gets & \mathtt{B,C} \\ \mathtt{A} & \gets & \mathtt{B,D} \\
\mathtt{D} & \gets & \mathtt{A,C}\\
\end{eqnarray*} So $p(\mathtt{A}) = \{ \{ \mathtt{B,C}\} , \{ \mathtt{B,D} \} \}$, $p(\mathtt{B}) = p(\mathtt{C}) = \emptyset$, and $p(\mathtt{D}) = \{ \{ \mathtt{A,C}\} \}$.
Then $p_0(\mathtt{A}) = \mathtt{A}$, which is the root of the coinductive tree for $\mathtt{A}$.
Then $p_1(\mathtt{A}) = (\mathtt{A},p(\mathtt{A})) = (\mathtt{A},\{ \{ \mathtt{B,C}\} , \{ \mathtt{B,D} \} \})$, which consists of the same information as in the first three levels of the coinductive tree for $\mathtt{A}$, i.e., the root $\mathtt{A}$, two or-nodes, and below each of the two or-nodes, nodes given by each atom in each antecedent of each clause with head $\mathtt{A}$ in the logic program $P$: nodes marked $\mathtt{B}$ and $\mathtt{C}$ lie below the first or-node, and nodes marked $\mathtt{B}$ and $\mathtt{D}$ lie below the second or-node, exactly as $p_1(\mathtt{A})$ describes.
Continuing, note that $p_1(\mathtt{D}) = (\mathtt{D},p(\mathtt{D})) = (\mathtt{D},\{ \{\mathtt{A,C}\} \})$. So \[ \begin{array}{ccl} p_2(\mathtt{A}) & = & (\mathtt{A},P_fP_f(p_1)(p(\mathtt{A})))\\
& = & (\mathtt{A},P_fP_f(p_1)( \{ \{ \mathtt{B,C}\} , \{ \mathtt{B,D} \} \}))\\
& = & (\mathtt{A}, \{ \{( \mathtt{B},\emptyset),(\mathtt{C},\emptyset)\} , \{( \mathtt{B},\emptyset),(\mathtt{D},\{ \{\mathtt{A,C}\} \}) \} \}) \end{array} \] which is the same information as that in the first five levels of the coinductive tree for $\mathtt{A}$: $p_1(\mathtt{A})$ provides the first three levels of $p_2(\mathtt{A})$ because $p_2(\mathtt{A})$ must map to $p_1(\mathtt{A})$ in the cone; in the coinductive tree, there are two and-nodes at level 5, labelled by $\mathtt{A}$ and $\mathtt{C}$. As there are no clauses with head $\mathtt{B}$ or $\mathtt{C}$, no or-nodes lie below the first three of the and-nodes at level 3. However, there is one or-node lying below $\mathtt{D}$; it branches into and-nodes labelled by $\mathtt{A}$ and $\mathtt{C}$, which is exactly as $p_2(\mathtt{A})$ tells us. \end{example}
For pictures of such trees, see~\cite{KoPS}.
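The unfolding in Example~\ref{ex:free} can also be replayed in a short Haskell sketch (our own illustration, with finite sets approximated by lists; the names are ours):
\begin{verbatim}
data AtomEx = A | B | C | D deriving (Eq, Show)

p :: AtomEx -> [[AtomEx]]          -- p(A) = {{B,C},{B,D}}, p(D) = {{A,C}}
p A = [[B, C], [B, D]]
p D = [[A, C]]
p _ = []

-- An and-or tree: the root atom, then one inner list per or-node (clause).
data Tree = Node AtomEx [[Tree]] deriving Show

approx :: Int -> AtomEx -> Tree    -- unfold the coalgebra to a given depth,
approx 0 a = Node a []             -- mirroring the cone (p_n) above
approx n a = Node a [ [ approx (n - 1) b | b <- body ] | body <- p a ]
\end{verbatim}
Here \texttt{approx 2 A} carries the same information as $p_2(\mathtt{A})$ in the example.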
\section{Modelling coinductive trees for logic programs without existential variables}\label{sec:recall}
We now lift the restriction on $Var = \emptyset$ in Definition~\ref{df:syntax}, and consider first-order terms and atoms in full generality, however, we restrict the definition of clauses in Definition~\ref{df:syntax} to those not containing existential variables.
The \emph{Lawvere theory} $\mathcal{L}_{\Sigma}$ \emph{generated by} a signature $\Sigma$ is (up to isomorphism, as there are several equivalent formulations) the category defined as follows: $\texttt{ob}(\mathcal{L}_{\Sigma})$ is the set of
natural numbers. For each natural number $n$, let $x_1,\ldots ,x_n$ be a specified list
of distinct variables. Define
$\mathcal{L}_{\Sigma}(n,m)$ to be the set of $m$-tuples
$(t_1,\ldots ,t_m)$ of terms generated by the function symbols in
$\Sigma$ and variables $x_1,\ldots ,x_n$. Define composition in
$\mathcal{L}_{\Sigma}$ by substitution.
One can readily check that these constructions satisfy the axioms for
a category, with $\mathcal{L}_{\Sigma}$ having
strictly associative finite products given by the sum of natural
numbers. The terminal object of $\mathcal{L}_{\Sigma}$ is the natural
number $0$.
\begin{example}\label{ex:arrows}
Consider ListNat.
The constants $\mathtt{0}$ and $\mathtt{nil}$ are maps
from $0$ to $1$ in $\mathcal{L}_{\Sigma}$, $\mathtt{s}$ is modelled by
a map from $1$ to $1$, and $\mathtt{cons}$ is modelled by a map from
$2$ to $1$. The term $\mathtt{s(0)}$ is the map
from $0$ to $1$ given by the composite of the maps modelling
$\mathtt{s}$ and $\mathtt{0}$.
\end{example}
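To give one more instance of composition by substitution: the term $\mathtt{cons(s(0),nil)}$ is the map from $0$ to $1$ given by the composite of $(\mathtt{s(0)},\mathtt{nil}):0\longrightarrow 2$ with $\mathtt{cons}(x_1,x_2):2\longrightarrow 1$.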
Given an arbitrary logic program $P$ with signature $\Sigma$,
we can extend the set $At$ of atoms for a variable-free
logic program to the functor $At:\mathcal{L}_{\Sigma}^{op}
\rightarrow Set$ that sends a natural number $n$ to the set of all
atomic formulae, with variables among $x_1,\ldots ,x_n$, generated by
the function symbols in $\Sigma$ and by the predicate symbols in $P$. A map $f:n \rightarrow m$ in $\mathcal{L}_{\Sigma}$ is sent to
the function $At(f):At(m) \rightarrow At(n)$ that sends an atomic
formula $A(x_1, \ldots,x_m)$ to $A(f_1(x_1, \ldots ,x_n)/x_1, \ldots
,f_m(x_1, \ldots ,x_n)/x_m)$, i.e., $At(f)$ is defined by
substitution.
As explained in the Introduction and in~\cite{KMP}, we cannot model a logic program by a
natural transformation of the form $p:At\longrightarrow P_fP_fAt$ as
naturality breaks down, e.g., in ListNat. So, in~\cite{KoP,KoPS}, we relaxed naturality to lax naturality. In order to define it, we extended
$At:\mathcal{L}_{\Sigma}^{op}\rightarrow Set$ to have codomain $Poset$ by composing $At$ with the inclusion of $Set$ into
$Poset$. Mildly overloading notation, we denote the
composite by $At:\mathcal{L}_{\Sigma}^{op}\rightarrow Poset$.
\begin{definition}
Given functors $H,K:\mathcal{L}_{\Sigma}^{op} \longrightarrow Poset$, a {\em lax
transformation} from $H$ to $K$ is the assignment to each
object $n$ of $\mathcal{L}_{\Sigma}$, of an order-preserving function $\alpha_n: Hn \longrightarrow Kn$ such
that for each map $f:n \longrightarrow m$ in $\mathcal{L}_{\Sigma}$, one has
$(Kf)(\alpha_m) \leq (\alpha_{n})(Hf)$, pictured as follows: \begin{diagram} Hm & \rTo^{\alpha_m} & Km \\ \dTo<{Hf} & \geq & \dTo>{Kf} \\ Hn & \rTo_{\alpha_n} & Kn \end{diagram}
\end{definition}
Functors and lax transformations, with pointwise composition, form a locally ordered category
denoted by $Lax(\mathcal{L}_{\Sigma}^{op},Poset)$. Such categories and generalisations have been studied extensively, e.g., in~\cite{Ben,BKP,Kelly,KP1}.
\begin{definition}\label{def:poset}
Define $P_f:Poset\longrightarrow Poset$ by letting
$P_f(P)$ be the partial order given by the set of finite subsets of
$P$, with $A\leq B$ if for all $a \in A$, there exists
$b \in B$ for which $a\leq b$ in $P$, with behaviour on maps
given by image. Define $P_c$ similarly but with countability
replacing finiteness.
\end{definition}
We are not interested in arbitrary posets in modelling logic programming, only those that arise, albeit inductively, by taking subsets of a set qua discrete poset. So we gloss over the fact that, for an arbitrary poset $P$, the relation of Definition~\ref{def:poset} is in general only a preorder, so that the underlying set of $P_f(P)$ is a quotient of the set of finite subsets of $P$. This does not affect the line of development here.
\begin{example}\label{ex:listnat2} Modelling Example~\ref{ex:listnat}, ListNat generates a lax transformation of the form $p:At\longrightarrow P_fP_fAt$ as follows: $At(n)$ is the set of atomic formulae in $ListNat$ with at most $n$ variables.
For example, $At(0)$ consists of $\mathtt{nat(0)}$, $\mathtt{nat(nil)}$, $\mathtt{list(0)}$, $\mathtt{list(nil)}$, $\mathtt{nat(s(0))}$, $\mathtt{nat(s(nil))}$, $\mathtt{list(s(0))}$, $\mathtt{list(s(nil))}$, $\mathtt{nat(cons(0, 0))}$, $\mathtt{nat(cons( 0, nil))}$, \\ $\mathtt{nat(cons (nil, 0))}$, $\mathtt{nat(cons( nil, nil))}$, etcetera.
Similarly, $At(1)$ includes all atomic formulae containing at most one (specified) variable $x$, thus all the elements of $At(0)$ together with $\mathtt{nat(x)}$, $\mathtt{list(x)}$, $\mathtt{nat(s(x))}$, $\mathtt{list(s(x))}$, $\mathtt{nat(cons( 0, x))}$, $\mathtt{nat(cons (x, 0))}$, $\mathtt{nat(cons (x, x))}$, etcetera.
The function $p_n:At(n)\longrightarrow P_fP_fAt(n)$ sends each element of $At(n)$, i.e., each atom $A(x_1,\ldots ,x_n)$ with variables among $x_1,\ldots ,x_n$, to the set of sets of atoms in the antecedent of each unifying substituted instance of a clause in $P$ with head for which a unifying substitution agrees with $A(x_1,\ldots ,x_n)$.
Taking $n=0$, $\mathtt{nat(0)}\in At(0)$ is the head of one clause, and there is no other clause for which a unifying substitution will make its head agree with $\mathtt{nat(0)}$. The clause with head $\mathtt{nat(0)}$ has the empty set of atoms as its tail, so $p_0(\mathtt{nat(0)}) = \{ \emptyset \}$.
Taking $n=1$, $\mathtt{list(cons( x, 0))}\in At(1)$ is the head of one clause given by a unifying substitution applied to the final clause of ListNat, and accordingly $p_1(\mathtt{list(cons (x, 0))}) = \{ \{ \mathtt{nat(x)},\mathtt{list(0)} \} \}$.
The family of functions $p_n$ satisfy the inequality required to form a lax transformation precisely because of the allowability of substitution instances of clauses, as in turn is required to model logic programming. The family does not satisfy the strict requirement of naturality as explained in the introduction. \end{example}
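To see the laxness concretely in this example: for the map $\mathsf{0}:0\longrightarrow 1$ of $\mathcal{L}_{\Sigma}$ considered in the Introduction, we have
\[ P_fP_fAt(\mathsf{0})\left(p_1(\mathtt{nat(x)})\right) = P_fP_fAt(\mathsf{0})(\emptyset) = \emptyset \ \leq\ \{\emptyset\} = p_0(\mathtt{nat(0)}) = p_0\left(At(\mathsf{0})(\mathtt{nat(x)})\right), \]
a strict inequality, whereas naturality would require equality.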
\begin{example}\label{ex:lp2} Attempting to model Example~\ref{ex:lp} by
mimicking the model of ListNat as a lax transformation of the form $p:At\longrightarrow P_fP_fAt$ in Example~\ref{ex:listnat2} fails.
Consider the clause \[
\mathtt{connected(x,y)} \gets \mathtt{edge(x,z)},
\mathtt{connected(z,y)} \]
Modulo possible renaming of variables, the head of the clause, i.e., the atom $\mathtt{connected(x,y)}$, lies in $At(2)$ as it has two variables. However, the tail does not lie in $P_fP_fAt(2)$ as the tail has three variables rather than two.
We dealt with that inelegantly in~\cite{KoP}: in order to allow $p_2(\mathtt{connected(x,y)})$ to model GC in any reasonable sense, we allowed substitutions for $z$ by any term in $x,y$, on the basis that, since matching against the head does not determine $z$, we had better allow all possibilities. So, rather than modelling the clause directly, recalling that $At(2)\subseteq At(3)\subseteq At(4)$, etcetera, modulo renaming of variables, we put
{\small{ \begin{eqnarray*} p_2(\mathtt{connected(x,y)}) & = & \{ \{\mathtt{edge(x,x)},\mathtt{connected(x,y)}\}, \{\mathtt{edge(x,y)},\mathtt{connected(y,y)}\}\}\\ p_3(\mathtt{connected(x,y)}) & = & \{ \{\mathtt{edge(x,x)},\mathtt{connected(x,y)}\}, \{\mathtt{edge(x,y)},\mathtt{connected(y,y)}\},\\
& & \{\mathtt{edge(x,z)},\mathtt{connected(z,y)}\} \}\\ p_4(\mathtt{connected(x,y)}) & = & \{ \{\mathtt{edge(x,x)},\mathtt{connected(x,y)}\}, \{\mathtt{edge(x,y)},\mathtt{connected(y,y)}\},\\
& & \{\mathtt{edge(x,z)},\mathtt{connected(z,y)}\},\{\mathtt{edge(x,w)},\mathtt{connected(w,y)}\} \} \end{eqnarray*}}} etcetera: for $p_2$, as only two variables $x$ and $y$ appear in any element of $P_fP_fAt(2)$, we allowed substitution by either $x$ or $y$ for $z$; for $p_3$, a third variable may appear in an element of $P_fP_fAt(3)$, allowing an additional possible substitution; for $p_4$, a fourth variable may appear, etcetera.
Countability arises if a unary symbol $s$ is added to GC, as in that case, for $p_2$, not only did we allow $x$ and $y$ to be substituted for $z$, but we also allowed $s^n(x)$ and $s^n(y)$ for any $n>0$, and to do that, we replaced $P_fP_f$ by $P_cP_f$, allowing for the countably many possible substitutions.
Those were inelegant decisions, but they allowed us to give some kind of model of all logic programs. \end{example}
We now turn to the relationship between the lax transformation $p:At\longrightarrow P_cP_fAt$ modelling a logic program $P$ and $\overline{p}:At\longrightarrow C(P_cP_f)At$, the corresponding coalgebra for the cofree comonad $C(P_cP_f)$ on $P_cP_f$.
We recall the central abstract result of~\cite{KoP}, the notion of an ``oplax'' map of coalgebras being required to match that of lax transformation. Notation of the form \mbox{$H$-$coalg$} refers to coalgebras for an endofunctor $H$, while notation of the form \mbox{$C$-$Coalg$} refers to coalgebras for a comonad $C$. The subscript $oplax$ refers to oplax maps, and given an endofunctor $E$ on $Poset$, the notation $Lax(\mathcal{L}_{\Sigma}^{op},E)$ denotes the endofunctor on $Lax(\mathcal{L}_{\Sigma}^{op},Poset)$ given by post-composition with $E$; similarly for a comonad.
\begin{theorem}\label{main}
For any locally ordered endofunctor $E$ on $Poset$, if $C(E)$ is
the cofree comonad on $E$, then there is a canonical isomorphism
\[
Lax(\mathcal{L}_{\Sigma}^{op},E)\mbox{-}coalg_{oplax} \simeq
Lax(\mathcal{L}_{\Sigma}^{op},C(E))\mbox{-}Coalg_{oplax}
\]
\end{theorem}
\begin{corollary}\label{oldcor} $Lax(\mathcal{L}_{\Sigma}^{op},C(P_cP_f))$ is the cofree comonad on $Lax(\mathcal{L}_{\Sigma}^{op},P_cP_f)$. \end{corollary}
Corollary~\ref{oldcor} gives a bijection between lax transformations \[ p:At\longrightarrow P_cP_fAt \] and lax transformations \[ \overline{p}:At\longrightarrow C(P_cP_f)At \] subject to the two conditions required of a coalgebra of a comonad. Subject to the routine replacement of the outer copy of $P_f$ by $P_c$ in the construction in Theorem~\ref{constr:Gcoalg}, the same construction, if understood pointwise, extends to this setting, i.e., if one uniformly replaces $At$ by $At(n)$ in the construction of Theorem~\ref{constr:Gcoalg}, and replaces the outer copy of $P_f$ by $P_c$, one obtains a description of $C(P_cP_f)At(n)$ together with the construction of $\overline{p}_n$ from $p_n$.
That is fine for ListNat, modelling the coinductive trees generated by ListNat, the same holding for any logic program without existential variables, but for GC, as explained in Example~\ref{ex:lp2}, $p$ did \emph{not} model the clause \[
\mathtt{connected(x,y)} \gets \mathtt{edge(x,z)},
\mathtt{connected(z,y)} \] directly, and so its extension \emph{a fortiori} could \emph{not} model the coinductive trees generated by $\mathtt{connected(x,y)}$.
For arbitrary logic programs, $\overline{p}(A(x_1,\ldots ,x_n))$ was a variant of the coinductive tree generated by $A(x_1,\ldots ,x_n)$ in two key ways: \begin{enumerate} \item coinductive trees allow new variables to be introduced as one passes down the tree, e.g., with \[
\mathtt{connected(x,y)} \gets \mathtt{edge(x,z)},
\mathtt{connected(z,y)} \] appearing directly in it, whereas, extending Example~\ref{ex:lp2}, $\overline{p_1}(\mathtt{connected(x,y)})$ does not model such a clause directly, but rather substitutes terms on $x$ and $y$ for $z$, continuing inductively as one proceeds. \item coinductive trees are finitely branching, as one expects in logic programming, whereas $\overline{p}(A(x_1,\ldots ,x_n))$ could be infinitely branching, e.g., for GC with an additional unary operation $s$. \end{enumerate}
\section{Modelling coinductive trees for arbitrary logic programs}\label{sec:derivation}
We believe that our work in~\cite{KoP} provides an interesting model of ListNat, in particular because it agrees with the coinductive trees generated by ListNat. However, the account in~\cite{KoP} is less interesting when applied to GC, thus in the full generality of logic programming. Restriction to non-existential examples such as ListNat is common for implementational reasons~\cite{KoPS,JohannKK15,FK15,FKSP16}, so~\cite{KoP} does allow the modeling of coinductive trees for a natural class of logic programs. Here we seek to model coinductive trees for logic programs in general, \emph{a fortiori} doing so for GC.
In order to model coinductive trees, it follows from Example~\ref{ex:lp2} that the endofunctor $Lax(\mathcal{L}_{\Sigma}^{op},P_fP_f)$ on $Lax(\mathcal{L}_{\Sigma}^{op},Poset)$ that sends $At$ to $P_fP_fAt$, needs to be refined as $\{ \{\mathtt{edge(x,z),connected(z,y)}\}\}$ is not an element of $P_fP_fAt(2)$ as it involves three variables $x$, $y$ and $z$. Motivated by that example, we refine our axiomatics in general so that the codomain of $p_n$ is a superset of $P_fP_fAt(m)$ for every $m\geq n$. There are six injections of $2$ into $3$, inducing six inclusions $At(2)\subseteq At(3)$, so six inclusions $P_fP_fAt(2)\subseteq P_fP_fAt(3)$, and one only wants to count each element of $P_fP_fAt(2)$ once. So we refine $P_fP_fAt(n)$ to become $(\Sigma_{m\geq n} P_fP_fAt(m))/\equiv$, where $\equiv$ is generated by the injections $i:n\longrightarrow m$. This can be made precise in abstract category theoretic terms as follows.
For any Lawvere theory $L$, there is a canonical identity-on-objects functor from the category $Inj$ of injections $i:n\longrightarrow m$ of natural numbers into $L^{op}$. So, in particular, there is a canonical identity on objects functor $J:Inj\longrightarrow \mathcal{L}_{\Sigma}^{op}$, upon which $\Sigma_{m\geq n} P_fP_fAt(m)/\equiv$ may be characterised as the colimit (see~\cite{Mac} or, for the enriched version,~\cite{K}) \[ \int^{m\in n/Inj} P_fP_fAtJ(m) \] or equivalently, given $n\in Inj$, the colimit of the functor from $n/Inj$ to $Poset$ that sends an injection $j:n\longrightarrow m$ to $P_fP_fAtJ(m)$.
This construction extends to a functor $P_{ff}(At):\mathcal{L}_{\Sigma}^{op}\longrightarrow Poset$ by sending a map $f:n\longrightarrow n'$ in $\mathcal{L}_{\Sigma}$ to the order-preserving function \[ \int^{m\in n'/Inj} P_fP_fAtJ(m) \longrightarrow \int^{m\in n/Inj} P_fP_fAtJ(m) \] determined by the fact that each $m\in n'/Inj$ is, up to coherent isomorphism, uniquely of the form $n'+k$, allowing one to apply $P_fP_fAt$ to the map $f+k:n+k\longrightarrow n'+k = m$ in $\mathcal{L}_{\Sigma}$. This is similar to the behaviour of the monad for local state on maps~\cite{PP2}.
It is routine to generalise the construction from $At$ to make it apply to an arbitrary functor $H:\mathcal{L}_{\Sigma}^{op}\longrightarrow Poset$.
In order to make the construction functorial, i.e., in order to make it respect maps $\alpha:H\Rightarrow K$, we need to refine $Lax(\mathcal{L}_{\Sigma}^{op},Poset)$ as the above colimit strictly respects injections, i.e., for any \emph{injection} $i:n\longrightarrow m$, we want the diagram \begin{diagram} Hn & \rTo^{\alpha_n} & Kn \\ \dTo<{Hi} & & \dTo>{Ki} \\ Hm & \rTo_{\alpha_m} & Km \end{diagram} to commute.
Summarising this discussion yields the following:
\begin{definition}\label{def:laxinj} Let $Lax_{Inj}(\mathcal{L}_{\Sigma}^{op},Poset)$ denote the category with objects given by functors from $\mathcal{L}_{\Sigma}^{op}$ to $Poset$, maps given by lax transformations that strictly respect injections, and composition given pointwise. \end{definition}
\begin{proposition}\label{prop:ff} (cf.~\cite{PP2}) Let $J:Inj\longrightarrow \mathcal{L}_{\Sigma}^{op}$ be the canonical inclusion. Define \[ P_{ff}:Lax_{Inj}(\mathcal{L}_{\Sigma}^{op},Poset)\longrightarrow Lax_{Inj}(\mathcal{L}_{\Sigma}^{op},Poset) \]
by
$(P_{ff}(H))(n) = \int^{m\in n/Inj} P_fP_fHJ(m)$,
with, for any map $f:n\longrightarrow n'$ in $\mathcal{L}_{\Sigma}$, \[ (P_{ff}(H))(f):\int^{m\in n'/Inj} P_fP_fHJ(m) \longrightarrow \int^{m\in n/Inj} P_fP_fHJ(m) \] determined by the fact that each $m\in n'/Inj$ is, up to coherent isomorphism, uniquely of the form $n'+k$, allowing one to apply $P_fP_fH$ to the map $f+k:n+k\longrightarrow n'+k = m$ in $\mathcal{L}_{\Sigma}$.
Given $\alpha:H\Rightarrow K$, define $P_{ff}(\alpha)(n)$ by the fact that $m\in n/Inj$ is uniquely of the form $n+k$, and using \[ \alpha_{n+k}:H(m) = H(n+k)\longrightarrow K(n+k) = K(m) \] Then $P_{ff}$ is an endofunctor on $Lax_{Inj}(\mathcal{L}_{\Sigma}^{op},Poset)$. \end{proposition}
The proof is routine but requires lengthy calculation involving colimits. Observe that we have not required countability anywhere in the definition of $P_{ff}$, using only finiteness as we sought at the end of Section~\ref{sec:recall}.
We can now model an arbitrary logic program by a map $p:At\longrightarrow P_{ff}At$ in $Lax_{Inj}(\mathcal{L}_{\Sigma}^{op},Poset)$, modelling ListNat as we did in Example~\ref{ex:listnat2} but now modelling the clauses of GC directly rather than using the awkward substitution instances of Example~\ref{ex:lp2}.
\begin{example}\label{ex:listnat3} Except for the restriction of $Lax(\mathcal{L}_{\Sigma}^{op},Poset)$ to $Lax_{Inj}(\mathcal{L}_{\Sigma}^{op},Poset)$, ListNat is modelled in exactly the same way here as it was in Example~\ref{ex:listnat2}, the reason being that no clause in ListNat has a variable in the tail that does not already appear in the head. We need only observe that, although $p$ is not strictly natural in general, it does strictly respect injections. For example, if one views $\mathtt{list(cons( x, 0))}$ as an element of $At(2)$, its image under $p_2$ agrees with its image under $p_1$. \end{example}
\begin{example}\label{ex:lp3} In contrast to Example~\ref{ex:lp2}, using $P_{ff}$, we can emulate the construction of Examples~\ref{ex:listnat2} and~\ref{ex:listnat3} for ListNat to model GC.
Modulo possible renaming of variables, $\mathtt{connected(x,y)}$ is an element of $At(2)$. The function $p_2$ sends it to the element $\{ \{ \mathtt{edge(x,z)},\mathtt{connected(z,y)}\}\}$ of $(P_{ff}(At))(2)$. This is possible by taking $n=2$ and $m=3$ in the formula for $P_{ff}(At)$ in Proposition~\ref{prop:ff}. In contrast, $\{ \{ \mathtt{edge(x,z)},\mathtt{connected(z,y)}\}\}$
is not an element of $P_fP_fAt(2)$, hence the failure of Example~\ref{ex:lp2}.
The behaviour of $P_{ff}(At)$ on maps ensures that the lax transformation $p$ strictly respects injections. For example, if $\mathtt{connected(x,y)}$ is seen as an element of $At(3)$, the additional variable is treated as a fresh variable $w$, so does not affect the image of $\mathtt{connected(x,y)}$ under $p_3$. \end{example}
\begin{theorem} The functor $P_{ff}:Lax_{Inj}(\mathcal{L}_{\Sigma}^{op},Poset)\longrightarrow Lax_{Inj}(\mathcal{L}_{\Sigma}^{op},Poset)$ induces a cofree comonad $C(P_{ff})$ on $Lax_{Inj}(\mathcal{L}_{\Sigma}^{op},Poset)$. Moreover, given a logic program $P$ qua $P_{ff}$-coalgebra $p:At\longrightarrow P_{ff}(At)$, the corresponding $C(P_{ff})$-coalgebra $\overline{p}:At\longrightarrow C(P_{ff})(At)$ sends an atom $A(x_1,\ldots ,x_n)\in At(n)$ to the coinductive tree for $A(x_1,\ldots ,x_n)$. \end{theorem}
\begin{proof} The construction of Theorem~\ref{constr:Gcoalg}, subject to mild rephrasing, continues to work here. Specifically, $(C(P_{ff})At)(n)$ is given by the same limit as in Theorem~\ref{constr:Gcoalg} but with $At$ replaced by $At(n)$ and with $P_fP_f$ replaced by $P_{ff}$: products in the category $Lax_{Inj}(\mathcal{L}_{\Sigma}^{op},Poset)$ are given pointwise, so the use of projections is the same; $[Inj,Poset]$ is locally finitely presentable and $P_{ff}$ is an accessible functor, allowing us to extend the construction of the cofree comonad pointwise to $[Inj,Poset]$. It is routine, albeit tedious, to verify functoriality of $C(P_{ff})$ with respect to all maps and to verify the universal property. The construction of $\overline{p}$ is given pointwise, with it following from its coinductive construction that it yields the coinductive trees as required. \end{proof}
The lax naturality with respect to general maps $f:m\longrightarrow n$ means that, for an atom $A(x_1,\ldots ,x_n)\in At(n)$, substituting and then taking the coinductive tree, i.e., applying the function $At(f)$ to $A(x_1,\ldots ,x_n)$ and then applying $\overline{p}$, may differ from taking the coinductive tree and then substituting, i.e., applying the function $(C(P_{ff})At)(f)$ to the coinductive tree for $A(x_1,\ldots ,x_n)$: the former substitutes into $A(x_1,\ldots ,x_n)$, then takes its coinductive tree, while the latter applies a substitution to each node of the coinductive tree for $A(x_1,\ldots ,x_n)$, then prunes to remove redundant branches.
\begin{example}\label{ex:lp4} Extending Example~\ref{ex:lp3}, consider $\mathtt{connected(x,y)}\in At(2)$. In expressing GC as a map $p:At\longrightarrow P_{ff}At$ in Example~\ref{ex:lp3}, we put \[ p_2(\mathtt{connected(x,y)}) = \{ \{ \mathtt{edge(x,z)},\mathtt{connected(z,y)}\}\} \] Accordingly, $\overline{p}_2(\mathtt{connected(x,y)})$ is the coinductive tree for $\mathtt{connected(x,y)}$, thus the infinite tree generated by repeated application of the same clause modulo renaming of variables.
If we substitute $x$ for $y$ in the coinductive tree, i.e., apply the function $(C(P_{ff})At)(x,x)$ to it (see the definition of $L_{\Sigma}$ at the start of Section~\ref{sec:recall} and observe that $(x,x)$ is a $2$-tuple of terms generated trivially by the variable $x$), we obtain the same tree but with $y$ systematically replaced by $x$. However, if we substitute $x$ for $y$ in $\mathtt{connected(x,y)}$, i.e., apply the function $At(x,x)$ to it, we obtain $\mathtt{connected(x,x)}\in At(1)$, whose coinductive tree has additional branching as the first clause of GC, i.e., $\mathtt{connected(x,x)}\gets \,$ may also be applied.
In contrast to this, we have strict naturality with respect to injections: for example, an injection $i:2\longrightarrow 3$ yields the function $At(i):At(2)\longrightarrow At(3)$ that, modulo renaming of variables, sends $\mathtt{connected(x,y)}\in At(2)$ to itself seen as an element of $At(3)$, and the coinductive tree for $\mathtt{connected(x,y)}$ is accordingly also sent by $(C(P_{ff})At)(i)$ to itself seen as an element of $(C(P_{ff})At)(3)$. \end{example}
Example~\ref{ex:lp4} illustrates why, although the condition of strict naturality with respect to injections holds for $P_{ff}$, it does not hold for $Lax(\mathcal{L}_{\Sigma}^{op},P_fP_f)$ in Example~\ref{ex:lp2} as we did not model the clause \[
\mathtt{connected(x,y)} \gets \mathtt{edge(x,z)},
\mathtt{connected(z,y)} \] directly there, but rather modelled all substitution instances into all available variables.
\section{Complementing saturated semantics}\label{sec:sat}
Bonchi and Zanasi's approach to modelling logic programming in~\cite{BZ} was to consider $P_fP_f$ as we did in~\cite{KoP}, sending $At$ to $P_fP_fAt$, but to ignore the inherent laxness, replacing $Lax(\mathcal{L}_{\Sigma}^{op},Poset)$ by $[ob(\mathcal{L}_{\Sigma}),Set]$, where $ob(\mathcal{L}_{\Sigma})$ is the set of objects of $\mathcal{L}_{\Sigma}$ treated as a discrete category, i.e., as one with only identity maps.
The central mathematical fact that supports saturated semantics is that, regarding $ob(\mathcal{L}_{\Sigma})$ as a discrete category, with inclusion functor $I:ob(\mathcal{L}_{\Sigma})\longrightarrow \mathcal{L}_{\Sigma}$, the functor \[ [I,Set]:[\mathcal{L}_{\Sigma}^{op},Set]\longrightarrow [ob(\mathcal{L}_{\Sigma})^{op},Set] \] that sends a functor $H:\mathcal{L}_{\Sigma}^{op}\longrightarrow Set$ to the composite functor $HI:ob(\mathcal{L}_{\Sigma}) = ob(\mathcal{L}_{\Sigma})^{op} \longrightarrow Set$ has a right adjoint. That adjoint is given by right Kan extension. It is primarily the fact of the existence of the right adjoint, rather than its characterisation as a right Kan extension, that enabled Bonchi and Zanasi's various constructions, in particular those of saturation and desaturation.
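For concreteness, since $ob(\mathcal{L}_{\Sigma})$ is discrete, the standard end formula for such a right Kan extension reduces to a product: the right adjoint sends $G:ob(\mathcal{L}_{\Sigma})\longrightarrow Set$ to the functor whose value at $n$ is \[ \prod_{m\in ob(\mathcal{L}_{\Sigma})} G(m)^{\mathcal{L}_{\Sigma}^{op}(n,m)}, \] naturally in $n$. We recall the formula only for orientation; as just said, it is the existence of the right adjoint rather than this description of it that matters here.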
That allows us to mimic Bonchi and Zanasi's saturation semantics, but starting from $Lax(\mathcal{L}_{\Sigma}^{op},Poset)$ rather than from $[ob(\mathcal{L}_{\Sigma}),Set]$. We are keen to allow this as laxness is an inherent fact of the situation, as we have explained through the course of this paper. Such laxness has been valuable in related semantic endeavours, such as in Tony Hoare's pioneering work on the modelling of data refinement~\cite{HH,HH1,KP1}, of which substitution in logic programming can be seen as an instance.
The argument, which was originally due to Ross Street, cf~\cite{S}, goes as follows.
\begin{theorem}~\cite{BKP}\label{thm:BKP} For any finitary $2$-monad $T$ on a cocomplete $2$-category $K$, the inclusion \[ J:T\mbox{-}Alg_s\longrightarrow T\mbox{-}Alg_l \] of the category of strict $T$-algebras and strict maps of $T$-algebras into the category of strict $T$-algebras and lax maps of $T$-algebras has a left adjoint. \end{theorem}
\begin{example}\label{left} For any Lawvere theory $L$, there is a finitary locally ordered monad $T$ on $[ob(L),Poset^{op}]$ for which $[L,Poset^{op}]$ is isomorphic to $T$-$Alg_s$, with $T$-$Alg_l$ isomorphic to $Lax(L,Poset^{op})$. The monad $T$ is given by the composite of the functor \[ [J,Poset^{op}]:[L,Poset^{op}]\longrightarrow [ob(L),Poset^{op}] \] where $J:ob(L)\longrightarrow L$ is the inclusion, cf Bonchi and Zanasi's construction~\cite{BZ}, with its left adjoint, which is given by left Kan extension. The fact that the functor $[J,Poset^{op}]$ also has a right adjoint, given by right Kan extension, implies that the monad $T$ is finitary. \end{example}
\begin{corollary} For any Lawvere theory $L$, the inclusion \[ [L^{op},Poset]\longrightarrow Lax(L^{op},Poset) \] has a right adjoint. \end{corollary}
\begin{proof} $Poset$ is a complete $2$-category as it is a complete locally ordered category. So $Poset^{op}$ is a cocomplete $2$-category, and so $[ob(L),Poset^{op}]$ is a cocomplete $2$-category. So the conditions of Theorem~\ref{thm:BKP} hold for Example~\ref{left}, and so the inclusion \[ [L,Poset^{op}]\longrightarrow Lax(L,Poset^{op}) \] has a left adjoint. But $[L,Poset^{op}]^{op}$ is canonically isomorphic to $[L^{op},Poset]$, and $Lax(L,Poset^{op})^{op}$ is canonically isomorphic to $Lax(L^{op},Poset)$, and in general, a functor $H:A\longrightarrow B$ has a right adjoint if and only if $H:A^{op}\longrightarrow B^{op}$ has a left adjoint. The combination of these facts yields the result. \end{proof}
With this result in hand, one can systematically work through Bonchi and Zanasi's paper, adapting their constructions for saturation and desaturation, without discarding the inherent laxness that logic programming, cf data refinement, possesses.
We have stated the results here for arbitrary lax transformations, but they apply equally to those that strictly respect injections, i.e., a subtle extension of the above argument shows that the inclusion \[ [L^{op},Poset]\longrightarrow Lax_{Inj}(L^{op},Poset) \] has a right adjoint, that right adjoint being a further variant of the right Kan extension that Bonchi and Zanasi used. The argument for lax naturality from the Introduction retains its force, so in Bonchi and Zanasi's sense, this does not yield compositionality of lax semantics, but it does further refine their analysis of saturation, eliminating more double counting.
\section{Conclusions}\label{sec:concl} For variable-free logic programs, in~\cite{KMP}, we used the cofree comonad on $P_fP_f$ to model the coinductive trees generated by a logic program. The notion of coinductive tree had not been isolated at the time of writing of~\cite{KMP}, or of~\cite{KoP}, so we did not explicitly explain the relationship in~\cite{KMP}; we have done so here, but the result was effectively already in~\cite{KMP}, just explained in somewhat different terms.
Using lax transformations, we extended the result in~\cite{KoP}, albeit again not stated explicitly there but explained explicitly here, to arbitrary logic programs, including existential programs,
a leading example being GC, as studied extensively by Sterling and Shapiro~\cite{SS}.
The problem of existential clauses is well-known in the literature on theorem proving and within communities that use term-rewriting, TM-resolution or their variants. In TRS~\cite{Terese}, existential variables are not allowed to appear in rewriting rules, and in type inference, the restriction to non-existential programs is common~\cite{Jones97}. In LP, the problem of handling existential variables when constructing proofs with TM-resolution marks the boundary between the theorem-proving and problem-solving aspects, as explained in Section~\ref{sec:backr}.
The papers~\cite{KoP,KoPS} also contained a kind of category theoretic semantics for existential logic programs such as GC, but that semantics was limited, not modelling the coinductive trees generated by TM-resolution for such logic programs. Here, we have refined lax semantics, refining $Lax(\mathcal{L}_{\Sigma}^{op},Poset)$ to $Lax_{Inj}(\mathcal{L}_{\Sigma}^{op},Poset)$, thus insisting upon strict naturality for injections, and refining the construction $P_cP_fAt$ to $P_{ff}(At)$, thus allowing for additional variables in the tail of a clause in a logic program and not introducing countability, cf the modelling of local state in~\cite{PP2}. This has allowed us to model coinductive trees for arbitrary logic programs.
We have further mildly refined Bonchi and Zanasi's saturation semantics for logic programming~\cite{BZ}, showing how it may be seen to complement rather than to replace lax semantics.
\end{document} |
\begin{document}
\title{Cats} \author{\textsc{Daniel Gerigk}\footnote{\,Universit\"at Bonn, Germany. \quad \href{mailto:[email protected]}{[email protected]}}} \date{March 7, 2014} \maketitle
\begin{center} \textsl{To the memory of Christoph S.} \end{center}
\begin{abstract} \noindent A generalization of the notion of an $\infty$-category is presented, allowing for ($\infty$-)cat(egorie)s that may have non-invertible higher morphisms.\\ The first step is to find a suitable category $\DDelta$ of (generalized) simplices. In fact, the category $\DDelta$ which we will employ has already been introduced a long time ago. Consider $\Set[\DDelta]$. Every simplex $A \in \DDelta$ has \emph{(inner) faces}, corresponding \emph{(inner) horns}, and a \emph{spine}. We call an object $X \in \Set[\DDelta]$ a \emph{cat} if every inner horn in $X$ can be filled. We conjecture that every spine is \emph{inner anodyne}, and that the Cisinski model structure generated by the set of spines is equal to the Cisinski model structure generated by the set of inner horns. It is conjectured that the fibrant objects of this model structure are precisely the cats. \end{abstract}
\section{Preface}
The notion of an \emph{$\infty$-category} was first defined by Boardman \& Vogt \cite{boardmanvogt} (who call them \emph{weak Kan complexes}) and was further developed most notably by Joyal \cite{joyal.qcakc,joyal.qcvsss,joyal.noqc,joyal.ttoqcaia} (who calls them \emph{quasi-categories}) and later by Lurie \cite{lurie.htt,lurie.ha}.
\begin{notation} Denote by $\Delta$ the category of (classical) simplices. Define $\dbN:=\{0,1,2,\ldots\}$ and $\dbN_+:=\dbN \setminus \{0\}$. \end{notation}
\section{The category of (generalized) simplices}
\begin{definition} Define $\mathcal{A}:=\prod'_{n \in \dbN_+}\Delta \subset \prod_{n \in \dbN_+}\Delta$ to be the full subcategory generated by the objects which have only finitely many components not equal to $\Delta^0$. For a morphism $f \colon A \to B$ in $\mathcal{A}$, define $\deg f$ to be the smallest $k \in \dbN_+$ such that $f_k$ is constant, i.\,e. factors through $\Delta^0$. For morphisms $f,g \colon A \to B$ in $\mathcal{A}$, define $f \sim g$ if and only if $\deg f = \deg g =: d$ and $f_1=g_1,\ldots,f_d=g_d$. This defines an equivalence relation on the set of morphisms in $\mathcal{A}$ which is compatible with composition of morphisms. For $A \in \mathcal{A}$, define $\dim A$ to be the smallest $d \in \dbN$ such that $A_{d+1} = \Delta^0$. Define $\DDelta \subset \mathcal{A}/{\sim}$ to be the full subcategory generated by the objects $A=(A_1,A_2,\ldots)$ having the property that $A_n = \Delta^0$ for all $n > \dim A$. \end{definition}
The category $\DDelta$ was introduced by Simpson \cite{simpson.acmsfncihnsagsvk} under the notation $\Theta$.
\begin{notation} For $n \in \dbN$ and $a_1,\ldots,a_n \in \dbN_+$, define \[\DDelta^{a_1,\ldots,a_n} := (\Delta^{a_1},\ldots,\Delta^{a_n},\Delta^0,\Delta^0,\ldots) \in \DDelta.\] In particular, $\DDelta^\emptyset = (\Delta^0,\Delta^0,\ldots)$. \end{notation}
\begin{proposition} $\DDelta$ is a skeleton of $\mathcal{A}/{\sim}$, and the objects in $\DDelta$ have no non-identity automorphisms. Two objects $A,B \in \mathcal{A}$ become isomorphic in $\mathcal{A}/{\sim}$ if and only if $\dim A =\dim B =: d$ and $A_1 = B_1, \ldots, A_d = B_d$. \end{proposition}
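\begin{remark} For instance (an illustrative instance of the proposition), the objects $(\Delta^2,\Delta^0,\Delta^1,\Delta^0,\Delta^0,\ldots)$ and $(\Delta^2,\Delta^0,\Delta^0,\ldots) = \DDelta^2$ both have dimension $1$ and agree in their first component, so they become isomorphic in $\mathcal{A}/{\sim}$; only the latter satisfies $A_n = \Delta^0$ for all $n > \dim A$ and hence lies in the skeleton $\DDelta$. \end{remark}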
\begin{theorem}[Ara \& Maltsiniotis] The category $\DDelta$ is a strict test category in the sense of Grothendieck \cite{grothendieck.ps}. \end{theorem}
\section{Faces, horns and spines}
\begin{definition} Let $A \in \DDelta$. The maximal proper subobjects $B \hookrightarrow A$ in $\DDelta$, and also the images of the corresponding monomorphisms $B \hookrightarrow A$ in $\Set[\DDelta]$, are called \emph{faces} of $A$. \end{definition}
\begin{lemma} Let $F \colon B \hookrightarrow A$ be a face in $\DDelta$, and define $d := \dim A$. Then $d-1 \leq \dim B \leq d \leq \deg F \leq d+1$, and the components $F_1,\ldots,F_d$ of $F$ are well-defined monomorphisms in $\Delta$. There is a unique $k \in \{1,\ldots,d\}$ such that the monomorphism $F_k \colon B_k \hookrightarrow A_k$ is a (classical) face in $\Delta$, and for $n \in \{1,\ldots,d\} \setminus \{k\}$ the monomorphism $F_n$ is an identity. \end{lemma}
\begin{definition} Let $A = \DDelta^{a_1,\ldots,a_d} \in \DDelta$, $k \in \{1,\ldots,d\}$ and $m \in \{0,\ldots,a_k\}$. Denote by $\delta_{k:m}^A \subset A$ the face whose $k$-th component is equal to the (classical) face $\delta_m^{A_k} \subset A_k = \Delta^{a_k}$. It is called an \emph{inner face} if the $k$-th component is a (classical) inner face. \end{definition}
\begin{proposition}
The simplex $A = \DDelta^{a_1,\ldots,a_d}$ has precisely $\sum_{i=1}^d(a_i-1)$ many inner faces, and precisely $2 \,| \{ i \colon a_i \geq 2\} \cup \{d\}|$ many outer faces. \end{proposition}
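\begin{remark} As a quick sanity check of these counts against classical simplices: for $A=\DDelta^{2}$, so $d=1$ and $a_1=2$, the formula gives $2-1=1$ inner face and $2\,|\{1\}\cup\{1\}| = 2$ outer faces, matching the single inner face $\delta_1$ and the two outer faces $\delta_0,\delta_2$ of the classical $2$-simplex; for $A=\DDelta^{1}$ it gives $0$ inner faces and $2\,|\emptyset\cup\{1\}| = 2$ outer faces, again as for $\Delta^1$. \end{remark}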
\begin{definition} Let $A \in \DDelta$. A simplicial subset $\Lambda \subset A$ is called a \emph{horn} if there is a face $B \subset A$ such that $\Lambda$ is the union of all faces of $A$ except $B$. It is called an \emph{inner horn} if the missing face is inner. \end{definition}
\begin{definition} Let $A = \DDelta^{a_1,\ldots,a_d} \in \DDelta$, $k \in \{1,\ldots,d\}$ and $m \in \{0,\ldots,a_k\}$. Define $\Lambda^{k:m} \subset A$ to be the horn whose missing face is $\delta_{k:m}^A \subset A$. \end{definition}
\begin{definition} A morphism $f \colon X \to Y$ in $\Set[\DDelta]$ is called an \emph{inner fibration} if it has the right lifting property with respect to every inner horn. \end{definition}
\begin{definition} A morphism $f \colon X \to Y$ in $\Set[\DDelta]$ is called \emph{inner anodyne} if it has the left lifting property with respect to every inner fibration. \end{definition}
\begin{definition} Let $A \in \DDelta$. Define the \emph{boundary} $\partial A \subset A$ to be the union of all faces of $A$. \end{definition}
\begin{proposition} The class of monomorphisms in $\Set[\DDelta]$ is generated (as a saturated class) by the set of boundaries $\partial A \subset A$, with $A \in \DDelta$. \end{proposition}
\begin{definition} Let $A \in \DDelta$. Define the \emph{spine} $I(A) \subset A$ to contain a simplex $s \colon B \to A$ if and only if for all $k \in \{1,\ldots,\deg s\}$, the $k$-th component $s_k \colon B_k \to A_k$ is contained in the (classical) spine $I(A_k) \subset A_k$. \end{definition}
\section{Cats and groupoids}
\begin{definition} $\calC \in \Set[\DDelta]$ is called a \emph{cat} if for every inner horn $\Lambda \subset A$ the map $\calC(A) \to \calC(\Lambda)$ is surjective. $\calG \in \Set[\DDelta]$ is called a \emph{groupoid} if for every horn $\Lambda \subset A$ the map $\calG(A) \to \calG(\Lambda)$ is surjective. \end{definition}
\begin{definition} A cat $\calC$ is called \emph{strict} if for every inner horn $\Lambda \subset A$ the map $\calC(A) \to \calC(\Lambda)$ is bijective. A groupoid $\calG$ is called \emph{strict} if for every horn $\Lambda \subset A$ the map $\calG(A) \to \calG(\Lambda)$ is bijective. \end{definition}
\begin{proposition} A groupoid is strict if and only if it is strict when viewed as a cat. \end{proposition}
\begin{definition} Let $n \in \dbN_+$. A cat $\calC$ is called $n$-strict if for every inner horn $\Lambda \subset A$ with $\dim A \geq n$, the map $\calC(A) \to \calC(\Lambda)$ is bijective. \end{definition} \begin{remark} A cat is strict if and only if it is 1-strict. \end{remark}
\section{The model structure for cats}
\begin{definition} The Cisinski model structure on $\Set[\DDelta]$ generated by the set of spines is called the \emph{model structure for cats}. The weak equivalences of this model structure are called \emph{weak cat equivalences}. \end{definition}
\begin{definition} Let $A \in \DDelta$. Denote by $\frakF_A$ the set of faces of $A$, by $\frakF_A^i$ the set of inner faces of $A$, and by $\frakF_A^o$ the set of outer faces of $A$. \end{definition}
\begin{lemma} \begin{compactenum}[(i)] \item $I(\DDelta^{1,\ldots,1}) = \DDelta^{1,\ldots,1}$. \item If $A \in \DDelta$ is not of the form $A=\DDelta^{1,\ldots,1}$, then $I(A) \subset \bigcup \frakF_A^o$. \end{compactenum} \end{lemma}
\begin{lemma}[cf. Proposition 2.12 in Joyal \cite{joyal.ttoqcaia}] For every $A \in \DDelta$ and every subset $\Gamma \subsetneq \frakF_A$ containing all outer faces of $A$, the inclusion $\bigcup \Gamma \subset A$ is inner anodyne. \end{lemma} \begin{proof} We proceed by induction on $A$: let $A \in \DDelta$, and assume that for every face $B$ of $A$ and every subset $\Gamma \subsetneq \frakF_B$ containing all outer faces of $B$, the inclusion $\bigcup \Gamma \subset B$ is inner anodyne. Let $\Gamma \subsetneq \frakF_A$ contain all outer faces of $A$. If $\Gamma$ contains all faces of $A$ except one inner face, then the inclusion $\bigcup \Gamma \subset A$ is an inner horn of $A$, so is inner anodyne. Assume that $\Gamma$ misses at least two inner faces of $A$. It suffices to show that for $B \in \frakF_A \setminus \Gamma$, the inclusion $\beta \colon \bigcup \Gamma \subset \bigcup \Gamma \cup B$ is inner anodyne.
We have a pushout diagram $$\begin{xy}\xymatrix{ \bigcup_{F \in \Gamma} (F \cap B) \ar@{^{(}->}[r] \ar@{^{(}->}[d]_\alpha & \bigcup \Gamma \ar@{^{(}->}[d]^\beta\\ B \ar@{^{(}->}[r] & \bigcup \Gamma \cup B }\end{xy}$$
The subset $\{F \cap B \colon F \in \Gamma\} \subset \frakF_B$ is proper and contains all outer faces of $B$. Hence, by the induction hypothesis on $B$, it follows that $\alpha$ is inner anodyne. Because the class of inner anodyne maps is stable under pushouts, we conclude that $\beta$ is inner anodyne. \end{proof}
\begin{conjecture}[cf. Proposition 2.13 in Joyal \cite{joyal.ttoqcaia}] For every $A \in \DDelta$ the inclusion $I(A) \subset I(A) \cup \bigcup \frakF_A^o$ is inner anodyne. \end{conjecture}
From now on, assume the previous conjecture to be proven.
\begin{proposition}[cf. Proposition 2.13 in Joyal \cite{joyal.ttoqcaia}] Every spine $I(A) \subset A$, $A \in \DDelta$, is inner anodyne. \end{proposition}
\begin{conjecture}[cf. Lemma 3.5 in Joyal \cite{joyal.qcvsss}] \label{spineconjecture2} For every $A \in \DDelta$, the inclusion $I(A) \subset I(A) \cup \bigcup \frakF_A^o$ is a weak cat equivalence. \end{conjecture}
From now on, assume the previous conjecture to be proven.
\begin{lemma}[cf. Lemma 3.5 in Joyal \cite{joyal.qcvsss}] For every $A \in \DDelta$ and every subset $\Gamma \subsetneq \frakF_A$ containing all outer faces of $A$, the inclusion $\bigcup \frakF_A^o \subset \bigcup \Gamma$ is a weak cat equivalence. \end{lemma} \begin{proof} We prove this by induction on $A$: let $A \in \DDelta$, and assume that for every face $B$ of $A$ and every subset $\Gamma \subsetneq \frakF_B$ containing all outer faces of $B$, the inclusion $\bigcup \frakF_B^o \subset \bigcup \Gamma$ is a weak cat equivalence.
Let $\Gamma \subsetneq \frakF_A$ contain all outer faces of $A$. Assume $B \in \frakF_A \setminus \Gamma$ such that $\Gamma \cup \{B\} \subsetneq \frakF_A$. We show that the inclusion $\beta \colon \bigcup \Gamma \subset \bigcup \Gamma \cup B$ is a weak cat equivalence.
We have a pushout diagram $$\begin{xy}\xymatrix{ \bigcup_{F \in \Gamma} (F \cap B) \ar@{^{(}->}[r] \ar@{^{(}->}[d]_\alpha & \bigcup \Gamma \ar@{^{(}->}[d]^\beta\\ B \ar@{^{(}->}[r] & \bigcup \Gamma \cup B }\end{xy}$$
Consider $I(B) \hookrightarrow \bigcup_{F \in \Gamma} (F \cap B) \xhookrightarrow{\alpha} B$. By the induction hypothesis on $B$ and conjecture \ref{spineconjecture2}, the first map is a weak cat equivalence. The composition $I(B) \hookrightarrow B$ is a weak cat equivalence by definition. It follows that $\alpha$ is a weak cat equivalence. Because the class of monomorphisms that are weak cat equivalences is stable under pushouts, we conclude that $\beta$ is a weak cat equivalence. \end{proof}
\begin{proposition} Every inner anodyne map is a weak cat equivalence. \end{proposition}
\begin{proposition} The model structure for cats is equal to the Cisinski model structure generated by the set of inner horns. In particular, if an object $X \in \Set[\DDelta]$ is fibrant with respect to the model structure for cats, then it is a cat. \end{proposition}
\begin{conjecture} The fibrant objects of the model structure for cats are precisely the cats. \end{conjecture}
\section{$\mathbf{n}$-cats}
In this section, let $n \in \dbN$.
\begin{definition} Define $\DDelta_n \subset \DDelta$ to be the full subcategory generated by the objects $A \in \DDelta$ with $\dim A \leq n$. \end{definition}
\begin{proposition} The sequence of canonical projection functors $\DDelta_0 \leftarrow \DDelta_1 \leftarrow \DDelta_2 \leftarrow \ldots \leftarrow \DDelta$ induces a sequence of (full and faithful) embeddings $\Set[\DDelta_0] \hookrightarrow \Set[\DDelta_1] \hookrightarrow \Set[\DDelta_2] \hookrightarrow \ldots \hookrightarrow \Set[\DDelta]$. \end{proposition}
\begin{definition} $\calC \in \Set[\DDelta_n]$ is called an \emph{$n$-cat} if for every inner horn $\Lambda \subset A$ with $A \in \DDelta_n$, the map $\calC(A) \to \calC(\Lambda)$ is surjective. \end{definition}
\begin{remark}
For a cat $\calC \in \Set[\DDelta]$, the restriction $\calC_{|\tiny \DDelta_n} \in \Set[\DDelta_n]$ is an $n$-cat. \end{remark}
\begin{remark} Beware that an $n$-cat $\calC \in \Set[\DDelta_n]$ is in general not a cat when viewed as an object of $\Set[\DDelta]$ via the embedding $\Set[\DDelta_n] \hookrightarrow \Set[\DDelta]$. \end{remark}
\begin{definition} A cat $\calC \in \Set[\DDelta]$ is called an \emph{$n$-cat} if $\calC$ is weakly cat equivalent to an object in $\Set[\DDelta_n]$ (viewed as an object in $\Set[\DDelta]$). \end{definition}
From now on, assume $n \geq 1$.
\begin{definition} The Cisinski model structure on $\Set[\DDelta_n]$ generated by the set of spines which are contained in $\Set[\DDelta_n]$ is called the \emph{model structure for $n$-cats}. \end{definition}
\begin{proposition} The model structure for $n$-cats is equal to the Cisinski model structure generated by the set of inner horns which are contained in $\Set[\DDelta_n]$. In particular, if an object $X \in \Set[\DDelta_n]$ is fibrant with respect to the model structure for $n$-cats, then it is an $n$-cat. \end{proposition}
\begin{conjecture} The fibrant objects of the model structure for $n$-cats are precisely the $n$-cats. \end{conjecture}
\section{The model structure for groupoids}
\begin{definition} The Cisinski model structure on $\Set[\DDelta]$ generated by the set of maps $A \to 1$, with $A \in \DDelta$, is called the \emph{model structure for groupoids}. The weak equivalences of this model structure are called \emph{weak groupoid equivalences} or \emph{weak homotopy equivalences}. \end{definition}
\begin{conjecture} The model structure for groupoids is equal to the Cisinski model structure generated by the set of horns. In particular, if an object $X \in \Set[\DDelta]$ is fibrant with respect to the model structure for groupoids, then it is a groupoid. \end{conjecture}
\begin{conjecture} The fibrant objects of the model structure for groupoids are precisely the groupoids. \end{conjecture}
\section{$\mathbf{H^2(G;A)}$}
\begin{definition} For a group $G$, define $\bfB^1 G$ to be the strict 1-groupoid which has a single object, whose 1-morphisms are in bijective correspondence with the elements of $G$, and whose composition of 1-morphisms corresponds to multiplication in $G$. \end{definition}
\begin{definition} For an abelian group $A$, define $\rmB^2 A$ to be the strict 2-cat which has a single object, a single 1-morphism, whose 2-morphisms are in bijective correspondence with the elements of $A$, and whose (vertical and horizontal) composition of 2-morphisms corresponds to addition in $A$. \end{definition}
\begin{remark} In general, $\rmB^2 A$ is not a groupoid. In fact, the map $(\rmB^2 A)(\DDelta^{2,1}) \to (\rmB^2 A)(\Lambda^{2:0})$ induced by the outer horn $\Lambda^{2:0} \subset \DDelta^{2,1}$ is not surjective if $A \neq 0$. \end{remark}
\begin{definition} We can define a groupoid $\bfB^2 A$ having no non-degenerate simplices of dimension $>2$ together with a weak homotopy equivalence $\rmB^2 A \hookrightarrow \bfB^2 A$. (Hint: $\bfB^2 A(\DDelta^2) := A$.) \end{definition}
\begin{proposition} There is a canonical bijective correspondence between the set of maps $\bfB^1 G \to \bfB^2 A$ and the set of 2-cocycles $G \times G \to A$. \end{proposition} \begin{conjecture} The bijective correspondence of the previous proposition descends to a bijective correspondence between the set of homotopy equivalence classes of maps $\bfB^1 G \to \bfB^2 A$ and the set $H^2(G;A)$ of 2-cocycles modulo 2-coboundaries. \end{conjecture}
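\begin{remark} Here the standard conventions for group cohomology with trivial $G$-action on $A$ are understood: a 2-cocycle is a function $c \colon G \times G \to A$ satisfying \[ c(g,h) + c(gh,k) = c(h,k) + c(g,hk) \quad \text{for all } g,h,k \in G, \] and a 2-coboundary is a cocycle of the form $c(g,h) = b(g) - b(gh) + b(h)$ for some function $b \colon G \to A$. \end{remark}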
\section{Personal note}
I would like to write a thesis about this topic, but haven't been able to find an advisor yet. If there is anyone willing to work with me on this, please let me know.
\begin{center}\includegraphics[width=4cm]{alligator.jpg}\end{center}
\end{document} |
\begin{document}
\title{Quantum Erasure: Quantum Interference Revisited\footnote{Unedited and unillustrated version of ``Quantum Erasure", \textit{American Scientist} \textbf{91} 336-343 (2003).}}
\author{Stephen P. Walborn} \email[]{[email protected]}
\affiliation{Universidade Federal de Minas Gerais, Caixa Postal 702, Belo Horizonte, MG 30123-970, Brazil}
\author{Marcelo O. Terra Cunha}
\affiliation{Universidade Federal de Minas Gerais, Caixa Postal 702, Belo Horizonte, MG 30123-970, Brazil}
\author{Sebasti\~ao P\'adua}
\affiliation{Universidade Federal de Minas Gerais, Caixa Postal 702, Belo Horizonte, MG 30123-970, Brazil}
\author{Carlos H. Monken}
\affiliation{Universidade Federal de Minas Gerais, Caixa Postal 702, Belo Horizonte, MG 30123-970, Brazil}
\date{ April 20, 2003}
\begin{abstract} Recent experiments in quantum optics have shed light on the foundations of quantum physics. Quantum erasers - modified quantum interference experiments - show that quantum entanglement is responsible for the complementarity principle. \end{abstract}
\pacs{03.65Bz, 42.50.Ar}
\maketitle
It may be somewhat surprising that Thomas Young's double-slit experiment - a staple in the freshman physics laboratory - would be such an invaluable testing ground for the foundations of quantum physics. Yet the quantum version of the double-slit experiment has been at the center of many debates over the fundamentals of quantum physics since the theory was born, nearly a century ago. In fact, Young's experiment embodies the very nature of quantum physics. Last year, the readers of Physics World magazine voted Young's double-slit experiment with electrons ``the most beautiful experiment" in physics. The significance of Young's experiment lies in the fact that interference is a phenomenon exhibited only by waves. The puzzle that quantum physics presents is that a particle, which is usually thought of as an indivisible, localized object, can also behave like a classical wave, which interferes and diffracts. In ``the most beautiful experiment", electrons pass through the slits like waves and are detected like particles! This interference behavior is perhaps the greatest mystery in quantum theory. In fact, Nobel Prize-winning physicist Richard Feynman has called quantum interference ``the only mystery" in quantum physics. Recently, some progress has been made in the understanding of these interference effects within the foundations of quantum theory. Experiments called quantum erasers - modified versions of Young's experiment - have shed light on the foundations of quantum physics. However, before we explain the notion of quantum erasure, we take a detour to explore the concept and the history of classical and quantum interference. \par In the freshman physics laboratory, the double-slit experiment is quite simple. A laser beam is directed onto two closely spaced transparent slits that are etched into an opaque microfilm. The slits and their spacing are about a tenth of a millimeter wide. The laser beam is scattered by this ``double-slit" and a pattern of alternating bright and dark stripes - commonly called interference \emph{fringes} - is projected onto a distant viewing screen. Understanding the reason for this interference is not difficult: the paths from each slit to a given observation point are not necessarily equal, so light beams traveling from each slit arrive with different phases of propagation. These light beams interfere depending upon the difference in their phases: either constructively, resulting in an interference maximum (bright stripe) or destructively, resulting in an interference minimum (dark stripe). Even if you have never set foot in a physics laboratory, you have undoubtedly observed interference. Interference effects cause many common optical phenomena, such as the color patterns seen in soap bubbles or in the oily puddles in the parking lot of a gas station. \par
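In quantitative terms (the standard textbook formula for two narrow slits, quoted here for orientation; $d$ denotes the slit spacing, $L$ the distance from the slits to the screen, $\lambda$ the wavelength and $x$ the position on the screen measured from its center), the path difference between the two routes is approximately $d\,x/L$, and the brightness on the screen varies as \[ I(x) \propto \cos^2\!\left(\frac{\pi d x}{\lambda L}\right), \] so bright stripes appear wherever the path difference is a whole number of wavelengths, at $x = m\,\lambda L/d$ for integer $m$, with dark stripes halfway in between (the finite width of each slit is ignored here). \par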
Another way to visualize interference is to imagine a water wave incident on a wall with two vertical openings. When the wave front encounters the wall, a part of the wave goes through each opening, while the rest is reflected. The two sections that pass through the slots will meet up again a distance later and combine, or interfere. If a classical particle, say a tiny dust particle or even a tennis ball, is launched at the wall, it will either go through one of the openings or bounce back. To interfere, the particle would have to ``pass through both slits at the same time"! So it is very surprising and almost unbelievable that when this particle is instead an electron, for example, it interferes like a wave.
\par A fundamental result of quantum theory is that light is made up of tiny quanta of energy - ``particles of light" - called photons. In 1909, Geoffrey Taylor demonstrated diffraction of individual photons using the tip of a needle. Diffraction occurs when a wave passes through a tiny aperture or object. The diffraction pattern is similar to an interference pattern: maxima and minima are due to the interference of different parts of the transmitted wave that meet at the detection screen.
\par
What happens when Young's experiment is repeated using individual photons instead of an intense light beam? An attenuated light source ensures that only one photon is incident on the double slit at a time. After recording data for many photons, the resulting pattern of individual points (each corresponding to the detection of one photon) on the photosensitive screen is identical to that of an intense light beam, interference fringes and all. This seems to imply that the individual photons had ``passed through both slits at the same time" and ``interfered with themselves", a seemingly astounding feat, even for something as aloof and mysterious as the photon. To date, variations of the quantum double-slit experiment have been performed using many different types of particles, including photons, electrons, neutrons and even large carbon-60 fullerenes. All results confirm the counter-intuitive result that, at the quantum level, particles ``interfere with themselves" just like classical waves. \section{The quantum coin toss} To further understand why the interference of quantum particles is an unexpected result, here is a simple example. Consider the usual coin toss, of the sort that takes place at the start of an NFL football game, where the coin has the same chance of giving heads or tails. The probability (call it $P$(heads)) that a coin lands heads is thus 50 \%. Likewise, the probability that the coin gives tails is $P$(tails) = 50\%. Obviously, there are only two possible outcomes, the coin must land either heads or tails, so the total probability to give heads or tails is just the sum of the individual probabilities: $P$(heads or tails) = $P$(heads) + $P$(tails) = 100\%. The quantum double-slit is a type of ``quantum coin toss", and so we can make a similar analysis. Given a certain position on the detection screen, one can try to assign a probability $P$(slit 1) or $P$(slit 2) that a photon detected at that point on the screen passed through slit 1 or slit 2. Here comes the surprising result: unlike the coin toss, the total probability to register a photon is not equal to the sum of the individual probabilities: $P$(slit 1 or slit 2) $\neq$ $P$(slit 1) + $P$(slit 2).
\par
The physical principle responsible for this strange behavior is called \emph{superposition}, which says that wavelike events combine according to their probability amplitudes, not their probabilities. Let's denote the probability amplitude with the letter $A$. The probability amplitude for a photon to pass through slit 1 is $A$(slit 1) and $A$(slit 2) is the amplitude for the photon to pass through slit 2. One difference between a probability and a probability amplitude is that the amplitudes are now complex numbers, to incorporate the concept of phase. The total probability amplitude for a photon to pass through slit 1 or slit 2 is $A$(slit 1 or slit 2) = $A$(slit 1) + $A$(slit 2). The probability for a given event is then obtained by calculating the ``absolute square" of the corresponding probability amplitude: $P = |A|^2$. Thus, the total probability to detect a photon is $P$(slit 1 or slit 2) $= |A$(slit 1)$ + A$(slit 2)$|^2$. Computing this probability gives rise to quantities, not present in the NFL coin toss example above, which are responsible for the interference effects. Quantum particles - electrons, photons, etc - interfere because they behave according to the superposition principle, which describes the physical phenomenon of waves. Thus, when you flip a ``quantum coin", it can give both heads and tails at the same time.
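\par To make the interference explicit, write each amplitude with a magnitude and a phase (a small worked step using only the superposition rule just stated): \[ P = |A(\text{slit 1}) + A(\text{slit 2})|^2 = |A(\text{slit 1})|^2 + |A(\text{slit 2})|^2 + 2\,|A(\text{slit 1})|\,|A(\text{slit 2})|\cos(\phi_1 - \phi_2), \] where $\phi_1$ and $\phi_2$ are the phases of the two amplitudes. The first two terms are just the coin-toss sum of individual probabilities; the extra cosine term, which oscillates as the phase difference changes from point to point on the screen, is what produces the bright and dark fringes.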
\section{Particles or waves?} By the time Young performed his experiment in 1801, physicists had been debating the nature of light for many years. The question was: Is light made of waves or particles? Some scientists, such as Isaac Newton (1717), believed light was made up of tiny classical particles, like particles of dust. The movement of each particle traced out a trajectory, called a ray. Others, such as Dutch physicist Christian Huygens (1690), advocated a classical wave theory, like water waves or oscillations on a stretched string. Each theory was able to explain some of the phenomena observed up until that time, such as shadows, refraction and reflection. But when Thomas Young showed that a beam of light interferes with itself, which a classical particle could never do, the particle theory was laid to rest. That is, until Albert Einstein came along.
\par At the end of the nineteenth century, German physicist Max Planck was concerned with the following problem: explain the color spectrum of radiation emitted by a ``blackbody". A black body is basically a metal box kept at a certain temperature with a small hole allowing radiation to escape. Planck was interested in the color spectrum emitted by the box with respect to its temperature. Using classical radiation theory to describe blackbody radiation gave an inaccurate result known as the ultra-violet catastrophe. To accurately explain the radiation spectrum, Planck proposed the idea that light is made up of discrete energy units, or quanta, which we now call photons. Planck was reluctant to accept his own idea, which he thought of as a mathematical ``trick" which happened to fit the experimental data. Planck tried vigorously to explain blackbody radiation using other physical concepts. Shortly thereafter in 1905, Albert Einstein, in addition to publishing his seminal works on relativity and Brownian motion, applied Planck's revolutionary idea to explain the photoelectric effect, the work for which he was later granted the Nobel prize in 1921 (Planck had won the Nobel prize for his research 3 years earlier). Though Planck was the first to propose the idea of quanta, it was Einstein who embraced the idea, and his work along with Planck's forced the physics community to accept it. It was the dawn of quantum physics. \section{Matter waves matter}
\par Photons and other quantum particles are absorbed in discrete units of energy. The detection of a particle corresponds to a tiny point on some type of detection screen. But above we stated that quantum particles interfere with themselves just like waves. How can quantum objects have both particle and wave characteristics? In other words, how can a photon interfere with itself when passing through a double-slit but later appear as a tiny point on a photosensitive film? This paradox is known as wave-particle duality, and is one of the cornerstones of quantum theory. Wave-particle duality is often revealed through another underlying concept called the \emph{complementarity principle}.
\par In quantum physics, physically measurable quantities (such as position, momentum, etc.) are often called \emph{observables}. The complementarity principle states that the more we know about a given observable, the less we know about its complement. For example, if we measure the exact position of an object at an instant in time, then we can have no knowledge of the object's momentum at that instant. Position and momentum are called complementary observables. To avoid any confusion with the classical and quantum aspects of the word ``particle", we have now resorted to using the word ``object" to describe a quantum particle - meaning a photon, an electron, a neutron, etc.
\par The concept of position corresponds to a point in space. Imagining again a water wave, or a wave on a stretched string, with a series of peaks and troughs, it is easy to see that a wave does not have a well-defined position in this sense. A wave, such as those that can be seen crashing onto a sandy beach, can be localized to within a certain region, but not to a point. A classical particle does possess a well-defined position, and using the laws of classical physics, one can calculate the particle's trajectory and know its position at all instants in time. Therefore, position is identified as a particle-like property. A wave, on the other hand, can be described in terms of its frequency, wavelength, amplitude and phase. In 1924 Louis de Broglie related the wavelength of a quantum object (now known as the de Broglie wavelength) to its momentum, work for which he was later granted the Nobel Prize. Consequently, in quantum physics, momentum is a wave-like property. Hence the complementarity of position and momentum leads to wave-particle duality: quantum objects can behave as either particles or waves. The observed behavior depends on what type of measurement the experimenter chooses to make: if a particle-like property such as position is measured, then the quantum object behaves like a particle. Likewise, if we choose to observe a wave-like property, such as momentum, the observed behavior is wave-like. Moreover, quantum physics does not provide us with the means to make any definite statement about the properties of the quantum object before we measure it. The observation of a wave-like property does not imply that the quantum object was behaving as a wave just before the measurement.
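\par For reference, the de Broglie relation just mentioned is $\lambda = h/p$, where $h$ is Planck's constant and $p$ is the object's momentum. As a rough illustrative figure (our own example), an electron accelerated through about 100 volts has a de Broglie wavelength of roughly 0.12 nanometers - comparable to the spacing between atoms in a crystal, which is why electron diffraction from crystals can be observed.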
\par If this all sounds pretty unbelievable to you then you are in good company. Many of the founding fathers of quantum theory were not very satisfied with this state of affairs either, including Einstein, whose intellectual battles with Danish physicist Niels Bohr are the stuff that many physics books are made of. Bohr was the greatest proponent of the idea of complementarity, an idea that Einstein was reluctant to accept. Einstein could not come to terms with the idea that what we observe and consequently call``reality" seems to be based solely on the manner in which we choose to look. Moreover, he was bothered by the fact that according to quantum theory, this reality only exists while we are observing. He expressed his discontent to Abraham Pais by asking: ``Do you believe that the moon exists only when you look at it?" Einstein did not accept that quantum theory was a complete description of nature. Interestingly, it was Einstein's dissatisfaction that motivated and still motivates much of the modern research in quantum mechanics.
\par Many of the great Einstein-Bohr dialogs took place at the Solvay conferences in the 1920's. On several occasions, Einstein thought he could poke holes in Bohr's so-called Copenhagen Interpretation of quantum theory. Throughout the history of physics, much of the discussion and debate over the nature of the world is done through examples and counter-examples of \emph{gedanken} experiments: idealized thought experiments. One of Einstein's famous examples is the following. Repeat the quantum version of Young's experiment, but this time the double slit is suspended by sensitive springs so that it is free to move back and forth. An incident photon, scattered by the slits, suffers a change in momentum, which is absorbed by the double slit apparatus, giving it a slight kick. One could then measure the recoil of the slit apparatus together with the photon's position on the detection screen and infer the photon's trajectory, a particle-like property. The trajectory of the photon itself should not be altered by this measurement, so the interference fringes - a wave-like property - should still be observed. From the spacing between the interference fringes one can calculate the (de Broglie) wavelength and thus the momentum of the photon. In such a way it should be possible to observe the characteristic interference fringes and calculate the momentum as well as know the photon's trajectory. The complementarity principle must be a hoax!
\par
Bohr later pointed out, however, that Heisenberg's uncertainty relation prevented one from seeing interference fringes and determining the photon's trajectory simultaneously. The uncertainty relation is a quantitative statement about the best precision with which one can measure complementary observables. The recoil of the double-slit apparatus (an indicator of the momentum of the photon) disturbs the system, creating an uncertainty in the detection of the photon's position on the detection screen. This uncertainty is great enough to ``wash out" or blur the interference fringes to such a degree that they no longer appear. Any attempt to measure the photon's trajectory disturbs the system and prevents the observation of interference fringes. All ideas similar to Einstein's have failed due to similar arguments. For many years it was thought that the uncertainty relation was the mechanism responsible for the complementarity principle. The question remained: are we able to mark the particle's path (1) without altering its trajectory and (2) in such a way that we can get around the uncertainty principle? \section{Quantum Erasure} Roughly twenty years ago, physicists Marlan O. Scully and Kai Dr\"uhl (at the Max-Planck Institut f\"ur Quantenoptik and University of New Mexico) shook the physics community and strengthened the foundations of quantum physics when they introduced the idea of quantum erasure. The logic of quantum erasure is the following: if the information revealing the object's trajectory can be determined without significantly perturbing it, then the interference disappears, but the ``erasure" of this information should bring the interference back. Through the introduction of this new concept, they showed that the complementarity principle plays a much more fundamental role in quantum physics than the uncertainty relation.
\par Later, Scully, with Berthold-Georg Englert and Herbert Walther (both at the Max-Planck Institut f\"ur Quantenoptik), proposed a way to bring this about using Rydberg atoms as the interfering objects. Rydberg atoms are excited to very high electron energy levels (for example $n=50$) with long decay times. The atoms are incident on a double-slit. Two microwave cavities, made of a pair of microwave high reflectors, are then placed one behind each slit. The microwave cavities serve as path markers. When an atom passes through a cavity it emits a photon, which remains in the cavity. In this process, the atom's trajectory is not disturbed. By simply looking to see which cavity contains the photon, it would be possible to know where the atom has been. So far the Scully-Englert-Walther experiment has never been realized in the laboratory. However, we have succeeded in performing an experiment that is analogous to their proposal and much easier to implement experimentally. But first we must digress briefly to explain the concept of polarization.
\par The electromagnetic field, that is light, as well as the photon, has an internal property called polarization. In classical optics, light is viewed as a transverse electromagnetic wave and polarization refers to the direction in which it oscillates. A field that oscillates in a specific manner is said to be polarized. A field with linear polarization oscillates back and forth along a certain direction, perpendicular to the propagation direction, while a field with circular polarization oscillates in a circular pattern. Right-circularly polarized light oscillates in the clockwise direction, while left-circularly polarized light oscillates in the counter-clockwise direction. A circularly polarized light beam can be described as a superposition of horizontally and vertically polarized beams that are a quarter cycle (or quarter wavelength) out of phase with each other. For right-circular polarization the vertical component is a quarter cycle ahead of the horizontal component, while for left-circular polarization the vertical component is a quarter cycle behind the horizontal component. Other commonly used polarization directions are the diagonal directions, $45^\circ$ and $-45^\circ$. The diagonal directions are superpositions of horizontal and vertical components just like the right- and left-circular polarizations, only now the horizontal and vertical components are in phase ($45^\circ$) or one-half cycle out of phase ($-45^\circ$) with each other. Optical components called wave plates are used to change the polarization, while the propagation direction of the electromagnetic field is left untouched. A quarter-wave plate can be used to convert a linearly polarized beam into a circularly polarized beam. Another commonly used optical component is a polarizer, which acts as a polarization filter, allowing only light with a given polarization to pass. For example, if a circularly polarized beam is directed onto a horizontal polarizer, the beam which exits is horizontally polarized and half as intense as the input beam. Polarizing sunglasses use this concept to eliminate glare from reflective surfaces. \par Now imagine that we repeat Young's experiment with photons polarized linearly in the vertical direction, and we observe interference fringes on a distant screen. Suppose now that we insert two quarter-wave plates, one behind each slit, in such a way that plate 1 transforms the vertically polarized photons into right-circularly polarized photons, while plate 2 transforms the vertically polarized photons into left-circularly polarized photons. The result is that no interference pattern is observed at the detection screen. Instead, after many photons, we will observe a distribution of photon detections that produces the famous bell-shaped curve. The pattern looks something like a mountain peak, with a maximum in the middle, where photons from each slit will hit. There is only one peak because the two slits are very close together. If the slits were well separated, two peaks would appear.
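\par A compact way of bookkeeping these polarization statements is the standard ket (Jones-vector) notation, which we borrow here even though it is not used elsewhere in this article; which circular state carries the $+i$ is a matter of sign convention. Writing $|H\rangle$ and $|V\rangle$ for horizontal and vertical linear polarization, \[ |{\pm}45^\circ\rangle = \frac{|H\rangle \pm |V\rangle}{\sqrt{2}}, \qquad |R\rangle = \frac{|H\rangle + i|V\rangle}{\sqrt{2}}, \qquad |L\rangle = \frac{|H\rangle - i|V\rangle}{\sqrt{2}}, \] where the factor $i$ encodes the quarter-cycle phase shift between the two components. Each circular state has a horizontal component of squared magnitude one half - this is the statement above that a circularly polarized beam emerges from a horizontal polarizer with half its intensity - and the two circular states are orthogonal, $\langle L|R\rangle = 0$, which is the quantitative sense in which they are completely distinguishable.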
\par
What happened to the interference? The quarter wave plates have marked the polarization of the photons. All we have to do is measure the circular-polarization direction (left or right) of the photons at the screen and we will know through which slit the photons have passed. Since right- and left-circular polarizations oscillate in opposite directions, they are completely distinguishable from each other. Moreover, the quarter-wave plates do not alter the propagation direction of the photons. It is important to note that we don't actually have to measure the polarization direction in order to destroy the interference pattern. It is enough that the so-called which-path information is available to us. Playing dumb will not restore the interference fringes. \par
One might note that this experiment could just as well have been performed using an intense classical light beam. We have chosen to use quantum interference - photons - because the question as to which slit the beam of light has passed through has no significance in classical optics, where a beam of light is always a wave, and thus the concept of position is meaningless. \subsection{Interference is Ignorance}
\par What happens if we instead measure polarization in the horizontal direction? If we limit our observation apparatus to only horizontally polarized photons, then we will again see interference fringes. But how can that be? The quarter-wave plates have marked the photons' paths. Simply ignoring the information does not bring back interference. Why do we observe interference if we measure horizontal polarization?
\par
Both right- and left-circular polarizations have a horizontal component and thus observation of a horizontally polarized photon tells us nothing about through which slit the photon has passed. The key here is that measuring horizontal polarization erases the which-path information (hence the name ``quantum erasure"). If we tried to measure right- or left-circular polarization again after the horizontal polarizer, we would gain nothing in the way of which-path information.
Similarly, if we choose to measure vertical polarization, we again erase the which-path information and restore interference. However, in this case we observe interference in the form of \emph{antifringes} that are completely out of phase with those observed with horizontal polarization, meaning that where we had observed an interference maximum (a bright spot) we now observe a minimum (a dark spot), and vice versa. As it so happens, the sum of these interference patterns reproduces the ``mountain peak" pattern that one would obtain had no polarization measurement been made. This is the essence of quantum erasure.
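\par In the ket notation introduced above (a sketch; the relative phases depend on the same sign conventions), the state of a photon just after the wave plates can be written \[ \frac{|\text{slit 1}\rangle|R\rangle + |\text{slit 2}\rangle|L\rangle}{\sqrt{2}} = \frac{(|\text{slit 1}\rangle + |\text{slit 2}\rangle)|H\rangle + i\,(|\text{slit 1}\rangle - |\text{slit 2}\rangle)|V\rangle}{2}. \] Selecting a circular polarization picks out a single path term - which-path information, and no fringes - while selecting $H$ or $V$ picks out the sum or the difference of the two path amplitudes, giving fringes and antifringes respectively; adding the two conditional patterns removes the fringes again.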
\par
Our choice of polarization measurement divides the experimental results into subsets. Some of these subsets give interference fringes, as in the case where we measure horizontal or vertical polarization, while other subsets give which-path information, as when we measure either right- or left-circular polarization. If we add together the measurement results for the cases which give interference, the sum reproduces the mountain peak, as though we had not made any polarization measurement. Similarly, if we add together the measurement results for the cases which give which-path information, we obtain the same result.
\par We observe interference because the two possibilities corresponding to slit 1 and slit 2 are at least somewhat indistinguishable, that is, our choice of measurement cannot tell us with certainty through which slit a detected photon has passed. If the two possibilities are completely indistinguishable, as is the case when we measure horizontal or vertical polarization, we observe perfect high-contrast interference fringes. Likewise interference is completely destroyed when the two possibilities are distinguishable, meaning that our measurement apparatus is capable of telling us with certainty through which slit the photon has passed, as is the case when we measure circular polarization. There exist quantitative mathematical relationships governing the contrast of interference fringes and amount of which-path information we can observe simultaneously.
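\par One such relationship, quoted here for concreteness (it is the standard wave-particle duality inequality rather than something derived in this article), reads \[ D^2 + V^2 \leq 1, \] where $V = (I_{\max}-I_{\min})/(I_{\max}+I_{\min})$ is the visibility, or contrast, of the fringes and $D$, a number between 0 and 1, measures how reliably the apparatus can identify the path. Perfect which-path information ($D=1$) forces the fringes to vanish ($V=0$), perfectly sharp fringes ($V=1$) force $D=0$, and intermediate cases trade one against the other.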
\par What prevents us from observing interference and determining the photon's trajectory in the quantum eraser? Polarization and position are not complementary observables, so there is no place for an explanation based on the uncertainty principle. Moreover, the fact that we can erase the which-path information and observe interference implies that there is no ``disturbance" involved in the measurements. Yet the fact remains that we are still unable to obtain which-path knowledge and observe interference fringes simultaneously. It must be that the complementarity principle is enforced through some mechanism more fundamental than the uncertainty relation.
\par If it is not the uncertainty relation, then what is responsible for complementarity? The answer is \emph{quantum entanglement}. When a photon passes through the double-slit apparatus (just before it passes through the quarter-wave plates), it is in a superposition of position states: slit 1 + slit 2. The quarter-wave plates then perform a conditional logic operation on the photon: if a photon passes through slit 1 then it emerges with right-circular polarization, and if a photon passes through slit 2 then it emerges with left-circular polarization. The photon's polarization has become entangled with its path. The result is a more complicated quantum superposition involving two degrees of freedom: the photon's path and its polarization.
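\par In symbols, and as a sketch rather than a complete description of the experimental state, the photon just after the quarter-wave plates can be written as
\[
\frac{1}{\sqrt{2}}\Bigl( |{\rm slit~1}\rangle\,|R\rangle + |{\rm slit~2}\rangle\,|L\rangle \Bigr),
\]
where $|R\rangle$ and $|L\rangle$ denote right- and left-circular polarization. Neither the path nor the polarization has a definite value on its own; only the correlation between them is fixed.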
\par Entanglement is the name given to this type of quantum correlation, which is much stronger than any classical correlation. The reason for this is that entanglement correlates the probability amplitudes, while a classical correlation correlates only the probabilities. To see this, let's return to the NFL coin toss example; however, imagine now that we have two ``magical" coins, correlated such that when flipped they always give opposite results: one coin gives heads while the other gives tails. This is a type of classical correlation. Individually, each coin still lands heads 50\% of the time and tails the other 50\% of the time. If you flip both coins and then quickly hide one of them, you can always discover the result of the hidden coin simply by looking at the result of the exposed coin.
\par The difference between this example of classical correlation and quantum entanglement is that the quantum correlation exists even when you look at superpositions of the individual states. For example, as we will discuss below, it is possible to create two photons that have entangled polarizations. That is, if one photon is horizontally polarized then the other is vertically polarized. If we test both photons individually, there is a 50\% chance that we will measure each photon to be either horizontal or vertical, but we will never find that they are polarized in the same direction simultaneously. One can test this experimentally using horizontal and vertical polarizers. Up to this point, this seems to be the same as the magical NFL coins. However, unlike the NFL coins, it is possible to rotate the polarizers $45^\circ$ so that they measure $45^\circ$ and $-45^\circ$ diagonal polarization. The photons will display the same correlation: each individual photon has a 50\% chance to be detected $45^\circ$ diagonally polarized and a 50\% chance to be detected $-45^\circ$ polarized, but they are never polarized in the same direction simultaneously. Moreover, this is true for any mutual rotation of the polarizers. This is impossible using the magical NFL coins or any other type of classical correlation! In this sense, quantum entanglement is much stronger than any classical correlation. (A compact way of writing such an entangled polarization state, which makes this independence of the measurement basis explicit, is given below.)
\par As an aside, physicists have known about quantum entanglement since the renowned 1935 paper of Albert Einstein, Boris Podolsky and Nathan Rosen. Shortly thereafter, Austrian physicist Erwin Schr\"odinger coined the name entanglement. The fathers of quantum theory, including Einstein and Bohr, puzzled over the nature of entanglement just as they did over quantum superpositions. Since then, scientists have realized that quantum entanglement is a physical resource that can actually be used in the areas of information technology. In fact, quantum entanglement is the backbone of a new and rapidly flourishing multidisciplinary field called \emph{quantum information}.
\par Nearly twenty years ago, several physicists toyed with the idea of using two-level quantum systems, such as the polarization of a photon, as ``quantum bits" in a computer. Since then the same idea has been applied to many problems in cryptography, communications and computer science, and has produced some promising results. For example, the ``strange" laws of quantum physics provide the only form of cryptography that is proven to be secure, certainly interesting to governments sending top secret information or to anyone making a credit card purchase via the internet.
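\par To make the basis independence of this anticorrelation concrete, such a polarization-entangled pair can be written schematically as
\[
\frac{1}{\sqrt{2}}\Bigl( |H\rangle_a |V\rangle_b - |V\rangle_a |H\rangle_b \Bigr),
\]
a state which keeps exactly the same form when $|H\rangle$ and $|V\rangle$ are replaced by any other pair of orthogonal polarizations, for example $+45^\circ$ and $-45^\circ$. This is why the two photons are always found with perpendicular polarizations no matter how the two polarizers are jointly rotated, something that no pair of classically correlated coins can reproduce.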
\par Returning now to the quantum eraser, the quarter-wave plates have entangled the photon's path with its polarization. Since the two possible polarizations, right- and left-circular, are distinguishable (they oscillate in opposite senses), we can measure the polarization and determine the photon's path with certainty. Entanglement enforces the complementarity principle by coupling the photon's path to polarizations which are completely distinguishable from each other. Physicists have now come to a roadblock similar to that of Einstein and Bohr. Is it possible to measure the path of the photon without entangling it? Most physicists would probably bet that the answer is no. Entanglement is a fundamental player in the quantum theory of measurement; in a way, entanglement is the act of measurement, since it associates the photon's path (the slit) with its polarization (which we can measure).
\section{Twin Photons: an entangled story}
Recently, in the Quantum Optics laboratory at the Universidade Federal de Minas Gerais (UFMG), we took this experiment a step further. We created a pair of entangled photons using a non-linear optical process called spontaneous parametric down-conversion. In our experiment, we directed an ultraviolet argon laser beam onto a thin non-linear crystal, which creates two lower-energy ``twin" photons. The two photons, which we will call $a$ and $b$, were generated in such a way that when photon $a$ is found to have horizontal polarization, then photon $b$ will necessarily be vertically polarized. Likewise, if $a$ is found to have vertical polarization, then $b$ has horizontal polarization. As discussed above, similar correlations exist for any type of polarization measurements made on the two photons, as long as the polarizers measure perpendicular polarizations (horizontal and vertical, left- and right-circular, etc). These photons are said to be polarization-entangled. Furthermore, now that the entangled systems are two independent photons, they can be separated by an arbitrarily large distance. It has been shown experimentally that entangled photons can remain entangled over great distances - the current record, held by physicists at the University of Geneva, is between the cities of Bellevue and Bernex, a distance of about 11 kilometers!
\par After creating the entangled photons, we maneuvered photon $a$ to the double-slit apparatus (double slit and quarter-wave plates) and then to a photodetector, while photon $b$ passed directly to a separate polarizer and detector. When the quarter-wave plates were removed, after many photon pairs we observed the usual interference pattern. However, since we were working with two photons, the photon pairs were detected in coincidence. Coincidence detection means that we are only interested in the cases where the two photons are registered at their respective detectors simultaneously. Experimentally, the photons are detected within a small window of time, usually on the order of $10^{-9}$ seconds.
\par The biggest experimental hurdle we had to leap was figuring out a way to mount the quarter-wave plates in front of the narrow double slit. To create an observable interference pattern, each slit of the double slit was about 0.2 millimeters wide, and they were spaced 0.2 millimeters apart. The usual quarter-wave plates that are commercially available are round in shape, about 1 centimeter in diameter and about 2 millimeters thick.
Due to their shape and size, it was necessary to modify the wave plates so that they would each cover only one slit. Using high quality sandpaper, we sanded a straight edge into each wave plate at the required angle, so that they would each cover one slit and join in the narrow space between the slits.
\par When we put the quarter-wave plates in place, the interference was destroyed, just like before. This time, however, the which-path information is available only through coincidence detection. One quarter-wave plate transforms $a$'s vertical polarization to right-circular, while the other transforms it to left-circular. However, now photon $a$ can be found to be either vertically or horizontally polarized. For horizontal polarization, the action of the wave plates is reversed. Thus, measuring only the polarization of photon $a$ will not provide enough information to determine through which slit $a$ has passed. Through coincidence detection, however, we \emph{are} provided sufficient information. The two-photon logic statements are: (1) ``$a$ right-circular and $b$ horizontal" or ``$a$ left-circular and $b$ vertical" implies that $a$ passed through slit 1, while (2) ``$a$ left-circular and $b$ horizontal" or ``$a$ right-circular and $b$ vertical" implies that $a$ passed through slit 2. Interestingly enough, due to the entanglement between $a$ and $b$, we can choose to observe interference or obtain which-path information of photon $a$ based solely on the polarization direction we measure on photon $b$. Instead of measuring horizontal or vertical polarization of photon $b$, we can measure diagonal (or circular) polarizations, which are superpositions of horizontal and vertical polarizations. Detecting a diagonally polarized photon erases the which-path information, and consequently we observe interference fringes. A measurement in the positive diagonal direction ($45^\circ$) gives interference fringes, while a measurement in the negative diagonal direction ($-45^\circ$) gives interference antifringes, exactly out of phase with the fringes.
\section{Delayed Choice}
Curiously, with this quantum eraser we could actually choose to observe interference or determine photon $a$'s path after photon $a$ has been detected. Imagine that the detector registering photon $b$ is moved very far away, so that photon $b$ is detected some time after photon $a$. The experimenter could then wait until after photon $a$ is registered to decide which measurement to perform on photon $b$, and consequently observe interference or determine $a$'s path. Moreover, we could let photons $a$ and $b$ travel several light minutes away from each other, so that no signal could travel from $a$ to inform $b$ of its position in the time between the detections of $a$ and $b$. How can one choose to observe particle-like or wave-like behavior after the interfering particle has already passed through the double slit? When first discussed by American physicist John A. Wheeler in 1978, before the quantum eraser concept was introduced, this type of delayed-choice experiment raised serious physical and metaphysical questions. It seems to imply that the observer could alter photon $a$'s past by choosing how to measure photon $b$. However, this is not the case. To explain why, we will tell you a story about the two most famous people in quantum information: Alice and Bob.
\par Two quantum physicists, Alice and Bob, decide to perform an experiment testing the foundations of quantum mechanics.
Alice sets up a double-slit experiment with quarter-wave plates, just like we described, in her laboratory on Earth. Her friend and colleague Bob, who lives on the Mars colony, sends her photons, one by one, across a quantum ``telephone line" that they have set up between their laboratories. Alice sends the photons, one by one, through the double-slit and wave-plate apparatus. For every photon, she records its position, writing something like ``Photon 567 landed at position $x = 4.3$" in her lab notebook. When Alice later plots her experimental results, she sees that large ``dull" mountain peak, and concludes that there was no interference present in the experiment. What Bob has not told Alice is that each of her photons is entangled with another photon which Bob has kept for himself. Bob performs a series of polarization measurements on the photons, about half the time measuring horizontal and vertical polarization and the other half measuring $+45^\circ$ and $-45^\circ$ diagonal polarization. He records all of his results in his lab book, with statements such as ``Photon 567 ($b$) was detected with horizontal polarization", but he does not inform Alice of his mischief.
\par Bob loves magic and a good practical joke. When Bob visits Alice one day, she shows him her experimental results on the computer and says ``Look Bob, I performed that quantum eraser experiment and when I plotted my data, all I got was this dull mountain peak; there was no interference". Bob says ``Alice, are you sure?" and, after checking his own lab book, he tells her to plot only those photons for which he measured its entangled partner to be $+45^\circ$ diagonally polarized and ``Ta-Da!" an interference fringe pattern appears. ``Wait Bob, that wasn't there before! How did you make the photons interfere after I already detected them and recorded it all in my lab book?!", Alice exclaims. Bob, who loves to play for an audience, replies, ``You think that's impressive? Well, check this out", and he consults his lab book and plots Alice's photons that are paired with photons for which he measured horizontal polarization and ``Ta-Da!" there is no interference pattern, just the smaller (half-height) mountain peak. Alice is perplexed. Bob, not knowing when to call it quits, does the same for Alice's photons paired with his $-45^\circ$ diagonal polarization measurements and ``Ta-Da!" interference is back, this time in the form of antifringes. ``Bob, that is amazing! You have control over the past! While you are at it, can you go back and change my lottery ticket from last week to 67-81-138?", Alice asks with a look of awe in her eyes. Bob is loving the moment, but he is not the greatest magician, and cannot keep his mouth shut about the secret to his tricks. ``No Alice, look, the photons I gave you were actually entangled with photons that I kept for myself. I did a series of polarization measurements, and recorded my results. My polarization measurements tell me how to divide up your experimental results so that we can see interference or not, but I cannot change the position at which any photon actually landed," Bob explains. He shows her by plotting all of the results for which he measured horizontal OR vertical (orthogonal directions) and they observe the large mountain peak. He then does the same with all results of $+45^\circ$ and $-45^\circ$, and they observe the same mountain peak. Of course, plotting all of the results together regardless of polarization also gives the mountain peak, as Alice had already observed.
So Bob was not able to alter the past; he simply had more information than Alice.
\par Presumably, Einstein would not be happy with this state of affairs. Quantum erasure seems to confirm that the complementarity principle is indeed a fundamental part of quantum theory. Quantum physics has in its realm some strange consequences if one insists on using concepts from classical physics. The founding fathers were certainly aware of this nearly a century ago. Nowadays, physicists have learned to accept the fact that the laws of classical physics do not necessarily apply to the quantum world. We have become much more comfortable with the ``quantum weirdness".
\par The quantum eraser and other experiments have done much to illustrate the dual nature of quantum theory. However, physicists today are still unable to explain why wave-particle duality exists. In this respect, it seems that we have not come too far since the 1960s, when Richard Feynman stated, in \emph{The Feynman Lectures on Physics}: ``We cannot make the mystery go away by explaining how it works. We will just tell you how it works." Yet great progress has been made. Understanding that it is not the uncertainty principle, but rather quantum entanglement, that is responsible for complementarity is an enormous step, presumably in the right direction. Quantum entanglement is at the heart of the modern theory of quantum measurement. We have learned that it is the act of measurement itself, and not the ``quantum uncertainty" involved with the measurement, that is responsible for the complementarity principle. This may seem like a subtle point, but it is one that has caused many physicists to sleep more soundly at night.
\begin{acknowledgments} This research was performed in the Quantum Optics Laboratory at the Universidade Federal de Minas Gerais (UFMG) in Belo Horizonte, Minas Gerais, Brazil, with financial support from the Brazilian funding agencies CNPq and CAPES. \end{acknowledgments}
\end{document}
\begin{document}
\section*{Destruction of quantum coherence and wave packet dynamics} \begin{center} Gernot Alber\\ Abteilung f\"ur Quantenphysik, Universit\"at Ulm, D-89069 Ulm, Germany \\ (to be published in {\em The Physics and Chemistry of Wave Packets}, edited by J. A. Yeazell and T. Uzer (Wiley, N. Y.)) \end{center}
The development of short, powerful laser pulses and of sophisticated trapping techniques within the last few years has stimulated numerous theoretical and experimental investigations on the dynamics of wave packets in elementary, material quantum systems. These wave packets are non-stationary, spatially localized quantum states which are situated on the border between the microscopic and macroscopic domain. A detailed understanding of their dynamics is essential for our conception of quantum mechanics and of its connection with classical mechanics. So far the interplay between classical and quantum mechanical aspects of their dynamics has been investigated in Rydberg systems (Alber and Zoller 1991), in molecules (Garraway and Suominen 1995, Sepulveda and Grossmann 1996), in clusters (Knospe and Schmidt 1996) and in nanostructures (Koch et al. 1996). These studies have concentrated mainly on semiclassical aspects which may be attributed to the smallness of the relevant de Broglie wavelengths. Thereby quantum aspects still manifest themselves in interferences between probability amplitudes which are associated with various families of classical trajectories.
However, a comprehensive understanding of the emergence of classical behavior also requires a detailed understanding of the destruction of quantum coherence. Typically this destruction of coherence arises from external stochastic forces or environmental influences which cannot be suppressed. Though by now many aspects of the coherent dynamics of these wave packets are understood to a satisfactory degree, still scarcely anything is known about the influence of the destruction of quantum coherence.
The main aim of this article is to discuss characteristic physical phenomena which govern the destruction of quantum coherence of material wave packets. For systematic investigations on this problem it is advantageous to deal with physical systems in which wave packets can be prepared and detected in a controlled way and in which the mechanisms causing the destruction of quantum coherence can be influenced to a large extent. Rydberg atoms (Seaton 1983, Fano and Rau 1986) are paradigms of elementary quantum systems which meet these requirements. The high level density of Rydberg states close to an ionization threshold is particularly convenient for the experimental preparation of spatially localized electronic wave packets by coherent superposition of energy eigenstates (Alber and Zoller 1991). Furthermore, the dynamics of electronic Rydberg wave packets exhibits universal features which apply to atomic and molecular Rydberg wave packets as well as to Rydberg wave packets in more complex systems such as clusters. This dynamical universality might be traced back to the fact that over almost its whole classically accessible range the dynamics of a Rydberg electron is governed by the Coulomb potential of the positively charged ionic core. This universality, together with the fact that Rydberg systems are amenable to a systematic theoretical description with the help of semiclassical methods, makes them attractive for theoretical investigations. In recent years many detailed investigations have been performed concerning various fundamental aspects of the coherent dynamics of Rydberg wave packets, such as the influence of core scattering processes (Alber 1989, Dando et al. 1995, H\"upper et al. 1995), the connection between classical bifurcation phenomena and quantum dynamics (Beims and Alber 1993, 1996, Main et al. 1994), the influence of the stimulated light force on the atomic center-of-mass motion (Alber 1992, Alber and Strunz 1994) or the influence of electron correlations on wave packet dynamics in laser-induced two-electron excitation processes (Hanson and Lambropoulos 1995, Zobay and Alber 1995, van Druten and Muller 1995).
The dynamics of Rydberg electrons is governed by characteristic features which greatly influence the way in which they can be affected by external stochastic forces or environments. Most notably, Rydberg electrons can be influenced by laser fields of moderate intensities and by their statistical properties only in a small region around the atomic nucleus (Giusti and Zoller 1987). Furthermore, Rydberg systems are characterized by unique threshold phenomena which result from the infinitely many bound states and from the continuum states converging towards an ionization threshold. In addition, radiative decay rates of Rydberg states are so small that in typical situations of current experimental interest the direct influence of radiative damping can be neglected. However, the dissipative influence of radiative decay might become significant if Rydberg systems interact with intense laser fields. In order to demonstrate characteristic physical phenomena governing the destruction of quantum coherence of electronic Rydberg wave packets, two stochastic mechanisms will be considered in detail in the subsequent discussion: radiative damping, which is mediated by electron correlations between a Rydberg wave packet and a resonantly excited, tightly bound core electron, and fluctuations of laser fields.
The investigation of radiative damping mediated by electron-correlation effects is motivated by the recently revived interest in laser-induced two-electron excitation processes (Jones and Bucksbaum 1991, Stapelfeldt et al. 1991, Robicheaux 1993, Grobe and Eberly 1993, Hanson and Lambropoulos 1995, Zobay and Alber 1995, van Druten and Muller 1995). Non-resonant laser-induced excitation processes in which two valence electrons of an atom, e.g. an alkaline earth atom, are excited simultaneously have already been playing an important role in spectroscopy for a long time (Gallagher 1994). Typically, one of the valence electrons is thereby excited into a Rydberg state and the other one into a tightly bound core state. Due to the availability of intense laser light sources, cases have recently become experimentally accessible in which both of these electrons are excited resonantly, so that the influence of the laser field can no longer be treated with the help of perturbation theory. The resulting strong modifications of the electron correlations may give rise to interesting novel phenomena. If the Rydberg electron is prepared in a wave packet state, these coherent laser-modified electron correlations may even lead to an almost complete suppression of autoionization (Hanson and Lambropoulos 1995). In the subsequent discussion it will be demonstrated that these coherent effects are rather sensitive to the destruction of coherence which is caused by radiative decay of the tightly bound, excited core electron.
Due to the inherent stochastic nature of laser light the investigation of optical excitations of atoms or molecules by fluctuating laser fields is one of the central problems of laser spectroscopy. So far research on this problem has concentrated predominantly on laser-induced excitation of isolated energy eigenstates (Agarwal 1976, Dixit et al. 1980, Vemuri et al. 1991). By now this special class of excitation processes is understood to a satisfactory degree. Despite these successes, scarcely anything is known so far about the effect of laser fluctuations on optical excitation processes in which the level density of the resonantly excited states is large and in which wave packets are prepared. A paradigm in this respect is the laser-induced excitation of Rydberg and continuum states close to an ionization threshold which typically leads to the preparation of an electronic Rydberg wave packet. This physical system is well suited for investigating fundamental aspects of the influence of the destruction of quantum coherence on wave packet dynamics. In the subsequent discussion it will be demonstrated that this fluctuation-induced destruction of quantum coherence, together with the peculiar threshold phenomena of Rydberg systems, leads to a variety of novel phenomena. One of these generic effects is stochastic ionization, which manifests itself in a characteristic scenario of non-exponential decays (Alber and Eggers 1997).
This paper is organized as follows: In section 1 basic theoretical concepts for describing the dynamics of Rydberg electrons in laser fields are summarized. This section focuses on coherent dynamical aspects which can be described conveniently with the help of semiclassical methods. Within this framework quantum aspects manifest themselves in the interference between probability amplitudes which are associated with those classical trajectories along which probability is transported. In section 2 recent theoretical work on the destruction of quantum coherence in wave packet dynamics is reviewed. Characteristic phenomena are exemplified by considering two dissipative mechanisms in detail. In section 2.1 the influence of radiative damping on laser-induced two-electron excitation processes is investigated. Effects of laser fluctuations on the dynamics of electronic Rydberg wave packets are discussed in Sec. 2.2.
\section{Coherent dynamics of Rydberg electrons - general theoretical concepts}
In this section a brief review of general theoretical concepts is presented which are useful for the description of the dynamics of Rydberg electrons. These concepts have already been used successfully to describe various aspects of the coherent dynamics of electronic Rydberg wave packets.
Thereby we shall concentrate mainly on cases of recent experimental and theoretical interest in which a weakly bound Rydberg electron interacts with a laser field and additional weak electric and/or magnetic fields (Alber 1989, Dando et al. 1995, H\"upper et al. 1995, Moser et al. 1997). Throughout this review Hartree atomic units will be used for which $e=\hbar=m_e=1$ ($e$ and $m_e$ are the electronic charge and mass, respectively).
Rydberg electrons are atomic or molecular electrons whose dynamics is dominated by highly excited energy eigenstates close to an ionization threshold. In the simplest possible case the energies of these Rydberg states are given by the well-known relation $\epsilon_n = -1/[2(n - \alpha)^2]$ (Seaton 1983, Fano and Rau 1986). Thereby the quantum defect $\alpha$ is approximately energy independent for energies sufficiently close to the ionization threshold at energy $\epsilon = 0$. In typical optical excitation processes only Rydberg states with small values of the angular momentum $l$ are excited, i.e. $l\ll n$. These Rydberg states of low angular momenta are essentially delocalized over the whole space which is classically accessible to them, i.e. $(l + 1/2)^2 < r < 1/\mid \epsilon_n \mid$. ($r$ denotes the radial distance of the electron from the nucleus measured in units of the Bohr radius $a_0 = 5.29 \times 10^{-11} {\rm m}$.)
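For orientation, consider a representative principal quantum number of $n = 50$ with the quantum defect neglected: the binding energy is $\mid \epsilon_{50}\mid = 1/(2\cdot 50^2) = 2\times 10^{-4}$ atomic units, i.e. roughly $5.4\ {\rm meV}$, and the classically accessible range extends out to $r \approx 1/\mid \epsilon_{50}\mid = 5000\,a_0 \approx 0.26\ \mu{\rm m}$, which illustrates the almost macroscopic size of such Rydberg states.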
If a Rydberg electron interacts with a laser field of moderate intensity and with a weak, static electric and/or magnetic field, one can distinguish three characteristic spatial regimes: \\ \\ (1) {\em The core region:} ($0 < r < O(1)$) \\
It extends a few Bohr radii around the atomic nucleus. Inside this region, Rydberg electrons of low angular momenta, which are able to penetrate the core, interact with all other atomic core electrons. These interactions lead to characteristic electron-correlation effects such as autoionization and channel coupling. Quantitatively these effects can be described by quantum defect parameters which are approximately energy independent close to an ionization threshold (Seaton 1983, Fano 1986, Aymar et al. 1996).
If a Rydberg electron of low angular momentum interacts with a laser field of moderate intensity, whose electric field strength is given by \begin{equation} {\bf E}(t) = {\bf E}_0 e^{-i\omega t} + c.c., \end{equation} two major effects take place. Firstly, the Rydberg electron experiences an intensity-dependent ponderomotive energy shift of magnitude $\delta \omega_p = \mid {\bf E}_0\mid^2/\omega^2$. This energy shift is independent of the energy of the Rydberg electron and may thus be interpreted as an energy shift of the ionization threshold. Secondly, all other dominant energy exchange processes between a Rydberg electron and the laser field are localized within a region typically extending a few Bohr radii around the atomic nucleus. This localization of the electron-laser coupling inside the core region relies on two sufficient conditions, namely moderate laser intensities and sufficiently high laser frequencies, preferably in the optical frequency domain (Giusti and Zoller 1987). Thereby laser intensities are considered to be moderate provided the stationary oscillation amplitude $\alpha_{osc}$ of an electron in the laser field (in the absence of the Coulomb potential of the ionic core) is significantly less than the extension of the core region, i.e. \begin{equation} \alpha_{osc} = \mid {\bf E}_0\mid/\omega^2 \ll 1. \label{alpha} \end{equation} Furthermore, in this context laser frequencies $\omega$ are considered to be high if they are much larger than the inverse classical Kepler period $T_n$ of the Rydberg electron, i.e. $\omega T_n \gg 1$ with $T_n = 2\pi(n - \alpha)^3$. (A rough numerical illustration of these two conditions is given below, after the discussion of the three spatial regimes.) Classically speaking, at these high laser frequencies it is only in a region close to the nucleus that the acceleration of a Rydberg electron is sufficiently large that an appreciable energy exchange of the order of $\Delta \epsilon \approx \omega$ can take place between the laser field and the Rydberg electron (compare also with Eq.(\ref{Bohr})). As a consequence, the interaction of a Rydberg electron with a laser field of moderate intensity and sufficiently high frequency is completely different from its interaction with a microwave field whose frequency is comparable with its classical Kepler frequency $1/T_n$. Even if the field strength of such a microwave field is small in the sense that $\alpha_{osc}\ll 1$, the small frequency of the microwave field implies that energy can be exchanged with the microwave field essentially at any distance of the Rydberg electron from the atomic nucleus. \\ \\ (2) {\em The Coulomb region:} ($O(1) < r < a$) \\
Outside the core region the dynamics of a highly excited Rydberg electron is dominated by the $1/r$ Coulomb potential of the positively charged ionic core. If the Rydberg electron is influenced by a weak external electric or magnetic field this is only valid for distances of the Rydberg electron from the nucleus which are smaller than the critical distance $a \gg 1$ at which the external potentials are no longer negligible. If this critical distance is located inside the classically accessible region, i.e. $a < 1/\mid \epsilon_n \mid$, these external fields influence the dynamics of the Rydberg electron significantly. \\ \\ (3){\em The asymptotic region:} ($1\ll a < r$) \\
In the asymptotic region the influence of weak external fields is as important as the Coulomb force originating from the positively charged ionic core. In general, in this region the resulting dynamics of the Rydberg electron is complicated by the fact that its classical dynamics is no longer integrable and exhibits signatures of chaos. \\
In each of these characteristic spatial regimes different approximations can be applied for the dynamical description of the Rydberg electron. All photon absorption and emission processes and all electron correlation effects which take place inside the core region have to be described quantum mechanically. As the Bohr radius is small in comparison with the extension of the Coulomb region and of the asymptotic region, outside the core region the dynamics of a Rydberg electron can be described with the help of semiclassical methods.
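As a rough numerical illustration of the two sufficient conditions for the localization of the electron-laser coupling stated above ($\alpha_{osc}\ll 1$ and $\omega T_n \gg 1$), the following short script evaluates both quantities in atomic units. The intensity, frequency and principal quantum number are representative values chosen here for illustration only, not parameters of a specific experiment.
\begin{verbatim}
import math

# Atomic units: e = hbar = m_e = 1.
# 1 a.u. of intensity ~ 3.51e16 W/cm^2, 1 a.u. of time ~ 2.42e-17 s.

def field_amplitude_au(intensity_w_cm2):
    """Peak field amplitude |E_0| in atomic units for a given intensity."""
    return math.sqrt(intensity_w_cm2 / 3.51e16)

omega = 0.057                  # laser frequency in a.u. (roughly 800 nm)
E0 = field_amplitude_au(1e10)  # moderate intensity, here 10^10 W/cm^2
n = 50                         # representative principal quantum number

alpha_osc = E0 / omega**2      # quiver amplitude |E_0|/omega^2
T_n = 2.0 * math.pi * n**3     # classical Kepler period (quantum defect neglected)

print(f"alpha_osc = {alpha_osc:.2f} a.u. (moderate intensity requires << 1)")
print(f"omega*T_n = {omega * T_n:.0f}      (high frequency requires >> 1)")
# For these values: alpha_osc ~ 0.16 and omega*T_n ~ 4.5e4.
\end{verbatim}
For such parameters the quiver amplitude is indeed small compared to the extension of the core region, while the optical period is several orders of magnitude shorter than the Kepler period.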
Starting from these elementary considerations a systematic theoretical description of Rydberg electrons can be developed which is based on a synthesis of semiclassical methods and of concepts of quantum defect theory (Alber 1989, Alber and Zoller 1991). Thereby solutions of the Schr\"odinger equation which are valid inside the core region and at the boundary to the Coulomb region have to be matched to semiclassical wave functions which are valid in the Coulomb region and in the asymptotic region. The values of the wave function at the border between the core region and the Coulomb region are determined by the solution of the Schr\"odinger equation inside the core region. Within the framework of quantum defect theory these values are determined by approximately energy-independent quantum defect parameters. These quantum defect parameters originate from two different types of interactions, namely electron correlation effects and laser-induced photon absorption and emission processes. For moderate laser intensities and sufficiently high frequencies this latter type of process gives rise to intensity-dependent quantum defects. Thus, in the simplest case of a one-channel approximation, for example, these interactions inside the core region can be characterized by a complex quantum defect of the form (Alber and Zoller 1988) \begin{equation} \mu = \alpha + i\beta. \label{complex} \end{equation} The real part of this quantum defect defines the energies of the Rydberg electron in the absence of the laser field, i.e. $\epsilon_n = -1/[2(n - \alpha)^2]$. The imaginary part $\beta$ describes the influence of laser-induced transitions of the Rydberg electron into continuum states well above threshold. In lowest order of perturbation theory it is given by \begin{equation} \beta = \pi\mid \langle \epsilon=\omega \mid {\bf d}\cdot {\bf E}_0 \mid \epsilon = 0\rangle \mid^2 \label{imag} \end{equation} with ${\bf d}$ denoting the atomic dipole operator. For hydrogen and linearly polarized laser light, for example, this imaginary part of the quantum defect can be evaluated approximately with the help of the Bohr correspondence principle. According to this principle the dipole matrix element entering Eq.(\ref{imag}) is approximated by a Fourier coefficient of the classical trajectory of a Rydberg electron of energy $\epsilon = 0$ (Landau and Lifshitz 1975), i.e. \begin{equation} \langle \epsilon=\omega \mid {\bf d}\cdot {\bf E}_0 \mid \epsilon = 0\rangle = \frac{1}{2\pi}\int_{-\infty}^{\infty} dt e^{i\omega t} {\bf x}(t)\cdot {\bf E}_0= \frac{6^{2/3}}{2\pi\sqrt{3}}\Gamma(2/3) \omega^{-5/3}\mid {\bf E}_0\mid. \label{Bohr} \end{equation} ($\Gamma(x)=\int_0^{\infty}du u^{x-1}e^{-u}$ denotes the Euler gamma function.) Thereby ${\bf x}(t)$ describes the parabolic classical trajectory of an electron which moves in the Coulomb field of the nucleus with energy $\epsilon = 0$. Consistent with the previous qualitative discussion, the $\omega^{-5/3}$-dependence in Eq.(\ref{Bohr}) demonstrates that the dominant contribution to this dipole matrix element originates from a spatial region around the nucleus with a size of the order of $r_c \approx \omega^{-2/3}$. This characteristic size $r_c$ is the distance a classical electron of (asymptotic) energy $\epsilon = 0 $ can depart from the nucleus during the relevant photon absorption time $t_{photon} \approx 1/\omega$.
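The scaling $r_c \approx \omega^{-2/3}$ can be made plausible by an elementary classical estimate (a sketch based on the zero-energy radial Coulomb motion): from $\frac{1}{2}\dot{r}^2 = 1/r$ one obtains $r(t) = (9t^2/2)^{1/3}$, so that within the photon absorption time $t_{photon} \approx 1/\omega$ the electron can depart from the nucleus only by a distance of the order of $$r_c \approx r(t_{photon}) = (9/2)^{1/3}\,\omega^{-2/3} \approx 1.65\,\omega^{-2/3}.$$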
In the Coulomb and asymptotic region the quantum mechanical state can be determined semiclassically. In order to make these ideas more precise let us consider the general form of the semiclassical solution of the time-independent Schr\"odinger equation which is valid in the Coulomb and asymptotic region. It reads (Maslov and Fedoriuk 1981, Delos 1986) \begin{eqnarray} \psi(\epsilon, {\bf x}) &=& \sum_j \varphi(\epsilon,{\bf y}_j) \sqrt{\frac{J(0,{\bf y}_j)}{\mid J(t_j,{\bf y}_j)\mid}} e^{i[S_j(t_j,{\bf y}_j) - \mu_j(t_j)\pi/2]}. \label{semi} \end{eqnarray} This wave function is determined by two different types of quantities, namely the probability amplitude $\varphi(\epsilon,{\bf y})$ of finding the electron at position ${\bf y}$ on the boundary between the core region and the Coulomb region and by quantities which describe the classical motion of the Rydberg electron outside the core region (compare with Fig. 1). The probability amplitude $\varphi(\epsilon, {\bf y})$ is determined by the quantum defect parameters which describe the electron correlations and the electron-laser interaction inside the core region. According to Eq.(\ref{semi}) the probability amplitude $\psi(\epsilon,{\bf x})$ of finding the electron at position ${\bf x}$ outside the core region is also determined by properties of all those classical trajectories $j$ which start at the boundary between the core region and the Coulomb region at position ${\bf y}$ and reach the final point ${\bf x}$ at any `time' $t$. In this context the variable $t$ represents a curve parameter and not a physical time. Together with the initial positions ${\bf y}$ the curve parameter $t$ constitutes a global coordinate system for the family of classical trajectories which leave the core region and which form a Lagrangian manifold (Maslov and Fedoriuk 1981, Delos 1986). The important classical properties of trajectory $j$ which determine $\psi(\epsilon,{\bf x})$ are: \begin{enumerate} \item its classical action (eikonal) $S_j(t_j,{\bf y}_j)$, \item the determinant of its Jacobi field $$J(t_j,{\bf y}_j) = \frac{dx_1\wedge dx_2\wedge dx_3} {dt\wedge dy_1\wedge dy_2}\mid_j$$
which characterizes its stability properties, and \item its Maslov index $\mu_j(t_j)$ which characterizes the number of conjugate points and their multiplicity. \end{enumerate}
According to this general theoretical approach it is apparent that Rydberg systems differ from one another only as far as their dynamics inside the core region is concerned. This part of the dynamics can be described generally by a few quantum defect parameters. Thus Rydberg systems exhibit universal behavior and the quantum defect parameters characterize the associated universality classes. Furthermore, the semiclassical analysis of the dynamics of the Rydberg electron in the Coulomb region and in the asymptotic region implies that probability amplitudes describing atomic transitions between an initial and a final state can be represented as a sum of contributions which are associated with all possible classical paths (including their multiple returns) which connect the regions of support of the initial and the final state. In particular, if the dominant contribution of a transition amplitude originates from the core region, for example, it is all classical paths which start and end inside the core region which are relevant for the theoretical description. On the basis of this combination of methods of quantum defect theory with semiclassical path representations for relevant quantum mechanical transition amplitudes many aspects of the coherent dynamics of electronic Rydberg wave packets have already been described successfully (Beims and Alber 1993, 1996, Alber et al. 1994, Zobay and Alber 1998).
\section{Dissipative dynamics of electronic Rydberg wave packets}
So far, in the context of wave packet dynamics of material particles, the investigation of dissipative and stochastic influences which destroy quantum coherence has not received much attention. To some extent this may certainly be attributed to the complications arising from the high level densities which have to be taken into account for a proper theoretical description of wave packet dynamics. In general, these high level densities turn the solution of master equations for the relevant density operator into a difficult mathematical and numerical problem. Electronic wave packets in Rydberg systems are an extreme example of this kind due to their almost macroscopic size and the infinitely high level density of Rydberg states at an ionization threshold. In the subsequent discussion it will be demonstrated that a combination of the semiclassical methods discussed in Sec. 1 together with stochastic simulation methods constitutes a powerful theoretical approach for describing many aspects of the destruction of quantum coherence in wave packet dynamics. In addition, this theoretical approach offers insight into the intricate interplay between the semiclassical aspects of the dynamics of a Rydberg electron outside the core region and its coupling to the radiation field inside the core region. In the subsequent sections two types of physical processes will be discussed in detail by which this coupling to the radiation field can destroy the quantum coherence of an electronic wave packet, namely spontaneous emission of photons and the intrinsic fluctuations of a laser field. Motivated by the recent interest in laser-induced two-electron excitation processes, in Sec. 2.1 characteristic effects of radiative damping are explored which are mediated by the correlation between an electronic Rydberg wave packet and a resonantly excited, tightly bound core electron. In Sec. 2.2 it is demonstrated that as a result of the peculiar threshold properties of Rydberg systems the destruction of quantum coherence which is brought about by a fluctuating laser field gives rise to a variety of novel phenomena.
\subsection{Radiative damping mediated by electron correlations}
Due to the long radiative lifetimes of Rydberg states (radiative lifetimes scale as $(n - \alpha)^3$ (Gallagher 1994)) the direct influence of spontaneously emitted photons is negligible in typical laboratory situations. However, destruction of quantum coherence originating from radiative damping might become significant in cases in which more than one atomic or molecular electron is excited resonantly by a laser field. In such cases a photon which is emitted spontaneously by one of these excited electrons can influence another excited Rydberg electron via electron-correlation effects. Isolated core excitation (ICE) processes (Cooke et al. 1978) are a particular class of laser-induced two-electron excitation processes which has received considerable attention recently. In the following it is demonstrated that in these types of excitation processes the dissipative influence of radiative damping mediated by electron correlations may influence the dynamics of electronic wave packets significantly.
ICE excitation processes have been studied extensively in the alkaline earth elements as the corresponding singly-charged ions are excited easily with laser fields in the optical or near-uv regime. In Fig. \ref{ICE}
a typical laser-induced ICE process is shown schematically for a magnesium atom. In a first step, the atom is excited from its $|3s^2\rangle$
ground state to a Rydberg state $|3snd\rangle$ by two-photon excitation. After this excitation process the atom consists of the Mg$^+$(3s) ionic core and the $nd$-Rydberg electron which tends to be located at large distances from the core. By applying a second laser pulse tuned to a resonance of the Mg$^+$ ion the remaining core electron is excited, e.g. to the $3p$-state of the ionic core. The direct influence of the laser field on the highly excited Rydberg electron is usually negligible in comparison to its interaction with the second, tightly bound valence electron. But the laser field influences the Rydberg electron indirectly by electron-correlation effects. Immediately after the core transition the Rydberg electron experiences a ``shakeup" by the different short-range core potential to which it has to adjust. A quantitative measure for the degree of this shakeup is given by the difference between the quantum defects of the two channels associated with the $3s$ and the $3p$-states of the ionic core. The early work on ICE spectroscopy of alkaline earth elements has concentrated on non-resonant core transitions which can be described in lowest order of perturbation theory with respect to the laser field (Gallagher 1994). Non-perturbative effects of laser fields have become of interest only recently in connection with the development of powerful tunable laser sources (Jones and Bucksbaum 1991, Stapelfeldt et al. 1991, Robicheaux 1993, Grobe and Eberly 1993). They are particularly important in resonant core excitation processes in which one of the laser fields induces Rabi oscillations of the ionic core.
A variety of new coherent effects have been predicted theoretically in this context (Robicheaux 1993, Hanson and Lambropoulos 1995, Zobay and Alber 1995, van Druten and Muller 1995) which rely on the coherent interplay between the Rabi oscillations of the ionic core and the dynamics of an electronic Rydberg wave packet which is influenced by these Rabi oscillations through the resulting shakeup processes (For a review on these theoretical developments see Zobay and Alber 1998). However, due to the possibility of spontaneous emission of photons by the resonantly excited core electron all these effects are expected to be particularly sensitive to the resulting destruction of quantum coherence.
In order to investigate these dissipative effects in detail let us consider a typical laser-induced two-electron excitation process in an alkaline earth atom as represented in Fig. \ref{8}. It is assumed that the atom is prepared initially in its ground state
$|g \rangle$. The atom is situated in a cw-laser field whose electric field strength is given by ${\bf E}(t) = {\cal E}{\bf e} e^{-i\omega t} + c.c.$ and which is tuned near resonance with a transition of the positively charged ionic core. Typically electron correlations imply that as long as the atom remains in its initial state
$|g\rangle$, this laser field is well detuned from any atomic transition. Thus the laser field has a negligible effect on the atomic dynamics. But as soon as an outer valence electron is excited to a Rydberg state close to an ionization threshold, the cw-laser field starts to induce transitions between the two resonantly coupled states of the ionic core which have energies $\epsilon_1$ and $\epsilon_2$, respectively. Let us concentrate on a case in which one of the valence electrons is excited coherently to Rydberg states by a short and weak laser pulse with electric field strength ${\bf E}_a(t) = {\cal E}_a (t) {\bf e}_a e^{-i\omega_a t} + c.c.$ (Typically the pulse envelope ${\cal E}_a(t)$ will be modeled by a Gaussian shape centered around time $t_a$ with pulse duration $\tau_a$). Thus a radial electronic Rydberg wave packet is prepared by this short laser pulse (Alber and Zoller 1991). This wave packet moves in the Coulomb field of the positively charged ionic core. Whenever it penetrates the core region, it is shaken up by the Rabi oscillations of the resonantly driven core. Furthermore, whenever the core emits a photon spontaneously, this emission process will disrupt the relative phases of the electronic wave packet and will thus destroy quantum coherence. The dynamics of this electronic wave packet under the influence of the Rabi oscillations of the ionic core can be investigated by typical pump-probe experiments, for example.
For the theoretical description of the resulting destruction of quantum coherence one has to solve the corresponding optical Bloch equation for the density operator of the two atomic valence electrons. In the case depicted in Fig. \ref{8}, for example, the optical Bloch equation is given by (Zobay and Alber 1996) \begin{equation} \dot{\rho}(t) = -i[H,\rho(t)] + \frac{1}{2}\{ [L,\rho(t) L^{\dagger}] + [L\rho(t), L^{\dagger}]\}. \label{Bloch} \end{equation} Thereby the Hamiltonian \begin{eqnarray} H&=& \sum_{i,j=1,..,3} H_{i,j} + V_{ICE} \end{eqnarray} characterizes the coherent part of the dynamics. The dynamics of the valence electrons is described by the Hamiltonian \begin{equation} H_{i,j} = ({\bf h}_{jj}\delta_{ij} +
{\bf V}_{ij} + \epsilon_{cj}\delta_{ij})|\Phi_i\rangle
\langle \Phi_j| \label{H} \end{equation} with \begin{equation} {\bf h}_{jj} = -\frac{1}{2}\frac{d^2}{dr^2} + \frac{l_j(l_j + 1)}{2r^2} - \frac{1}{r}. \end{equation} The short-range potential ${\bf V}_{ij}$ describes electron-correlation effects originating from the residual core electrons (Aymar et al. 1996). In ICE transitions the angular momentum $l$ of the excited Rydberg electron is conserved to a good degree of approximation, i.e. $l_1=l_2=l$ (Gallagher 1994). In the rotating wave approximation the channel thresholds $\epsilon_{cj}$ are given by $\epsilon_{c1} = \epsilon_1$, $\epsilon_{c2} = \epsilon_2 - \omega$, $\epsilon_{c3} = \epsilon_3 - \omega$. The operator \begin{equation}
V_{ICE} = -\frac{1}{2}\Omega(|\Phi_2\rangle \langle \Phi_1| +
|\Phi_1\rangle \langle \Phi_2|)\otimes {\bf 1}_r \label{VICE} \end{equation} describes the laser-induced core transitions between the core states $\mid \Phi_1\rangle$ and $\mid \Phi_2\rangle$ and $\Omega$ is the Rabi frequency originating from the cw-laser field. The operator ${\bf 1}_r$ denotes the identity operator for the radial coordinate of the Rydberg electron. Thus the role of the Rydberg electron as a spectator becomes obvious from Eq.(\ref{VICE}).
The stochastic part of the dynamics of the density operator $\rho(t)$ is described by the Lindblad operator \begin{equation}
L = \sqrt{\kappa}|\Phi_1\rangle \langle \Phi_2 | \otimes {\bf 1}_r \label{Lind} \end{equation} which characterizes the radiative decay of the ionic core from its excited state to its ground state by spontaneous emission of photons with rate $\kappa$.
Due to the high level density of Rydberg states close to an ionization threshold and due to the presence of the adjacent electron continuum, severe problems usually arise if one tries to solve the optical Bloch equation (\ref{Bloch}) numerically by expanding the density operator $\rho(t)$ into a basis set of atomic energy eigenfunctions. Many of these problems can be circumvented successfully by combining the semiclassical methods as discussed in Sec. 1 with stochastic simulation methods (Zobay and Alber 1996). Besides numerical advantages this approach also gives direct insight into the classical aspects of the dynamics of the Rydberg electron and the destruction of quantum coherence caused by the radiative decay of the core. Thereby the density operator is represented by a (fictitious) ensemble of pure states which are associated with definite numbers of spontaneously emitted photons (Mollow 1975), i.e. \begin{equation} \rho(t) = \sum_{N=0}^{\infty} \rho^{(N)}(t), \label{rhon} \end{equation} with the $N$-photon contributions \begin{eqnarray} \rho^{(N)}(t) &=& \int_0^{t} dt_N \int_0^{t_N} dt_{N-1} \cdots \int_0^{t_2} dt_1
|\psi(t|t_N,...,t_1)\rangle
\langle \psi(t|t_N,...,t_1)|.\nonumber \end{eqnarray} The time evolution of the $N$-photon states
$|\psi(t|t_N,...,t_1)\rangle$ is given by \begin{eqnarray}
|\psi(t|t_N,...,t_1)\rangle&=& e^{-iH_{{\rm eff}}(t - t_N)}\Theta(t - t_N)L e^{-iH_{{\rm eff}}(t_N - t_{N-1})}\Theta(t_N - t_{N-1})L \cdots\nonumber\\&&
Le^{-iH_{{\rm eff}}t_1}\Theta(t_1)|\psi(t=0)\rangle \label{jump1} \end{eqnarray} with the effective (non-Hermitian) Hamiltonian \begin{equation} H_{{\rm eff}} = H - \frac{i}{2}L^{\dagger}L. \label{Ham} \end{equation} ($\Theta(x)$ is the unit step function.) The physical interpretation of Eq.(\ref{jump1}) is straightforward. With each emission of a photon at one of the $N$ random emission times $t_1 \leq t_2 \leq ...\leq t_N$ the quantum state 'jumps' into a new state by application of the Lindblad operator of Eq.(\ref{Lind}). Between two successive jumps the state evolves according to the effective Hamiltonian of Eq.(\ref{Ham}). Thus the decomposition of Eq.(\ref{rhon}) may also be interpreted as an unraveling of the density operator into contributions associated with all possible quantum jumps. This decomposition of the density operator $\rho(t)$ offers significant advantages in cases in which the number of spontaneously emitted photons is small or in which the evaluation of the relevant pure states can be simplified by the application of semiclassical methods. In particular, it is possible to derive general semiclassical path representations for the $N$-photon states of the optical Bloch equation (\ref{Bloch}). Thus all physical observables of interest can be expressed as a sum of probability amplitudes which are associated with repeated returns of a Rydberg electron to the ionic core. During its motion under the influence of the Coulomb potential of the ionic core, photons may be emitted spontaneously by the laser-excited core at any position of the Rydberg electron along its path. These photon emission processes disrupt the coherent quantum mechanical time evolution of the Rydberg electron.
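The unraveling of Eqs.(\ref{rhon}) and (\ref{jump1}) translates directly into a stochastic simulation algorithm (Monte Carlo wave functions, or quantum jumps). The following sketch illustrates the basic propagation scheme for a generic finite-dimensional system with a single Lindblad operator; it is a toy illustration with arbitrarily chosen matrices and parameters, not the actual semiclassical Rydberg calculation described in this article.
\begin{verbatim}
import numpy as np

def quantum_jump_trajectory(H, L, psi0, dt, n_steps, rng):
    """Propagate a single Monte Carlo wave-function trajectory.

    H     : Hermitian Hamiltonian (square numpy array)
    L     : single Lindblad (jump) operator
    psi0  : normalized initial state vector
    Returns the list of normalized states after each time step.
    """
    H_eff = H - 0.5j * (L.conj().T @ L)        # effective non-Hermitian Hamiltonian
    U = np.eye(len(psi0)) - 1j * dt * H_eff    # first-order short-time propagator
    psi, states = psi0.copy(), [psi0.copy()]
    for _ in range(n_steps):
        phi = U @ psi                          # tentative no-jump evolution
        p_jump = 1.0 - np.vdot(phi, phi).real  # norm loss = jump probability
        if rng.random() < p_jump:
            psi = L @ psi                      # quantum jump: apply L ...
            psi = psi / np.linalg.norm(psi)    # ... and renormalize
        else:
            psi = phi / np.linalg.norm(phi)    # no jump: renormalize damped state
        states.append(psi.copy())
    return states

# Toy example: resonantly driven two-level "core" decaying at rate kappa,
# in the spirit of the Lindblad operator L = sqrt(kappa)|1><2| used above.
Omega, kappa, dt, n_steps = 1.0, 0.2, 0.01, 2000
H = 0.5 * Omega * np.array([[0, 1], [1, 0]], dtype=complex)
L = np.sqrt(kappa) * np.array([[0, 1], [0, 0]], dtype=complex)
psi0 = np.array([1, 0], dtype=complex)

rng = np.random.default_rng(1)
trajs = [quantum_jump_trajectory(H, L, psi0, dt, n_steps, rng) for _ in range(200)]
# Averaging the projectors |psi><psi| over many such trajectories approximates
# the density operator rho(t); here only the excited-state population is averaged.
excited = np.mean([[abs(s[1])**2 for s in tr] for tr in trajs], axis=0)
print(excited[::500])
\end{verbatim}
For small time steps the ensemble average over such trajectories converges to the solution of the optical Bloch equation (\ref{Bloch}); in the Rydberg problem the semiclassical path representations discussed above take the place of the numerical no-jump propagation used in this toy example.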
As an example, let us consider a coherent process which has received considerable attention recently, namely laser-induced stabilization against autoionization (Hanson and Lambropoulos 1995). This effect is based on a synchronization between the dynamics of the ionic core, which performs Rabi oscillations, and the dynamics of a laser-prepared electronic wave packet. This effect may be understood as follows: At the time of the preparation of the electronic Rydberg wave packet by the short laser pulse, the core is in its ground state. If the mean Kepler period $T_{orb}=2\pi(-2\overline{\epsilon})^{-3/2}$ ($\overline{\epsilon}$ is the mean excited energy of the Rydberg electron) of this wave packet is chosen equal to a multiple of the Rabi period $T_{Rabi}=2\pi/\Omega$ of the core, the Rydberg electron will encounter the core in the ground state at each of its subsequent returns to the nucleus. As autoionization of a Rydberg electron can take place only inside the core region (Seaton 1983, Fano and Rau 1986, Aymar et al. 1996), this implies that the effective autoionization rate of the electronic wave packet will become much smaller than the autoionization rate of the mean excited Rydberg state $\Gamma_{\overline{n}}$ in the absence of the laser field.
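For orientation, the synchronization condition fixes the required Rabi frequency once the mean excited state is chosen. For an illustrative value of $\overline{n} = 50$, i.e. $\overline{\epsilon} = -1/(2\overline{n}^2)$, one finds $T_{orb} = 2\pi\overline{n}^{\,3} \approx 7.9\times 10^{5}$ atomic units of time, corresponding to roughly $19\ {\rm ps}$, so that the simplest choice $T_{Rabi} = T_{orb}$ requires a Rabi frequency of $\Omega = 2\pi/T_{orb} \approx 8\times 10^{-6}$ atomic units.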
In addition, it has been demonstrated (Hanson and Lambropoulos 1995) that this suppression of autoionization is also accompanied by a reduction of dispersion of the electronic wave packet. This suppression of dispersion is brought about by the Rabi-oscillating core which acts like a quantum-mechanical shutter and effectively cuts off the tails of the wave packet which arrive at the nucleus out of phase with small probability. As this stabilization against autoionization is based on the coherent interplay between electron correlations and laser-induced Rabi oscillations it is expected to be particularly sensitive against the destruction of quantum coherence due to spontaneous emission of photons by the ionic core.
In the presence of radiative decay of the ionic core the physical picture is changed significantly. In the simplest case of synchronization, i.e.
for $T_{orb}=T_{Rabi}$, the first photon will be emitted spontaneously by the ionic core most probably at a time $(M+1/2)T_{Rabi}$ (with $M$ denoting any integer), because then the core is in its excited state with high probability. Due to the synchronization, at these times the electronic Rydberg wave packet is close to the outer turning point of its Kepler orbit. The spontaneous emission of a photon reduces the excited core to its ground state, from which the cw-laser field drives it back towards the excited state within the next half Rabi period. Therefore, at the subsequent return of the electronic wave packet to the core at time $(M+1)T_{orb}$ the ionic core will be in its excited state, so that the Rydberg electron will autoionize on a time scale of the order of $1/\Gamma_{\overline{n}}$. Thus, the laser-induced stabilization against autoionization will be destroyed. Typically, $\Gamma_{\overline{n}} \gg \kappa$, so that the Rydberg electron will autoionize with high probability long before the core can emit a second photon spontaneously. Consequently, it is expected that the influence of the radiative damping on this coherent stabilization phenomenon can be described approximately by taking into account only the zero- and one-photon contributions of the density operator $\rho(t)$.
The influence of radiative damping described above manifests itself clearly in the time-dependent autoionization rate $\gamma(t)$ into channel three, for example, which results from the dynamics of the electronic Rydberg wave packet. An experimental technique for measuring $\gamma(t)$ has been developed recently (Lankhuijzen and Noordam 1996). This time-dependent ionization rate $\gamma(t)$ can be decomposed into $N$-photon contributions with the help of semiclassical path representations, i.e. \begin{equation} \gamma(t) = \sum_{N=0}^{\infty} \int_0^{t} dt_N \cdots \int_0^{t_2} dt_1 \gamma^{(N)}(t). \end{equation} It is expected that the zero- and one-photon contributions (Zobay and Alber 1996) \begin{eqnarray} \gamma^{(0)}(t)&=&
\frac{1}{2\pi} (1 - e^{-4\pi{\rm Im}\mu_2}) \mid \int_{-\infty + i0}^{\infty + i0} d\epsilon_1 e^{-i\epsilon_1 t} (0,1,0) {\bf O} \sum_{M_1=0}^{\infty} (e^{i2\pi\tilde{\nu}_1}\tilde{\bf \chi})^{M_1}\times \nonumber\\ && e^{i2\pi\tilde{\nu}_1} \tilde{\bf{\cal D}}_{g {\bf e}_a}^{(-)} \tilde{\cal E}_a(\epsilon_1 - \epsilon)\mid^2, \nonumber\\ \gamma^{(1)}(t)&=& (\frac{1}{2\pi})^3 (1 - e^{-4\pi{\rm Im}\mu_2}) \mid \int_{-\infty + i0}^{\infty + i0} d\epsilon_1 d\epsilon_2 e^{-i\epsilon_2 (t - t_1)}e^{-i\epsilon_1 t_1} (0,1,0) {\bf O}\times\nonumber\\ && \sum_{M_2=0}^{\infty} (e^{i2\pi\tilde{\nu}_2}\tilde{\bf \chi})^{M_2} {\tilde{\bf S}}^{(M_2,M_1)}_{2,1} \sum_{M_1=0}^{\infty} (\tilde{\bf \chi}e^{i2\pi\tilde{\nu}_1})^{M_1} \tilde{\bf{\cal D}}_{g {\bf e}_a}^{(-)} \tilde{\cal E}_a(\epsilon_1 - \epsilon)\mid^2 \label{gamma} \end{eqnarray} are dominant. In Eqs.(\ref{gamma}) the laser-induced excitation by the short laser pulse is characterized by the Fourier transform of the pulse envelope \begin{equation} \tilde{\cal E}_a(\Delta \epsilon) = \int_{-\infty}^{\infty} dt {\cal E}_a(t) e^{i\Delta \epsilon (t - t_a)} \end{equation} and by the $(3\times 1)$-column vector $\tilde{\bf{\cal D}}_{g {\bf e}_a}^{(-)}$ whose components are the energy normalized photoionization dipole matrix elements (Seaton 1983) into channels one, two and three. The dynamics of the Rydberg electron under the influence of the Rabi oscillations of the ionic core are described by the $(3\times 3)$ scattering matrix $\tilde{\bf \chi}$ and by the $(3\times 3)$ diagonal matrix $e^{i2\pi\tilde{\nu}}$ with matrix elements $(e^{i2\pi\tilde{\nu}})_{jj} = e^{2i\pi [2(\tilde{\epsilon}_ {cj} - \epsilon)]^{-1/2}} \Theta(\tilde{\epsilon}_ {cj} - \epsilon)$ ($j=1,2,3$). All matrices and column vectors with a tilde refer to the basis of photon-dressed core states
$|\tilde{\Phi}_j\rangle$ ($j=1,2,3$) (Robicheaux 1993, Zobay and Alber 1995). These dressed channel states are related to the corresponding bare states
$|\Phi_j\rangle$ by the orthogonal transformation ${\bf O}$ which diagonalizes the laser-induced core coupling, i.e. \begin{eqnarray}
{\bf O}^{T} [ \epsilon_c - i\kappa/2|\Phi_2\rangle
\langle \Phi_2| -
\frac{1}{2}\Omega (|\Phi_2\rangle \langle \Phi_1| +
|\Phi_1\rangle \langle \Phi_2|)] {\bf O} = \tilde{\epsilon}_c. \end{eqnarray} Thereby the diagonal matrix $\tilde{\epsilon}_c$ ($\epsilon_c$) contains the energies of the dressed (bare) core states. Thus the relations $\tilde{\bf{\cal D}}_{g {\bf e}_a}^{(-)} = {\bf O}^T \bf{\cal D}_{g {\bf e}_a}^{(-)}$ and $\tilde{\bf \chi} = {\bf O}^T {\bf \chi} {\bf O}$ hold with the bare photoionization dipole matrix elements $\bf{\cal D}_{g {\bf e}_a}^{(-)}$ and with the bare scattering matrix \begin{equation} \mbox{\boldmath $\chi$} = \left( \begin{array}{ccc} e^{2\pi i \mu_1} & 0 & 0 \\ 0 & e^{2\pi i \mu_2} & \chi_{23} \\ 0 & \chi_{32} & \chi_{33} \end{array} \right). \label{scatt} \end{equation} The quantum defects of the bare channels one and two are denoted $\mu_j$. These channels have opposite parity and cannot be coupled by electron correlation effects. The matrix elements $\chi_{23}$ and $\chi_{32}$ characterize the configuration interaction between channels 2 and 3 which results in autoionization of channel 2. The autoionization rate of a Rydberg state of channel 2 with principal quantum number $n$ is related to the imaginary part of the quantum defect $\mu_2$ by $\Gamma_n = 2{\rm Im}(\mu_2)/[n - {\rm Re}(\mu_2)]^3$.
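For orientation, restricted to the two laser-coupled channels the matrix in the brackets reads
\[
\left( \begin{array}{cc} \epsilon_{c1} & -\Omega/2 \\ -\Omega/2 & \epsilon_{c2} - i\kappa/2 \end{array} \right),
\]
so that for resonant excitation of the core transition (degenerate bare thresholds $\epsilon_{c1} = \epsilon_{c2}$) and negligible radiative damping ($\kappa \to 0$) the two dressed thresholds are split symmetrically by $\pm \Omega/2$, i.e. they exhibit the Autler--Townes splitting induced by the core laser.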
Eqs.(\ref{gamma}) are examples of semiclassical path representations for the zero- and one-photon ionization rates $\gamma^{(0)}(t)$ and $\gamma^{(1)}(t)$. Their physical interpretation is straightforward: After the initial excitation by the short laser pulse those fractions of the electronic Rydberg wave packet which are excited into closed photon-dressed core channels return to the core region periodically. The integers $M_1$ and $M_2$ count the numbers of these returns. Between two successive returns the Rydberg electron acquires a phase of magnitude $(2\pi\tilde{\nu})_{jj}$ while moving in the photon-dressed core channel $j$. This phase equals the classical action of motion along a purely radial Kepler orbit with zero angular momentum and energy $\epsilon - \tilde{\epsilon}_{cj} < 0$. Entering the core region the Rydberg electron is scattered into other photon-dressed core channels by laser-modified electron correlation effects which are described by the scattering matrix $\tilde{\bf \chi}$. The ionic core can emit a photon spontaneously at any time during the motion of the Rydberg electron. Quantitatively this photon emission process is described by the quantity \begin{equation} {\tilde{\bf S}}^{(M_2,M_1)}_{2,1} = \int_{0}^{T_{M_1, M_2}} d\tau e^{2i\pi\tilde{\nu}_2(1 - \tau/T_{M_1,M_2})} (e^{-i\pi/2} \tilde{\bf L}) e^{2i\pi\tilde{\nu}_1\tau/T_{M_1,M_2}} \label{S} \end{equation} in Eqs.(\ref{gamma}) with $T_{M_1,M_2} = t/(M_1 + M_2 + 1)$. According to Eq.(\ref{S}) this spontaneous photon emission by the ionic core can take place at any time $\tau$ between two successive returns of the Rydberg electron to the core region. At time $\tau$ the Rydberg electron has acquired a phase of magnitude $(2\pi\tilde{\nu})_{jj}\tau/T_{M_1,M_2}$ in channel $j$. The disruption of the phase of the Rydberg electron by this spontaneous emission process is described by the action of the Lindblad operator $\tilde{\bf L} = {\bf O}^T {\bf L} {\bf O}$. It also leads to a phase change of magnitude $(\pi/2)$. After the completion of the photon emission process the Rydberg electron acquires an additional phase of magnitude $(2\pi\tilde{\nu})_{jj}(1 - \tau/T_{M_1,M_2})$ in the photon-dressed core channel $j$ until it reaches the core region again.
A representative time evolution of the autoionization rate $\gamma(t)$ is shown in Fig. \ref{10}. The full curve in Fig. \ref{10}a has been obtained by numerical solution of the optical Bloch equation (\ref{Bloch}) with the help of a conventional basis expansion in atomic energy eigenstates. The corresponding zero- and one-photon contributions are also presented in Figs. \ref{10}b and \ref{10}c. In Fig. \ref{10} the sum of the zero- and one-photon contributions is not plotted as it is indistinguishable from the numerical result (full curve in Fig.\ref{10}a). The chosen parameters represent typical values realizable in alkaline earth experiments. The comparison of $\gamma(t)$ (full curve of Fig.\ref{10}a)
with the corresponding result in the absence of radiative damping (dotted curve in Fig.\ref{10}a) demonstrates that the influence of radiative damping is already significant at interaction times of the order of $T_{orb}$. With the help of the zero- and one-photon contributions of Eq.(\ref{gamma}) the dissipative influence of radiative damping can be analyzed in detail. As apparent from Fig. \ref{10}b, the zero-photon rate vanishes at integer multiples of the mean Kepler period $T_{orb}$ because at these times the core is in its ground state. The maxima of Fig. \ref{10}b at times $(M + 1/2)T_{orb}$ originate from fractions of the electronic wave packet which are close to the core at times when the core is in its excited state. Also visible are typical revival effects at times of the order of $25T_{orb}$. The one-photon rate of Fig. \ref{10}c exhibits maxima and minima at times $MT_{orb}$ and $(M + 1/2)T_{orb}$. These maxima indicate that the photon is emitted by the ionic core most probably whenever the Rydberg electron is close to the outer turning point of its classical Kepler orbit. Thus, the core will be in its excited state when the Rydberg electron returns to the nucleus so that autoionization will take place with a high probability.
\subsection{Electronic wave packets in fluctuating laser fields}
The main aim of this section is to discuss characteristic effects which govern the dynamics of a Rydberg electron in an intense and fluctuating laser field. It is demonstrated that for moderate laser intensities (compare with Sec. 1 Eq.(\ref{alpha})) a variety of novel, non-perturbative effects appear which influence the long time behavior of Rydberg electrons significantly. A generic consequence of the interplay between the peculiar threshold phenomena of Rydberg systems and the destruction of quantum coherence due to laser fluctuations is stochastic ionization (Alber and Eggers 1997). It is demonstrated that this process also implies an upper time limit on the applicability of two-level approximations even in cases in which all characteristic frequencies, i.e. Rabi frequencies and laser bandwidths, are small in comparison with the Kepler frequency of a resonantly excited Rydberg electron.
Nowadays laser fluctuations can be controlled to such a degree that it is possible to realize various theoretical models of laser radiation in the laboratory (Vemuri et al. 1991). One of the most elementary theoretical models of laser radiation is the phase diffusion model (PDM) (Haken 1970). It describes approximately the electric field produced by an ideal single mode laser which is operated well above the laser threshold. Thereby the electric field of a laser is represented by a classical, stochastic process with well-stabilized amplitude and a fluctuating phase, i.e. \begin{equation} {\bf E}(t) = {\bf E}_0 e^{i\Phi(t)}e^{-i\omega t} + c.c.~. \end{equation} The fluctuations of the phase $\Phi (t)$ are modeled by a real-valued Wiener process (Kl\"oden and Platen 1992), i.e. \begin{eqnarray} &&M\, d\Phi (t) = 0, \qquad [d\Phi(t)]^2 = 2b\, dt. \label{PDM} \end{eqnarray} Thereby $M$ indicates the mean over the statistical ensemble. The PDM implies a Lorentzian spectrum of the laser radiation with bandwidth $b$.
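Both properties are easily checked numerically. The following short Python sketch (an illustration of ours with arbitrarily chosen parameter values, not part of the original analysis) samples the Wiener phase of Eq.(\ref{PDM}) and verifies that the ensemble-averaged field correlation function decays as $e^{-bt}$, which corresponds to a Lorentzian line of bandwidth $b$:
\begin{verbatim}
import numpy as np

# Illustrative sketch only: phase diffusion model with arbitrary parameters.
b, dt, nsteps, ntraj = 0.5, 0.01, 400, 2000
rng = np.random.default_rng(1)
t = dt * np.arange(nsteps)
corr = np.zeros(nsteps)
for _ in range(ntraj):
    dphi = np.sqrt(2 * b * dt) * rng.standard_normal(nsteps)  # [dPhi]^2 = 2 b dt
    phi = np.concatenate(([0.0], np.cumsum(dphi)[:-1]))       # Wiener phase, Phi(0) = 0
    corr += np.cos(phi)                                       # Re exp(i[Phi(t) - Phi(0)])
corr /= ntraj
# ensemble average versus exp(-b t); both should be close to exp(-1) at b*t = 1
print(corr[200], np.exp(-b * t[200]))
\end{verbatim}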
In order to investigate the influence of laser fluctuations on the optical excitation of Rydberg states close to an ionization threshold let us consider the simplest possible case, namely one-photon excitation from a tightly bound initial state $|g\rangle$ with energy $\epsilon_g$. In the dipole and rotating wave approximation the Hamiltonian which describes this excitation process is given by \begin{eqnarray} H(\Phi(t)) &=&
\epsilon_g |g\rangle \langle g| +
\sum_n \epsilon_n |n \rangle \langle n| -\nonumber\\ &&
\sum_n (|n \rangle \langle g| \langle n|{\bf d}|g\rangle \cdot {\bf E}_0 e^{i\Phi(t)}e^{-i\omega t} + {\rm h.c.}). \label{Hamilton} \end{eqnarray} In Eq.(\ref{Hamilton}) the index $n$ refers to Rydberg and continuum states. The energies of the excited Rydberg states are denoted $\epsilon_n$ and ${\bf d}$ is the atomic dipole operator. Let us assume for the sake of simplicity that the excited Rydberg and continuum states can be described with the help of quantum defect theory in a one channel approximation (Seaton 1983). Thus they are characterized by an approximately energy independent quantum defect $\mu=\alpha + i \beta$. As has been explained in Sec. 1 (Eq.(\ref{imag})) the imaginary part $\beta$ describes photon absorption from the highly excited Rydberg states to continuum states well above threshold.
For the description of non-perturbative aspects of this laser excitation process one has to solve the time dependent Schr\"odinger equation with the stochastic Hamiltonian (\ref{Hamilton}) (interpreted as a stochastic differential equation of the Ito type (Kl\"oden and Platen 1992)) together with the stochastic differential equation for the phase (\ref{PDM}). It is the simultaneous presence of the Coulomb threshold with its infinitely many bound states and the continuum on the one hand and the laser fluctuations on the other hand which makes this solution a highly nontrivial task. Nevertheless, for the case of the PDM the resulting mathematical and numerical problems can be circumvented successfully (Alber and Eggers 1997). Thus even analytical results can be derived in the limit of long interaction times which is dominated by stochastic ionization of the Rydberg electron. Thus, let us start from the equation of motion for the mean values $\rho_{n n'}(t)= M\langle n\mid \psi(t)\rangle \langle \psi(t)\mid n'\rangle$, $\rho_{n g}(t)=[\rho_{g n}(t)]^*= Me^{-i\Phi(t)} \langle n\mid \psi(t)\rangle \langle \psi(t)\mid g\rangle$ and $\rho_{g g}(t)= M\mid \langle g\mid \psi(t)\rangle \mid^2$ which can be combined to form a density operator $\rho(t)$ (Agarwal 1976). From Eqs. (\ref{PDM}) and (\ref{Hamilton}) it can be shown that this density operator fulfills the master equation \begin{equation} \frac{d}{dt}\rho(t) = -i[H_{mod}, \rho(t)] + \frac{1}{2} \{[L,\rho(t) L^{\dagger}] + [L\rho(t), L^{\dagger}]\}. \label{master1} \end{equation} Thereby the modified Hamiltonian $H_{mod}\equiv H(\Phi(t)\equiv 0)$ describes laser induced excitation of Rydberg states close to threshold in the absence of phase fluctuations. The destruction of quantum coherence which is brought about by the laser fluctuations is characterized by the Lindblad operator \begin{equation}
L = \sqrt{2b}|g\rangle \langle g|. \label{Lind1} \end{equation} On the basis of this master equation Fourier representations can be developed for the density matrix elements whose kernels can be determined explicitly with the help of quantum defect theory. Thus all complications arising from the Coulomb threshold are taken into account properly. These Fourier representations are useful for numerical calculations of averaged transition probabilities which are highly accurate even in the limit of long interaction times. Furthermore, these representations are convenient starting points for the derivation of analytical results. Thus, the averaged initial state probability $\rho_{gg}(t)$, for example, is given by (Alber and Eggers 1997) \begin{eqnarray} \rho_{gg}(t) &=& \sum_{N=0}^{\infty} \frac{1}{2\pi} \int_{-\infty + i0}^{\infty + i0} dz e^{-izt} A_{gg}(z)[2bA_{gg}(z)]^{N} = \nonumber\\ && \frac{1}{2\pi} \int_{-\infty + i0}^{\infty + i0} dz e^{-izt} A_{gg}(z)[1 - 2bA_{gg}(z)]^{-1} \label{Agg} \end{eqnarray} with \begin{eqnarray} A_{gg}(z) &=& U(z) + U^*(-z), \label{kernel}\\ U(z)&=&\{ -C_1(z) + C_2(z) + \nonumber\\ &&i \sum_{{\rm Re}\tilde{\epsilon}_n < 0} [1 - \frac{d}{dz}\Sigma^*(z_1 - z)]^{-1} [z_1 - \overline{\epsilon} + ib - \Sigma (z_1)]^{-1} \mid_{z_1=z+\tilde{\epsilon}^*_n} \}\Theta(z)\nonumber \end{eqnarray} and with \begin{eqnarray} C_1(z) &=& \frac{1}{2\pi(z + 2ib)}{\rm ln} \frac{z - \overline{\epsilon} + i(b + \gamma/2)} {-\overline{\epsilon} + i(\gamma/2 - b)}, \nonumber\\ C_2(z) &=& \frac{1}{2\pi[z + i(\gamma + 2b)]}{\rm ln} \frac{z - \overline{\epsilon} + i(b + \gamma/2)} {-\overline{\epsilon} + i(\gamma/2 + b)}. \end{eqnarray} In the spirit of the discussion of Sec. 2.1. (compare with Eq.(\ref{rhon})) $\rho_{gg}(t)$ is represented as a sum of contributions of all possible quantum jumps $N$ which can be induced by the Lindblad operator of Eq.(\ref{Lind1}). According to Eq.(\ref{Agg}) these contributions give rise to a geometric series which can be summed easily. The sum appearing in Eq.(\ref{kernel}) extends over all dressed states $\tilde{\epsilon}_n$ of the effective Hamiltonian $H_{\rm eff} = H_{mod} - i L^{\dagger}L/2$. The mean excited energy is given by $\overline{\epsilon} = \epsilon_g + \omega + \delta \omega $ with $\delta\omega$ denoting the relative quadratic Stark shift between the initial state $\mid g \rangle$ and the ponderomotive shift of the excited Rydberg states (compare with the general discussion in Sec. 1). Besides the threshold contributions $C_1(z)$ and $C_2(z)$ the characteristic kernel $A_{gg}(z)$ is determined by the (resonant part of the
) self energy of the initial state $|g\rangle$, i.e. \begin{eqnarray}
\Sigma(z)&=& \sum_n \frac{\mid \langle n | {\bf d}\cdot
{\bf E}_0|g\rangle \mid^2}{z - \epsilon_n} = -i\frac{\gamma}{2} - i\gamma \sum_{M=1}^{\infty} (e^{i2\pi(-2z)^{-1/2}} \chi)^{M}. \label{self} \end{eqnarray} This self energy is characterized by the laser-induced depletion rate \begin{eqnarray}
\gamma&=&2\pi \mid \langle \epsilon = 0 | {\bf d}\cdot {\bf E}_0
|g\rangle \mid ^2 \end{eqnarray}
of the initial state $|g\rangle$ and by the scattering matrix element \begin{eqnarray} \chi&=& e^{i2\pi\mu} \end{eqnarray} which describes all effects arising from scattering of the Rydberg electron by the ionic core and from photon absorption (compare with Eq.(\ref{complex})). The sum over $M$ in Eq.(\ref{self}) originates from the multiple returns of the Rydberg electron to the core region where the dominant contribution to the self energy comes from. With each of these returns the Rydberg electron of energy $z < 0$ accumulates a phase of magnitude $2\pi(-2z)^{-1/2}$ and with each traversal of the core region it accumulates a (complex) phase of magnitude $2\pi\mu$ due to scattering by the core and due to photon absorption. The laser-induced depletion rate $\gamma$, the imaginary part of the quantum defect $\beta$ and the second order Stark shift $\delta\omega$ describe the influence of the laser field on the Rydberg electron. As these quantities depend only on the laser intensity, they are not affected by the phase fluctuations of the laser field.
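Since $|\chi\, e^{i2\pi(-2z)^{-1/2}}| = e^{-2\pi\beta} < 1$ for real $z < 0$ and $\beta > 0$, the sum over returns in Eq.(\ref{self}) is a convergent geometric series and can be performed explicitly,
\[
\Sigma(z) = -\frac{i\gamma}{2} - i\gamma\, \frac{\chi\, e^{i2\pi(-2z)^{-1/2}}}{1 - \chi\, e^{i2\pi(-2z)^{-1/2}}}.
\]
In the limit $\beta \to 0$ the poles of this expression, determined by $\chi\, e^{i2\pi(-2z)^{-1/2}} = 1$, are located at the Rydberg energies $z = \epsilon_n = -1/[2(n - \mu)^2]$ with integer $n$; for $\beta > 0$ they move into the lower half of the complex energy plane and the corresponding resonances acquire finite widths.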
Master equations of the form of Eq.(\ref{master1}) with a self-adjoint Lindblad operator are of general interest as phenomenological models of continuous quantum measurement processes (Braginsky and Khalili 1992). In this context Eq.(\ref{master1}) would model excitation of Rydberg and continuum states close to an ionization threshold by a classical, deterministic laser field in the presence of continuous measurement of the initial state $\mid g\rangle$. Thereby the inverse bandwidth $1/b$ would determine the mean time between successive measurements.
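In this connection it may be useful to note that the dissipative part of Eq.(\ref{master1}) is simply the standard Lindblad form,
\[
\frac{1}{2} \{[L,\rho(t) L^{\dagger}] + [L\rho(t), L^{\dagger}]\}
= L\rho(t) L^{\dagger} - \frac{1}{2}\left( L^{\dagger}L\rho(t) + \rho(t) L^{\dagger}L \right),
\]
and that for the self-adjoint operator $L = \sqrt{2b}\,|g\rangle \langle g|$ of Eq.(\ref{Lind1}) it leaves all populations unchanged while damping the coherences between $|g\rangle$ and the excited states at the rate $b$, i.e. $\dot{\rho}_{gn}|_{\rm diss} = -b\, \rho_{gn}$ for $n \neq g$. This is precisely the decoherence expected from such a continuous monitoring of the initial state.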
Some qualitative aspects of the time evolution of an excited Rydberg electron under the influence of a fluctuating laser field are apparent from the contour plots of Figs. \ref{two} and \ref{wave} which refer to one-photon excitation of a hydrogen atom by linearly polarized laser light with
$|g\rangle = |2s\rangle$. It is assumed that Rydberg states around $\overline{n}=(-2\overline{\epsilon})^{-1/2} = 80$ are excited. According to the general discussion in Sec. 1 (compare with Eq.(\ref{imag})) the laser-induced transitions from the excited Rydberg states to continuum states well above threshold are described by an imaginary quantum defect with $\beta = 0.00375\gamma$.
In Fig. \ref{two}a
both the bandwidth of the laser field $b$ and the field-induced depletion rate $\gamma$ of state $\mid g\rangle$ are assumed to be small in comparison with the mean level spacing of the excited Rydberg states, i.e. $b,\gamma \ll \overline{n}^{-3}$. Thus, one may be tempted to think that this excitation process can be described well within the framework of a two-level approximation in which only states $|2s \rangle$ and $|80 p\rangle$ are taken into account. However, Fig. \ref{two}a
demonstrates that this expectation is only valid for sufficiently small interaction times. Indeed, the early stages of the excitation process are dominated by Rabi oscillations of the electron between the initial and the resonantly excited state. These Rabi oscillations are damped by the fluctuating laser field. An equilibrium is attained for interaction times $t\geq 1/b$ for which all coherence between the two resonantly coupled states is negligibly small and for which $\rho_{gg}(t) \approx \rho_{\overline{n} \overline{n}}(t) \approx 1/2$. This characteristic, well-known two-level behavior is exemplified in Fig. \ref{two}a by the stationary probability distribution of the excited Rydberg state.
(The probability distribution of state $|g\rangle$
which is localized in a region of a few Bohr radii around the nucleus is not visible on the radial scale of Fig. \ref{two}a). Fig. \ref{two}a indicates that for interaction times which are larger than a critical time $t_1$ this simple picture of the two-level approximation breaks down and the probability distribution of the excited Rydberg electron starts to spread towards larger distances from the core. (here $t_1 \approx 5\times 10^{5}T$ with $T=2\pi\overline{n}^3$ denoting the mean classical orbit time). Simultaneously the probability distribution becomes more and more spatially de-localized with all nodes disappearing. In order to obtain a more detailed understanding of this diffusion-like process the time evolutions of the initial state probability and of the ionization probability are shown in Fig.\ref{two}b . From Fig.\ref{two}b it is apparent that this spatial spreading of the Rydberg electron is connected with a diffusion in energy space towards the ionization threshold. At interaction times $t\geq t_c\approx 7\times 10^{9}T$ the Rydberg electron has reached the ionization threshold and the ionization probability $P_{ion}(t)$ rises significantly from a negligibly small value to a value close to unity. Simultaneously the initial state probability $P_{gg}(t)$ starts to decrease faster. This stochastic diffusion of the Rydberg electron which eventually leads to ionization is a characteristic phenomenon brought about by the fluctuations of the exciting laser field. With the help of the theoretical approach presented above this characteristic stochastic ionization process can be analyzed in detail. Thus it can be shown (Alber and Eggers 1997) that the diffusion of the Rydberg electron towards the ionization threshold starts at time \begin{equation} t_1 = \frac{8}{\pi b\gamma T} \label{t1} \end{equation} and eventually leads to stochastic ionization at interaction times $t\geq t_c$ with \begin{equation} t_c = \frac{4\pi}{\sqrt{27}\gamma b} [\frac{(\overline{\epsilon}^2 + 3(b^2 + \gamma^2/4)/4)^{3/2}} { \overline{\epsilon}^2 + b^2 + \gamma^2/4 }]^{1/2}. \label{tc} \end{equation} The time evolution of $P_{gg}(t)$ is approximately given by \begin{equation} P_{gg}(t) = \frac{2}{\sqrt{\pi}}[2b\gamma T]^{-1/2} t^{-1/2} \label{1} \end{equation} for $t_1 < t < t_c$ and crosses over to the power law \begin{equation} P_{gg}(t) = \frac{(\gamma + 2b)^2}{(2b\gamma\varphi/\pi)^2} [\frac{\gamma b \Gamma^3(5/3)} {27\pi(\overline{\epsilon}^2 + b^2 + \gamma^2/4)}]^{1/3}t^{-5/3} \label{2} \end{equation} for interaction times $t > t_c$. The variable $\varphi$ characterizes the distance of the mean excited energy $\overline{\epsilon}$ from the ionization threshold and is determined by the relation $-\overline{\epsilon} + i(b + \gamma/2) = Re^{i\varphi}$ ($0\leq \varphi <\pi$). At times $t\geq t_c$ the ionization probability rises according to the power law \begin{equation} P_{ion}(t) = 1 - \frac{\pi\Gamma(2/3)(\gamma + 2b)}{6b\gamma \varphi} [\frac{\gamma b} {\pi(\overline{\epsilon}^2 + b^2 + \gamma ^2/4)}]^{1/3} t^{-2/3}. \label{ion} \end{equation} These approximate time evolutions are indicated by the dashed curves in Fig. \ref{two}b. 
The analytical results of Eqs.(\ref{t1}) and (\ref{tc}) explicitly show how the critical times $t_1$ and $t_c$ for the breakdown of the two-level approximation and for stochastic ionization depend on the characteristic parameters of the problem, namely the mean excited energy $\overline{\epsilon}$, the laser bandwidth $b$ and the laser-induced depletion rate of the initial state $\gamma$.
In Fig. \ref{wave}
both the laser bandwidth and the laser-induced depletion rate of the initial state $|g\rangle$ are larger than the mean level spacing $\overline{n}^{-3}$ of the excited Rydberg states. As in this case the initial state is depleted by the laser field in a time which is small in comparison with the mean Kepler period of the excited Rydberg states, i.e. $1/\gamma \ll T=2\pi\overline{n}^{3}$, an electronic Rydberg wave packet is prepared by power broadening (Alber and Zoller 1988). The initial stage of the preparation of this electronic wave packet by power broadening manifests itself in an approximately exponential decay of $P_{gg}(t)$ with rate $\gamma$. The repeated returns of fractions of this wave packet to the core region give rise to recombination maxima of $P_{gg}(t)$ which occur roughly at multiples of the mean Kepler period $T$. In the absence of laser fluctuations the non-perturbative time evolution of such an electronic wave packet under the influence of a laser field is already well understood. In the completely coherent case with each return to the core region a fraction of the electronic wave packet can be scattered resonantly
in the presence of the laser field by stimulated emission and reabsorption of a laser photon accompanied by an electronic transition to the initial state $|g\rangle$ and back again. This emission and reabsorption process of a laser photon causes a time delay of the electronic wave packet of the order of $1/\gamma$ with respect to unscattered fractions of the electronic wave packet. These repeated scattering processes lead to a splitting of the original wave packet into many partially overlapping fractions. In the completely coherent case the interference of these overlapping fractions inside the core region eventually gives rise to a complicated time dependence of $P_{gg}(t)$ (Alber and Zoller 1991).
Characteristic qualitative aspects of the time evolution of an electronic wave packet in the presence of laser fluctuations are apparent from Fig. \ref{wave}a. Clearly, the initial stages of the time evolution are dominated by the preparation of the electronic wave packet and by its repeated returns to the core region. However, at sufficiently long interaction times eventually the spatially localized electronic wave packet starts to spread out uniformly over the whole classically accessible region. Furthermore, this classical region starts to grow monotonically with increasing interaction time. Characteristic quantitative details of this time evolution are apparent from Fig. \ref{wave}b. For sufficiently small interaction times the familiar recombination maxima of the repeated returns of the electronic wave packet to the core region are clearly visible. However, as the coherence time of the laser field is small in comparison with the mean Kepler period, i.e. $1/b \ll T$, interferences between probability amplitudes which are associated with repeated returns to the core region are destroyed. Thus the details of the early stages of the time evolution of this electronic wave packet appear to be much simpler than in the completely coherent case. As a consequence of the diffusion of the electronic wave packet at longer interaction times the recombination maxima of $P_{gg}(t)$ disappear and merge into the power law of Eq.(\ref{1}). At interaction times larger than $t_c$ stochastic ionization of the Rydberg electron becomes significant and the power law decay of $P_{gg}(t)$ crosses over to the decay law of Eq.(\ref{2}). Simultaneously the ionization probability rises to a value close to unity according to the approximate power law of Eq.(\ref{ion}).
In general stochastic ionization originating from laser fluctuations will compete with other coherent ionization mechanisms such as autoionization. As a consequence a number of new interesting phenomena are expected to arise which are not yet explored. In order to obtain first insights into basic aspects of this competition let us generalize our previous model to one-photon excitation of an autoionizing Rydberg series (Eggers and Alber 1998). Thus, it will be assumed that the laser excited autoionizing Rydberg series can be described within the framework of quantum defect theory in a two-channel approximation. In particular, let us concentrate on a case in which the fluctuating laser field excites Rydberg states
close to an ionization threshold of an excited state of the ionic core (channel one) which can autoionize into channel two. For simplicity let us assume that direct excitation of channel two from the initial state $|g \rangle$ is not possible and that the effectively excited energy interval $(\overline{\epsilon} - b, \overline{\epsilon} + b)$ also covers continuum states of channel one. The early stages of this ionization process will be governed by an exponential decay of the initial state
$|g\rangle$ with the laser-induced depletion rate $\gamma$, by autoionization of the excited Rydberg states of channel one into channel two, and by direct laser-induced ionization into the continuum states of channel one. As long as stochastic ionization is negligible, i.e. for interaction times $t$ with $1/\gamma < t < t_c$, this ionization process will reach a metastable regime. Thereby the probability of ionizing into channel one is simply determined by the part of the effectively excited energy interval $(\overline{\epsilon} - b, \overline{\epsilon} + b)$ which is located above the ionization threshold, $\epsilon_1$, of channel one. However, as soon as $t > t_c$ it is expected that the branching ratio between channels one and two is changed. For interaction times $t>t_c$ all Rydberg states whose autoionization lifetimes exceed the stochastic ionization time, i.e. $1/\Gamma_n > t_c$
($\Gamma_n$ is the autoionization rate of Rydberg state $|n,1\rangle$), will no longer autoionize into channel two but will eventually ionize stochastically into channel one. Thus for interaction times $t > t_c$ it is expected that the probability of ionizing into channel one is determined by the part of the effectively excited energy interval $(\overline{\epsilon} - b, \overline{\epsilon} + b)$ which is located above an energy of the order of $\epsilon_1 - 1/t_c$. Thus stochastic ionization is expected to lead to an effective lowering of the ionization threshold $\epsilon_1$ of channel one. This manifestation of the competition between autoionization and stochastic ionization is clearly apparent from Fig. \ref{auto} where the time evolution of $P_{gg}(t)$ is depicted together with the corresponding time evolutions of $P_{ion-ch1}(t)$ and $P_{ion-ch2}(t)$. In the case depicted in Fig. \ref{auto}
the laser-induced depletion rate $\gamma$ is so small that no electronic Rydberg wave packet is prepared by power broadening. However, due to the large laser bandwidth, i.e. $bT \gg 1$, many Rydberg states are involved in the excitation process. This implies that, to a good degree of approximation, state $| g \rangle$ initially decays exponentially with rate $\gamma$.
Financial support by the Deutsche Forschungsgemeinschaft within the Schwerpunktprogramm `Zeitabh\"angige Ph\"anomene in Quantensystemen der Physik und Chemie' is gratefully acknowledged.
\subsubsection*{References} Agarwal, G. S. (1976) Phys. Rev. Lett. {\bf 37}, 1383.\\ Alber, G., and Zoller, P. (1988) Phys. Rev. A {\bf 37}, 377.\\ Alber, G. (1989) Z. Phys. D {\bf 14}, 307.\\ Alber, G., and Zoller, P. (1991) Phys. Rep. {\bf 199}, 231.\\ Alber, G. (1992) Phys. Rev. Lett. {\bf 69}, 3045.\\ Alber, G., and Strunz, W. T. (1994) Phys. Rev. A {\bf 50}, R3577.\\ Alber, G., Strunz, W. T., and Zobay, O. (1994) Mod. Phys. Lett. B {\bf 8}, 1461.\\ Alber, G., and Eggers, B. (1997) Phys. Rev. A {\bf 56}, 820.\\ Aymar, M., Greene, C. H., and Luc-Koenig, E. (1996) Rev. Mod. Phys. {\bf 68}, 1015.\\ Beims, M. W., and Alber, G. (1993) Phys. Rev. A {\bf 48}, 3123.\\ Beims, M. W., and Alber, G. (1996) J. Phys. B {\bf 29}, 4139.\\ Braginsky, V. B., and Khalili, F. Ya. (1992) {\em Quantum Measurement}, Cambridge University Press, Cambridge.\\ Cooke, W. E., Gallagher, T. F., Edelstein, S. A., and Hill, R. M. (1978) Phys. Rev. Lett. {\bf 40}, 178.\\ Dando, P. A., Monteiro, T. S., Delande, D., and Taylor, K. T. (1995) Phys. Rev. Lett. {\bf 74}, 1099.\\ Delos, J. B. (1986) Adv. Chem. Phys. {\bf 65}, 161.\\ Dixit, S. N., Zoller, P., and Lambropoulos, P. (1980) Phys. Rev. A {\bf 21}, 1289.\\ van Druten, N. J., and Muller, H. G. (1995) Phys. Rev. A {\bf 52}, 3047.\\ Eggers, B., and Alber, G. (1998) (in preparation)\\ Fano, U., and Rau, A. R. P. (1986) {\em Atomic Collisions and Spectra}, Academic, New York.\\ Gallagher, T. (1994) {\em Rydberg Atoms}, Cambridge University Press, Cambridge.\\ Garraway, B. M., and Suominen, K. A. (1995) Rep. Prog. Phys. {\bf 58}, 365.\\ Giusti-Suzor, A., and Zoller, P. (1987) Phys. Rev. A {\bf 36}, 5178.\\ Haken, H. (1970) in {\em Handbuch der Physik} (S. Fl\"ugge, ed.) Vol. XXV/2c, Springer, Berlin.\\ Grobe, R., and Eberly, J. H. (1993) Phys. Rev. A {\bf 48}, 623.\\ Hanson, L. G., and Lambropoulos, P. (1995) Phys. Rev. Lett. {\bf 74}, 5009.\\ H\"upper, B., Main, J., and Wunner, G. (1995) Phys. Rev. Lett. {\bf 74}, 2650.\\ Jones, R. R., and Bucksbaum, P. H. (1991) Phys. Rev. Lett. {\bf 67}, 3215.\\ Kl\"oden, P. E., and Platen, E. (1992) {\em Numerical Solution of Stochastic Differential Equations}, Springer, Berlin.\\ Knospe, O., and Schmidt, R. (1996) Phys. Rev. A {\bf 54}, 1154.\\ Koch, M., von Plessen, G., Feldmann, J., and Goebel, E. O. (1996) J. Chem. Phys. {\bf 120}, 367.\\ Landau, L. D., and Lifshitz, E. M. (1975) {\em The Classical Theory of Fields}, p. 181ff, Pergamon, Oxford.\\ Lankhuijzen, G. M., and Noordam, L. D. (1996) Phys. Rev. Lett. {\bf 76}, 1784.\\ Main, J., Wiebusch, G., Welge, K. H., Shaw, J., and Delos, J. B. (1994) Phys. Rev. A {\bf 49}, 847.\\ Moser, I., Mota-Furtado, F., O'Mahony, P. F., and dos Santos, J. P. (1997) Phys. Rev. A {\bf 55}, 3724.\\ Maslov, V. P., and Fedoriuk, M. V. (1981) {\em Semiclassical Approximation in Quantum Mechanics}, Reidel, Boston.\\ Mollow, B. R. (1975) Phys. Rev. A {\bf 12}, 1919.\\ Robicheaux, F. (1993) Phys. Rev. A {\bf 47}, 1391.\\ Seaton, M. J. (1983) Rep. Prog. Phys. {\bf 46}, 167.\\ Sepulveda, M. A., and Grossmann, F. (1996) Adv. Chem. Phys. {\bf XCVI}, 191.\\ Stapelfeldt, H., Papaioannou, D. G., Noordam, L. D., and Gallagher, T. F. (1991) Phys. Rev. Lett. {\bf 67}, 3223.\\ Vemuri, G., Anderson, M. H., Cooper, J., and Smith, S. J. (1991) Phys. Rev. A {\bf 44}, 7635.\\ Zobay, O., and Alber, G. (1995) Phys. Rev. A {\bf 52}, 541.\\ Zobay, O., and Alber, G. (1996) Phys. Rev. A {\bf 54}, 5361.\\ Zobay, O., and Alber, G. (1998) Prog. Phys. {\bf 46}, 3.\\
\begin{figure}
\caption{ Schematic representation of the characteristic spatial regions which determine the dynamics of a Rydberg electron. Some classical trajectories which are relevant for the semiclassical wave function are also indicated.}
\label{semi}
\end{figure}
\begin{figure}
\caption{ Schematic representation of a laser-induced isolated core excitation process in Mg. After initial preparation in a $|3snd\rangle$ Rydberg state a second laser pulse excites the core $3s \to 3p$ transition. The Rydberg states of the excited core autoionize. }
\label{ICE}
\end{figure}
\begin{figure}
\caption{ Three-channel excitation scheme including spontaneous emission process and autoionization.}
\label{8}
\end{figure}
\begin{figure}\caption{ Time evolution of the autoionization rate $\gamma(t)$ obtained from the numerical solution of the optical Bloch equation (a) together with the corresponding zero-photon (b) and one-photon (c) contributions of Eqs.(\ref{gamma}).}\label{10}
\end{figure}
\begin{figure}
\caption{ Excitation of an isolated Rydberg state: Radial contour plot (a) and $P_{gg}(t)$, $P_{ion}(t)$ (b) as a function of the interaction time $t$ in units of the mean Kepler period $T$. The parameters are $\overline{n}= (-2\overline{\epsilon})^{-1/2}=80$ ($T = 78{\rm ps}$), $\gamma T = 0.1$, $b T = 0.01$. Various approximate asymptotic time dependences are also indicated, namely Eq.(\ref{1}) (short dashed) and Eqs.(\ref{2}) and (\ref{ion}) (long dashed). }
\label{two}
\end{figure}
\begin{figure}
\caption{ Excitation of an electronic Rydberg wave packet by laser-induced power broadening: Radial contour plot (a) and $P_{gg}(t)$, $P_{ion}(t)$ (b) as a function of the interaction time $t$ in units of the mean Kepler period $T$. The parameters are $\overline{n}= (-2\overline{\epsilon})^{-1/2}=80$ ($T = 78{\rm ps}$), $\gamma T = 10.0$, $b T = 10.0$. Various approximate asymptotic time dependences are also indicated, namely Eq.(\ref{1}) (short dashed) and Eqs.(\ref{2}) and (\ref{ion}) (long dashed). }
\label{wave}
\end{figure}
\begin{figure}
\caption{ Competition between autoionization and stochastic ionization: Time evolution of $P_{gg}(t)$ and of the ionization probabilities into channels one and two, $P_{ion-ch1}(t)$ and $P_{ion-ch2}(t)$. The parameters are $\overline{n} = \alpha_1 + (-2\overline{\epsilon})^{-1/2} = 80$, $\alpha_1 = 0.1$, $\gamma T = 1.0$, $bT = 300.0$, $\Gamma_n = 2\tau(n - \alpha_1)^{-3}/\pi$ with $\tau = 10^{-5}$ a.u.~.}
\label{auto}
\end{figure}
\end{document} |
\begin{document}
\sloppy
\title{Partial desingularizations arising from non-commutative algebras}
\begin{abstract}
Let $X$ be a singular affine normal variety with coordinate ring $R$ and assume that there is an $R$-order $\Lambda$ admitting a stability structure $\theta$ such that the scheme of $\theta$-semistable representations is smooth. Under these assumptions we construct a partial desingularization of $X$ with classifiable remaining singularities. In dimension $3$ this explains the omnipresence of conifold singularities in partial desingularizations of quotient singularities. In higher dimensions we have a small list of singularity types generalizing the role of the conifold singularity.
\end{abstract}
\section{Introduction}
In this paper we want to give a ringtheoretical explanation for the omnipresence of conifold singularities in partial desingularizations of three-dimensional quotient singularities coming from physics (see for example \cite{Berenstein} and \cite{BG98}) and to generalize this phenomenon to higher dimensions. For a translation between physics language and the mathematical terms used in this paper, we refer to section $4$ of our previous paper \cite{BLBS}.
If $X=\mathbb{C}^3/G$ is a three-dimensional quotient singularity, one considers the McKay quiver setting $(Q,\alpha)$ of the finite group $G$ and the order over $\mathbb{C}[X]$
\[
\Lambda = \frac{\mathbb{C} Q}{R} \]
obtained by dividing out commuting matrix relations, see for example \cite{CrawNotes}. One then chooses a stability structure $\theta$ such that the moduli space $\wis{moduli}^{\theta}_{\alpha}~\Lambda$ of isomorphism classes of $\theta$-semistable $\alpha$-dimensional $\Lambda$-representations is a partial resolution of $X$. In fact, in most examples, one even has that the scheme $\wis{rep}^{\theta-semist}_{\alpha}~\Lambda$ of $\theta$-semistable $\alpha$-dimensional representations is a smooth variety. In this paper we will show that this condition implies that possible remaining singularities in the (partial) desingularization
\[
\wis{moduli}^{\theta}_{\alpha}~\Lambda \rOnto X \]
must be of conifold type. Moreover, we will extend this setting to higher dimensions.
Let $X$ be an affine normal variety with coordinate ring $R = \mathbb{C}[X]$ and function field $K=\mathbb{C}(X)$. Let $\Lambda$ be an $R$-order in a central simple $K$-algebra $\Sigma$ of dimension $n^2$. We say that $\Lambda$ is a {\em smooth $R$-order} if the scheme $\wis{trep}_n~\Lambda$ of trace preserving $n$-dimensional $\Lambda$-representations is a smooth variety. However, this is a very restrictive condition and usually an order $\Lambda$ will have a non-zero {\em defect} (to be defined in \S 2) measuring the failure of smoothness.
Still, if $\Lambda$ has a complete set of orthogonal idempotents $\{ e_1,\hdots,e_k \}$ we have a well-defined dimension vector $\alpha=(a_1,\hdots,a_k) \in \mathbb{N}^k$ (where $a_i = tr_{\Lambda}(e_i)$) such that
\[
\wis{trep}_n~\Lambda \simeq \wis{GL}_n \times^{\wis{GL}(\alpha)} \wis{rep}_{\alpha}~\Lambda \]
Let $\theta \in \mathbb{Z}^k$ be such that $\theta.\alpha = 0$. Then we define an $\alpha$-dimensional $\Lambda$-representation $V \in \wis{rep}_{\alpha}~\Lambda$ to be {\em $\theta$-semistable} if for all $\Lambda$-subrepresentations $W$ of $V$ we have $\theta.\beta \geq 0$ where $\beta$ is the dimension vector of $W$. The set of all $\alpha$-dimensional $\theta$-semistable representations $\wis{rep}_{\alpha}^{\theta-semist}~\Lambda$ is a Zariski open subset of $\wis{rep}_{\alpha}~\Lambda$.
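For instance, if $\alpha = (1,1)$ and $\theta = (-1,1)$, the only possible dimension vectors of proper non-zero subrepresentations are $(1,0)$ and $(0,1)$, with $\theta.(1,0) = -1$ and $\theta.(0,1) = 1$; hence a representation $V \in \wis{rep}_{\alpha}~\Lambda$ is $\theta$-semistable precisely when it admits no subrepresentation of dimension vector $(1,0)$.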
In favorable situations we can choose a stability structure $\theta$ such that $\wis{rep}_{\alpha}^{\theta-semist}~\Lambda$ is a smooth variety. In such a {\em good setting} we can use universal localization in $\wis{alg@n}$ to construct a sheaf $\mathcal{A}$ of smooth orders over the corresponding moduli space $\wis{moduli}_{\alpha}^{\theta}~\Lambda$ (parametrizing isomorphism classes of semistable $\alpha$-dimensional representations), giving a commutative diagram
\[ \xymatrix@R=40pt@C=45pt{ \wis{spec}~{\mathcal A} \ar[d]_c \ar[rd]^{\phi} \\ \wis{moduli}^{\theta}_{\alpha}~\Lambda \ar@{->>}[r]^{\pi} & X = \wis{spec}~R } \] Here, $\wis{spec}~\mathcal{A}$ is a non-commutative variety obtained by gluing affine non-commutative structure sheaves $(\wis{spec}~\Lambda_D,\mathcal{O}^{nc}_{\Lambda_D})$ together. The map $c$ is defined locally by intersecting a prime ideal with its center and $\pi$ is a projective morphism. As $\mathcal{A}$ is a sheaf of smooth orders, one can view the resulting map $\phi$ as a {\em non-commutative desingularization} of $X$.
A good setting $(\Lambda,\alpha,\theta)$ also limits the types of remaining singularities in the partial desingularization $\pi$. If $\wis{dim}~X = 3$, the moduli space can at worst have conifold singularities, and in dimension $4$, $5$ resp. $6$ there is a full classification of the possible remaining singularities, which consists of $4$, $10$ resp. $53$ types, see \cite{RBLBVdW}.
In the final section we study the special case of the conifold singularity in great detail. We give several ringtheoretical interpretations of the {\em conifold algebra} $\Lambda_c$: as a skew-group ring over a polynomial ring and as a Clifford algebra. The latter description allows us to study the prime ideal structure of $\Lambda_c$ and to determine its non-commutative structure sheaf $\mathcal{O}^{nc}_{\Lambda_c}$. We work out its scheme of $2$-dimensional representations, study the corresponding stability structures and describe the resulting desingularizations, which are related by the so-called Atiyah flop.
The results contained in this paper were presented at the conference 'Sch\'emas de Hilbert, alg\`ebre non-commutative et correspondance de McKay' at CIRM, Luminy in October 2003, see \cite{LBnotes} for the lecture notes.
\section{Geometry of orders}
Let $X$ be a commutative normal variety with affine coordinate ring the normal domain $R = \mathbb{C}[X]$ and function field $K = \mathbb{C}(X)$. Let $\Sigma$ be a central simple $K$-algebra of dimension $n^2$ and let $\Lambda$ be an $R$-order in $\Sigma$, that is, $\Lambda$ is an $R$-subalgebra of $\Sigma$ which is finitely generated as an $R$-module and such that $\Lambda.K = \Sigma$. Recall that there is a reduced trace map $tr : \Sigma \rTo K$ satisfying $tr(\Lambda) = R$ (because $R$ is integrally closed). Composing $tr$ with the inclusion $R \subset \Lambda$ we get a linear map $tr_{\Lambda} : \Lambda \rTo \Lambda$. In particular, if $\Lambda = M_n(R)$ the usual trace map induces the linear map $tr_{M_n(R)} : M_n(R) \rTo M_n(R)$ sending a matrix $A \in M_n(R)$ to the diagonal matrix $tr(A) 1_n$.
The {\em scheme of trace preserving representations} $\wis{trep}_n~\Lambda$ is the affine scheme representing the functor $\wis{commalg} \rTo \wis{sets}$ determined by
\[ \wis{trep}_n~\Lambda(\mathbb{C}) = \{ \Lambda \rTo^{\phi} M_n(\mathbb{C})~|~\text{$\phi$ an algebra morphism and~} \phi \circ tr_{\Lambda} = tr_{M_n(\mathbb{C})} \circ \phi~\}. \] It is well known, see for example \cite{ProcesiCH} that conjugation of $M_n(\mathbb{C})$ by $\wis{GL}_n(C)$ makes $\wis{trep}_n~\Lambda$ into an affine $\wis{GL}_n$-variety such that the corresponding algebraic quotient map \[ \wis{trep}_n~\Lambda \rOnto \wis{trep}_n~\Lambda / \wis{GL}_n = \wis{triss}_n~\Lambda \simeq X = \wis{spec}~R \] recovers the central variety $X$. One can also recover the order $\Lambda$ from the scheme of trace preserving representations as the algebra of $\wis{GL}_n$-equivariant maps from $\wis{trep}_n~\Lambda$ to $M_n(\mathbb{C}) = \mathbb{A}^{n^2}_{\mathbb{C}}$ where the latter variety is a $\wis{GL}_n$-variety under the action by conjugation, see again \cite{ProcesiCH}. The notation $\wis{triss}_n~\Lambda$ is motivated by the fact that the algebraic quotient of $\wis{trep}_n~\Lambda$ by $\wis{GL}_n$ classifies isomorphism classes of $n$-dimensional (trace preserving) semi-simple representations of $\Lambda$. That is, if $\mathfrak{m} \triangleleft R$ is a maximal ideal of $R$ with corresponding geometric point $x_{\mathfrak{m}} \in X$, then $\mathfrak{m}$ determines an $n$-dimensional semi-simple $\Lambda$-module \[ M_{\mathfrak{m}} = S_1^{\oplus e_1} \oplus \hdots \oplus S_k^{\oplus e_k} , \] where the $S_i$ are simple $\Lambda$-modules of dimension $d_i$ (and occurring in $M_{\mathfrak{m}}$ with multiplicity $e_i$) such that $\sum d_ie_i = n$. Indeed, the geometric point $x_{\mathfrak{m}}$ determines a trace preserving algebra map \[ \overline{\Lambda}_{\mathfrak{m}} = \Lambda/ \mathfrak{m}\Lambda \rTo M_n(\mathbb{C}) \] and hence an $n$-dimensional $\Lambda$-module $N_{\mathfrak{m}}$. The semi-simple module $M_{\mathfrak{m}}$ is the semi-simplification of $N_{\mathfrak{m}}$ that is the direct sum of its Jordan-H\"older factors. We say that $\mathfrak{m}$ (or the point $x_{\mathfrak{m}} \in X$) is of representation-type $\tau(\mathfrak{m}) = (e_1,d_1;\hdots;e_k,d_k)$.
To the maximal ideal $\mathfrak{m}$ we will associate a combinatorial tool, a quiver-setting $(Q_{\mathfrak{m}},\alpha_{\mathfrak{m}})$ where $Q_{\mathfrak{m}}$ is the quiver on $k$ vertices (with vertex $v_i$ corresponding to the simple component $S_i$) such that the number of oriented arrows from vertex $v_i$ to vertex $v_j$ is given by \[ \#~\{ \xymatrix{\vtx{v_i} \ar[r] & \vtx{v_j}} \} = \wis{dim}_{\mathbb{C}}~Ext^1_{\Lambda}(S_i,S_j) \] and where the dimension vector $\alpha_{\mathfrak{m}} = (e_1,\hdots,e_k)$ is determined by the multiplicities. By this construction we have that the space of $\alpha_{\mathfrak{m}}$-dimensional representations of $Q_{\mathfrak{m}}$, $\wis{rep}_{\alpha_{\mathfrak{m}}} Q_{\mathfrak{m}}$ can be identified with the self-extension space $Ext^1_{\Lambda}(M_{\mathfrak{m}},M_{\mathfrak{m}})$. Observe that the action of the automorphism group $Aut_{\Lambda}(M_{\mathfrak{m}}) = \wis{GL}_{e_1} \times \hdots \times \wis{GL}_{e_k} = \wis{GL}(\alpha_{\mathfrak{m}})$ on the self-extensions $Ext^1_{\Lambda}(M_{\mathfrak{m}},M_{\mathfrak{m}})$ coincides with the action of $\wis{GL}(\alpha_{\mathfrak{m}})$ on $\wis{rep}_{\alpha_{\mathfrak{m}}} Q_{\mathfrak{m}}$ by base-change. By definition of self-extensions every representation $V \in \wis{rep}_{\alpha_{\mathfrak{m}}} Q_{\mathfrak{m}}$ determines an algebra map \[ \Lambda \rTo^{\phi_V} M_n(\mathbb{C}[\epsilon]), \] where $\mathbb{C}[\epsilon] = \mathbb{C}[x]/(x^2)$ is the algebra of dual numbers. The $\wis{GL}(\alpha_{\mathfrak{m}})$-subspace of $\wis{rep}_{\alpha_{\mathfrak{m}}} Q_{\mathfrak{m}}$ consisting of all trace preserving extensions, that is such that $tr_{M_n(\mathbb{C}[\epsilon])} \circ \phi_V = \phi_V \circ tr_{\Lambda}$, can again be identified with the representation space of a {\em marked} quiver setting $\wis{rep}_{\alpha_{\mathfrak{m}}} Q^{\dagger}_{\mathfrak{m}}$ where $Q^{\dagger}_{\mathfrak{m}}$ is the same quiver as $Q_{\mathfrak{m}}$ except that certain loops may be removed and that some other loops may acquire a marking by which we mean that a representation of $Q^{\dagger}_{\mathfrak{m}}$ in a marked loop corresponds to a trace zero matrix, see \cite{LBetale} for more details. The whole point of this construction is that the normal space in $M_{\mathfrak{m}}$ to the closed orbit ${\mathcal O}(M_{\mathfrak{m}})$ in the trace preserving representation space $\wis{trep}_n~\Lambda$ \[ \frac{T_{M_{\mathfrak{m}}}~\wis{trep}_n~\Lambda}{T_{M_{\mathfrak{m}}} {\mathcal O}(M_{\mathfrak{m}})} = N_{M_{\mathfrak{m}}} \simeq \wis{rep}_{\alpha_{\mathfrak{m}}} Q^{\dagger}_{\mathfrak{m}} \] can be identified with the representation space of the marked quiver and that this identification is an isomorphism of $\wis{GL}(\alpha_{\mathfrak{m}}) = Stab(M_{\mathfrak{m}})$-modules. This fact allows us to define a numerical {\em defect} measuring the failure of smoothness of $\wis{trep}_n~\Lambda$ over the point $x_{\mathfrak{m}}$.
\begin{definition} The {\em defect} $\wis{def}_{\mathfrak{m}}~\Lambda$ of the $R$-order $\Lambda$ in the maximal ideal $\mathfrak{m}$ is defined to be \[ \wis{def}_{\mathfrak{m}}~\Lambda = 1-\chi(\alpha_{\mathfrak{m}},\alpha_{\mathfrak{m}}) - \# \{ \text{marked loops in $Q^{\dagger}_{\mathfrak{m}}$} \} - \wis{dim}~X, \] where $\chi : \mathbb{Z}^k \times \mathbb{Z}^k \rTo \mathbb{Z}$ is the Euler form of the quiver obtained from $Q^{\dagger}_{\mathfrak{m}}$ by forgetting the markings, that is, the entry $(i,j)$ of the matrix defining $\chi$ is equal to $\delta_{ij} - \# \{ \xymatrix{\vtx{v_i} \ar[r] & \vtx{v_j}} \}$. \end{definition}
\begin{proposition} With notations as above, $\wis{def}_{\mathfrak{m}}~\Lambda \geq 0$ and the following statements are equivalent \begin{enumerate} \item{$\wis{def}_{\mathfrak{m}}~\Lambda = 0 $.} \item{$\wis{trep}_n~\Lambda$ is a smooth variety in all points lying over $x_{\mathfrak{m}}$.} \end{enumerate} \end{proposition}
\begin{proof} As $\Lambda$ is an $R$-order in an $n^2$-dimensional central simple $K$-algebra $\Sigma$, there is a Zariski open subset $\wis{azu}_n~\Lambda$ of $X$ of points $x_{\mathfrak{m}}$ such that $\overline{\Lambda}_{\mathfrak{m}} \simeq M_n(\mathbb{C})$ (the so called Azumaya locus of $\Lambda$). Over $\wis{azu}_n~\Lambda$ the algebraic quotient map $\wis{trep}_n~\Lambda \rOnto X$ is a principal $\wis{PGL}_n$-fiber whence generically the trace preserving representation scheme has dimension \[ \wis{dim}~\wis{trep}_n~\Lambda = \wis{dim}~X + n^2 - 1. \] On the other hand, the dimension of the tangent space to the representation scheme in the semi-simple representation $M_{\mathfrak{m}}$ is equal to \[ \begin{split} \wis{dim}~T_{M_{\mathfrak{m}}}~\wis{trep}_n~\Lambda &= \wis{dim}~{\mathcal O}(M_{\mathfrak{m}}) + \wis{dim}~\wis{rep}_{\alpha_{\mathfrak{m}}}~Q^{\dagger}_{\mathfrak{m}} \\ &= (n^2 - \sum_i e_i^2) + (\sum_{\xymatrix{\vtx{v_i} \ar[r] & \vtx{v_j}}} e_ie_j - \# \{ \text{marked loops in $Q^{\dagger}_{\mathfrak{m}}$} \} ) \\ &= n^2 - \chi(\alpha_{\mathfrak{m}},\alpha_{\mathfrak{m}}) - \# \{ \text{marked loops in $Q^{\dagger}_{\mathfrak{m}}$} \} \end{split} \] and as $\wis{dim}~T_{M_{\mathfrak{m}}}~\wis{trep}_n~\Lambda \geq \wis{dim}~\wis{trep}_n~\Lambda$ it follows that $\wis{def}_{\mathfrak{m}}~\Lambda \geq 0$. Moreover, it also follows that $\wis{def}_{\mathfrak{m}}~\Lambda = 0$ if and only if $\wis{trep}_n~\Lambda$ is smooth in $M_{\mathfrak{m}}$. But as the singularities of $\wis{trep}_n~\Lambda$ form a $\wis{GL}_n$-closed subvariety and as ${\mathcal O}(M_{\mathfrak{m}})$ is the unique closed orbit lying over $x_{\mathfrak{m}}$ (recall that closed orbits in $\wis{trep}_n~\Lambda$ are precisely the isomorphism classes of semi-simple representations) the equivalence of the two statements follows. \end{proof}
\begin{example} Consider the quantum plane of order two $\Lambda = \mathbb{C}_{-1}[x,y]$ determined by the commutation relation $xy+yx=0$. If $u=x^2$ and $v=y^2$ then the center of $\Lambda$ is the polynomial algebra $R=\mathbb{C}[u,v]$ and $\Lambda$ is a free module of rank $4$ over it. In fact, $\Lambda$ is an $R$-order in the quaternion-algebra \[ \Sigma = \begin{pmatrix} u & & v \\ & \mathbb{C}(u,v) & \end{pmatrix} . \] The reduced trace map is determined by its images on a $\mathbb{C}$-basis \[ tr(x^iy^j) = \begin{cases} 0 & \text{if either $i$ or $j$ is odd} \\ 2x^iy^j & \text{if both $i$ and $j$ are even.} \end{cases} \] In the affine plane $\mathbb{A}^2 = \wis{spec}~R$ the Azumaya locus of $\Lambda$ is $\wis{azu}_2~\Lambda = \mathbb{X}(uv)$, the complement of the two coordinate axes. Let $x_{\mathfrak{m}} = (a^2,b) \in \mathbb{X}(uv)$; then the corresponding $2$-dimensional simple representation $M_{\mathfrak{m}}$ is determined by \[ \Lambda \rOnto^{\phi} M_2(\mathbb{C}) \qquad \text{with} \qquad \phi(x) = \begin{bmatrix} a & 0 \\ 0 & -a \end{bmatrix} \qquad \phi(y) = \begin{bmatrix} 0 & 1 \\ b & 0 \end{bmatrix}. \] One verifies that $Ext^1_{\Lambda}(M_{\mathfrak{m}},M_{\mathfrak{m}}) \simeq \mathbb{C}^2$ and that the algebra map $\Lambda \rTo^{\psi} M_2(\mathbb{C}[\epsilon])$ corresponding to $(\alpha,\beta) \in \mathbb{C}^2$ is given by \[ \begin{cases} \psi(x) &= \begin{bmatrix} a + \epsilon \alpha & 0 \\ 0 & -a - \epsilon \alpha \end{bmatrix} \\ \psi(y) &= \begin{bmatrix} 0 & 1 \\ b+\epsilon \beta & 0 \end{bmatrix} \end{cases} \] and hence is trace preserving whence the local (marked) quiver-setting $(Q^{\dagger}_{\mathfrak{m}},\alpha_{\mathfrak{m}})$ is given by \[ \xymatrix{\vtx{1} \ar@(l,ul) \ar@(ur,r)} \] whence the defect is equal to $\wis{def}_{\mathfrak{m}}~\Lambda = 1 - (-1) - 0 - 2 = 0$, consistent with the fact that over the Azumaya locus (which is a smooth subvariety of the central scheme in this case) the algebraic quotient map is a principal $\wis{PGL}_2$-fibration whence $\wis{trep}_2~\Lambda$ will be smooth over it. For general orders $\Lambda$, if $x_{\mathfrak{m}}$ is a smooth point of the central variety and lies in the Azumaya locus, then $\wis{def}_{\mathfrak{m}}~\Lambda = 0$.
For $x_{\mathfrak{m}} = (a^2,0) \in \mathbb{A}^2$ with $a \not= 0$ (and by a similar argument for points $(0,b)$ with $b \not= 0$), the corresponding semi-simple representation has two non-isomorphic one-dimensional simple components \[ M_{\mathfrak{m}} = S_1 \oplus S_2 \qquad \text{with} \qquad S_i = \begin{cases} x \mapsto (-1)^i a \\ y \mapsto 0. \end{cases} \] One verifies that $Ext^1_{\Lambda}(S_i,S_i) = \mathbb{C}$ and that $Ext^1_{\Lambda}(S_1,S_2) \simeq Ext^1_{\Lambda}(S_2,S_1) \simeq \mathbb{C}$ whence the quiver-setting $(Q_{\mathfrak{m}},\alpha_{\mathfrak{m}})$ is given by \[ \xymatrix{\vtx{1} \ar@(ul,dl)_{\alpha_1} \ar@/^/[r]^{\beta_1} & \vtx{1} \ar@(ur,dr)^{\alpha_2} \ar@/^/[l]^{\beta_2}} \] and the corresponding algebra map $\Lambda \rTo M_2(\mathbb{C}[\epsilon])$ is given by \[ x \mapsto \begin{bmatrix} a + \epsilon \alpha_1 & 0 \\ 0 & -a + \epsilon \alpha_2 \end{bmatrix} \qquad y \mapsto \begin{bmatrix} 0 & \beta_1 \\ \beta_2 & 0 \end{bmatrix} \] which is only trace preserving if $\alpha_2 = - \alpha_1$ so we have one linear relation among the representations and therefore the corresponding (marked) quiver-setting $(Q^{\dagger}_{\mathfrak{m}},\alpha_{\mathfrak{m}})$ is equal to \[ \xymatrix{\vtx{1} \ar@(ul,dl) \ar@/^/[r] & \vtx{1} \ar@/^/[l]} \] and the defect is equal to $\wis{def}_{\mathfrak{m}}~\Lambda = 1 - (-1) - 0 - 2 = 0$ whence also over these ramified points the trace preserving representation variety $\wis{trep}_2~\Lambda$ is smooth.
There remains the point $x_{\mathfrak{m}} = (0,0)$ where the corresponding semi-simple representation is the zero-representation $M_{\mathfrak{m}} = S_0^{\oplus 2}$, with $S_0$ determined by $x \mapsto 0$ and $y \mapsto 0$. One verifies that $Ext^1_{\Lambda}(S_0,S_0) \simeq \mathbb{C}^2$ whence the quiver-setting $(Q_{\mathfrak{m}},\alpha_{\mathfrak{m}})$ is equal to \[ \xymatrix{\vtx{2} \ar@(l,ul)^{\begin{bmatrix} \alpha_1 & \alpha_2 \\ \alpha_3 & \alpha_4 \end{bmatrix}} \ar@(ur,r)^{\begin{bmatrix} \beta_1 & \beta_2 \\ \beta_3 & \beta_4 \end{bmatrix}}} \] with corresponding algebra map $\Lambda \rTo M_2(\mathbb{C}[\epsilon])$ given by \[ x \mapsto \epsilon \begin{bmatrix} \alpha_1 & \alpha_2 \\ \alpha_3 & \alpha_4 \end{bmatrix} \qquad y \mapsto \epsilon \begin{bmatrix} \beta_1 & \beta_2 \\ \beta_3 & \beta_4 \end{bmatrix} \] which is only trace preserving if $\alpha_4 = - \alpha_1$ and $\beta_4 = - \beta_1$. Therefore the marked quiver-setting $(Q^{\dagger}_{\mathfrak{m}},\alpha_{\mathfrak{m}})$ is equal to \[
\xymatrix{\vtx{2} \ar@(l,ul)|{\ast} \ar@(ur,r)|{\ast}} \] and the defect is $\wis{def}_{\mathfrak{m}}~\Lambda = 1 -(-4)-2-2 = 1$ whence there must be a singularity of $\wis{trep}_2~\Lambda$ lying over $x_{\mathfrak{m}}$.
This is indeed the case as the geometric points of $\wis{trep}_2~\Lambda$ are determined by couples of $2 \times 2 $ matrices \[ ( \begin{bmatrix} x_1 & x_2 \\ x_3 & -x_1 \end{bmatrix} , \begin{bmatrix} y_1 & y_2 \\ y_3 & -y_1 \end{bmatrix} ) \quad \text{satisfying} \quad tr( \begin{bmatrix} x_1 & x_2 \\ x_3 & -x_1 \end{bmatrix}.\begin{bmatrix} y_1 & y_2 \\ y_3 & -y_1 \end{bmatrix}) = 0. \] (Indeed, the images of $x$ and $y$ must be trace zero matrices, and for traceless $2 \times 2$ matrices $X$ and $Y$ one has $XY + YX = tr(XY) 1_2$, so the trace condition is precisely the defining relation $xy+yx=0$.) That is, $\wis{trep}_2~\Lambda$ is the hypersurface in $\mathbb{A}^6$ determined by the equation \[ \wis{trep}_2~\Lambda = \mathbb{V}(2x_1y_1 + x_2y_3 + x_3y_2) \rInto \mathbb{A}^6 \] which is an irreducible $5$-dimensional variety having an isolated singularity at $x = (0,0,0,0,0,0)$ (the zero-representation). \end{example}
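The defect computations in this example are purely combinatorial and can be mechanized. The following short Python sketch (a hypothetical helper of ours, not part of the paper) evaluates the formula of Definition 1 from the arrow matrix of the (marked) quiver, the dimension vector, the number of marked loops and $\wis{dim}~X$, and reproduces the values $0$, $0$ and $1$ found above:
\begin{verbatim}
import numpy as np

# Hypothetical helper: arrows[i][j] counts arrows from vertex v_i to v_j
# (markings ignored), alpha is the dimension vector, marked the number of
# marked loops and dimX the dimension of the central variety X.
def defect(arrows, alpha, marked, dimX):
    arrows = np.array(arrows)
    alpha = np.array(alpha)
    chi = np.eye(len(alpha), dtype=int) - arrows      # Euler form matrix
    return 1 - int(alpha @ chi @ alpha) - marked - dimX

# The three local (marked) quiver settings of the quantum plane example:
print(defect([[2]], [1], 0, 2))                 # Azumaya point: 0
print(defect([[1, 1], [1, 0]], [1, 1], 0, 2))   # ramified point: 0
print(defect([[2]], [2], 2, 2))                 # the origin: 1
\end{verbatim}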
\begin{definition} The {\em smooth locus} of an $R$-order $\Lambda$ is defined to be the subset of $X = \wis{spec}~R$ \[
\wis{smooth}_n~\Lambda = \{ x_{\mathfrak{m}} \in X~|~\wis{def}_{\mathfrak{m}}~\Lambda = 0 \}. \] We say that the order $\Lambda$ is {\em smooth} if $\wis{smooth}_n~\Lambda = X$, or equivalently, that $\wis{trep}_n~\Lambda$ is a smooth variety. \end{definition}
If $X^{sm}$ denotes the smooth locus of $X = \wis{spec}~R$ then we have already seen that for any $R$-order $\Lambda$
\[
X^{sm} \cap \wis{azu}_n~\Lambda \rInto \wis{smooth}_n~\Lambda
\]
as the algebraic quotient map $\wis{trep}_n~\Lambda \rOnto X$ is a principal $\wis{PGL}_n$-fibration over the Azumaya locus. In fact, for many interesting classes of orders the three loci coincide, that is,
\[
X^{sm} = \wis{azu}_n~\Lambda = \wis{smooth}_n~\Lambda.
\]
This is the case for quantum groups at roots of unity (see \cite{LBquantum}) and for orders associated to (deformed) preprojective algebras (see \cite{LBpreproj}). Later on we will prove a similar result for orders associated to quotient singularities.
If $x_{\mathfrak{m}} \in \wis{smooth}_n~\Lambda$ we know from \cite{LBetale} that the marked quiver setting $(Q^{\dagger}_{\mathfrak{m}},\alpha_{\mathfrak{m}})$ contains enough information to describe the \'etale local structure of $X$ near $x_{\mathfrak{m}}$ (that is, the structure of the $\mathfrak{m}$-adic completion $\hat{R}_{\mathfrak{m}}$) as well as the \'etale local structure of $\Lambda$ near $\mathfrak{m}$ (that is, the $\mathfrak{m}$-adic completion $\hat{\Lambda}_{\mathfrak{m}}$). We recall the result and refer to \cite{LBetale} for proof and more details.
\begin{proposition} Let $x_{\mathfrak{m}} \in \wis{smooth}_n~\Lambda$ with associated marked quiver-setting $(Q^{\dagger}_{\mathfrak{m}},\alpha_{\mathfrak{m}})$ with $\alpha_{\mathfrak{m}} = (a_1,\hdots,a_k)$. Then, \begin{enumerate} \item{The $\mathfrak{m}$-adic completion of the center $\hat{R}_{\mathfrak{m}}$ is isomorphic to the completion of the algebra generated by traces along oriented cycles in $(Q^{\dagger}_{\mathfrak{m}},\alpha_{\mathfrak{m}})$ at the maximal ideal generated by these traces.} \item{The $\mathfrak{m}$-adic completion of the order $\Lambda$ is of the form \[ \hat{\Lambda}_{\mathfrak{m}} \simeq \begin{bmatrix} M_{11} & \hdots & M_{1k} \\ \vdots & & \vdots \\ M_{k1} & \hdots & M_{kk} \end{bmatrix} \] where $M_{ij}$ is a block of size $a_i \times a_j$ with all entries equal to the $\hat{R}_{\mathfrak{m}}$-module generated by all paths in $(Q^{\dagger}_{\mathfrak{m}},\alpha_{\mathfrak{m}})$ starting at vertex $v_i$ and ending in vertex $v_j$.} \end{enumerate} \end{proposition}
In particular, if $x_{\mathfrak{m}} \in \wis{smooth}_n~\Lambda$ then the finite dimensional algebra $\overline{\Lambda}_{\mathfrak{m}} = \Lambda / \mathfrak{m} \Lambda$ is Morita equivalent to the quotient of the path algebra of the underlying quiver $\mathbb{C} Q^{\dagger}_{\mathfrak{m}}$ by the ideal generated by all cycles in $Q^{\dagger}_{\mathfrak{m}}$.
\begin{definition} Let $\wis{cat}$ be a category of $\mathbb{C}$-algebras. We say that an algebra $A \in \wis{cat}$ is {\em $\wis{cat}$-smooth} if and only if for every $B \in \wis{cat}$, every quotient $B \rOnto^{\pi} B/I$ in $\wis{cat}$ with $I$ a nilpotent ideal and every algebra morphism $A \rTo^{\phi} B/I$ in $\wis{cat}$ the diagram \[ \xymatrix@R=45pt@C=45pt{ A \ar[rd]_{\phi} \ar@{.>}[r]^{\tilde{\phi}} & B \ar@{->>}[d]^{\pi} \\ & B/I } \] can be completed by an algebra morphism $A \rTo^{\tilde{\phi}} B$ in $\wis{cat}$. \end{definition}
Grothendieck proved that an affine commutative $\mathbb{C}$-algebra $R$ is $\wis{commalg}$-smooth if and only if $R$ is regular, that is, if and only if $X = \wis{spec}~R$ is a smooth variety. Cuntz and Quillen \cite{CuntzQuillen} introduced {\em quasi-free algebras} as coordinate rings of non-commutative algebraic manifolds; they are precisely the $\wis{alg}$-smooth algebras. Similarly, smooth orders are $\wis{alg@n}$-smooth algebras, where $\wis{alg@n}$ is the category of Cayley-Hamilton algebras of degree $n$, which we now describe briefly; we refer to \cite{ProcesiCH} for more details.
If $M \in M_n(R)$ for $R$ a commutative $\mathbb{C}$-algebra, then its characteristic polynomial
\[
\chi_M = det(t1_n-M) = t^n + a_1 t^{n-1} + \hdots + a_n
\]
is such that all its coefficients are polynomials with rational coefficients in traces of powers of $M$, that is, $a_i = f_i(Tr(M),Tr(M^2),\hdots,Tr(M^{n}))$. Hence, if $A$ is a $\mathbb{C}$-algebra having a trace map $tr_A~:~A \rTo A$ (a linear map satisfying $tr_A(tr_A(a)b)=tr_A(a)tr_A(b)$, $tr_A(ab)=tr_A(ba)$ and $tr_A(a)~b=b~tr_A(a)$ for all $a,b \in A$) then we define a {\em formal characteristic polynomial of degree $n$} for every $a \in A$ by
\[
\chi_a = t^n + f_1(tr_A(a),\hdots,tr_A(a^{n})) t^{n-1} + \hdots + f_n(tr_A(a),\hdots,tr_A(a^{n})).
\]
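To make this explicit in the lowest non-trivial degree: for $n = 2$ the Newton identities give
\[
\chi_M = t^2 - Tr(M)\,t + \tfrac{1}{2}\bigl(Tr(M)^2 - Tr(M^2)\bigr),
\]
so $f_1 = -Tr(M)$ and $f_2 = \tfrac{1}{2}(Tr(M)^2 - Tr(M^2))$, and the formal characteristic polynomial of degree $2$ of an element $a$ reads $\chi_a = t^2 - tr_A(a)\,t + \tfrac{1}{2}(tr_A(a)^2 - tr_A(a^2))$.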
\begin{definition} An object of $\wis{alg@n}$ is a Cayley-Hamilton algebra of degree $n$, that is, a $\mathbb{C}$-algebra having a trace map $tr_A$ satisfying \[ \forall a \in A:~\chi_a(a) = 0~ \qquad \text{and} \qquad tr_A(1) = n \] Morphisms $A \rTo^f B$ in $\wis{alg@n}$ are $\mathbb{C}$-algebra morphisms preserving traces, that is \[ \xymatrix@R=45pt@C=45pt{ A \ar[r]^f \ar[d]_{tr_A} & B \ar[d]_{tr_B} \\ A \ar[r]^f & B } \] is a commutative diagram. \end{definition}
We recall from \cite{ProcesiCH} that $A \in \wis{alg@n}$ is $\wis{alg@n}$-smooth if and only if $\wis{trep}_n~A$ is a smooth variety (possibly having several irreducible components). In particular, a smooth order $\Lambda$ in a central simple $K$-algebra $\Sigma$ of dimension $n^2$ equipped with the reduced trace map is $\wis{alg@n}$-smooth.
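The simplest illustration is $\Lambda = M_n(\mathbb{C})$ equipped with the ordinary trace: every trace preserving algebra map $M_n(\mathbb{C}) \rTo M_n(\mathbb{C})$ is an automorphism and hence inner by the Skolem-Noether theorem, so
\[
\wis{trep}_n~M_n(\mathbb{C}) \simeq \wis{PGL}_n
\]
is a smooth variety and $M_n(\mathbb{C})$ is indeed $\wis{alg@n}$-smooth.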
Having identified smooth orders as a natural generalization of regular commutative algebras to the category of Cayley-Hamilton algebras and having a combinatorial local description of them (as well as their centers), we now turn to the associated {\em non-commutative smooth variety}.
\begin{definition} Let $\Lambda$ be an $R$-order in a central simple $K$-algebra $\Sigma$ of dimension $n^2$. The {\em non-commutative spectrum} $\wis{spec}~\Lambda$ is the set of all twosided prime ideals $P$ of $\Lambda$ (that is, the ideals satisfying $a \Lambda b \subset P \Rightarrow a \in P$ or $b \in P$). This set is equipped with the {\em Zariski topology} with typical open sets
\[
\mathbb{X}(I) = \{ P \in \wis{spec}~\Lambda~|~I \not\subset P \} \] for any twosided ideal $I$ of $\Lambda$ (see for example \cite{FVO444} and \cite{FVOAV}). The topological space $\wis{spec}~\Lambda$ comes equipped with a {\em non-commutative structure sheaf} $\mathcal{O}^{nc}_{\Lambda}$ with sections on the open set $\mathbb{X}(I)$ \[
\Gamma(\mathbb{X}(I),\mathcal{O}^{nc}_{\Lambda}) = \{ \delta \in \Sigma~|~\exists l \in \mathbb{N}~:~I^l.\delta \subset \Lambda \} \] (again see \cite{FVO444} or \cite{FVOAV} for a proof that this defines a sheaf of non-commutative algebras with global sections $\Gamma(\wis{spec}~\Lambda,\mathcal{O}^{nc}_{\Lambda}) = \Lambda$). Moreover, the {\em stalk} of $\mathcal{O}^{nc}_{\Lambda}$ at a prime ideal $P \in \wis{spec}~\Lambda$ is the symmetric localization \[
\mathcal{O}^{nc}_{\Lambda,P} = Q_{\Lambda-P}(\Lambda) = \{ \delta \in \Sigma~|~I \delta \subset \Lambda~\text{for some twosided ideal}~I \not\subset P \}. \] \end{definition}
Intersecting a twosided prime ideal $P$ of $\Lambda$ with its center gives a prime ideal of $R$ and hence we obtain a continuous map \[ \wis{spec}~\Lambda \rTo^{\pi_c} \wis{spec}~R \qquad P \mapsto P \cap R \] and if we denote with ${\mathcal O}_{\Lambda}$ the (usual) sheaf of $R$-algebras on $\wis{spec}~R$ associated to the $R$-order $\Lambda$ then $\pi_c$ induces a morphism of sheaves of algebras \[ (\wis{spec}~\Lambda, {\mathcal O}^{nc}_{\Lambda}) \rTo^{\pi_c} (\wis{spec}~R, {\mathcal O}_{\Lambda}). \] For $\mathfrak{m}$ a maximal ideal of $R$ we can relate the local marked quiver setting $(Q^{\dagger}_{\mathfrak{m}},\alpha_{\mathfrak{m}})$ to the fiber $\pi_c^{-1}(\mathfrak{m})$. This quiver setting was determined by the semi-simple $n$-dimensional $\Lambda$-representation \[ M_{\mathfrak{m}} = S_1^{\oplus e_1} \oplus \hdots \oplus S_k^{\oplus e_k} \] where $S_i$ is a simple $d_i$-dimensional $\Lambda$-representation. Then, we have that \[ \pi_c^{-1}(\mathfrak{m}) = \{ P_1,\hdots,P_k \} \qquad \text{with} \qquad \Lambda/P_i \simeq M_{d_i}(\mathbb{C}) \] so the number of vertices in $Q^{\dagger}_{\mathfrak{m}}$ determines the number of maximal twosided ideals of $\Lambda$ lying over $\mathfrak{m}$ and the dimension vector $\alpha_{\mathfrak{m}} = (e_1,\hdots,e_k)$ determines the so called Bergman-Small data, see \cite{BergmanSmall}. The finitely many maximal twosided ideals $\{ P_1,\hdots,P_k \}$ lying over the central point $\mathfrak{m}$ form a {\em clique} \cite{Jategaonkar} and should be thought of as points lying infinitesimally close together in $\wis{spec}~\Lambda$. The marked quiver $Q^{\dagger}$ encodes this infinitesimal information. If $\mathfrak{m}$ is a central singularity, the hope is that one can use these finitely many infinitesimally close points to separate tangent information in $\mathfrak{m}$ rather than having to resort to the full blown-up of $\mathfrak{m}$. In the next section we will give some examples when this non-commutative approach to desingularization actually works.
\begin{example} Let $X = \mathbb{A}^1$, that is, $R=\mathbb{C}[x]$, and consider the order
\[
\Lambda = \begin{bmatrix} R & R \\ \mathfrak{m} & R \end{bmatrix},
\]
where $\mathfrak{m} = (x) \triangleleft R$, that is $x_{\mathfrak{m}} = 0$. For every point $\lambda \not= 0$ there is a unique maximal twosided ideal of $\Lambda$ lying over $\mathfrak{m}_{\lambda} = (x-\lambda)$ with quotient $M_2(\mathbb{C})$. For this reason we say that $X - \{ 0 \}$ is the {\em Azumaya locus} of $\Lambda$. On the other hand, the {\em ramification locus} of $\Lambda$ is the closed subset $\{ 0 \} = \mathbb{V}(x)$ and there are two maximal ideals of $\Lambda$ lying over $\mathfrak{m}$
\[
M_1 = \begin{bmatrix} \mathfrak{m} & R \\ \mathfrak{m} & R \end{bmatrix} \qquad \text{and} \qquad M_2 = \begin{bmatrix} R & R \\ \mathfrak{m} & \mathfrak{m} \end{bmatrix}
\]
and the quotients are $\Lambda/M_1 \simeq \mathbb{C} \simeq \Lambda/M_2$ whence they each determine a one-dimensional $\Lambda$-representation. That is, the canonical continuous map
\[
\wis{spec}~\Lambda \rOnto^{\pi_c} \wis{spec}~R
\]
is a homeomorphism over $\mathbb{X}(x)$ and there are precisely two (infinitesimally close) points lying over $\mathbb{V}(x)$. The corresponding (marked) quiver setting is
\[
\xymatrix{\vtx{1} \ar@/^/[r] & \vtx{1} \ar@/^/[l]}
\]
and so the defect $\wis{def}_{\mathfrak{m}}~\Lambda = 0$. Remark that in all other maximal ideals $\mathfrak{m}_{\lambda}$ the local (marked) quiver setting is
\[
\xymatrix{\vtx{1} \ar@(ul,ur)}
\]
which also has zero defect so $\Lambda$ is a smooth order and hence $\wis{trep}_2~\Lambda$ is a smooth variety. We now turn to the structure sheaves ${\mathcal O}_{\Lambda}$ and ${\mathcal O}_{\Lambda}^{nc}$. The central structure sheaf is just given by central localization and therefore we find for its stalks
\[
{\mathcal O}_{\Lambda,\mathfrak{m}} = \begin{bmatrix} R_{\mathfrak{m}} & R_{\mathfrak{m}} \\ x R_{\mathfrak{m}} & R_{\mathfrak{m}} \end{bmatrix} \qquad {\mathcal O}_{\Lambda,\mathfrak{m}_{\lambda}} \simeq \begin{bmatrix} R_{\mathfrak{m}_{\lambda}} & R_{\mathfrak{m}_{\lambda}} \\ R_{\mathfrak{m}_{\lambda}} & R_{\mathfrak{m}_{\lambda}} \end{bmatrix}.
\]
Over the Azumaya locus the non-commutative structure sheaf ${\mathcal O}_{\Lambda}^{nc}$ coincides with the central structure sheaf. The stalks in the two points lying over $\mathfrak{m}$ can be computed to be
\[
{\mathcal O}_{\Lambda,M_1}^{nc} \simeq \begin{bmatrix} R_{\mathfrak{m}} & R_{\mathfrak{m}} \\ R_{\mathfrak{m}} & R_{\mathfrak{m}} \end{bmatrix} \qquad {\mathcal O}_{\Lambda,M_2}^{nc} \simeq \begin{bmatrix} R_{\mathfrak{m}} & x^{-1} R_{\mathfrak{m}} \\ x R_{\mathfrak{m}} & R_{\mathfrak{m}} \end{bmatrix},
\]
both of them being Azumaya algebras (conjugation with $\begin{bmatrix} x & 0 \\ 0 & 1 \end{bmatrix}$ identifies the second stalk with $M_2(R_{\mathfrak{m}})$). Hence, we have the slightly surprising fact that the non-commutative structure sheaf ${\mathcal O}_{\Lambda}^{nc}$ over $\wis{spec}~\Lambda$ is a sheaf of Azumaya algebras whereas $\Lambda$ itself is ramified in $\mathfrak{m}$. Observe that the stalk in $\mathfrak{m}$ of the central structure sheaf is the intersection of the two Azumaya stalks of the non-commutative structure sheaf. \end{example}
\section{Moduli spaces}
In this section, $\Lambda$ will be an $R$-order in a central simple $K$-algebra of dimension $n^2$ and $\mathfrak{m}$ will be a singularity of $\wis{spec}~R = X$. We want to use $\Lambda$ to resolve the singularity in $\mathfrak{m}$. As we are only interested in the \'etale local structure of the singularity in $\mathfrak{m}$, it is only the \'etale local structure of $\Lambda$ that matters and we may restrict attention to $\hat{\Lambda}_{\mathfrak{m}}$. Hence, we may assume that $\Lambda$ is split as far as possible, or equivalently, that we have a complete set $\{ e_1,\hdots,e_k \}$ of orthogonal idempotents in $\Lambda$. That is, the $e_i$ satisfy
\[
e_i^2 = e_i \qquad e_i.e_j = 0~\text{for $i \not= j$} \qquad \sum_{i=1}^k e_i = 1_{\Lambda}.
\]
These idempotents allow us to decompose finite dimensional $\Lambda$-representations. If $V \in \wis{rep}_m~\Lambda$ is an $m$-dimensional representation, we say that $V$ is of {\em dimension vector} $\alpha = (a_1,\hdots,a_k)$ for $\sum_{i=1}^k a_i = m$ provided
\[
\wis{dim}_{\mathbb{C}}~e_i.V = a_i.
\]
We denote this by $\wis{dim}~V = \alpha$. Because $S = \overbrace{\mathbb{C} \times \hdots \times \mathbb{C}}^k \rInto \Lambda$ we can restrict $m$-dimensional $\Lambda$-representations to the semi-simple subalgebra $S$ to obtain morphisms
\[
\wis{rep}_m~\Lambda \rTo \wis{rep}_m~S = \bigsqcup_{\alpha}~\wis{GL}_m/\wis{GL}(\alpha)
\]
where the decomposition is taken over all dimension vectors $\alpha = (a_1,\hdots,a_k)$ such that $\sum_i a_i = m$ and where $\wis{GL}(\alpha) = \wis{GL}_{a_1} \times \hdots \times \wis{GL}_{a_k}$. The component $\wis{GL}_m/\wis{GL}(\alpha)$ is the orbit of the semi-simple $S$-representation $V_{\alpha}$ with action given by the matrices
\[
e_i \mapsto E_{\sum_{j=1}^{i-1} a_j +1,\sum_{j=1}^{i-1} a_j + 1} + E_{\sum_{j=1}^{i-1} a_j +2,\sum_{j=1}^{i-1} a_j + 2} + \hdots + E_{\sum_{j=1}^{i} a_j,\sum_{j=1}^{i} a_j}
\]
where $E_{i,j}$ are the standard matrices $(\delta_{iu}\delta_{jv})_{u,v} \in M_m(\mathbb{C})$. As a consequence we can also decompose the representation schemes
\[
\wis{rep}_m~\Lambda = \bigsqcup_{\alpha} \wis{GL}_m \times^{\wis{GL}(\alpha)} \wis{rep}_{\alpha}~\Lambda
\]
where $\wis{rep}_{\alpha}~\Lambda$ is the scheme representing all $m = \sum_i a_i$-dimensional representations of dimension vector $\alpha = (a_1,\hdots,a_k)$ on which the action by the set of idempotents $\{ e_1,\hdots,e_k \}$ is given by the above matrices. Clearly, the reductive group $\wis{GL}(\alpha)$ acts by base-change in the subspaces $e_i.V$ on $\wis{rep}_{\alpha}~\Lambda$ and the corresponding component of $\wis{rep}_m~\Lambda$ is the principal fiber bundle $\wis{GL}_m \times^{\wis{GL}(\alpha)} \wis{rep}_{\alpha}~\Lambda$.
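To fix ideas, for the order $\Lambda = \begin{bmatrix} R & R \\ \mathfrak{m} & R \end{bmatrix}$ of the example in the previous section one can take the diagonal idempotents
\[
e_1 = \begin{bmatrix} 1 & 0 \\ 0 & 0 \end{bmatrix} \qquad \text{and} \qquad e_2 = \begin{bmatrix} 0 & 0 \\ 0 & 1 \end{bmatrix},
\]
and every trace preserving $2$-dimensional representation $V$ satisfies $\wis{dim}_{\mathbb{C}}~e_i.V = tr_{\Lambda}(e_i) = 1$, so it has dimension vector $\alpha = (1,1)$.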
A {\em character} of the reductive group $\wis{GL}(\alpha)$ is determined by an integral $k$-tuple $\theta = (t_1,\hdots,t_k) \in \mathbb{Z}^k$
\[
\chi_{\theta}~:~\wis{GL}(\alpha) \rTo \mathbb{C}^* \qquad (g_1,\hdots,g_k) \mapsto det(g_1)^{t_1} \hdots det(g_k)^{t_k}.
\]
As the subgroup $\mathbb{C}^*(1_{a_1},\hdots,1_{a_k})$ acts trivially on $\wis{rep}_{\alpha}~\Lambda$ we are only interested in the characters $\chi_{\theta}$ such that $0 = \theta.\alpha = \sum_{i=1}^k a_it_i$. Remark that a $\Lambda$-subrepresentation $W \subset V$ for $V \in \wis{rep}_{\alpha}~\Lambda$ necessarily satisfies $W \in \wis{rep}_{\beta}~\Lambda$ for some dimension vector $\beta \leq \alpha$. We will now extend the definition of (semi)stable representations of quivers, due to A. King \cite{King}, to the present setting.
\begin{definition} For $\theta \in \mathbb{Z}^k$ satisfying $\theta.\alpha = 0$, a representation $V \in \wis{rep}_{\alpha}~\Lambda$ is said to be \begin{enumerate} \item{{\em $\theta$-semistable} if and only if for every proper $\Lambda$-subrepresentation $W \subset V$ we have $\theta.\wis{dim}~W \geq 0$.} \item{{\em $\theta$-stable} if and only if for every proper non-zero $\Lambda$-subrepresentation $W \subset V$ we have $\theta.\wis{dim}~W > 0$.} \end{enumerate} \end{definition}
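For instance, if $k = 2$ and $\alpha = (1,1)$ then the characters with $\theta.\alpha = 0$ are exactly the $\theta = (-t,t)$ with $t \in \mathbb{Z}$. Taking $\theta = (-1,1)$, a representation $V \in \wis{rep}_{\alpha}~\Lambda$ is $\theta$-semistable (equivalently, $\theta$-stable) precisely when it admits no subrepresentation of dimension vector $(1,0)$, as this is the only dimension vector $\beta$ of a proper non-zero subrepresentation with $\theta.\beta \leq 0$.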
For any setting satisfying $\theta.\alpha = 0$ we have the following inclusions of Zariski open $\wis{GL}(\alpha)$-stable subschemes of $\wis{rep}_{\alpha}~\Lambda$ (with obvious notations) \[ \wis{rep}_{\alpha}^{simple}~\Lambda \subset \wis{rep}^{\theta-stable}_{\alpha}~\Lambda \subset \wis{rep}_{\alpha}^{\theta-semist}~\Lambda \subset \wis{rep}_{\alpha}~\Lambda \] but some of these open subsets may actually be empty.
All these definitions carry over to any affine $\mathbb{C}$-algebra $\Lambda$ but if $\Lambda$ is an $R$-order in a central simple $K$-algebra of dimension $n^2$ we have the following link with the material of the previous section \[ \wis{trep}_n~\Lambda = \wis{GL}_n \times^{\wis{GL}(\alpha)} \wis{rep}_{\alpha}~\Lambda \] for the dimension vector $\alpha = (tr_{\Lambda}(e_1),\hdots,tr_{\Lambda}(e_k))$. Moreover, \[ R = \mathbb{C}[\wis{triss}_n~\Lambda] = \mathbb{C}[\wis{iss}_{\alpha}~\Lambda] = \mathbb{C}[\wis{rep}_{\alpha}~\Lambda]^{\wis{GL}(\alpha)} \] where $\wis{iss}_{\alpha}~\Lambda$ is the scheme representing semi-simple $\alpha$-dimensional representations of $\Lambda$. Remark that the dimension vector $\alpha$ above is such that there are $\alpha$-dimensional simple representations of $\Lambda$ so that in the above inclusion of $\wis{GL}(\alpha)$-stable subvarieties of $\wis{rep}_{\alpha}~\Lambda$ none of the subschemes is empty. From now on we fix this particular dimension vector $\alpha$ of total dimension $n$.
A polynomial function $f \in \mathbb{C}[\wis{rep}_{\alpha}~\Lambda]$ is said to be a {\em $\theta$-semi-invariant of weight $l$} if and only if we have for all $g \in \wis{GL}(\alpha)$ \[ g.f = \chi_{\theta}(g)^l f \] where, as before, $\chi_{\theta}$ is the character of $\wis{GL}(\alpha)$ corresponding to $\theta$. It follows from \cite{King} that a representation $V \in \wis{rep}_{\alpha}~\Lambda$ is $\theta$-semistable if and only if there is some $\theta$-semi-invariant $f$ of some weight $l$ such that $f(V) \not= 0$.
Clearly, $\theta$-semi-invariants of weight zero are just polynomial invariants in $\mathbb{C}[\wis{rep}_{\alpha}~\Lambda]^{\wis{GL}(\alpha)} = R$ and the product of $\theta$-semi-invariants of weights $l$ resp. $l'$ is a $\theta$-semi-invariant of weight $l+l'$. Therefore, the ring of all $\theta$-semi-invariants
\[
\mathbb{C}[\wis{rep}_{\alpha}~\Lambda]^{\wis{GL}(\alpha),\theta} = \bigoplus_{l=0}^{\infty} \{ f \in \mathbb{C}[\wis{rep}_{\alpha}~\Lambda]~|~\forall g \in \wis{GL}(\alpha)~:~g.f = \chi_{\theta}^l f \}
\]
is a graded algebra whose part of degree zero is $R = \mathbb{C}[\wis{iss}_{\alpha}~\Lambda]$. Consequently, we have a projective morphism
\[
\wis{proj}~\mathbb{C}[\wis{rep}_{\alpha}~\Lambda]^{\wis{GL}(\alpha),\theta} \rOnto^{\pi} X = \wis{spec}~R
\]
such that all fibers of $\pi$ are projective varieties. The main properties of $\pi$ are proved as in \cite{King}.
\begin{theorem} There is a one-to-one correspondence between \begin{enumerate} \item{points in $\wis{proj}~\mathbb{C}[\wis{rep}_{\alpha}~\Lambda]^{\wis{GL}(\alpha),\theta}$, and} \item{isomorphism classes of direct sums of $\theta$-stable $\Lambda$ representations of total dimension $\alpha$.} \end{enumerate} Moreover, as there are simple $\alpha$-dimensional $\Lambda$-representations, the morphism $\pi$ is a birational projective map. \end{theorem}
\begin{definition} We call $\wis{proj}~\mathbb{C}[\wis{rep}_{\alpha}~\Lambda]^{\wis{GL}(\alpha),\theta}$ the {\em moduli space of $\theta$-semistable representations} of $\Lambda$ and will denote it with $\wis{moduli}^{\theta}_{\alpha}~\Lambda$. \end{definition}
Let us recall some examples of current interest.
\begin{example}[Kleinian singularities] For a Kleinian singularity, that is, a quotient singularity $\mathbb{C}^2/G$ with $G \subset SL_2(\mathbb{C})$ there is an associated extended Dynkin diagram $D$. Let $Q$ be the {\em double quiver} of $D$, that is to each arrow $\xymatrix{\vtx{} \ar[r]^x & \vtx{}}$ in $D$ we adjoin an arrow $\xymatrix{\vtx{} & \vtx{} \ar[l]_{x^*} }$ in $Q$ in the opposite direction and let $\alpha$ be the unique minimal dimension vector such that $\chi_D(\alpha,\alpha) = 0$ (the so-called isotropic Schur root of the tame quiver $\vec{D}$ obtained from the graph $D$ by fixing a certain orientation on the edges). Consider the {\em moment element}
\[
m = \sum_{x \in D} [x,x^*]
\]
then the skew-group algebra $\Lambda = \mathbb{C}[x,y] \# G$ is an $R$-order with $R = \mathbb{C}[\mathbb{C}^2/G]$ in $M_n(K)$ where $K$ is the field of fractions of $R$ and $n = \#G$. Moreover, $\Lambda$ is Morita equivalent to the {\em preprojective algebra} which is the quotient of the path algebra of $Q$ by the ideal generated by the moment element
\[ \Pi_0 = \mathbb{C} Q/ (\sum [x,x^*] ).
\] For more details we refer to the lecture notes by W. Crawley-Boevey \cite{CrawleyLectNotes}.
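For the simplest case $G = \mathbb{Z}_2$ the diagram $D$ is $\tilde{A}_1$, the double quiver $Q$ has two vertices joined by arrows $a,b$ in one direction and $a^*,b^*$ in the other, and $\alpha = (1,1)$, so on $\alpha$-dimensional representations all arrows act as scalars. Writing $p = aa^*$, $q = ab^*$, $r = ba^*$ and $s = bb^*$ for the resulting invariants (notation used only in this illustration), one has $ps = qr$ identically, while the moment element imposes, up to a sign convention, $p + s = 0$, so that
\[
\mathbb{C}[\wis{iss}_{\alpha}~\Pi_0] \simeq \frac{\mathbb{C}[p,q,r]}{(p^2+qr)} \simeq \mathbb{C}[\mathbb{C}^2/\mathbb{Z}_2],
\]
recovering the Kleinian singularity of type $A_1$.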
If we take $\theta$ to be a generic character such that $\theta.\alpha = 0$, then the projective map
\[
\wis{moduli}^{\theta}_{\alpha}~\Lambda \rOnto X = \mathbb{C}^2/G
\]
is a minimal resolution of singularities. Note that the map is birational as $\alpha$ is the dimension vector of a simple representation of the preprojective algebra $\Pi_0$, see \cite{CrawleyLectNotes}.
For such a stability structure $\theta$ we have that $\wis{rep}^{\theta-semist}_{\alpha}~\Pi_0$ is a smooth variety. For consider the {\em moment map} \[ \wis{rep}_{\alpha}~Q \rTo^{\mu} \wis{lie}~\wis{GL}(\alpha) = M_{\alpha}(\mathbb{C}) = M_{e_1}(\mathbb{C}) \oplus \hdots \oplus M_{e_k}(\mathbb{C}) \] defined by sending $V = (V_a,V_{a^*})$ to \[
(\sum_{\xymatrix{\vtx{} \ar[r]^a&\vtx{1}}} V_aV_{a^*} - \sum_{\xymatrix{\vtx{1} \ar[r]^a & \vtx{}}} V_{a^*}V_a, \hdots, \sum_{\xymatrix{\vtx{} \ar[r]^a&\vtx{k}} }V_aV_{a^*} - \sum_{\xymatrix{\vtx{k} \ar[r]^a & \vtx{}} }V_{a^*}V_a).\] The differential $d \mu$ can be verified to be surjective in any representation $V \in \wis{rep}_{\alpha}~Q$ which has stabilizer subgroup $\mathbb{C}^*(1_{e_1},\hdots,1_{e_k})$ (a so called {\em Schur representation}) see for example \cite[lemma 6.5]{CrawleyMoment}.
Further, any $\theta$-stable representation is Schurian. Moreover, for a generic stability structure $\theta \in \mathbb{Z}^k$ we have that every $\theta$-semistable $\alpha$-dimensional representation is $\theta$-stable as the $gcd(\alpha) = 1$. Combining these facts it follows that $\mu^{-1}(0) = \wis{rep}_{\alpha}~\Pi_0$ is smooth in all $\theta$-stable representations. \end{example}
\begin{example} Consider a quotient singularity $X = \mathbb{C}^d/G$ with $G \subset SL_d(\mathbb{C})$ and let $Q$ be the {\em McKay quiver} of $G$ acting on $V=\mathbb{C}^d$. That is, the vertices $\{ v_1,\hdots,v_k \}$ of $Q$ are in one-to-one correspondence with the irreducible representations $\{ R_1,\hdots,R_k \}$ of $G$ such that $R_1 = \mathbb{C}_{triv}$ is the trivial representation. Decompose the tensor product into irreducibles
\[
V \otimes_{\mathbb{C}} R_j = R_1^{\oplus j_1} \oplus \hdots \oplus R_k^{\oplus j_k},
\]
then the number of arrows in $Q$ from $v_i$ to $v_j$
\[
\#~(v_i \rTo v_j ) = j_i
\]
is the multiplicity of $R_i$ in $V \otimes R_j$. Let $\alpha = (e_1,\hdots,e_k)$ be the dimension vector where $e_i = \wis{dim}_{\mathbb{C}}~R_i$.
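For instance, for $G = \mathbb{Z}_2 = \{ \pm 1_2 \} \subset SL_2(\mathbb{C})$ acting on $V = \mathbb{C}^2$ there are two irreducible representations, the trivial one $R_1$ and the sign representation $R_2$, and
\[
V \otimes_{\mathbb{C}} R_1 \simeq R_2^{\oplus 2} \qquad \text{and} \qquad V \otimes_{\mathbb{C}} R_2 \simeq R_1^{\oplus 2},
\]
so the McKay quiver has two vertices with two arrows in each direction (the double of the extended Dynkin diagram $\tilde{A}_1$) and $\alpha = (1,1)$, in accordance with the Kleinian example above.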
The relevance of this quiver-setting is that \[ \wis{rep}_{\alpha}~Q = Hom_G(R,R \otimes V) \] where $R$ is the {\em regular representation}, see for example \cite{CrawNotes}. Consider $Y \subset \wis{rep}_{\alpha}~Q$ the affine subvariety of all $\alpha$-dimensional representations of $Q$ for which the corresponding $G$-equivariant map $B \in Hom_G(R,V \otimes R)$ satisfies \[ B \wedge B = 0 \in Hom_G(R,\wedge^2 V \otimes R). \] $Y$ is called the {\em variety of commuting matrices} and its defining relations can be expressed as linear equations between paths in $Q$ evaluated in $\wis{rep}_{\alpha}~Q$, say $(l_1,\hdots,l_z)$. Then, the quiver-order \[ \Lambda = \frac{\int_{\alpha} \mathbb{C} Q}{(l_1,\hdots,l_z)} \] is an order with center $R = \mathbb{C}[\mathbb{C}^d/G]$. In fact, $\Lambda$ is just the skew group algebra \[ A = \mathbb{C}[x_1,\hdots,x_d] \# G. \] Assume that the first vertex in the McKay quiver corresponds to the trivial representation. Take a character $\theta \in \mathbb{Z}^k$ such that $t_1 < 0$ and all $t_i > 0$ for $i \geq 2$, for example take \[ \theta = ( - \sum_{i=2}^k \wis{dim} R_i , 1, \hdots, 1 ). \] Then, the corresponding moduli space is isomorphic to \[ \wis{moduli}^{\theta}_{\alpha}~A \simeq G-\wis{Hilb}~\mathbb{C}^d \] the {\em $G$-equivariant Hilbert scheme} which classifies all $\# G$-codimensional ideals $I \triangleleft \mathbb{C}[x_1,\hdots,x_d]$ where \[ \frac{\mathbb{C}[x_1,\hdots,x_d]}{I} \simeq \mathbb{C} G \] as $G$-modules, hence in particular $I$ must be stable under the action of $G$. It is well known that the natural map \[ G-\wis{Hilb}~\mathbb{C}^d \rOnto X = \mathbb{C}^d/G \] is a minimal resolution if $d=2$ and if $d=3$ it is often a crepant resolution, for example whenever $G$ is Abelian, see \cite{CrawNotes} for more details. In all cases where $G-\wis{Hilb}~\mathbb{C}^d$ is a desingularization we have again that the corresponding open subvariety $\wis{rep}_{\alpha}^{\theta-semist}~\Lambda$ is smooth. For, in this case the quotient map \[ \wis{rep}_{\alpha}^{\theta-semist}~\Lambda = \wis{rep}_{\alpha}^{\theta-stable}~\Lambda \rOnto \wis{moduli}^{\theta}_{\alpha}~\Lambda = G-\wis{Hilb}~\mathbb{C}^d \] is a principal $\wis{PGL}(\alpha)$-fibration and as the base space is smooth by assumption so is the top space.
As we did not find explicit non-Abelian examples for $\mathbb{C}^3$ in the literature, we work out the simplest such example below.
Let $A_{4}$ be the alternating group of $12$ elements acting on three-dimensional space $\mathbb{C}^3$ via the matrices
\[
A_{4} = \langle~s=\begin{bmatrix}1&0&0\\0&-1&0\\0&0&-1\end{bmatrix}, t=\begin{bmatrix}-1&0&0\\0&-1&0\\0&0&1\end{bmatrix}, r=\begin{bmatrix}0&1&0\\0&0&1\\ 1& 0&0\end{bmatrix}~\rangle .
\]
The corresponding quotient singularity $\mathbb{C}^3/A_{4}$ has coordinate ring
\[
\mathbb{C}[x,y,z]^{A_{4}}=\mathbb{C}[A(x,y,z),B(x,y,z),C(x,y,z),D(x,y,z)]
\]
with
\[
\begin{cases} A(x,y,z)=xyz,\\ B(x,y,z)=x^2+y^2+z^2,\\ C(x,y,z)=x^2y^2+y^2z^2+z^2x^2,\\ D(x,y,z)=x^4y^2+y^4z^2+z^4x^2. \end{cases}
\]
$A$, $B$, $C$ and $D$ obey the relation
\[
D^2+C^3-BCD+A^2(3D-6BC+B^3+9A^2)=0,
\]
whence the quotient singularity $\mathbb{C}^3/A_{4}$ is a hypersurface in $\mathbb{C}^4$.
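Invariance of these generators is immediate to check: the cyclic matrix $r$ permutes the coordinates $x$, $y$, $z$ cyclically and fixes each of $A$, $B$, $C$ and $D$, while $s$ and $t$ change the sign of exactly two coordinates, under which
\[
A = xyz \mapsto xyz
\]
(the two sign changes cancel) and $B$, $C$, $D$ are fixed as they only involve even powers of each variable.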
The character table of the group $A_{4}$ is given by \[
\begin{array}{c|ccccc} A_{4} & 1 & \left[\begin{smallmatrix}*&0&0\\0&*&0\\0&0&* \end{smallmatrix}\right] & \left[\begin{smallmatrix}0&*&0\\0&0&*\\ *& 0&0 \end{smallmatrix}\right] & \left[\begin{smallmatrix}0&0&*\\ *&0&0\\ 0&*&0 \end{smallmatrix}\right] \\ & & & & \\ \hline & & & & \\ V_0 & 1 & 1 & 1 & 1 \\ V_1 & 1 & 1 & \rho & \rho^2\\ V_2 & 1 & 1 & \rho^2 & \rho \\ V_3 & 3 & -1 & 0 & 0 \end{array} \] where $\rho$ is a primitive $3$-rd root of unity and therefore the regular representation is $R=V_0\oplus V_1 \oplus V_2 \oplus V_3^{(1)} \oplus V_3^{(2)} \oplus V_3^{(3)}$. From the character table we deduce the isomorphisms of $A_{4}$-representations \begin{align*} &V_3\otimes V_0=V_3\otimes V_1=V_3\otimes V_2=V_3\\ &V_3\otimes V_3=V_0\oplus V_1 \oplus V_2 \oplus V_3 \oplus V_3 \end{align*} whence the McKay quiver is of the following shape $$ \vcenter{ \xymatrix@=2.8cm{ \vtx{1}\ar@/^/[dr]^{X=\left[\begin{smallmatrix} X_1\\ X_2\\ X_3 \end{smallmatrix}\right]} & &\vtx{1}\ar@/_/[dl]_{\left[\begin{smallmatrix} Z_1\\ Z_2\\ Z_3 \end{smallmatrix}\right]=Z} \\ &\vtx{3}\ar@/^/[ul]^{x=\left[\begin{smallmatrix} x_1 & x_2 & x_3 \end{smallmatrix}\right]}\ar@/^/[d]^{y=\left[\begin{smallmatrix} y_1 & y_2 & y_3 \end{smallmatrix}\right]}\ar@/_/[ur]_{z=\left[\begin{smallmatrix} z_1 & z_2 & z_3 \end{smallmatrix}\right]}\ar@(ld,l)^{u=\left[\begin{smallmatrix} u_{11} & u_{12} & u_{13}\\ u_{21} & u_{22} & u_{23}\\ u_{31} & u_{32} & u_{33} \end{smallmatrix}\right]}\ar@(r,rd)^{v=\left[\begin{smallmatrix} v_{11} & v_{12} & v_{13}\\ v_{21} & v_{22} & v_{23}\\ v_{31} & v_{32} & v_{33} \end{smallmatrix}\right]} \\ &\vtx{1}\ar@/^/[u]^{Y=\left[\begin{smallmatrix} Y_1\\ Y_2\\ Y_3 \end{smallmatrix}\right]} } } $$ Denoting $ V_0=\mathbb{C} v_0, V_1=\mathbb{C} v_1, V_2=\mathbb{C} v_2 $ and $ V_3^{(i)}=\mathbb{C} e_1^{(i)} + \mathbb{C} e_2^{(i)} + \mathbb{C} e_3^{(i)}, $ we construct a $G$-equivariant basis for \begin{align*} V \otimes R &= V_3 \oplus V_3 \oplus V_3 \oplus (V_0\oplus V_1\oplus V_2\oplus V_3\oplus V_3)\\ &\quad \oplus (V_0\oplus V_1\oplus V_2\oplus V_3\oplus V_3) \oplus (V_0\oplus V_1\oplus V_2\oplus V_3\oplus V_3) \end{align*} determined by \begin{alignat*}{2} V \otimes V_0&=\mathbb{C}(e_1 \otimes v_0) + \mathbb{C}(e_2 \otimes v_0) + \mathbb{C}(e_3 \otimes v_0)\\ V \otimes V_1&=\mathbb{C}(\rho^2 e_1 \otimes v_1) + \mathbb{C}(e_2 \otimes v_1) + \mathbb{C}(\rho e_3 \otimes v_1)\\ V \otimes V_2&=\mathbb{C}(\rho e_1 \otimes v_2) + \mathbb{C}(e_2 \otimes v_2) + \mathbb{C}(\rho^2 e_3 \otimes v_2)\\ V \otimes V_3^{(i)}&=\mathbb{C}(e_1 \otimes e_1^{(i)}) + \mathbb{C}(e_2\otimes e_2^{(i)}) + \mathbb{C}(e_3 \otimes e_3^{(i)})&\qquad &(V_0)\\ & \quad +\mathbb{C}(\rho^2 e_1 \otimes e_1^{(i)}) + \mathbb{C}(\rho e_2 \otimes e_2^{(i)}) + \mathbb{C}( e_3 \otimes e_3^{(i)})&\qquad &(V_1)\\ & \quad +\mathbb{C}(\rho e_1 \otimes e_1^{(i)}) + \mathbb{C}(\rho^2 e_2 \otimes e_2^{(i)}) + \mathbb{C}( e_3 \otimes e_3^{(i)})&\qquad &(V_2)\\ & \quad +\mathbb{C}(e_2 \otimes e_3) + \mathbb{C}(e_3 \otimes e_1) + \mathbb{C}(e_1 \otimes e_2)&\qquad &(V_3 \sim u)\\ & \quad +\mathbb{C}(e_1 \otimes v_0) + \mathbb{C}(e_2 \otimes v_0) + \mathbb{C}(e_3 \otimes v_0)&\qquad &(V_3 \sim v) \end{alignat*} With respect to this basis we obtain the following three $12 \times 12$ matrices \[ P=\left[\begin{smallmatrix} 0 & 0& 0 & x_1 & 0 & 0 & x_2 & 0 & 0& x_3 & 0 & 0\\ 0 & 0& 0 & \rho^2y_1 & 0 & 0 & \rho^2y_2 & 0 & 0& \rho^2y_3 & 0 & 0\\ 0 & 0& 0 & \rho z_1 & 0 & 0 & \rho z_2 & 0 & 0& \rho z_3 & 0 & 0\\ X_1 & \rho^2Y_1& \rho Z_1 &0 & 0& 0 &0 & 0&0 &0 & 
0& 0 \\ 0 & 0& 0 &0 & 0& u_{11} &0 & 0& u_{12} &0 & 0& u_{13} \\ 0& 0 &0 & 0& v_{11} &0 & 0& v_{12} &0 & 0& v_{13} &0 \\ X_2 & \rho^2Y_2& \rho Z_2 &0 & 0& 0 &0 & 0&0 &0 & 0& 0 \\ 0 & 0& 0 &0 & 0& u_{21} &0 & 0& u_{22} &0 & 0& u_{23} \\ 0& 0 &0 & 0& v_{21} &0 & 0& v_{22} &0 & 0& v_{23} &0 \\ X_3 & \rho^2Y_3& \rho Z_3 &0 & 0& 0 &0 & 0&0 &0 & 0& 0 \\ 0 & 0& 0 &0 & 0& u_{31} &0 & 0& u_{32} &0 & 0& u_{33} \\ 0& 0 &0 & 0& v_{31} &0 & 0& v_{32} &0 & 0& v_{33} &0 \end{smallmatrix}\right], \] \[ Q=\left[\begin{smallmatrix} 0 &0 & 0& 0 & x_1 & 0 & 0 & x_2 & 0 & 0& x_3 & 0 \\ 0 &0 & 0& 0 & \rho y_1 & 0 & 0 & \rho y_2 & 0 & 0& \rho y_3 & 0\\ 0 &0 & 0& 0 & \rho^2 z_1 & 0 & 0 & \rho^2 z_2 & 0 & 0& \rho^2 z_3 & 0 \\ 0& 0&0 &0 & 0& v_{11} &0 & 0& v_{12} &0 & 0& v_{13} \\ X_1 & Y_1& Z_1 &0 & 0& 0 &0 & 0&0 &0 & 0& 0 \\
0 &0 & 0& u_{11} &0 & 0& u_{12} &0 & 0& u_{13} & 0&0\\ 0& 0&0 &0 & 0& v_{21} &0 & 0& v_{22} &0 & 0& v_{23} \\ X_2 & Y_2& Z_2 &0 & 0& 0 &0 & 0&0 &0 & 0& 0 \\
0 &0 & 0& u_{21} &0 & 0& u_{22} &0 & 0& u_{23} &0 &0\\ 0& 0 &0&0 & 0& v_{31} &0 & 0& v_{32} &0 & 0& v_{33} \\ X_3 & Y_3& Z_3 &0 & 0& 0 &0 & 0&0 &0 & 0& 0 \\
0 &0 & 0& u_{31} &0 & 0& u_{32} &0 & 0& u_{33} &0 &0 \end{smallmatrix}\right], \] \[ R=\left[\begin{smallmatrix} 0&0&0 & 0& 0 & x_1 & 0 & 0 & x_2 & 0 & 0& x_3\\ 0&0&0 & 0& 0 & y_1 & 0 & 0 & y_2 & 0 & 0& y_3\\ 0&0&0 & 0& 0 & z_1 & 0 & 0 & z_2 & 0 & 0& z_3 \\ 0& 0 &0 & 0& u_{11} &0 & 0& u_{12} &0 & 0& u_{13} &0\\ 0 &0 & 0& v_{11} &0 & 0& v_{12} &0 & 0& v_{13} &0 & 0\\ X_1 & \rho Y_1& \rho^2 Z_1 &0 & 0& 0 &0 & 0&0 &0 & 0& 0 \\ 0& 0 &0 & 0& u_{21} &0 & 0& u_{22} &0 & 0& u_{23} & 0 \\ 0 &0 & 0& v_{21} &0 & 0& v_{22} &0 & 0& v_{23} &0 & 0\\ X_2 & \rho Y_2& \rho^2 Z_2 &0 & 0& 0 &0 & 0&0 &0 & 0& 0 \\ 0& 0 &0 & 0& u_{31} &0 & 0& u_{32} &0 & 0& u_{33} &0\\ 0 &0 & 0& v_{31} &0 & 0& v_{32} &0 & 0& v_{33} &0 & 0\\ X_3 & \rho Y_3& \rho^2 Z_3 &0 & 0& 0 &0 & 0&0 &0 & 0& 0 \end{smallmatrix}\right]. \] Setting the three commutators equal to 0, we obtain the constraints: \begin{gather*} x(u-v)=0, \quad y(u-\rho^2 v)=0, \quad z(u-\rho v)=0,\\ (u-v)X=0, \quad (u-\rho^2 v)Y=0, \quad (u-\rho v)Z=0, u^2=Xx+Yy+Zz,\\ v^2=Xx+\rho^2Yy+\rho Zz. \end{gather*} recovering the result obtained in \cite{BG98}. \end{example}
\section{Partial desingularizations}
In the previous section we have seen that in many cases of current interest one associates to a singularity $\mathfrak{m}$ an $R$-order $\Lambda$ and a stability structure $\theta$ for the dimension vector $\alpha$ with $\wis{trep}_n~\Lambda = \wis{GL}_n \times^{\wis{GL}(\alpha)} \wis{rep}_{\alpha}~\Lambda$, such that the Zariski open subset
\[
\wis{rep}_{\alpha}^{\theta-semist}~\Lambda
\]
of $\theta$-semistable representations is a smooth variety. If this is the case we will call $(\Lambda,\alpha,\theta)$ a {\em good $\mathfrak{m}$-setting}. In this section we will prove that to a good $\mathfrak{m}$-setting one associates a non-commutative desingularization of $\mathfrak{m}$ and a partial commutative desingularization with excellent control on the remaining singularities. We will sketch the procedure in general and then give an explicit description in case $\Lambda$ is a {\em quiver-order}. That is,
\[
\Lambda \simeq \int_{\alpha}~\frac{\mathbb{C} Q}{I}
\]
for some dimension vector $\alpha$ such that $\wis{rep}_{\alpha}~\mathbb{C} Q/I$ contains (a Zariski open subset of) simple representations, where $\int_{\alpha} \mathbb{C} Q/I$ denotes the algebra of $\wis{GL}_n$-equivariant maps
\[
\wis{GL}_n \times^{\wis{GL}(\alpha)} \wis{rep}_{\alpha}~\frac{\mathbb{C} Q}{I} \rTo M_n(\mathbb{C})
\]
and $n$ is the total dimension of $\alpha$.
If $(\Lambda,\alpha,\theta)$ is a good $\mathfrak{m}$-setting we have the diagram explained in the previous section \[ \xymatrix@R=45pt@C=45pt{ \wis{rep}_{\alpha}^{\theta-semist}~\Lambda \ar@{->>}[d]_q \ar@{->>}[rd]^{q_c} \\ \wis{moduli}_{\alpha}^{\theta}~\Lambda \ar@{->>}[r]^{\pi} & X = \wis{spec}~R } \] where $q$ is the algebraic quotient map and $\pi$ is a projective birational map. To $q$ we will assign a sheaf of smooth orders $\mathcal{A}$ on $\wis{moduli}^{\theta}_{\alpha}~\Lambda$. Let $\cup_D~X_D$ be a Zariski open covering by affine normal varieties of the moduli space $\wis{moduli}_{\alpha}^{\theta}~\Lambda$, then each $X_D$ determines a smooth order $\Lambda_D$ defined by taking the algebra of $\wis{GL}_n$-equivariant maps \[ \wis{GL}_n \times^{\wis{GL}(\alpha)} q^{-1}(X_D) \rTo M_n(\mathbb{C}) \] for which $q^{-1}(X_D) \simeq \wis{rep}_{\alpha}~\Lambda_D$. Remark that as $q^{-1}(X_D)$ is a smooth $\wis{GL}(\alpha)$-affine variety, we have that \[ \wis{trep}_n~\Lambda_D = \wis{GL}_n \times^{\wis{GL}(\alpha)} q^{-1}(X_D) \] is a smooth $\wis{GL}_n$-variety and therefore $\Lambda_D$ is indeed a smooth order. Taking as sections \[ \Gamma(X_D,\mathcal{A}) = \Lambda_D, \] we obtain a sheaf of smooth orders on $\wis{moduli}_{\alpha}^{\theta}~\Lambda$. We will construct the orders $\Lambda_D$ explicitly if $\Lambda$ is a quiver-order $\int_{\alpha}~\mathbb{C} Q/I$.
Because $\wis{moduli}^{\theta}_{\alpha}~\Lambda = \wis{proj}~\mathbb{C}[\wis{rep}_{\alpha}~\Lambda]^{\wis{GL}(\alpha),\theta}$ we need control on the generators of all $\theta$-semi-invariants. Such a generating set was found by Aidan Schofield and Michel Van den Bergh in \cite{SchofVdB}: {\em determinantal semi-invariants}. In order to define them we have to introduce some notation first.
Reorder the vertices in $Q$ such that the entries of $\theta$ are separated into three strings
\[
\theta = (\underbrace{t_1,\hdots,t_i}_{> 0},\underbrace{t_{i+1},\hdots,t_j}_{=0},\underbrace{t_{j+1},\hdots,t_k}_{< 0})
\]
and let $\theta$ be such that $\theta.\alpha = 0$. Fix a nonzero weight $l \in \mathbb{N}$ and take arbitrary natural numbers $\{ l_{i+1},\hdots,l_j \}$.
Consider a rectangular matrix $L$ with \begin{itemize} \item{$lt_1+\hdots+lt_i+l_{i+1} + \hdots + l_j$ rows and} \item{$l_{i+1} + \hdots + l_j - l t_{j+1} - \hdots - l t_k$ columns} \end{itemize} \[
L = \quad \begin{array}{cc||c|c|c|c|c|c} & & \overbrace{}^{l_{i+1}} & \hdots & \overbrace{}^{l_j} & \overbrace{}^{-lt_{j+1}} & \hdots & \overbrace{}^{-lt_k} \\ \hline \hline lt_1& \{ & L_{1,i+1} & & L_{1,j} & L_{1,j+1} & & L_{1,k} \\ \hline & \vdots & & & & & \\ \hline lt_i & \{ & L_{i,i+1} & & L_{i,j} & L_{i,j+1} & & L_{i,k} \\ \hline l_{i+1} & \{ & L_{i+1,i+1} & & L_{i+1,j} & L_{i+1,j+1} & & L_{i+1,k} \\ \hline & \vdots & & & & & \\ \hline l_j & \{ & L_{j,i+1} & & L_{j,j} & L_{j,j+1} & & L_{j,k} \end{array} \]
in which each entry of $L_{r,c}$ is a linear combination of oriented paths in the quiver $Q$ with starting vertex $v_c$ and ending vertex $v_r$.
The relevance of this is that we can evaluate $L$ at any representation $V \in \wis{rep}_{\alpha}~\Lambda$ and obtain a {\em square matrix} $L(V)$ as $\theta.\alpha = 0$. More precisely, if $V_i$ is the vertex-space of $V$ at vertex $v_i$ (that is, $V_i$ has dimension $e_i$), then evaluating $L$ at $V$ gives a linear map
\[
\xymatrix@R=40pt@C=45pt{ V_{i+1}^{\oplus l_{i+1}} \oplus \hdots \oplus V_j^{\oplus l_j} \oplus V_{j+1}^{\oplus -lt_{j+1}} \oplus \hdots \oplus V_k^{\oplus -lt_k} \ar[d]^{L(V)}\\ V_1^{\oplus lt_1} \oplus \hdots \oplus V_i^{\oplus lt_i} \oplus V_{i+1}^{\oplus l_{i+1}} \oplus \hdots \oplus V_j^{\oplus l_j} }
\]
and $L(V)$ is a square $N \times N$ matrix, where
\[
N = \sum_{r=1}^{i} lt_r e_r + \sum_{r=i+1}^{j} l_r e_r = \sum_{c=i+1}^{j} l_c e_c + \sum_{c=j+1}^{k} (-lt_c) e_c,
\]
the equality of the two expressions being a restatement of $\theta.\alpha = 0$. So we can consider $D(V) = \wis{det} L(V)$ and verify that $D$ is a $\wis{GL}(\alpha)$-semi-invariant polynomial on $\wis{rep}_{\alpha}~\Lambda$ of weight $\chi_{\theta}^l$. The result of \cite{SchofVdB} asserts that these {\em determinantal semi-invariants} are algebra generators of the graded algebra
\[
\mathbb{C}[\wis{rep}_{\alpha}~\Lambda]^{\wis{GL}(\alpha),\theta}.
\]
Observe that this result is to semi-invariants what the result of \cite{LBProcesi} is to invariants. In fact, one can deduce the latter from the former.
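To make the construction concrete in the simplest possible case (a two-vertex illustration, not tied to a specific order), suppose $Q$ has two vertices with $\alpha = (1,1)$ and $\theta = (1,-1)$, and take $l = 1$. Then $L$ is a $1 \times 1$ matrix whose single entry is a linear combination of paths from $v_2$ to $v_1$, say one arrow $a$, and
\[
D(V) = \wis{det}~L(V) = a(V) \in \mathbb{C},
\]
which under $(g_1,g_2) \in \wis{GL}(\alpha) = \mathbb{C}^* \times \mathbb{C}^*$ transforms as $a(V) \mapsto g_1\,a(V)\,g_2^{-1} = \chi_{\theta}(g_1,g_2)\,a(V)$, a determinantal semi-invariant of weight $1$.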
We have seen that a representation $V \in \wis{rep}_{\alpha}~\Lambda$ is $\theta$-semistable if and only if some semi-invariant of weight $\chi_{\theta}^l$ for some $l$ is non-zero on it. This proves
\begin{theorem} The Zariski open subset of $\theta$-semistable $\alpha$-dimensional $\Lambda$-representations can be covered by affine $\wis{GL}(\alpha)$-stable open subsets \[
\wis{rep}^{\theta-semist}_{\alpha}~\Lambda = \bigcup_D \{ V~|~D(V) = \wis{det} L(V) \not= 0 \} \] and hence the moduli space can also be covered by affine open subsets \[ \wis{moduli}^{\theta}_{\alpha}~\Lambda = \bigcup_D~X_D \] where $
X_D = \{ [V] \in \wis{moduli}^{\theta}_{\alpha}~\Lambda~|~D(V)=\wis{det} L(V) \not= 0 \} $. \end{theorem}
Analogous to the rectangular matrix $L$ we define a rectangular matrix $N$ with \begin{itemize} \item{$lt_1+\hdots+lt_i+l_{i+1} + \hdots + l_j$ columns and} \item{$l_{i+1} + \hdots + l_j - l t_{j+1} - \hdots - l t_k$ rows} \end{itemize} \[
N = \quad \begin{array}{cc||c|c|c|c|c|c} & & \overbrace{}^{l t_1} & \hdots & \overbrace{}^{l t_i} & \overbrace{}^{l_{i+1}} & \hdots & \overbrace{}^{l_j} \\ \hline \hline l_{i+1} & \{ & N_{i+1,1} & & N_{i+1,i} & N_{i+1,i+1} & & N_{i+1,j} \\ \hline & \vdots & & & & & \\ \hline l_j & \{ & N_{j,1} & & N_{j,i} & N_{j,i+1} & & N_{j,j} \\ \hline -lt_{j+1} & \{ & N_{j+1,1} & & N_{j+1,i} & N_{j+1,i+1} & & N_{j+1,j} \\ \hline & \vdots & & & & & \\ \hline -l t_k & \{ & N_{k,1} & & N_{k,i} & N_{k,i+1} & & N_{k,j} \end{array} \] filled with new variables and define an {\em extended quiver} $Q_D$ where we adjoin for each entry in $N_{r,c}$ an additional arrow from $v_c$ to $v_r$ and denote it with the corresponding variable from $N$.
Let $I_1$ (resp. $I_2$) be the set of relations in $\mathbb{C} Q_D$ determined from the matrix-equations {\tiny \[ N.L = \begin{bmatrix} \boxed{(v_{i+1})_{l_{i+1} } } & & & & & 0 \\ & \ddots & & & & \\ & & \boxed{(v_j)_{l_j}} & & & \\ & & & \boxed{(v_{j+1})_{-lt_{j+1}} }& & \\ & & & & \ddots & \\ 0 & & & & & \boxed{(v_k)_{-lt_k}} \end{bmatrix} \]} respectively {\tiny \[ L.N = \begin{bmatrix} \boxed{(v_1)_{lt_1}} & & & & & 0 \\ & \ddots & & & & \\ & & \boxed{(v_i)_{lt_i} }& & & \\ & & & \boxed{(v_{i+1})_{l_{i+1}} }& & \\ & & & & \ddots & \\ 0 & & & & & \boxed{(v_j)_{l_j}} \end{bmatrix} \]} where $(v_i)_{n_j}$ is the square $n_j \times n_j$ matrix with $v_i$ on the diagonal and zeroes elsewhere. Define a new quiver order \[ \Lambda_D = \int_{\alpha}~\frac{\mathbb{C} Q_D}{(I,I_1,I_2)} \] then $\Lambda_D$ is a $\mathbb{C}[X_D]$-order in $\wis{alg@n}$. In fact, the construction of $\Lambda_D$ is nothing but a universal localization in the category $\wis{alg@}\alpha$, which is the subcategory of $\wis{alg@n}$ consisting of all $S = \underbrace{\mathbb{C} \times \hdots \times \mathbb{C}}_k$-algebras with trace map specified by $\alpha$.
That is, let $P_i = v_i \Lambda$ be the projective right ideal associated to vertex $v_i$, then $L$ determines a $\Lambda$-module morphism
\[
P = P_{i+1}^{\oplus l_{i+1}} \oplus \hdots \oplus P_k^{\oplus -lt_k} \rTo^{L} P_1^{\oplus lt_1} \oplus \hdots \oplus P_j^{\oplus l_j} = Q.
\]
The algebra map $\Lambda \rTo^{\phi} \Lambda_D$ is universal in $\wis{alg@}\alpha$ with respect to $L \otimes \phi$ being invertible, that is, if $\Lambda \rTo^{\psi} B$ is a morphism in $\wis{alg@}\alpha$ such that $L \otimes \psi$ is an isomorphism of right $B$-modules, then there is a unique map in $\wis{alg@}\alpha$ $\Lambda_D \rTo^u B$ such that $\psi = u \circ \phi$. We claim to have the following situation
\[
\xymatrix@R=40pt@C=45pt{ \wis{rep}^{\theta-semist}_{\alpha}~\Lambda \ar@{->>}[d]_q & q^{-1}(X_D) \simeq \wis{rep}_{\alpha}~\Lambda_D \ar@{_(->}[l] \ar@{->>}[d]\\ \wis{moduli}^{\theta}_{\alpha}~\Lambda & X_D \ar@{_(->}[l] }
\]
which follows from the next lemma.
\begin{lemma} The following statements are equivalent \begin{enumerate} \item{$V \in \wis{rep}_{\alpha}^{\theta-semist}~\Lambda$ lies in $q^{-1}(X_D)$, and} \item{There is a unique extension $\tilde{V}$ of $V$ such that $\tilde{V} \in \wis{rep}_{\alpha}~\Lambda_D$.} \end{enumerate} \end{lemma}
\begin{proof} $1 \Rightarrow 2$ : Because $L(V)$ is invertible we can take $N(V)$ to be its inverse and decompose it into blocks corresponding to the new arrows in $Q_D$. This then defines the unique extension $\tilde{V} \in \wis{rep}_{\alpha}~Q_D$ of $V$. As $\tilde{V}$ satisfies $I$ (because $V$ does) and $I_1$ and $I_2$ (because $N(V) = L(V)^{-1}$) we have that $\tilde{V} \in \wis{rep}_{\alpha}~\Lambda_D$.
$2 \Rightarrow 1$ : Restrict $\tilde{V}$ to the arrows of $Q$ to get a $V \in \wis{rep}_{\alpha}~Q$. As $\tilde{V}$ (and hence $V$) satisfies $I$, $V \in \wis{rep}_{\alpha}~\Lambda$. Moreover, $V$ is such that $L(V)$ is invertible (this follows because $\tilde{V}$ satisfies $I_1$ and $I_2$). Hence, $D(V) \not= 0$ and because $D$ is a $\theta$-semi-invariant it follows that $V$ is an $\alpha$-dimensional $\theta$-semistable representation of $\Lambda$. An alternative method to see this is as follows. Assume that $V$ is {\em not} $\theta$-semistable and let $V' \subset V$ be a subrepresentation such that $\theta.\wis{dim} V' < 0$. Consider the restriction of the linear map $L(V)$ to the subrepresentation $V'$ and look at the commuting diagram \[ \xymatrix@R=40pt@C=45pt{
V_{i+1}^{'\oplus l_{i+1}} \oplus \hdots \oplus V_k^{'\oplus -lt_k} \ar[r]^{L(V)|V'} \ar@{^(->}[d] & V_1^{'\oplus lt_1} \oplus \hdots \oplus V_j^{'\oplus l_j} \ar@{^(->}[d] \\ V_{i+1}^{\oplus l_{i+1}} \oplus \hdots \oplus V_k^{\oplus -lt_k} \ar[r]^{L(V)} & V_1^{\oplus lt_1} \oplus \hdots \oplus V_j^{\oplus l_j} }
\]
As $\theta. \wis{dim} V' < 0$, the source of the top-map has strictly larger dimension than its target, so the top-map must have a non-trivial kernel, which is clearly absurd as we know that $L(V)$ is invertible. \end{proof}
The universal property of the universal localizations $\Lambda_D$ allows us to glue these orders together into a coherent sheaf on $\wis{moduli}_{\alpha}^{\theta}~\Lambda$. Let $\Lambda_{D_1}$ (resp. $\Lambda_{D_2}$) be the order constructed from a rectangular matrix $L_1$ (resp. $L_2$), then we can construct the direct sum map $L = L_1 \oplus L_2$ for which the corresponding semi-invariant $D=D_1D_2$. As $\Lambda \rTo \Lambda_D$ makes the projective module morphisms associated to $L_1$ and $L_2$ into an isomorphism we have uniquely determined maps in $\wis{alg@}\alpha$ \[ \xymatrix{ & \Lambda_D \\ \Lambda_{D_1} \ar[ur]^{i_1} & & \Lambda_{D_2} \ar[ul]_{i_2} } \qquad \text{whence} \qquad \xymatrix@C=10pt{ & \wis{rep}_{\alpha}~\Lambda_D \ar[dl]_{i_1^*} \ar[dr]^{i_2^*}\\ \wis{rep}_{\alpha}~\Lambda_{D_1} & & \wis{rep}_{\alpha}~\Lambda_{D_2} } \] Because $\wis{rep}_{\alpha}~\Lambda_D = q^{-1}(X_D)$ (and similarly for $D_i$) we have that $i_j^*$ are embeddings as are the $i_j$. This way we can glue the sections $\Gamma(X_{D_1},{\mathcal A}) = \Lambda_{D_1}$ with $\Gamma(X_{D_2},{\mathcal A}) = \Lambda_{D_2}$ over their intersection $X_D = X_{D_1} \cap X_{D_2}$ via the inclusions $i_j$. Hence we get a coherent sheaf of non-commutative algebras ${\mathcal A}$ over $\wis{moduli}^{\theta}_{\alpha}~\Lambda$. Further, by localizing the orders $\Lambda_{D_j}$ at the central element $D$ we have that the algebra morphisms $i_j$ are central extensions, that is satisfying \[ \Lambda_D = \Lambda_{D_j} Z(\Lambda_D) \] which implies that we have morphisms between the non-commutative structure sheaves \[ (\wis{spec}~\Lambda_{D_j},{\mathcal O}^{nc}_{\Lambda_{D_j}}) \rTo (\wis{spec}~\Lambda_D,{\mathcal O}^{nc}_{\Lambda_D}) \] which allow us to define a non-commutative variety $\wis{spec}~{\mathcal A}$ by gluing the non-commutative structure sheaves of the various $\Lambda_{D_j}$ together. Observe that the central scheme of this non-commutative variety is $\wis{moduli}_{\alpha}^{\theta}~\Lambda$ with its structure sheaf. This concludes the proof of the following result.
\begin{theorem} Let $(\Lambda,\alpha,\theta)$ be a good $\mathfrak{m}$-setting. Then, there is a sheaf of smooth orders ${\mathcal A}$ over the moduli space $\wis{moduli}_{\alpha}^{\theta}~\Lambda$ such that the diagram below is commutative \[ \xymatrix@R=40pt@C=45pt{ \wis{spec}~{\mathcal A} \ar[d]_c \ar[rd]^{\phi} \\ \wis{moduli}^{\theta}_{\alpha}~\Lambda \ar@{->>}[r]^{\pi} & X = \wis{spec}~R } \] Here, $\wis{spec}~{\mathcal A}$ is a non-commutative variety obtained by gluing affine non-commutative structure sheaves $(\wis{spec}~\Lambda_D,{\mathcal O}^{nc}_{\Lambda_D})$ together and where $c$ is the map which intersects locally a prime ideal of $\Lambda_D$ with its center. Because ${\mathcal A}$ is a sheaf of smooth orders in $\wis{alg@n}$, $\phi$ can be viewed as a {\em non-commutative desingularization} of $X$.
Moreover, if $\theta$ is such that all $\theta$-semistable $\alpha$-dimensional $\Lambda$-representations are actually $\theta$-stable, then ${\mathcal A}$ is a sheaf of Azumaya algebras over $\wis{moduli}^{\theta}_{\alpha}~\Lambda$ and in this case $\pi$ is a commutative desingularization of $X$. If, in addition, $\alpha$ is an indivisible dimension vector (that is, $gcd(\alpha) = 1$) then ${\mathcal A} \simeq End~\mathcal{P}$ for some vector bundle $\mathcal{P}$ of rank $n$ over $\wis{moduli}^{\theta}_{\alpha}~\Lambda$. \end{theorem}
In general, there may remain singularities in $\wis{moduli}^{\theta}_{\alpha}~\Lambda$, but these have been fully classified in dimensions $\leq 6$ and reduction steps exist which prove that in each dimension there is a finite list of such possible remaining singularities. We will recall these steps briefly, the starting point being the local marked quiver setting $(Q^{\dagger},\alpha)$ associated to a point $\mathfrak{n} \in \wis{moduli}^{\theta}_{\alpha}~\Lambda$. Remark that $\mathfrak{n} \in X_D$ for some $D$ and, as $\Lambda_D$ is a smooth order in $\wis{alg@n}$, the defect $\wis{def}_{\mathfrak{n}}~\Lambda_D = 0$, so the local marked quiver setting determines the \'etale local structure of $\wis{moduli}^{\theta}_{\alpha}~\Lambda$ near $\mathfrak{n}$.
The reduction steps below were discovered by R. Bocklandt in his Ph.D. thesis \cite{BocklandtThesis} (see also \cite{Bocklandtpaper}) in which he classifies quiver settings having a regular ring of invariants. These steps were slightly extended in \cite{RBLBVdW} in order to classify central singularities of smooth orders. All reductions are made locally around a vertex in the marked quiver. There are three types of allowed moves
\par \vskip 3mm \noindent {\bf 1. Vertex removal} Assume we have a marked quiver setting $(Q^{\dagger},\alpha)$ and a vertex $v$ such that the local structure of $(Q^{\dagger},\alpha)$ near $v$ is indicated by the picture on the left below, that is, inside the vertices we have written the components of the dimension vector and the subscripts of an arrow indicate how many such arrows there are in $Q^{\dagger}$ between the indicated vertices. Define the new marked quiver setting $(Q^{\dagger}_R,\alpha_R)$ obtained by the operation $R^v_V$ which removes the vertex $v$ and composes all arrows through $v$; the dimensions of the other vertices are unchanged:
\[ \left[ ~\vcenter{ \xymatrix@=1cm{ \vtx{u_1}&\cdots &\vtx{u_k}\\ &\vtx{\alpha_v}\ar[ul]^{b_1}\ar[ur]_{b_k}&\\ \vtx{i_1}\ar[ur]^{a_1}&\cdots &\vtx{i_l}\ar[ul]_{a_l}}} ~\right] \quad \rTo^{R^v_V} \quad \left[~\vcenter{ \xymatrix@=1cm{ \vtx{u_1}&\cdots &\vtx{u_k}\\ &&\\
\vtx{i_1}\ar[uu]^{c_{11}}\ar[uurr]_<<{c_{1k}}&\cdots &\vtx{i_l}\ar[uu]|{c_{lk}}\ar[uull]^<<{c_{l1}}}} ~\right]. \] where $c_{ij} = a_ib_j$ (observe that some of the incoming and outgoing vertices may be the same so that one obtains loops in the corresponding vertex). One is allowed to make this reduction step provided either of the following conditions is met \[
\chi_Q(\alpha,\epsilon_v) \geq 0 \quad \Leftrightarrow \quad \alpha_v \geq \sum_{j=1}^l a_j i_j \] \[
\chi_Q(\epsilon_v,\alpha) \geq 0\quad \Leftrightarrow \quad \alpha_v \geq \sum_{j=1}^k b_j u_j \] (observe that if we started off from a marked quiver setting $(Q^{\dagger},\alpha)$ coming from an order, then these inequalities must actually be equalities).
\par \vskip 3mm \noindent {\bf 2. Loop removal} If $v$ is a vertex with vertex-dimension $\alpha_v = 1$ and having $k \geq 1$ loops, then let $(Q^{\dagger}_R,\alpha_R)$ be the marked quiver setting obtained by the loop removal operation $R^v_l$
\[ \left[~\vcenter{ \xymatrix@=1cm{ &\vtx{1}\ar@{..}[r]\ar@{..}[l]\ar@(lu,ru)@{=>}^k&}} ~\right]\quad \rTo^{R^v_l} \quad \left[~\vcenter{ \xymatrix@=1cm{ &\vtx{1}\ar@{..}[r]\ar@{..}[l]\ar@(lu,ru)@{=>}^{k-1}&}} ~\right],\]
removing one loop in $v$ and keeping the same dimension vector.
\par \vskip 3mm \noindent {\bf 3. Loop removal} If the local situation in $v$ is such that there is exactly one (marked) loop in $v$, the dimension vector in $v$ is $k \geq 2$ and there is exactly one arrow leaving $v$, which ends in a vertex of dimension $1$, then one is allowed to make the reduction $R^v_L$ indicated below
\[ \left[~\vcenter{ \xymatrix@=1cm{
&\vtx{k}\ar[dl]\ar@(lu,ru)|{\bullet}&&\\ \vtx{1}&\vtx{u_1}\ar[u]&\cdots &\vtx{u_m}\ar[ull]}} ~\right]\quad \rTo^{R^v_L} \quad \left[~\vcenter{ \xymatrix@=1cm{ &\vtx{k}\ar@2[dl]_{k}&&\\ \vtx{1}&\vtx{u_1}\ar[u]&\cdots &\vtx{u_m}\ar[ull]}} ~\right], \]
\[ \left[~\vcenter{ \xymatrix@=1cm{ &\vtx{k}\ar[dl]\ar@(lu,ru)&&\\ \vtx{1}&\vtx{u_1}\ar[u]&\cdots &\vtx{u_m}\ar[ull]}} ~\right]\quad \rTo^{R^v_L} \quad \left[~\vcenter{ \xymatrix@=1cm{ &\vtx{k}\ar@2[dl]_k&&\\ \vtx{1}&\vtx{u_1}\ar[u]&\cdots &\vtx{u_m}\ar[ull]}} ~\right]. \]
Similarly, if there is one (marked) loop in $v$ and $\alpha_v = k \geq 2$ and there is only one arrow arriving at $v$ coming from a vertex of dimension vector $1$, then one is allowed to make the reduction $R^v_L$ \[ \left[~\vcenter{ \xymatrix@=1cm{
&\vtx{k}\ar[d]\ar[drr]\ar@(lu,ru)|{\bullet}&&\\ \vtx{1}\ar[ur]&\vtx{u_1}&\cdots &\vtx{u_m}}} ~\right]\quad \rTo^{R^v_L} \quad \left[~\vcenter{ \xymatrix@=1cm{ &\vtx{k}\ar[d]\ar[drr]&&\\ \vtx{1}\ar@2[ur]^k&\vtx{u_1}&\cdots &\vtx{u_m}}} ~\right], \]
\[ \left[~\vcenter{ \xymatrix@=1cm{ &\vtx{k}\ar[d]\ar[drr]\ar@(lu,ru)&&\\ \vtx{1}\ar[ur]&\vtx{u_1}&\cdots &\vtx{u_m}}} ~\right]\quad \rTo^{R^v_L} \quad \left[~\vcenter{ \xymatrix@=1cm{ &\vtx{k}\ar[d]\ar[drr]&&\\ \vtx{1}\ar@2[ur]^k&\vtx{u_1}&\cdots &\vtx{u_m}}} ~\right]. \] The relevance of these reduction rules is that if \[ (Q^{\dagger}_1,\alpha_1) \rightsquigarrow (Q^{\dagger}_2,\alpha_2) \] is a sequence of legal reductions, then \[ \mathbb{C}[\wis{rep}_{\alpha_1}~Q^{\dagger}_1]^{\wis{GL}(\alpha_1)} \simeq \mathbb{C}[\wis{rep}_{\alpha_2}~Q^{\dagger}_2]^{\wis{GL}(\alpha_2)}[y_1,\hdots,y_z] \] where $z$ is the sum of all loops removed in $R^v_l$ reductions plus the sum of $\alpha_v$ for each reduction step $R^v_L$ involving a genuine loop and the sum of $\alpha_v - 1$ for each reduction step $R^v_L$ involving a marked loop. That is, marked quiver settings which belong to the same reduction tree have smooth equivalent invariant rings.
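As a toy illustration of these rules (the quiver here is chosen ad hoc), consider the setting on two vertices of dimension $1$ with one arrow $a : v_1 \rightarrow v_2$ and one arrow $b : v_2 \rightarrow v_1$. Vertex removal $R^{v_2}_V$ is allowed ($\alpha_{v_2} = 1$ equals both sums in the displayed inequalities) and composes $a$ and $b$ into a single loop at $v_1$; a subsequent loop removal $R^{v_1}_l$ deletes this loop, leaving a single vertex without arrows. The invariant rings behave as predicted:
\[
\mathbb{C}[\wis{rep}_{\alpha}~Q]^{\wis{GL}(\alpha)} = \mathbb{C}[ab] \simeq \mathbb{C}[y_1],
\]
one polynomial variable for the single loop removed ($z = 1$).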
\begin{theorem} Let $(Q^{\dagger},\alpha)$ be a marked quiver setting. Then there is a unique reduced setting (that is, one having no further admissible reduction steps) $(Q^{\dagger}_0,\alpha_0)$ for which there exists a reduction procedure
\[
(Q^{\dagger},\alpha) \rightsquigarrow (Q^{\dagger}_0,\alpha_0).
\]
We will denote this unique setting by $Z(Q^{\dagger},\alpha)$. \end{theorem}
The following result is a slight adaptation of Bocklandt's main result \cite{Bocklandtpaper}.
\begin{theorem} Let $(Q^{\dagger}_{\mathfrak{n}},\alpha_{\mathfrak{n}})$ be the local marked quiver setting of $\mathfrak{n} \in \wis{moduli}^{\theta}_{\alpha}~\Lambda$. Then, $\mathfrak{n}$ is a smooth point if and only if the unique associated reduced setting \[
Z(Q^{\dagger}_{\mathfrak{n}},\alpha_{\mathfrak{n}}) \in \{~\xymatrix{\vtx{k}} \qquad \xymatrix{\vtx{k} \ar@(ul,ur)} \qquad \xymatrix{\vtx{k} \ar@(ul,ur)|{\bullet}} \qquad \xymatrix{\vtx{2} \ar@(dl,ul) \ar@(dr,ur)} \qquad \xymatrix{\vtx{2} \ar@(dl,ul) \ar@(dr,ur)|{\bullet}} \qquad \xymatrix{\vtx{2} \ar@(dl,ul)|{\bullet} \ar@(dr,ur)|{\bullet}}~\}.
\]
The Azumaya points are such that $Z(Q^{\dagger}_{\mathfrak{n}},\alpha_{\mathfrak{n}}) = \xymatrix{\vtx{1}}$, hence the singular locus of $\wis{moduli}^{\theta}_{\alpha}~\Lambda$ is contained in the ramification locus of $\mathcal{A}$ but may be strictly smaller.
To classify the central singularities of smooth orders we may reduce to zero-settings $(Q^{\dagger},\alpha) = Z(Q^{\dagger},\alpha)$. For such a setting we have for all vertices $v_i$ the inequalities
\[
\chi_Q(\alpha,\delta_i) < 0 \qquad \text{and} \qquad \chi_Q(\delta_i,\alpha) < 0
\]
and the dimension of the central variety can be computed from the Euler-form $\chi_Q$. This gives us an estimate of $d = \wis{dim}~X = \wis{dim}~\wis{moduli}^{\theta}_{\alpha}~\Lambda$ which is very efficient for classifying the singularities in low dimensions.
\begin{theorem} \label{counting} Let $(Q^{\dagger},\alpha) = Z(Q^{\dagger},\alpha)$ be a reduced setting on $k \geq 2$ vertices. Then, \[ \wis{dim}~X \geq 1 + \sum_{\xymatrix@=1cm{ \vtx{a} }}^{a \geq 1} a +
\sum_{\xymatrix@=1cm{ \vtx{a}\ar@(ul,dl)|{\bullet} }}^{a > 1}(2a-1) +
\sum_{\xymatrix@=1cm{ \vtx{a}\ar@(ul,dl)}}^{a > 1}(2a) + \sum_{\xymatrix@=1cm{ \vtx{a}\ar@(ul,dl)|{\bullet}\ar@(ur,dr)|{\bullet}}}^{a > 1} (a^2+a-2) + \] \[
\sum_{\xymatrix@=1cm{ \vtx{a}\ar@(ul,dl)|{\bullet}\ar@(ur,dr)}}^{a > 1} (a^2+a-1) + \sum_{\xymatrix@=1cm{ \vtx{a}\ar@(ul,dl)\ar@(ur,dr)}}^{a > 1} (a^2+a) + \hdots +
\sum_{\xymatrix@=1cm{ \vtx{a}\ar@(ul,dl)|{\bullet}_{k}\ar@(ur,dr)^{l}}}^{a > 1} ((k+l-1)a^2+a-k) + \hdots \] In this sum the contribution of a vertex $v$ with $\alpha_v = a$ is determined by the number of (marked) loops in $v$. By the reduction steps (marked) loops only occur at vertices where $\alpha_v > 1$. \end{theorem}
For example, this shows that there are no central singularities in dimension $d=2$ and that for $d=3$ the only reduced singular setting is
\[
Z(Q^{\dagger},\alpha) = \xymatrix{\vtx{1} \ar@/^2ex/[rr]_a \ar@/^4ex/[rr]^b & & \vtx{1} \ar@/^2ex/[ll]_c \ar@/^4ex/[ll]^d}.
\]
The ring of polynomial invariants $R^{\alpha}_{Q^{\dagger}}$ is generated by traces along oriented cycles in $Q^{\dagger}$ so is generated by the invariants
\[
x = ac, \quad y = ad, \quad u = bc \quad \text{and} \quad v = bd,
\]
which satisfy the single relation $xv = (ac)(bd) = (ad)(bc) = yu$, whence
\[
R^{\alpha}_{Q^{\dagger}} \simeq \frac{\mathbb{C}[x,y,u,v]}{(xv-yu)}.
\]
Hence, the only \'etale type of central singularity in dimension three is the {\em conifold singularity}.
\begin{example}[dimension $d=4$] If $(Q^{\dagger},\alpha)$ is a reduced setting for dimension $4$ then $Q^{\dagger}$ can have at most three vertices. If there is just one, its dimension must be $1$ (smooth setting) or $2$ in which case the only new type is \[
Z(Q^{\dagger},\alpha) = \xymatrix{\vtx{2} \ar@(ul,dl) \ar@(ur,dr)|{\bullet}} \] which is again a smooth setting. If there are two vertices, both must have dimension $1$ and have at least two incoming and outgoing arrows as in the previous example. The only new type that occurs is \[ Z(Q^{\dagger},\alpha) = \xymatrix{ \vtx{1} \ar@/^/[rr] \ar@/^3ex/[rr] & & \vtx{1} \ar@/^/[ll] \ar@/^2ex/[ll] \ar@/^3ex/[ll]} \]
\par \vskip 2mm \noindent for which one calculates as before the ring of invariants to be \[ R^{\alpha}_{Q^{\dagger}} = \frac{\mathbb{C}[a,b,c,d,e,f]}{(ae-bd,af-cd,bf-ce)}. \] If there are three vertices, all must have dimension $1$ and each vertex must have at least two incoming and two outgoing arrows. There are just two such possibilities in dimension $4$: \[ Z(Q^{\dagger},\alpha) \in \left\{~\vcenter{\xymatrix{\vtx{1}\ar@/^/[rr]\ar@/^/[rd]&&\vtx{1}\ar@/^/[ll]\ar@/^/[ld]\\ &\vtx{1}\ar@/^/[ru]\ar@/^/[lu]&}} \qquad \vcenter{ \xymatrix{\vtx{1}\ar@2@/^/[rr]&&\vtx{1}\ar@2@/^/[ld]\\ &\vtx{1}\ar@2@/^/[lu]}}~ \right\}. \] The corresponding rings of polynomial invariants are \[ R^{\alpha}_{Q^{\dagger}} = \frac{\mathbb{C}[x_1,x_2,x_3,x_4,x_5]}{(x_4x_5-x_1x_2x_3)} \qquad \text{resp.} \qquad R^{\alpha}_{Q^{\dagger}} = \frac{\mathbb{C}[x_1,x_2,x_3,x_4,y_1,y_2,y_3,y_4]}{R_2} \] where $R_2$ is the ideal generated by all $2 \times 2$ minors of the matrix \[ \begin{bmatrix} x_1 & x_2 & x_3 & x_4 \\ y_1 & y_2 & y_3 & y_4 \end{bmatrix}. \] \end{example}
In \cite{RBLBVdW} it was proved that there are exactly ten types of smooth order central singularities in dimension $d=5$ and $53$ in dimension $d=6$.
\section{The conifold algebra}
Quiver-diagrams play an important role in string theory as they encode intersection information of so-called {\em wrapped $D$-branes} (higher dimensional strings) in Calabi-Yau manifolds. One of the earliest models, studied by I. R. Klebanov and E. Witten \cite{KlebanovWitten}, was based on the conifold singularity (see the previous section). A {\em $D3$-brane} is a three-dimensional (over the real numbers $\mathbb{R}$) submanifold of a Calabi-Yau manifold; as the latter is a six-dimensional (again over the real numbers) manifold, it follows that two $D3$-branes in sufficiently general position intersect each other in a finite number of points. If one wraps two sufficiently general $D3$-branes around a conifold singularity, their intersection data will be encoded in the quiver-diagram \[
\xymatrix{\vtx{} \ar@/^1ex/[rr]|{x_1} \ar@/^3ex/[rr]|{x_2} & & \vtx{} \ar@/^1ex/[ll]|{y_1} \ar@/^3ex/[ll]|{y_2}}. \] Without going into details (for more information see \cite{Berenstein}), one can associate to such a quiver-diagram a non-commutative algebra describing the vacua with respect to a certain {\em super-potential}, which is a suitable linear combination of oriented cycles in the quiver-diagram. In the case of two $D3$-branes wrapped around a conifold singularity one obtains:
\begin{definition} The {\em conifold algebra} $\Lambda_c$ is the non-commutative affine $\mathbb{C}$-algebra generated by three non-commuting variables $X,Y$ and $Z$ and satisfying the following relations \[ \begin{cases} XZ &= - ZX \\ YZ &= - ZY \\ X^2Y &= YX^2 \\ Y^2X &= XY^2 \\ Z^2 &= 1 \end{cases} \] That is, $\Lambda_c$ has a presentation \[ \Lambda_c = \frac{\mathbb{C} \langle X,Y,Z \rangle}{(Z^2-1,XZ+ZX,YZ+ZY,[X^2,Y],[Y^2,X])} \] where $[A,B]=AB-BA$ denotes the commutator. One sometimes encounters another presentation of $\Lambda_c$ as \[ \frac{\mathbb{C} \langle X,Y,Z \rangle}{(Z^2-1,XZ+ZX,YZ+ZY,[Z[X,Y],X],[Z[X,Y],Y])} \] but as $Z$ is a unit, it is easily seen that both presentations give isomorphic $\mathbb{C}$-algebras. \end{definition}
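Indeed, modulo the relations $XZ=-ZX$ and $YZ=-ZY$ one computes \[ [Z[X,Y],X] = Z[X,Y]X + ZX[X,Y] = Z(X^2Y-YX^2) = Z[X^2,Y] \qquad \text{and similarly} \qquad [Z[X,Y],Y] = -Z[Y^2,X], \] so, $Z$ being invertible, the two sets of relations generate the same ideal.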
\begin{proposition} In the conifold algebra $\Lambda_c$ the elements \[ x = X^2, \qquad y = Y^2 \qquad \text{and} \qquad z = \frac{1}{2}(XY+YX) \] are algebraically independent central elements and $\Lambda_c$ is a free module over the central subalgebra $C = \mathbb{C}[x,y,z]$ with basis \[ \Lambda_c = C.1 \oplus C.X \oplus C.Y \oplus C.Z \oplus C.XY \oplus C.XZ \oplus C.YZ \oplus C.XYZ \] In fact, the conifold algebra is a skew group algebra \[ \Lambda_c \simeq \mathbb{C}[z,X][Y,\sigma,\delta] \# \mathbb{Z}/2\mathbb{Z} \] for some automorphism $\sigma$ and $\sigma$-derivation $\delta$. In particular, $\Lambda_c$ is a regular algebra of dimension three. \end{proposition}
\begin{proof} Consider the subalgebra $S$ of $\Lambda_c$ generated by $X$ and $Y$, that is \[ S = \frac{\mathbb{C} \langle X,Y \rangle}{([X^2,Y],[Y^2,X])} \] Then clearly $x$ and $y$ are central elements of $S$, as is $z = \frac{1}{2}(XY+YX)$ because \[ (XY+YX)X = XYX+YX^2=XYX+X^2Y=X(YX+XY) \] Now, consider the Ore extension \[ S' = \mathbb{C}[z,X][Y,\sigma,\delta] \quad \text{with} \quad \sigma(z)=z,\sigma(X)=-X\quad \text{and} \quad \delta(z)=0, \delta(X)=2z \] This means that $z$ is a central element of $S'$ and that $YX=\sigma(X)Y+\delta(X)=-XY+2z$ whence the map \[ S \rTo S' \qquad \text{defined by} \qquad X \mapsto X \quad \text{and} \quad Y \mapsto Y \] is an isomorphism. By standard results, the {\em center} of $S'$ is equal to \[ Z(S') = \mathbb{C}[x,y,z] \] whence the three elements are algebraically independent. Consider the automorphism $\phi$ of $S$ defined by $\phi(X) = -X$ and $\phi(Y)=-Y$; then the conifold algebra can be written as the {\em skew group ring} \[ \Lambda_c \simeq S \# \mathbb{Z}/2\mathbb{Z} \] As $Z(S) = \mathbb{C}[x,y,z]$ is fixed under $\phi$ the elements $x = x \# 1$, $y = y \# 1$ and $z = z \# 1$ are central in $\Lambda_c$ and as $S'$ is free over $Z(S')$ with basis \[ S' = Z(S').1 \oplus Z(S').X \oplus Z(S').Y \oplus Z(S').XY \] the result on freeness of $\Lambda_c$ over $\mathbb{C}[x,y,z]$ follows. \end{proof}
If $C$ is a commutative $\mathbb{C}$-algebra and if $M_q$ is a {\em symmetric} $m \times m$ matrix with entries in $C$, then we have a {\em bilinear form} on the free $C$-module $V = C \oplus \hdots \oplus C$ of rank $m$ defined by \[ B_q(v,w) = \begin{bmatrix} v_1 & v_2 & \hdots & v_m \end{bmatrix}.\begin{bmatrix} b_{11} & b_{12} & \hdots & b_{1m} \\ b_{12} & b_{22} & \hdots & b_{2m} \\ \vdots & \vdots & & \vdots \\ b_{1m} & b_{2m} & \hdots & b_{mm} \end{bmatrix}.\begin{bmatrix} w_1 \\ w_2 \\ \vdots \\ w_m \end{bmatrix}. \] The associated {\em Clifford algebra} $Cl_q(V)$ is then the quotient of the {\em tensor algebra} $T_C(V) = C \langle v_1,\hdots,v_m \rangle$, where $\{ v_1,\hdots,v_m \}$ is a basis of the free $C$-module $V$, by the defining relations \[ Cl_q(V) = \frac{T_C(V)}{(v \otimes w + w \otimes v - 2B_q(v,w)~:~v,w \in V)} .\] As an example, the algebra $S \simeq S'$ constructed in the above proof is the Clifford algebra of the binary quadratic form over $C = \mathbb{C}[x,y,z]$ \[ B_q = \begin{bmatrix} x & z \\ z & y \end{bmatrix} \qquad \text{on} \qquad V = C.X \oplus C.Y \] as $B_q(X,X)=x, B_q(Y,Y)=y$ and $B_q(X,Y) = z$. As the entries of the symmetric matrix are independent variables, we call this algebra the {\em generic binary Clifford algebra}, see \cite{LB2x2} for more details and the structure of higher generic Clifford algebras.
\begin{lemma} The conifold algebra $\Lambda_c$ is the {\em Clifford algebra} of a non-degenerate ternary quadratic form over $\mathbb{C}[x,y,z]$. \end{lemma}
\begin{proof} Consider the free $C=\mathbb{C}[x,y,z]$-module of rank three $V = C.X \oplus C.Y \oplus C.Z$ and the symmetric $3 \times 3$ matrix \[ B_q = \begin{bmatrix} x & z & 0 \\ z & y & 0 \\ 0 & 0 & 1 \end{bmatrix} \] then it follows that $\Lambda_c \simeq Cl_q(V)$ as $B_q(X,Z)=0, B_q(Y,Z)=0$, $B_q(Z,Z)=1$ and the remaining inner products are those of $S \simeq S'$ above. \end{proof}
Whereas $C=\mathbb{C}[x,y,z]$ is a central subalgebra of $\Lambda_c$, the center itself is strictly larger. Take $D=XYZ-YXZ$ and verify that \begin{eqnarray*} (XYZ-YXZ)X =& -X(2z-XY)Z+xYZ \\ =& -2zXZ+2xYZ \\ =& xYZ - (2zXZ-YX^2Z) \\ =& X(XYZ-YXZ) \end{eqnarray*} and a similar calculation shows that $DY=YD$ and $DZ=ZD$. Moreover, $D \notin \mathbb{C}[x,y,z]$. Indeed, in the description $\Lambda_c \simeq S \# \mathbb{Z}/2\mathbb{Z}$ we have that \[ \mathbb{C}[x,y,z] \subset S \# 1 \qquad \text{whereas} \qquad D = XYZ-YXZ = (XY-YX) \# Z \in S \# Z. \] Moreover, we have that $D^2 \in \mathbb{C}[x,y,z]$ because \[ D^2 = (XYZ-YXZ)^2 = 2z(XY+YX) - 4xy = 4(z^2-xy) \in \mathbb{C}[x,y,z]. \]
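(In the last computation we used that $Z$ anti-commutes with both $X$ and $Y$ and therefore commutes with the products $XY$ and $YX$; the same observation gives a one-line verification of $DZ=ZD$, as \[ DZ = (XY-YX)Z^2 = Z(XY-YX)Z = ZD, \] and the verification of $DY=YD$ runs along the same lines as the one for $DX=XD$ above.)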
\begin{lemma} The center $R_c$ of the conifold algebra $\Lambda_c$ is isomorphic to the coordinate ring of the conifold singularity \[ R_c \simeq \frac{\mathbb{C}[a,b,c,d]}{(ab-cd)}. \] \end{lemma}
\begin{proof} Let $Z$ be the central subalgebra generated by $x,y,z$ and $D$, then a presentation of $Z$ is \[ Z = \frac{\mathbb{C}[x,y,z,D]}{(D^2-4(z^2-xy))} \simeq \frac{\mathbb{C}[a,b,c,d]}{(ab-cd)} \] where the second isomorphism comes from the following change of coordinates \[ a = D + 2z,\quad b = 2z-D,\quad c=2x \quad \text{and} \quad d = 2y \qquad \text{(indeed, $ab = 4z^2-D^2 = 4xy = cd$)}.\] As a consequence $Z$ is the coordinate ring of the conifold singularity and is in particular integrally closed. As $\Lambda_c$ is a finite module over $Z$ it follows that if $Z \not= R_c$ then the field of fractions $L$ of $R_c$ would be a proper extension of the field of fractions $K$ of $Z$. This can be contradicted using classical results on Clifford algebras over fields. To begin, note that as the ternary form \[ B_q = \begin{bmatrix} x & z & 0 \\ z & y & 0 \\ 0 & 0 & 1 \end{bmatrix} \] has square-free determinant $xy-z^2 \notin \mathbb{C}(x,y,z)^{*2}$, the Clifford algebra over the rational field $\mathbb{C}(x,y,z)$ \[ \Lambda_c \otimes_{\mathbb{C}[x,y,z]} \mathbb{C}(x,y,z) \] is a central simple algebra of dimension $4$ over its center $K'$ which is a quadratic field extension of $\mathbb{C}(x,y,z)$ determined by adjoining the square root of the determinant. As $[K : \mathbb{C}(x,y,z)] = 2$ it follows that $K=K'$ and hence also that $K=L$ whence $Z=R_c$. \end{proof}
Let us relate the non-commutative affine variety $\wis{spec}~\Lambda_c$ to that of the central subalgebra $\wis{spec}~\mathbb{C}[x,y,z] = \mathbb{A}^3$.
\begin{lemma} Intersecting two-sided prime ideals of $\Lambda_c$ with the central subalgebra $\mathbb{C}[x,y,z]$ determines a continuous map \[ \wis{spec}~\Lambda_c \rTo^{\phi} \mathbb{A}^3 \] with the following fiber information: \begin{enumerate} \item{If $\mathfrak{n} \notin \mathbb{V}(xy-z^2)$, then $\phi^{-1}(\mathfrak{n})$ consists of two points.} \item{If $(x,y,z) \not= \mathfrak{n} \in \mathbb{V}(xy-z^2)$, then $\phi^{-1}(\mathfrak{n})$ consists of one point.} \item{If $(x,y,z) = \mathfrak{n}$, then $\phi^{-1}(\mathfrak{n})$ consists of two points.} \end{enumerate} \end{lemma}
\begin{proof} For $P=(a,b,c) \in \mathbb{A}^3$ the quotient of $\Lambda_c$ by the extended two-sided ideal $\Lambda_c \mathfrak{n}_P$ is the Clifford algebra $Cl_P$ over $\mathbb{C}$ of the ternary quadratic form \[ B_P = \begin{bmatrix} a & c & 0 \\ c & b & 0 \\ 0 & 0 & 1 \end{bmatrix} \] and the elements of $\phi^{-1}(\mathfrak{n}_P)$ are the two-sided maximal ideals of $Cl_P$. We can diagonalize the symmetric matrix, that is, there is a base-change matrix $M \in \wis{GL}_3$ such that \[ M^{\tau}.\begin{bmatrix} a & c & 0 \\ c & b & 0 \\ 0 & 0 & 1 \end{bmatrix}.M = \begin{bmatrix} u & 0 & 0 \\ 0 & v & 0 \\ 0 & 0 & 1 \end{bmatrix} = B_Q \] (with $uv = ab-c^2$) and hence $Cl_P \simeq Cl_Q$. The Clifford algebra $Cl_Q$ is the $8$-dimensional $\mathbb{C}$-algebra generated by $x_1,x_2$ and $x_3$ satisfying the defining relations \[ x_1^2=u,~x_2^2=v,~x_3^2=1 \qquad \text{and} \qquad x_ix_j+x_jx_i=0~\text{for $i \not= j$.} \] If $uv \not= 0$ then $B_Q$ is a non-degenerate ternary quadratic form with determinant a square in $\mathbb{C}^*$ whence $Cl_Q$ is the direct sum of two copies of $M_2(\mathbb{C})$. If $uv=0$, say $u=0$ and $v \not= 0$, then $x_1$ generates a nilpotent two-sided ideal of $Cl_Q$ and the quotient is the Clifford algebra of the non-degenerate binary quadratic form \[ B_R = \begin{bmatrix} v & 0 \\ 0 & 1 \end{bmatrix} \qquad \text{whence} \qquad Cl_R \simeq M_2(\mathbb{C}) \] as any such algebra is a quaternion algebra. Finally, if both $u=0=v$ then the two-sided ideal $I$ generated by $x_1$ and $x_2$ is nilpotent and the quotient \[ Cl_Q/I = \mathbb{C}[x_3]/(x_3^2-1) \simeq \mathbb{C} \oplus \mathbb{C} .\] As the maximal ideals of a non-commutative algebra $R$ and of a quotient $R/I$ by a nilpotent ideal $I$ coincide, the statements follow. \end{proof}
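For instance, for $P=(1,0,0) \in \mathbb{V}(xy-z^2)$ the form is already diagonal with $u=1$ and $v=0$, so the fiber consists of the single maximal ideal of $M_2(\mathbb{C})$, whereas for $P=(0,0,0)$ both $u$ and $v$ vanish and the fiber consists of the two maximal ideals of $\mathbb{C} \oplus \mathbb{C}$.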
\begin{lemma} Intersecting with the center $R_c$ determines a continuous map \[ \wis{spec}~\Lambda_c \rTo^{\psi} \wis{spec}~R_c, \] which is a one-to-one correspondence away from the unique singularity of $\wis{spec}~R_c$ where the fiber consists of two points. \end{lemma}
\begin{proof} The inclusion $\mathbb{C}[x,y,z] \subset R_c$ determines a two-fold cover \[ \wis{spec}~R_c = \mathbb{V}(D^2-4(z^2-xy)) \subset \mathbb{A}^4 \rOnto^c \mathbb{A}^3 \qquad (x,y,z,D) \mapsto (x,y,z) \] which is {\em ramified} over $\mathbb{V}(z^2-xy)$. That is, if $P=(a,b,c) \notin \mathbb{V}(z^2-xy)$ then there are exactly two points lying over it \[ P_1 = (a,b,c,+2\sqrt{c^2-ab}) \qquad \text{and} \qquad P_2 = (a,b,c,-2\sqrt{c^2-ab}). \] On the other hand, if $P = (a,b,c) \in \mathbb{V}(z^2-xy)$, then there is just one point lying over it: $(a,b,c,0)$. The statement then follows from combining this covering information with the composition map \[ \wis{spec}~\Lambda_c \rTo^{\psi} \wis{spec}~R_c \rTo^c \mathbb{A}^3 \] which is $\phi$ in the foregoing lemma. \end{proof}
Observe that $\psi$ is a homeomorphism on $\wis{spec}~\Lambda_c - \mathbb{V}(x,y,z)$ and hence can be seen as a non-commutative birational map. If $\mathfrak{m}$ lies in this open set then \[ \Lambda_c/\mathfrak{m} \simeq M_2(\mathbb{C}) \] whereas for the two maximal ideals $\mathfrak{m}_+ = (X,Y,Z-1)$ and $\mathfrak{m}_- = (X,Y,Z+1)$ lying over the conifold singularity we have \[ \Lambda_c/\mathfrak{m}_+ \simeq \mathbb{C} \simeq \Lambda_c/\mathfrak{m}_-~.\] We denote the associated one-dimensional $\Lambda_c$-representations by $\phi_+$ resp. $\phi_-$. It is easy to verify that these are the only one-dimensional $\Lambda_c$-representations.
\begin{proposition} For the conifold algebra $\Lambda_c$, the representation variety $\wis{rep}_2~\Lambda_c$ is a smooth affine variety having three disjoint irreducible components. Two of these components are a point, the third component $\wis{trep}_2~\Lambda_c$ has dimension $6$. In particular, the conifold algebra $\Lambda_c$ is a smooth order whence the birational map $\psi$ above can be viewed as a non-commutative desingularization. \end{proposition}
\begin{proof} From the defining relation $Z^2 = 1$ it follows that the image of $Z$ in any finite dimensional representation has eigenvalues $\pm 1$. Hence, after simultaneous conjugation of the images of $X$, $Y$ and $Z$ we may assume that $Z$ has one of the following three forms \[ Z \mapsto \begin{bmatrix} 1 & 0 \\ 0 & 1 \end{bmatrix} \quad \text{or} \quad Z \mapsto \begin{bmatrix} -1 & 0 \\ 0 & -1 \end{bmatrix} \quad \text{or} \quad Z \mapsto \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix}. \] The first two possibilities are easily dealt with. Here, the image of $Z$ is a central unit so it follows from the relations $XZ+ZX=0=YZ+ZY$ as in the previous lemma that $X \mapsto 0$ and $Y \mapsto 0$. That is, these two components consist of just one point (the action of $\wis{GL}_2$ by simultaneous conjugation fixes these matrices) corresponding to the $2$-dimensional {\em semi-simple} representations \[ M_+ = \phi_+ \oplus \phi_+ \qquad \text{and} \qquad M_- = \phi_- \oplus \phi_-~. \] The interesting case is the third one. Because $X^2$ and $Y^2$ are central elements it follows (for example using the characteristic polynomial of $2 \times 2$ matrices) that in any $2$-dimensional representation $\Lambda_c \rTo^{\phi} M_2(\mathbb{C})$ we have that $tr(\phi(X))=0$ and $tr(\phi(Y))=0$. Hence, the third component of $\wis{rep}_2~\Lambda_c$ consists of those $2$-dimensional representations $\phi$ such that \[ tr(\phi(X)) = 0 \qquad tr(\phi(Y)) = 0 \qquad \text{and} \qquad tr(\phi(Z)) = 0. \] For this reason we denote this component by $\wis{trep}_2~\Lambda_c$ and call it the variety of {\em trace preserving $2$-dimensional representations}. To describe the coordinate ring of this component we can use {\em trace zero} generic $2 \times 2$ matrices \[ X \mapsto \begin{bmatrix} x_1 & x_2 \\ x_3 & -x_1 \end{bmatrix} \quad Y \mapsto \begin{bmatrix} y_1 & y_2 \\ y_3 & -y_1 \end{bmatrix} \quad Z \mapsto \begin{bmatrix} z_1 & z_2 \\ z_3 & -z_1 \end{bmatrix} \] which drastically reduces the defining equations as $T^2$ and $TS+ST$ are both scalar matrices for any trace zero $2 \times 2$ matrices. More precisely, we have \[ XZ+ZX \mapsto \begin{bmatrix} 2x_1z_1+x_2z_3+x_3z_2 & 0 \\ 0 & 2x_1z_1+x_2z_3+x_3z_2 \end{bmatrix} \] \[ YZ+ZY \mapsto \begin{bmatrix} 2y_1z_1+y_2z_3+y_3z_2 & 0 \\ 0 & 2y_1z_1+y_2z_3+y_3z_2 \end{bmatrix} \] \[ Z^2 \mapsto \begin{bmatrix} z_1^2+z_2z_3 & 0 \\ 0 & z_1^2+z_2z_3 \end{bmatrix} \] and therefore the coordinate ring of $\wis{trep}_2~\Lambda_c$ \[ \mathbb{C}[\wis{trep}_2~\Lambda_c] = \frac{\mathbb{C}[x_1,x_2,x_3,y_1,y_2,y_3,z_1,z_2,z_3]}{(2x_1z_1+x_2z_3+x_3z_2,2y_1z_1+y_2z_3+y_3z_2,z_1^2+z_2z_3-1)}. \] To verify that $\wis{trep}_2~\Lambda_c$ is a smooth $6$-dimensional affine variety we therefore have to show that the {\em Jacobian matrix} \[ \begin{bmatrix} 2z_1 & z_3 & z_2 & 0 & 0 & 0 & 2x_1 & x_3 & x_2 \\ 0 & 0 & 0 & 2z_1 & z_3 & z_2 & 2y_1 & y_3 & y_2 \\ 0 & 0 & 0 & 0 & 0 & 0 & 2z_1 & z_3 & z_2 \end{bmatrix} \] has constant rank $3$ on $\wis{trep}_2~\Lambda_c$. This is forced by the submatrices $\begin{bmatrix} 2z_1 & z_3 & z_2 \end{bmatrix}$ along the 'diagonal' of the Jacobian unless $z_1=z_2=z_3=0$ but this cannot hold for a point in $\wis{trep}_2~\Lambda_c$ by the equation $z_1^2 + z_2z_3 = 1$. \end{proof}
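For instance, at the representation $X, Y \mapsto 0$, $Z \mapsto \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix}$ (so $z_1=1$ and all other coordinates are zero) the Jacobian matrix above specializes to \[ \begin{bmatrix} 2 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 2 & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & 2 & 0 & 0 \end{bmatrix} \] which visibly has rank $3$.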
Next, we will use the two idempotents $e_1=\frac{1}{2}(1-Z)$ and $e_2=\frac{1}{2}(1+Z)$ to relate the conifold algebra $\Lambda_c$ to the quiver mentioned above. Given a representation in $\wis{trep}_2~\Lambda_c$, we can use base change to bring the image of $Z$ into the form \[ Z \mapsto \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix} .\] Taking the generic $2 \times 2$ matrices \[ X \mapsto \begin{bmatrix} x_1 & x_2 \\ x_3 & x_4 \end{bmatrix} \qquad Y \mapsto \begin{bmatrix} y_1 & y_2 \\ y_3 & y_4 \end{bmatrix} \] it follows from the relations $XZ+ZX=0=YZ+ZY$ that $x_1=x_4=0=y_1=y_4$. Therefore, a representation in $\wis{trep}_2~\Lambda_c$ can be simultaneously conjugated to one of the form \[ X \mapsto \begin{bmatrix} 0 & x_2 \\ x_3 & 0 \end{bmatrix} \quad Y \mapsto \begin{bmatrix} 0 & y_2 \\ y_3 & 0 \end{bmatrix} \quad Z \mapsto \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix} \] and as the images of $X^2$ and $Y^2$ are scalar matrices the remaining defining relations $[X^2,Y]=0=[Y^2,X]$ are automatically satisfied. $2$-dimensional representations of $\Lambda_c$ in this canonical form hence form a smooth $4$-dimensional affine space \[ \mathbb{A}^4 = \mathbb{V}(x_1,x_4,y_1,y_4,z_1-1,z_2,z_3,z_4+1) \subset \mathbb{A}^{12}. \] To recover $\wis{trep}_2~\Lambda_c$ from this affine space we let $\wis{GL}_2$ act on it. The subgroup of $\wis{GL}_2$ fixing the matrix \[
\begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix} \qquad \text{is} \qquad T = \{ \begin{bmatrix} \lambda & 0 \\ 0 & \mu \end{bmatrix}~|~\lambda,\mu \in \mathbb{C}^* \}, \] the two-dimensional torus. There is an action of $T$ on the product $\wis{GL}_2 \times \mathbb{A}^4$ via \[ t.(g,P) = (gt^{-1},t.P) \qquad \text{for all $t \in T, g \in \wis{GL}_2$ and $P \in \mathbb{A}^4$} \] and where $t.P$ means the action by simultaneous conjugation by the $2 \times 2$ matrix $t \in T \subset \wis{GL}_2$ on the three $2 \times 2$ matrix-components of $P$.
\begin{proposition} Under the action-map \[ \wis{GL}_2 \times \mathbb{A}^4 \rTo \wis{trep}_2~\Lambda_c \qquad (g,P) \mapsto g.P \] two points $(g,P)$ and $(g',P')$ are mapped to the same point if and only if they belong to the same $T$-orbit in $\wis{GL}_2 \times \mathbb{A}^4$. That is, we can identify $\wis{trep}_2~\Lambda_{c}$ with the principal fiber bundle \[ \wis{trep}_2~\Lambda_c \simeq \wis{GL}_2 \times^T \mathbb{A}^4 = (\wis{GL}_2 \times \mathbb{A}^4) / T. \] In particular, there is a natural one-to-one correspondence between $\wis{GL}_2$-orbits in $\wis{trep}_2~\Lambda_c$ and $T$-orbits in $\mathbb{A}^4$. Observe that one can identify the $T$-action on $\mathbb{A}^4$ with the $\wis{GL}(\alpha)$-action on the representation space $\wis{rep}_{\alpha}~Q$ for the quiver-setting \[ \xymatrix{\vtx{1} \ar@/^/[r] \ar@/^2ex/[r] & \vtx{1} \ar@/^/[l] \ar@/^2ex/[l]}. \] In particular, the conifold algebra $\Lambda_c$ is the quiver-order $\int_{\alpha}~\mathbb{C} Q$. \end{proposition}
\begin{proof} If $g.P = g'.P'$, then $P = g^{-1}g'.P'$ and as both $P$ and $P'$ have as their third $2 \times 2$ matrix component \[ \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix} \] it follows that $g^{-1}g'$ is in the stabilizer subgroup of this matrix so $g^{-1}g' = t^{-1}$ for some $t \in T$ whence $g' = gt^{-1}$ and as $(g^{-1}g')^{-1}.P = P'$ also $t.P = P'$ whence \[ t.(g,P) = (gt^{-1},t.P) = (g',P') \] Hence we can identify $\wis{trep}_2~\Lambda_c = \wis{GL}_2.\mathbb{A}^4$ with the orbit-space of the $T$-action which is just $\wis{GL}_2 \times^T \mathbb{A}^4$. Incidentally, this gives another proof for smoothness of $\wis{trep}_2~\Lambda_c$ as it is the base of a fibration with smooth fibers of the smooth top space $\wis{GL}_2 \times \mathbb{A}^4$. $\wis{GL}_2$ acts on $\wis{GL}_2 \times \mathbb{A}^4$ by $g.(g',P') = (gg',P')$ and this action commutes with the $T$-action so induces a $\wis{GL}_2$-action on the orbit-space \[ \wis{GL}_2 \times (\wis{GL}_2 \times^T \mathbb{A}^4) \rTo \wis{GL}_2 \times^T \mathbb{A}^4 \qquad g.\overline{(g',P')} = \overline{(gg',P')}. \] As we have identified $\wis{GL}_2 \times^T \mathbb{A}^4$ with $\wis{trep}_2~\Lambda_c$ via the action map, that is $\overline{(g,P)} = g.P$ the remaining statements follow. \end{proof}
In this specific case we can explicitly compute polynomial (semi)-invariants using the $T$-action and relate them to the general results mentioned before.
\begin{lemma} The ring of polynomial invariants \[ \mathbb{C}[\wis{trep}_2~\Lambda_c]^{\wis{GL}_2} \simeq \mathbb{C}[\mathbb{A}^4]^T \] is isomorphic to the coordinate ring of the conifold singularity $R_c$ and the quotient map \[ \wis{trep}_2~\Lambda_c \rOnto \wis{spec}~R_c \] maps a two-dimensional representation to the direct sum of its Jordan-H\"older components. \end{lemma}
\begin{proof} The action of the two-dimensional torus $T$ on $\mathbb{A}^4 = \{ (x_2,x_3,y_2,y_3) \}$ is given by \[ \begin{bmatrix} \lambda & 0 \\ 0 & \mu \end{bmatrix}.(\begin{bmatrix} 0 & x_2 \\ x_3 & 0 \end{bmatrix}, \begin{bmatrix} 0 & y_2 \\ y_3 & 0 \end{bmatrix},\begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix} ) = \] \[ (\begin{bmatrix} 0 & \lambda\mu^{-1}x_2 \\ \lambda^{-1}\mu x_3 & 0 \end{bmatrix}, \begin{bmatrix} 0 & \lambda \mu^{-1}y_2 \\ \lambda^{-1} \mu y_3 & 0 \end{bmatrix},\begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix} ) . \] Hence, the action of $(\lambda,\mu) \in T$ on $\mathbb{C}[\mathbb{A}^4] = \mathbb{C}[X_2,X_3,Y_2,Y_3]$ is defined by \[ X_2 \mapsto \lambda^{-1}\mu X_2 \quad X_3 \mapsto \lambda \mu^{-1} X_3 \quad Y_2 \mapsto \lambda^{-1}\mu Y_2 \quad Y_3 \mapsto \lambda \mu^{-1} Y_3 \] and this action sends any monomial in the variables to a scalar multiple of that monomial. So, in order to determine the ring of polynomial invariants \[
\mathbb{C}[X_2,X_3,Y_2,Y_3]^T = \{ f= f(X_2,X_3,Y_2,Y_3)~|~(\lambda,\mu).f = f~ \forall (\lambda,\mu) \in T \} \] it suffices to determine all invariant monomials, or equivalently, all non-negative integer quadruplets $(a,b,c,d)$ such that $a-b+c-d=0$ as \[ (\lambda,\mu).X_2^aX_3^bY_2^cY_3^d = \lambda^{-a+b-c+d} \mu^{a-b+c-d} X_2^aX_3^bY_2^cY_3^d \] Clearly, such quadruplets are all generated (as a semigroup under addition) by the four basic ones \[ (1,1,0,0) \mapsto X_2X_3 \quad (1,0,0,1) \mapsto X_2Y_3 \quad (0,1,1,0) \mapsto X_3Y_2 \quad (0,0,1,1) \mapsto Y_2Y_3 \] and therefore \[ \mathbb{C}[\wis{trep}_2~\Lambda_c]^{\wis{GL}_2} \simeq \mathbb{C}[X_2,X_3,Y_2,Y_3]^T = \mathbb{C}[X_2X_3,X_2Y_3,X_3Y_2,Y_2Y_3] \simeq \frac{\mathbb{C}[p,q,r,s]}{(ps-qr)} \] is the conifold singularity $R_c$. We know already that $\wis{spec}~R_c$ has as its points the isomorphism classes of $2$-dimensional semi-simple representations with $\phi_+ \oplus \phi_-$ as the semi-simple representation corresponding to the singularity and all other points classify a unique simple $2$-dimensional representation. \end{proof}
For the quiver-setting $(Q,\alpha)$ there are essentially two stability structures: $\theta=(-1,1)$ and $\theta'=(1,-1)$. Again, we can use elementary arguments in this case to calculate the moduli spaces.
\begin{lemma} The moduli space of all $\theta$-(semi)stable $\alpha$-dimensional representations \[ \wis{moduli}^{\theta}_{\alpha}~\Lambda_c \simeq \wis{proj}~\mathbb{C}[\wis{rep}_{\alpha}~Q]^{\wis{GL}(\alpha),\theta} \] is the $\wis{proj}$ of the ring of $\theta$-semi-invariants and as the semi-invariants of weight zero are the polynomial invariants we get a projective morphism \[ \wis{proj}~\mathbb{C}[\wis{rep}_{\alpha}~Q]^{\wis{GL}(\alpha),\theta} \rOnto \wis{spec}~R_c \] which is a desingularization of the conifold singularity. \end{lemma}
\begin{proof} As in the case of polynomial invariants, the space $\mathbb{C}[\wis{rep}_{\alpha}~Q]^{\wis{GL}(\alpha),\theta}_k$ is spanned by monomials \[ x_2^ax_3^by_2^cy_3^d \qquad \text{satisfying} \qquad -a+b-c+d=k \] and one verifies that this space is the module over the ring of polynomial invariants generated by all monomials of degree $k$ in $x_3$ and $y_3$. That is \[ \mathbb{C}[\wis{rep}_{\alpha}~Q]^{\wis{GL}(\alpha),\theta} = \mathbb{C}[x_2x_3,x_2y_3,x_3y_2,y_2y_3][x_3,y_3] \subset \mathbb{C}[x_2,y_2,x_3,y_3] \] with the generators $a=x_2x_3,b=x_2y_3,c=x_3y_2$ and $d=y_2y_3$ of degree zero and $e=x_3$ and $f=y_3$ of degree one. As a consequence, we can identify $\wis{proj}~\mathbb{C}[\wis{rep}_{\alpha}~Q]^{\wis{GL}(\alpha),\theta}$ with the closed subvariety \[ \mathbb{V}(ad-bc,af-be,cf-de) \subset \mathbb{A}^4 \times \mathbb{P}^1 \] with $(a,b,c,d)$ the affine coordinates of $\mathbb{A}^4$ and $[e:f]$ projective coordinates of $\mathbb{P}^1$. The projection $\wis{proj}~\mathbb{C}[\wis{rep}_{\alpha}~Q]^{\wis{GL}(\alpha),\theta} \rOnto \wis{spec}~R_c$ is projection onto the $\mathbb{A}^4$-component.
To prove smoothness we cover $\mathbb{P}^1$ with the two affine opens $e \not= 0$ (with affine coordinate $x = f/e$) and $f \not=0$ (with affine coordinate $y = e/f$). In the affine coordinates $(a,b,c,d,x)$ the relations become \[ ad = bc \qquad ax = b \qquad \text{and} \qquad cx = d \] whence the coordinate ring is $\mathbb{C}[a,c,x]$ (the relation $ad=bc$ follows from the other two) and so the variety is smooth on this affine open. Similarly, the coordinate ring on the other affine open is $\mathbb{C}[b,d,y]$ and smoothness follows. Moreover, this projection, which we denote by $\pi$, is {\em birational} over the complement of the singularity. This follows from the relations \[ ax = b, \quad cx = d, \quad by = a, \quad dy = c \] which determine $x$ (or $y$), and hence the point in $\wis{proj}$, lying over any $(a,b,c,d) \not= (0,0,0,0)$ in $\wis{spec}~R_c$. Therefore, the map $\pi$ is a desingularization and the {\em exceptional fiber} \[ E = \pi^{-1}(0,0,0,0) \simeq \mathbb{P}^1 \] which classifies the $\theta$-stable representations which lie over $(0,0,0,0)$ (that is, those such that $x_2x_3=x_2y_3=x_3y_2=y_2y_3=0$) as they are all of the form \[
\xymatrix{\vtx{} \ar@/^1ex/[rr]|{x_3} \ar@/^3ex/[rr]|{y_3} & & \vtx{} \ar@/^1ex/[ll]|{0} \ar@/^3ex/[ll]|{0}} \] with either $x_3 \not= 0$ or $y_3 \not= 0$ and the different $T$-orbits of those are parametrized by the points of $\mathbb{P}^1$. As the smooth points of $\wis{spec}~R_c$ are known to correspond to isomorphism classes of simple (hence certainly $\theta$-stable) representations we have proved that \[ \wis{proj}~\mathbb{C}[\wis{rep}_{\alpha}~Q]^{\wis{GL}(\alpha),\theta} \simeq \wis{moduli}^{\theta}_{\alpha}~\Lambda_c \] is the moduli space of all $\theta$-stable $\alpha$-dimensional representations of $Q$. \end{proof}
Clearly, we could have done the same calculations starting with the stability structure $\theta' = (1,-1)$ and obtained another desingularization exchanging the roles of $x_2,y_2$ and $x_3,y_3$. This gives us the situation \[ \vcenter{\xymatrix@C=10pt@R=30pt{ & \wis{blowup} \ar@{->>}[dl]_{\phi} \ar@{->>}[dr]^{\phi'}\\ \wis{moduli}^{\theta}_{\alpha}~\Lambda_c \ar@{->>}[dr]_{\pi} \ar@{.>}[rr]^r& & \wis{moduli}^{\theta'}_{\alpha}~\Lambda_c \ar@{->>}[dl]^{\pi'}\\ & \wis{spec}~R_c }}. \] Here, $\wis{blowup}$ denotes the desingularization of $\wis{spec}~R_c$ one obtains by blowing up the point $(0,0,0,0) \in \mathbb{A}^4$ and which has exceptional fiber $\mathbb{P}^1 \times \mathbb{P}^1$. Blowing down either of the two rulings of this $\mathbb{P}^1 \times \mathbb{P}^1$ (the maps $\phi$ and $\phi'$) one obtains the 'minimal' resolutions given by the moduli spaces. These spaces are related by the {\em rational map} $r$ which is called the {\em Atiyah flop} in the string theory literature.
\end{document} |
\begin{document}
\maketitle \begin{abstract} We prove properties of extremal graphs of girth $5$ and order $20\leq v \leq 32$. In each case we identify the possible minimum and maximum degrees, and in some cases prove the existence of (non-trivial) embedded stars. These proofs allow for a tractable search for, and identification of, all non-isomorphic cases. \end{abstract}
\section{Introduction} In this paper we consider graphs of girth at least $5$, i.e. graphs which have no 3-cycles or 4-cycles. In \cite{Garnick93}, the maximum number of edges in a graph of girth at least $5$ with $v$ vertices, $f_4(v)$, was established for $v\leq 30$ and $v=50$, and the number of unique (up to isomorphism) graphs with $f_4(v)$ edges, $F_4(v)$, was calculated for $v\leq 21$ and $v=50$. Note that, once $f_4(v)$ has been established, it is straightforward to generate $F_4(v)$ for $v<20$ using nauty's geng tool \cite{nauty}. In \cite{comiprst1} and \cite{comiprst2} we introduced two symmetry breaking constraints for combinatorial search and illustrated their effectiveness by applying them to the problem of finding graphs with girth at least $5$ (and to other combinatorial problems). We were able to reproduce the results of \cite{Garnick93} and find values of $F_4(v)$ that were previously unpublished. Our symmetry breaking constraints alone were not sufficient to crack some of the harder cases. We were however able to reduce our search space by applying new theoretical results for cases $v>22$. In many cases we were able to identify non-trivial {\it embedded stars} that must be present in a graph with the maximum number of edges and girth at least $5$, and fixing such an embedded star made our search tractable. An outline of the proof of the existence of such stars is given in \cite{comiprst2}, which for space reasons did not include full proofs. We give the full proofs in this paper. All of the graphs for $F_{4}(v)$, where $20\leq v \leq 32$, are presented as incidence arrays in our appendices, and a full list in {\it graph6} notation (as implemented in nauty \cite{nauty}) can be obtained by contacting the authors directly.
\section{Preliminary Definitions and Results} In this section we give some preliminary definitions and results that will be useful in this paper.
The girth of a graph $\Gamma=(V,E)$ is the size of the smallest cycle contained in it.
Let ${\cal F}_k(v)$ denote the set of graphs with $v$ vertices and girth at least $k+1$. Let $f_k(v)$ denote the maximum number of edges in a graph in ${\cal F}_k(v)$. A graph in ${\cal F}_k(v)$ with $f_k(v)$ edges is called \emph{extremal}. The number of non-isomorphic extremal graphs in ${\cal F}_k(v)$ is denoted $F_k(v)$.
Extremal graph problems involve discovering values of $f_k(v)$ and $F_k(v)$ and finding witnesses.
In~\cite{AbajoD10} the authors attribute the discovery of values $f_4(v)$ for $v\leq 24$ to \cite{Garnick93} and for $25\leq v\leq 30$ to \cite{Garnick92}. In \cite{Garnick93} the authors report values of $F_4(v)$ for $v\leq 21$. In \cite{Garnick93} and \cite{WangDM01} algorithms are applied to compute lower bounds on $f_4(v)$ for $31\leq v\leq 200$. Some of these lower bounds are improved in \cite{AbajoBD10} and improved upper bounds for $33\leq v\leq 42$ are proved in \cite{bong2017}.
Values of $f_4(v)$ for $v\leq 30$, and of $F_4(v)$ for $v\leq 21$ are available as sequences \texttt{A006856} and \texttt{A159847} of the On-Line Encyclopedia of Integer Sequences~\cite{oeis}. We have extended sequence \texttt{A159847} to include values for $22\leq v \leq 32$, following the results presented in this paper.
In the remainder of this paper, as we are only interested in graphs with girth at least $5$, we abbreviate $f_4(v)$ and $F_4(v)$ to $f(v)$ and $F(v)$ respectively.
\begin{definition} A graph $\Gamma=(V,E)$ is said to be {\it extremal} if it has girth at least $5$ and any graph on $|V|$ vertices with more than $e=|E|$ edges has girth less than $5$. For any $v$, $f(v)$ and $F(v)$ denote the number of edges in an extremal graph on $v$ vertices and the number of unique extremal graphs on $v$ vertices respectively. For any extremal graph, $\delta$ and $\Delta$ denote the minimum and maximum degree respectively. \end{definition}
The following lemma, and its proof, is from \cite{Garnick93}. \begin{lemma}\label{lem:garnickLemma} If $\Gamma$ is an extremal graph of order $v$, with $e=f(v)$ edges, whose minimum and maximum degrees are $\delta$ and $\Delta$ respectively, then $$e-f(v-1)\leq \delta \leq \sqrt{v-1}\;{\rm and}\; \lceil 2e/v\rceil\leq \Delta\leq (v-1)/\delta$$ \end{lemma}
Values of $f(v)$ for $22\leq v \leq 30$ can be found in \cite{Garnick93}, and those for $v=31$ and $v=32$ are given in \cite{comiprst1}. By applying Lemma \ref{lem:garnickLemma}, and observing (by counting edges) that $\delta=\Delta$ is only possible when $v\Delta = 2f(v)$, we obtain possible values of $(\delta,\Delta)$ for $21\leq v \leq 33$. These are given in Table \ref{table:deltaTable}.
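For example, for $v=32$ we have $f(32)=85$ and $f(31)=80$, so Lemma \ref{lem:garnickLemma} gives $5 = 85-80 \leq \delta \leq \sqrt{31} < 6$ and $6 = \lceil 170/32 \rceil \leq \Delta \leq 31/5 < 7$, whence $(\delta,\Delta)=(5,6)$ is the only possibility, as recorded in Table \ref{table:deltaTable}.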
\begin{table}
\begin{tabular}{l|l|l} $v$ & $f(v)$ & $(\delta,\Delta)$\\ \hline 21&44&$(3,5)$, $(3,6)$, $(4,5)$\\ 22&47&$(3,5)$, $(3,6)$, $(3,7)$, $(4,5)$\\ 23&50&$(3,5)$, $(3,6)$, $(3,7)$, $(4,5)$\\ 24&54&$(4,5)$\\ 25&57&$(3,5)$, $(3,6)$, $(3,7)$, $(3,8)$, $(4,5)$, $(4,6)$\\ 26&61&$(4,5)$, $(4,6)$\\ 27&65&$(4,5)$, $(4,6)$\\ 28&68&$(3,5)$, $(3,6)$, $(3,7)$, $(3,8)$, $(3,9)$, $(4,5)$, $(4,6)$\\ 29&72&$(4,5)$, $(4,6)$, $(4,7)$\\ 30&76& $(4,6)$, $(4,7)$\\ 31&80& $(4,6)$, $(4,7)$, $(5,6)$\\ 32&85&$(5,6)$\\ 33&87&$(2,6)$, $(2,7)$, $(2,8)$, $(2,9)$, $(2,10)$, $(2,11)$, $(2,12)$,\\ && $(2,13)$, $(2,14)$, $(2,15)$, $(2,16)$, $(3,6)$, $(3,7)$, $(3,8)$,\\ && $(3,9)$, $(3,10)$, $(4,6)$, $(4,7)$, $(4,8)$, $(5,6)$\\ \hline \end{tabular} \caption{Possible values of $(\delta,\Delta)$ for $21\leq v \leq 33$\label{table:deltaTable}} \end{table}
\begin{definition}\label{def:embedded}
If $\Gamma$ is a graph of girth at least $5$ we say that $\Gamma$ has an embedded $S_{D,[d_{0}-1,d_{1}-1, \ldots, d_{D-1}-1]}$ star if $\Gamma$ has a vertex of degree $D$ whose children have degrees at least $d_{0},d_{1},\ldots,d_{D-1}$ respectively. \end{definition}
Note that the children and grandchildren of a vertex in a graph of girth at least $5$ are distinct, and there are no edges between the children. If $\Gamma$ is such a graph and has minimum and maximum degrees $\delta$ and $\Delta$, then any vertex $x$ of degree $\Delta$ is the centre of a {\it trivial} embedded $S_{\Delta,[\delta-1,\delta-1, \ldots, \delta-1]}$ star. In order to reduce the number of isomorphic graphs produced, any combinatorial search for extremal graphs will assume a fixed position of such a star (for example, with central node as vertex $0$). The purpose of this paper is to show that in some cases an extremal graph must contain larger (non-trivial) embedded stars, in some cases containing all of the vertices in the graph. This allows us to fix such a star and reduce both our search space and the number of isomorphic graphs generated.
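For example, an embedded $S_{5,[3,3,3,3,2]}$ star consists of its centre, its $5$ children and $3+3+3+3+2=14$ grandchildren, all distinct, and so contains $20$ vertices; in an extremal graph on $20$ vertices such a star therefore covers every vertex.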
\begin{definition}\label{def:sinkNode} A node $x$ of an extremal graph is said to be a {\it sink node} if it has maximum degree and is the centre of an embedded star that contains all of the vertices (i.e. $x$ is at distance at most $2$ from all other vertices). \end{definition}
From a result in \cite{Garnick93}, no two vertices in an extremal graph of girth at least $5$ are at distance greater than $3$ from each other.
\begin{definition}\label{def:setsDifferent} For a graph $\Gamma=(V,E)$ and any $m\geq 2$, $S_{\Gamma,m}$ is the set of sets of $m$ vertices that are pairwise at distance $3$ from each other. \end{definition}
\begin{lemma}\label{lem:sinknode} If $\Gamma$ is a graph of girth at least $5$ on $v$ vertices and contains a sink node $x$, then (i) $\Gamma$ has an embedded $S_{\Delta,[d_{0}-1,d_{1}-1,\ldots, d_{\Delta-1}-1]}$ star, where $\Delta+1+\Sigma_{i=0}^{\Delta-1}(d_{i}-1) = v$. (ii) for $m\geq 2$, no element of $S_{\Gamma,m}$ contains $x$ or more than one child of $x$. \end{lemma}
\noindent {\bf Proof} Follows immediately as $x$ is at distance at most $2$ from all other vertices and the children of $x$ are at distance $2$ from each other.
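For example, if $\Gamma$ is an extremal graph on $26$ vertices with $\Delta=5$ and $x$ is a sink node, then the $20$ vertices other than $x$ and its children are grandchildren of $x$; since two children share no common neighbour other than $x$ (there are no $4$-cycles) and each child has at most $4$ neighbours besides $x$, each child has exactly $4$ such neighbours, so $x$ is the centre of an embedded $S_{5,[4,4,4,4,4]}$ star.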
\begin{comment} \begin{lemma}\label{lem:deg4and5} If $\Gamma$ is an extremal graph with at most $23$ vertices and $(\delta,\Delta)=(4,5)$ then any element of $S_{\Gamma,4}$ contains only vertices of degree $4$. \end{lemma}
\noindent {\bf Proof} Suppose that $s\in S_{\Gamma,4}$ contains a vertex $x$ of degree $5$. Then $x$ is the centre of an embedded $S_{5,[3,3,3,3,3]}$ star, which contains $21$ vertices. Since the $3$ other elements of $s$ do not belong to this star, we have that $v\geq 24$. This is not even true!! \end{comment}
\begin{lemma}\label{lem:inductiveExtremal} If $\Gamma=(V,E)$ is an extremal graph and $|V|=v$ with $\delta(\Gamma)=f(v)-f(v-1)$, then $\Gamma=(V,E)$ is constructed from an extremal graph $\Gamma^{\prime}$ of order $v-1$ by adding a new vertex $x$ of degree $\delta$ to $\Gamma^{\prime}$ and $\delta$ edges from $x$ to the vertices of an element of $S_{\Gamma^{\prime},\delta}$. \end{lemma}
\begin{comment} $\Delta(\Gamma)\leq \Delta_{v-1}+1$. Suppose that all graphs $\Gamma^{\prime}=(V^{\prime},E^{\prime})\in F(v-1)$ for which $S_{\Gamma^{\prime},\delta}$ is non-empty have a sink vertex of degree $\Delta_{v-1}$ with children of degrees $d_{0},d_{1},\ldots, d_{\Delta_{v-1}-1}$. Then $\Gamma$ has an embedded $S_{\Delta_{v-1},[d_{0}-1,d_{1}-1,\ldots, d_{\Delta_{v-1}-1}-1]}$ star. \begin{itemize} \item If, for every $\Gamma^{\prime}\in F(v-1)$ for which $S_{\Gamma^{\prime},\delta}$ is non-empty, every element of $S_{\Gamma^{\prime},\delta}$ contains only vertices of degree less than $\Delta_{v-1}$ then $\Delta(\Gamma)=\Delta_{v-1}$. \item If, for every $\Gamma^{\prime}$ for which $S_{\Gamma^{\prime},\delta}$ is non-empty, every element of $S_{\Gamma^{\prime},\delta}$ contains a child of a sink vertex, then $\Gamma$ contains a sink vertex of degree $\Delta_{v-1}$ with children of degrees $d_{0},\ldots, d_{i-1}, d_{i}+1,d_{i+1}, \ldots, d_{\Delta_{v-1}-1}$, for some $1\leq i \leq \Delta_{v-1}$. \end{itemize} \end{lemma} \end{comment}
\begin{theorem} For each $20\leq v \leq 32$, an extremal graph $\Gamma$ must have $(\delta,\Delta)$ taking one of the pairs of values indicated in Table \ref{embeddedStarTable}. In each case, we show the number of edges ($f(v)$), the largest star known to be embedded in $\Gamma$, the number of distinct graphs $(F(v))$, and the method of proof. In all cases Lemma \ref{lem:inductiveExtremal} is employed. The ``method'' column indicates whether the graphs were obtained from \cite{Garnick93} (G) or, if not, whether search is employed (based on the proven existence of embedded stars) (S), and whether some (or all) graphs are constructed by hand (H). Note that $S,H$ denotes that some of the graphs were found using search, and some by hand. \end{theorem}
\begin{table}[t] \begin{center} \setlength{\tabcolsep}{3pt}
\begin{tabular}{|c|l|l|l|l|l|} \hline v&$f(v)$&$(\delta,\Delta)$&Embedded star&$F(v)$&method\\ \hline 20&$41$&$(3,5)$&$S_{5,[3,3,3,3,2]}$&1&G\\ \hline 21&$44$&$(3,5)$&$S_{5,[3,3,3,3,3]}$&3&H\\
& &$(4,5)$&$S_{5,[3,3,3,3,3]}$&&\\ \hline 22&$47$&$(3,5)$&$S_{5,[4,3,3,3,3]}$&3&S\\
& &$(4,5)$&$S_{5,[4,3,3,3,3]}$&&\\ \hline 23&$50$&$(3,5)$&$S_{5,[4,4,3,3,3]}$&7&S\\ & &$(4,5)$&$S_{5,[4,4,3,3,3]}$ or $S_{5,[4,3,3,3,3]}$&&\\ \hline 24&$54$&$(4,5)$&$S_{5,[4,4,4,3,3]}$&1&S\\ \hline 25&$57$&$(3,5)$&$S_{5,[4,4,4,4,3]}$ or $S_{5,[4,4,4,3,3]}$&6&S\\ & & $(4,5)$& $S_{5,[4,4,4,4,3]}$ or $S_{5,[4,4,4,3,3]}$&&\\ & & $(4,6)$&$S_{6,[3,3,3,3,3,3]}$&&\\ \hline 26& $61$&$(4,5)$&$S_{5,[4,4,4,4,4]}$&2&H\\ \hline 27&$65$& $(4,5)$&$S_{5,[4,4,4,4,4]}$&1&H\\ \hline 28&$68$& $(3,6)$ & $S_{6,[4,4,4,4,3,2]}$&4&S,H\\ && $(4,5)$ & $S_{5,[4,4,4,4,4]}$&&\\ && $(4,6)$ & $S_{6,[4,4,4,3,3,3]}$&&\\ \hline 29&$72$&$(4,6)$ & $S_{6,[4,4,4,4,3,3]}$&1&H\\ \hline 30&$76$&$(4,6)$ & $S_{6,[4,4,4,4,4,3]}$ and $S_{6,[5,4,4,4,3,3]}$&1&H\\ \hline 31&$80$&$(4,6)$ & $S_{6,[4,4,4,4,4,4]}$ and $S_{6,[5,4,4,4,4,3]}$&2&H,S\\ &&$(5,6)$ & $S_{6,[4,4,4,4,4,4]}$&&\\ \hline 32&$85$&$(5,6)$&$S_{6,[5,4,4,4,4,4]}$&1&H\\ \hline
\end{tabular} \end{center} \caption{Embedded stars for $20 \leq v \leq 32$\label{embeddedStarTable}\label{table:thetable}} \end{table}
\noindent {\bf Proof} Follows from Theorems \ref{theorem:g20}, \ref{theorem:g21}, \ref{theorem:g22}, \ref{theorem:g23}, \ref{theorem:g24}, \ref{theorem:g25}, \ref{theorem:g26}, \ref{theorem:g27}, \ref{theorem:g28}, \ref{theorem:g29}, \ref{theorem:g30}, \ref{theorem:g31} and \ref{theorem:g32}.
\begin{definition}\label{defn:linearspace} A linear space $\Lambda$ on a set of $n$ points $V$ is a collection $B=\{B_{1},\ldots,B_{b}\}$ of subsets of $V$ called blocks, such that every block has at least two points and each pair of points is in exactly one block. A prelinear space $\Lambda$ is a set of blocks on $V$ in which every block has at least two points and each pair of points is in at most one block. \end{definition}
\noindent Any prelinear space can be extended to a linear space, by adding suitable blocks of size $2$, so we tend to use the terms linear space and prelinear space interchangeably. A linear space with no blocks of size $2$ is called a {\it proper} linear space.
\begin{comment}
\noindent {\bf Example 1:} The $5$ linear spaces on five points are shown graphically in Figure \ref{linearspaces5points}. Blocks are represented as lines. They can also be represented as sets of tuples thus (note that the blocks of size $2$ are omitted): $\Delta_{1}=\{(0,1,2,3,4)\}$, $\Delta_{2}=\{(0,1,2,3)\}$, $\Delta_{3}=\{(0,1,2),(2,3,4)\}$ $\Delta_{4}=\{(2,3,4)\}$ and $\Delta_{5}=\{\}$. We have arbitrarily labelled the points. A different labelling would lead to an isomorphic linear space.
\begin{figure}
\caption{Linear spaces on $5$ points }
\label{linearspaces5points}
\end{figure}
\noindent Often (especially when there are several blocks of size greater than $2$) blocks are drawn vertically. For example, in consider a linear space $\Delta$ on $16$ points whose blocks (of size $>2$) are $(0,1,2,3,4)$, $(0,5,6,7,8)$, $(0,9,10,11,12)$, $(1,5,9,13,14)$, $(2,6,10,13,15)$ and $(3,7,11,14,15)$. These blocks can be represented vertically as follows:
\noindent \begin{tabular}{cccccc} 0&0&0&1&2&3\\ 1&5&9&5&6&7\\ 2&6&10&9&10&11\\ 3&7&11&13&13&14\\ 4&8&12&14&15&15\\ \end{tabular}
\noindent A shorthand to represent a linear space $\Delta$ on $v$ points containing $r_{i}$ blocks of size $i$, for $2\leq i \leq v$ is to say that $\Delta$ is a $2^{r_{2}}3^{r_{3}}\ldots v^{r_{v}}$ design (where the $i$th term is omitted if $r_{i}=0$, and we often omit the blocks of size $2$). The numbers of linear spaces on $n$ points, for $n\leq 12$, for each possible profile, are given in \cite{bettenbetten4}. \end{comment}
\begin{lemma}\label{lem:blocks} Let $\Gamma=(V,E)$ be a graph of girth at least $5$ and $$V_{1} = \{v_{0},v_{1},\ldots, v_{m-1}\}$$ a subset of $V$. If, for all $v_{i}\in V_{1}$, $b_{i}$ is the set of elements of $V\setminus V_{1}$ that are adjacent to $v_{i}$, then $B=\{b_{i} : 0\leq i \leq m-1\;{\rm and}\; |b_{i}|\geq 2\}$ is a (pre) linear space on $V\setminus V_{1}$ and no set $b_{i}$ contains two vertices that are adjacent in $\Gamma$. \end{lemma}
\noindent {\bf Proof} No pair of elements in $V\setminus V_{1}$ can appear in more than one set $b_{i}$ as $\Gamma$ contains no $4$-cycles, so $B$ is a (pre) linear space. Similarly, no set $b_{i}$ contains two vertices that are adjacent in $\Gamma$, as $\Gamma$ contains no $3$-cycles.
\noindent Note that in the context of linear spaces, we generally refer to the elements of the blocks as {\it points}, whereas in the context of graphs the elements of edges are vertices. When applying Lemma \ref{lem:blocks} this distinction is less clear.
\begin{comment} \begin{lemma} Suppose that $\Gamma$ is a triangle and square-free graph on $v$ vertices, and for any $x\in V(\Gamma)$ let $\Pi(x)$ denote the set of vertices that are at a distance of at least two from $x$. If $x$ and $y$ are adjacent vertices in $\Gamma$, then \end{comment}
\begin{comment} \begin{lemma}\label{lemma:15.0} Let $\Lambda$ be a $4^{8}$ on $15$ points in which the blocks can be arranged into $4$ parallel pairs. Then \begin{enumerate} \item At most $2$ points are in $4$ blocks. \item If a point is in $0$ blocks, and at least one point is in $4$ blocks, then $2$ points are in $4$ blocks. \item If a point is in $4$ blocks then at most $4$ other points are in $3$ blocks. \item If $2$ points, $x$ and $y$ are in $4$ blocks, and two points $A$ and $B$ are in $1$, then all blocks contain either $x$ or $y$, and $A$ and $B$ are in different blocks, in different parallel pairs of blocks, one with $x$ and one with $y$. \end{enumerate} \end{lemma}
\noindent{\bf Proof} (1) If $x$ and $y$ are in $4$ blocks, then at most one block does not contain $x$ or $y$. So any other point can be in at most $3$ blocks. (2) Suppose that $x$ is in $4$ blocks, one from each parallel pair, and a point is in no blocks. There is one point, $y$ say, that is not in a block with $x$, and must be in all of the $4$ remaining blocks. (3) Let $x$ be as above and let $y$ and $z$ be the two points that are not in a block with $x$. Suppose there are $5$ points in $3$ blocks. At most one point from every block on $x$ can be in $3$ blocks and at most one from $\{y,z\}$. So w.l.o.g. one point from each block on $x$, and $y$ are in $3$ blocks. Each of the former is in $b$, the one block that does not contain $x$ or $y$. But one of the points in a block with $x$ that is in $3$ blocks is in the block on $x$ that is parallel to $b$, which is a contradiction. (4) Suppose that $x$ and $y$ are in a common block, and that the block parallel to the block containing $x$ and $y$ is $b$. Since the points in the block with $x$ and $y$ can be in no more blocks they must be $A$ and $B$. Let $b^{\prime}$ with any block that is not in the two already mentioned, and suppose $b^{\prime}$ contains $x$. Then the three other elements of $b^{\prime}$ are each in a distinct other block, either a block on $y$ (not parallel to $b^{\prime}$) or $b$. It follows that every other block has an element that is in $b$. Since there are $6$ such blocks, this is impossible. So $x$ and $y$ are not in a block together, and every parallel pair of blocks consists of a block on $x$ and a block on $y$. A similar argument shows that $A$ and $B$ are in different blocks, in different parallel pairs, and one on $x$ and one on $y$. \end{comment}
\begin{comment} \begin{lemma}\label{blocks15} If $\Lambda$ is a $4^{6}5^{2}$ on $15$ points in which the blocks of size $4$ can be arranged into $3$ parallel pairs, then either $0$ or $2$ points are in $4$ blocks. If $2$ points are in $4$ blocks then all other points are in $2$. \end{lemma}
\noindent {\bf Proof} Any point $x$ that is in $4$ blocks must either be in two blocks of size $4$ and two of size $5$, or in $3$ of size $4$ and one of size $5$. In the first case, every block of size $4$ must intersect all of the blocks containing $x$, and so there is no block parallel to any of the blocks containing $x$. So $x$ is in $3$ blocks of size $4$ and one of size $5$. The $3$ remaining blocks of size $4$ must intersect all but one of the blocks on $x$, and contain the single point $y$ not contained in the blocks on $x$. The remaining block of size $5$ intersects all of the blocks on $x$ and also contains $y$. Since all blocks contain $x$ or $y$, no other point is in more than $2$ blocks. It follows that every other point is in exactly $2$ blocks. \end{comment}
\begin{definition}\label{definition:packing} A $(v,k,1)$-packing design is a linear space on a set $V$ of $v$ points in which all blocks have size $k$. For any $v$ and $k$, the function $P(k,v)$ is called the maximum packing number of $k$ and $v$, and is the maximum number of blocks in such a packing design. \end{definition}
The following result follows from a simple counting argument (see \cite{johnson1}):
\begin{lemma} For any $k\leq v$, $$P(k,v)\leq \lfloor v\lfloor(v-1)/(k-1)\rfloor/k\rfloor$$ \end{lemma}
The upper bound is not achievable in some cases. Absolute values for $P(k,v)$ for $3\leq k\leq 6$ and $4\leq v\leq 28$ are given in Table \ref{table:packingTable}. These are obtained from \cite{spence}, \cite{brslsm}, and theoretical results in \cite{brouwer1}.
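For example, for $k=5$ and $v=15$ the bound of the previous lemma gives $P(5,15)\leq \lfloor 15\lfloor 14/4\rfloor/5\rfloor = 9$, whereas Table \ref{table:packingTable} records $P(5,15)=6$.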
\begin{table}
\begin{tabular}{|l|l|l|l|l|} \hline $v$ & $P(3,v)$ &$P(4,v)$ & $P(5,v)$ & $P(6,v)$\\ \hline 4&1&1&0&0\\ 5&2&1&1&0\\ 6&4&1&1&1\\ 7&7&2&1&1\\ 8&8&2&1&1\\ 9&12&3&2&1\\ 10&13&5&2&1\\ 11&17&6&2&2\\ 12&20&9&3&2\\ 13&26&13&3&2\\ 14&28&14&4&2\\ 15&35&15&6&3\\ 16&37&20&6&3\\ 17&44&20&7&3\\ 18&48&22&9&4\\ 19&57&25&12&4\\ 20&60&30&16&5\\ 21&70&31&21&7\\ 22&73&37&21&7\\ 23&83&40&23&8\\ 24&88&42&24&9\\ 25&100&50&30&10\\ 26&104&52&30&13\\ 27&117&54&30&14\\ 28&121&63&33&16\\ \hline \end{tabular} \caption{Maximum packing numbers for $k=3$, $k=4$, $k=5$ and $k=6$\label{table:packingTable}}
The following Lemma is a direct consequence of the definition of $P(k,v)$:
\begin{lemma}\label{lemma:packingLemma} No linear space on $v$ points contains more than $P(k,v)$ blocks of size $k$. \end{lemma}
In the lemma below, the {\it regularity} of a point $p$ ($reg_{p}$) in a linear space is the number of blocks that contain it and, for $i>0$, $deg_{i}$ is the number of points that have regularity $i$. Note that, since all the other points in the blocks containing $p$ must be distinct, if all blocks have size $r$, then $reg_{p}\leq (v-1)/(r-1)$.
\begin{lemma}\label{lemma:blockWeight}
If $\Lambda=(V,B)$ is a linear space and, for any block $b\in B$, the weight of $b$, $wt(b)$, is the sum of the regularities of the points in $b$, then (i) block $b$ intersects $wt(b)-|b|$ other blocks, and no block can have weight more than $|B|+|b|-1$, and (ii) $\Sigma_{b\in B}wt(b) = \Sigma_{i>0} i^{2}deg_{i}$. \end{lemma}
\begin{comment} (ii) remember we are adding the weights, not adding the number of points, so this does work. If we were adding the points in the blocks, then sum(b) = sum (i.deg(i)) - a different result!
Once we get to $\Sigma_{b\in B}wt(b) = \Sigma_{p} (reg_{p}^{2})$, expand RHS as (1^{2} + 1^{2} + ... + 1^{2}) (deg1 times) + (2^{2}+2^{2} + ... + 2^{2})(deg 2 times) ..... + (v^{2} + ...) (deg v times) which gives the result \end{comment}
\noindent{\bf Proof} (i) Any point $p$ in $b$ with regularity $reg_{p}$ lies in $reg_{p}-1$ other blocks, and no other block contains more than one element of $b$, so $b$ intersects $\Sigma_{p\in b}(reg_{p}-1)$ other blocks, and the result follows. (ii) If we sum the weights of the blocks, then for every point $p$, $reg_{p}$ is counted $reg_{p}$ times. So $\Sigma_{b\in B}wt(b) = \Sigma_{p} (reg_{p}^{2})$. For every $i>0$ there are $deg_{i}$ points of regularity $i$, so $\Sigma_{b\in B}wt(b) = \Sigma_{i>0}deg_{i}.i^{2}$, as required.
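For example, in a linear space with $8$ blocks of size $4$, a block whose points have regularities $3,3,2,2$ has weight $10$ and so intersects $10-4=6$ of the other $7$ blocks.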
\begin{lemma}\label{lemma:12.1} If $\Lambda$ is a linear space on $12$ points with $8$ blocks of size $4$, then all of the blocks intersect. \end{lemma}
\noindent{\bf Proof} There are no points of regularity greater than $3$ (or the blocks containing such a point would contain $13$ distinct points), and at most $4$ of regularity less than $3$ (if there are $5$, then $22$ spaces must be filled by $7$ points, implying at least one point of regularity greater than $3$). Suppose that there are two non-intersecting blocks. Then each of them has weight at most $10$ and so contains a pair of points of regularity at most $2$ (from Lemma \ref{lemma:blockWeight}). It follows that $(deg_{2},deg_{3})=(4,8)$. But then two of the blocks contain only points of regularity $3$ (the four points of regularity $2$ account for $8$ block-slots, of which $4$ lie in the two non-intersecting blocks, so they meet at most $4$ of the remaining $6$ blocks) and have weight $12$, which contradicts Lemma \ref{lemma:blockWeight}.
\begin{comment}
Let $V=V_{1}\cup V_{2}$ where $V_{1}$ is the set of vertices that are in these blocks, and $V_{2}$ the remaining vertices. Then all other blocks contain a pair $V_{1}$ and a pair from $V_{2}$. Since no element of $V_{1}$ can be in more than $2$ of the remaining blocks, $4$ elements of $V_{1}$ are in $2$ of the remaining blocks. But there are only three pairs of remaining blocks that do not intersect. Hence two elements of $V_{1}$ are in two of the same remaining blocks, which is impossible. \end{comment}
\begin{lemma}\label{lemma:13.1} If $\Lambda$ is a linear space on $13$ points with $9$ blocks of size $4$, then (1) there is no set of $3$ non-intersecting blocks. (2) There are at most $3$ pairs of non-intersecting blocks. \end{lemma}
\noindent{\bf Proof} First observe that if there is a set of $3$ parallel blocks or more than $1$ pair of parallel blocks, then every point is in at least $2$ blocks: otherwise, removing a point that is in at most $1$ block leaves a linear space on at most $12$ points with $8$ blocks of size at least $4$ that still contains a non-intersecting pair of blocks. There is no linear space on fewer than $12$ points with $8$ blocks of size at least $4$ \cite{bettenbetten4}, and if $v=12$ we have a contradiction to Lemma \ref{lemma:12.1}. So every point is in between $2$ and $4$ blocks; counting the $36$ incidences gives $2deg_{2}+3deg_{3}+4deg_{4}=36$ and $deg_{2}+deg_{3}+deg_{4}=13$, so $(deg_{2},deg_{3},deg_{4})$ is one of $(3,10,0)$, $(4,8,1)$, $(5,6,2)$, $(6,4,3)$, $(7,2,4)$ or $(8,0,5)$. By Lemma \ref{lemma:blockWeight} no block can have weight more than $12$ and $\Sigma_{b\in B}wt(b) = 4deg_{2}+9deg_{3}+16deg_{4}$.
\noindent (1) If there is a set of $3$ non-intersecting blocks, then there are $3$ blocks of weight at most $10$, and so $\Sigma_{b\in B}wt(b) \leq 102$. This is only possible if $(deg_{2},deg_{3},deg_{4})= (3,10,0)$. But then each of the $3$ parallel blocks must contain two of the points of regularity $2$, so two of them share such a point and hence intersect, which is a contradiction.
(2) If there are $4$ pairs of non-intersecting blocks then there are $8$ blocks with weight at most $11$ and $\Sigma_{b\in B}wt(b) \leq 100$, which is impossible.
\begin{lemma}\label{lemma:16.1} If $\Lambda$ is a linear space on at most $16$ points with $6$ blocks of size $5$ then either $(deg_{1},deg_{2},deg_{3})=(3,12,1)$, $deg_{2}=15$, or $(deg_{1},deg_{2})=(2,14)$. \end{lemma}
\noindent{\bf Proof} No point can be in $4$ blocks. If $p$ has regularity $3$ then for every block containing $p$ there is a point of regularity $1$ (as all blocks have weight at most $10$, by Lemma \ref{lemma:blockWeight}). So we must solve the following set of equations/inequalities: $deg_{1}+2deg_{2}+3deg_{3}=30$; $deg_{1}+deg_{2}+deg_{3}\leq 16$, and $deg_{1}\geq 3deg_{3}$. There are $3$ solutions, giving the result required.
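For example, if $deg_{3}=1$ then $deg_{1}\geq 3$ and $deg_{1}+2deg_{2}=27$, and the bound $deg_{1}+deg_{2}+deg_{3}\leq 16$ forces $(deg_{1},deg_{2},deg_{3})=(3,12,1)$; the case $deg_{3}=0$ gives the two remaining solutions, and $deg_{3}\geq 2$ gives none.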
\begin{comment} \begin{lemma} \label{lemma:10blocksSize6} There is no linear space on $22$ points that has $10$ blocks of size $6$. \end{lemma}
\noindent{\bf Proof} Suppose that $B_{6}$ is a set of $10$ blocks of size $6$ on $22$ points. No point can be in $4$ blocks of $B_{6}$, and at least one point is in $3$ blocks of $B_{6}$ (as there are $60$ spaces in the blocks in $B_{6}$). Let $x$ be a point that is in $3$ blocks in $B_{6}$. These blocks miss exactly $6$ points. Let $V^{\prime}$ denote this set of $6$ points. The remaining $7$ blocks of $B_{6}$ can contain at most one point from each of the blocks in $B_{6}$ containing $x$ and so must contain at least $3$ of $V^{\prime}$. Hence, restricting the blocks in $B_{6}$ to $V^{\prime}$ there is a linear space with $r_{i}$ blocks of size $i$, for $3\leq i \leq 6$ on $6$ points, where $r_{3}+r_{4}+r_{5}+r_{6}=7$. Counting the pairs from $V^{\prime}$ in this linear space, we have that $3r_{3}+6r_{4}+10r_{5}+15r_{6}\leq 15$. There is no solution. \end{comment}
\begin{comment} \begin{lemma} The connected, acyclic graphs of order greater than $1$ and less than $7$ are given in Figure \ref{fig:acyclic}.
\begin{figure}
\caption{The connected acyclic graphs of order greater than $1$ and at most $7$}
\label{fig:acyclic}
\end{figure}
\end{lemma}
\begin{lemma}\label{lemma:degs4&5} If $\Gamma$ is a connected acyclic graph of order greater than $1$ and at most $7$, and where no vertex has degree more than $4$, then (i) $\Gamma$ has a pair of adjacent vertices with combined degree less than $4$, or has a vertex $x$ for which $2(4-deg(x))+\Sigma_{y\in N(x)}(4-deg(y)) > 8$, where $N(x)$ is the neighbourhood of $x$. \end{lemma}
\noindent {\bf Proof} See Figure \ref{fig:acyclic}. \end{comment}
\begin{comment} \begin{lemma}\label{lem:connectedComponents} Let $\Gamma$ be a graph with $v$ vertices and $E$ edges, where $E\leq v$, and where $\Gamma$ contains a cycle of length $\mu > v/2$ and no smaller cycle. Then (i) any vertex not in the cycle is in at most one edge with vertices that are. (ii) $\Gamma$ has $r$ acyclic components, where $v-E\leq r \leq v-E+1$. \end{lemma}
\noindent
{\bf Proof} (i) Let $S$ be the set of vertices not in the cycle. Any vertex in $S$ that is in two edges with vertices in the cycle are in a cycle of length at most $\mu/2+1$, which is a contradiction. (ii) Set $S$ is composed of $2$ sets: $S_{1}$, the set of vertices that are connected via a path to the cycle, and $S_{2}$, those that are not. Set $S_{2}$ contains $r$ acyclic components (there are no cycles smaller than $\mu$), and $|S_{2}|-r$ edges. Remaining edges are between elements of $S_{1}$, or between elements of $S_{1}$ and the cycle. There are $E-\mu-|S_{2}|+r$ of these. Either these edges contain a path of length at least $\mu/2$ connected to two vertices in the cycle, at distance $\mu/2$ apart (there is at most one such path), in which case there are $|S_{1}|+1$ such edges, or there is no such path and there are $|S_{1}|$ such edges. Since $|S_{1}|=v-\mu-|S_{2}|$ the result follows. \end{comment}
\begin{comment} \begin{lemma}\label{lemma:extremalAcyclic} If $\Gamma$ is an extremal graph on $v$ vertices, where $v=22$ or $23$, and all vertices have degree at least $4$ there are no cycles amongst the vertices of degree $4$ of length less than $n$, where $n=7$ if $v=22$ and $n=8$ if $v=23$. In each case, if there is a cycle of length $n$ or $n+1$, then the graph $\Gamma^{\prime}$ formed by removing the vertices of the cycle and the edges on them is extremal. \end{lemma}
\noindent {\bf Proof} Let $\Gamma_{4}$ denote the graph on the vertices of degree $4$. Clearly, as $\Gamma$ is triangle and diamond free, it has no cycles of length less than $5$. Suppose that there is a cycle of length $n$, where $n\geq 5$. Then the subgraph $\Gamma^{\prime}$ formed by removing the vertices of the cycle and the edges on them has $v-n$ vertices, and $f(v)-3n$ edges. If $v=22$, $f(v)-3n>f(v-n)$ if $n<7$, so $\Gamma^{\prime}$ is not triangle and square free unless $n<7$. If $n=7$ or $n=8$ then $f(v)-3n=f(v-n)$ and so $\Gamma^{\prime}$ is extremal. The proof for $v=23$ is similar. \end{comment}
\noindent In the remainder of this paper we present all extremal graphs of orders $20\leq v \leq 32$. In all cases, for each graph $\Gamma$ we provide the adjacency matrix, degree sequence, edge set, relevant sets $S_{\Gamma,n}$, and any other relevant material in one of the appendices (A--M) to this paper.
\section{Case $v=20$}
\begin{theorem}\label{theorem:g20} If $\Gamma$ is a graph of girth at least $5$ on $20$ vertices with $41$ edges, then $(\delta,\Delta)=(3,5)$ and $\Gamma$ has an embedded $S_{5,[3,3,3,3,2]}$ star. \end{theorem}
\noindent {\bf Proof} There is precisely one graph of girth at least $5$ on $20$ vertices with $41$ edges. This is stated in \cite{Garnick93} and the graph is given in \cite{Garnick92}. This graph is shown in Figure \ref{fig:extremal20} (the numbering of the vertices is our own). There are $3$ embedded $S_{5,[3,3,3,3,2]}$ stars. Note that the sink nodes corresponding to these stars are precisely the vertices of degree $5$, namely $0$, $1$ and $2$.
\begin{figure}
\caption{Extremal graph on $20$ vertices with $41$ edges }
\label{fig:extremal20}
\end{figure}
\begin{lemma}\label{lemma:g20} If $\Gamma$ is a graph of girth at least $5$ on $20$ vertices with $41$ edges, then $(deg_{3},deg_{4},deg_{5}) =(1,16,3)$ and every element of $S_{\Gamma,3}$ contains $2$ vertices of degree $4$ and a child of a sink node, namely the single vertex of degree $3$. \end{lemma}
\noindent {\bf Proof} This graph is given in Figure \ref{fig:extremal20}, and is represented as an embedded star with sink node $0$ in Figure \ref{fig:extremal20A}. \begin{comment} (i) The set of edges is: \begin{eqnarray*} &&(0,4),(0,7),(0,10),(0,13),(0,19), (1,5),(1,8),(1,11),(1,14), (1,19),\\ &&(2,3),(2,6),(2,9), (2,12),(2,19),(3,4), (3,14),(3,15), (4,5),(4,18),\\&&(5,6), (5,17),(6,7),(6,16), (7,8),(7,15), (8,9),(8,18),(9,10), (9,17),\\&&(10,11),(10,16), (11,12),(11,15), (12,13),(12,18), (13,14),(13,17),\\&& (14,16),(15,17),(16,18) \end{eqnarray*} \end{comment}
Note that $$S_{\Gamma, 3}=\{\{15,16,19\}, \{15,18,19\}, \{16,17,19\}, \{17,18,19\}\}$$
Since vertices $15$, $16$, $17$ and $18$ all have degree $4$, and vertex $19$, which has degree $3$, is a child of all of the roots (i.e.\ $0$, $1$ and $2$), we are done.
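The stated properties of this graph can be checked directly from its edge set (reproduced in the comment above). The sketch below is for illustration only; in particular it assumes that $S_{\Gamma,3}$ consists of the $3$-sets of vertices that are pairwise at distance at least $3$, so that a new vertex of degree $3$ can be attached without creating a $3$- or $4$-cycle.
\begin{verbatim}
from itertools import combinations

edges = [(0,4),(0,7),(0,10),(0,13),(0,19),(1,5),(1,8),(1,11),(1,14),(1,19),
         (2,3),(2,6),(2,9),(2,12),(2,19),(3,4),(3,14),(3,15),(4,5),(4,18),
         (5,6),(5,17),(6,7),(6,16),(7,8),(7,15),(8,9),(8,18),(9,10),(9,17),
         (10,11),(10,16),(11,12),(11,15),(12,13),(12,18),(13,14),(13,17),
         (14,16),(15,17),(16,18)]
n = 20
nbrs = {v: set() for v in range(n)}
for u, v in edges:
    nbrs[u].add(v)
    nbrs[v].add(u)

print(sorted(len(nbrs[v]) for v in range(n)))   # one 3, sixteen 4s, three 5s

# girth at least 5: adjacent vertices share no neighbour (no triangle) and
# non-adjacent vertices share at most one neighbour (no 4-cycle).
ok = all(not (nbrs[u] & nbrs[v]) for u, v in edges) and \
     all(len(nbrs[u] & nbrs[v]) <= 1
         for u, v in combinations(range(n), 2) if v not in nbrs[u])
print("girth at least 5:", ok)                  # expected: True

def far(u, v):                                  # distance at least 3
    return v not in nbrs[u] and not (nbrs[u] & nbrs[v])

print([t for t in combinations(range(n), 3)
       if all(far(a, b) for a, b in combinations(t, 2))])
# expected: the four 3-sets listed above
\end{verbatim}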
\begin{figure}
\caption{Extremal graph on $20$ vertices with $41$ edges, as a star with root $0$}
\label{fig:extremal20A}
\end{figure}
\section{Case $v=21$}
\begin{theorem}\label{theorem:g21} If $\Gamma$ is a graph of girth at least $5$ on $21$ vertices with $44$ edges, then either $(\delta,\Delta)=(3,5)$ or $(\delta,\Delta)=(4,5)$. In either case, $\Gamma$ has an embedded $S_{5,[3,3,3,3,3]}$ star. \end{theorem}
\noindent {\bf Proof} Let $\Gamma$ be a graph of girth at least $5$ with $21$ vertices and $44$ edges. Then $\Gamma$ is extremal and, by Table \ref{table:deltaTable}, the possible values for the minimum and maximum degrees $\delta$ and $\Delta$ are $(\delta,\Delta)=(3,5)$, $(3,6)$ and $(4,5)$.
Suppose that there is a vertex of degree $3$, $x$ say. Since $\delta=f(21)-f(20)$, by Lemma \ref{lem:inductiveExtremal}, $\Gamma$ can be constructed from the extremal graph on $20$ vertices described in Lemma \ref{lemma:g20}, $\Gamma^{\prime}$, by adding a vertex $x$ of degree $3$ and edges from $x$ to all vertices in an element of $S_{\Gamma^{\prime},3}$. Since no element of $S_{\Gamma^{\prime},3}$ contains vertices of degree $5$, it follows that $\Delta=5$. So we have $(\delta,\Delta)=(3,5)$. Since, by Lemma \ref{lemma:g20}, every element of $S_{\Gamma^{\prime},3}$ contains the single vertex of $\Gamma^{\prime}$ of degree $3$ that is a child of every sink node, it follows that
every sink node of $\Gamma^{\prime}$ is a sink node of $\Gamma$
of degree $5$ whose children all have degree $4$. Hence there is an embedded $S_{5,[3,3,3,3,3]}$ star.
If $(\delta,\Delta)=(4,5)$ then any node $x$ of degree $5$ must only have children of degree $4$ and the result follows.
\begin{lemma}\label{lemma:g21} If $\Gamma$ is a graph on $21$ vertices of girth at least $5$ with $44$ edges, then one of the following holds: (i) $(deg_{3},deg_{4},deg_{5})=(1,15,5)$
and $\Gamma$ has $3$ embedded $S_{5,[3,3,3,3,3]}$ stars and is isomorphic to the graph shown in Figure \ref{fig:extremal21A}. In this case every element of $S_{\Gamma,3}$ contains $3$ vertices of degree less than $5$, including a child of a root of degree $4$. (ii) $(deg_{3},deg_{4},deg_{5})=(0,17,4)$, all vertices of degree $5$ are sink nodes and $\Gamma$ is isomorphic to one of the graphs shown in Figure \ref{fig:extremal21B}. In both cases $S_{\Gamma,3}$ is empty. \end{lemma}
\noindent {\bf Proof} From the proof of Theorem \ref{theorem:g21}, $(\delta,\Delta)=(3,5)$ or $(4,5)$. If $(\delta,\Delta)=(3,5)$ then $(deg_{3},deg_{4},deg_{5})=(1,15,5)$ and $\Gamma$ is obtained from the extremal graph $\Gamma^{\prime}$ on $20$ vertices shown in Figure \ref{fig:extremal20}, by adding an additional vertex $x$ and edges from $x$ to the vertices in one of the sets in $S_{\Gamma^{\prime},3}$ identified in the proof of Lemma \ref{lemma:g20}. Using nauty \cite{nauty}, it can be shown that all graphs constructed in this way are isomorphic to the graph shown in Figure \ref{fig:extremal21A} (note that in the figure (and in Figure \ref{fig:extremal21B}) neighbours of leaf nodes are indicated as sets).
In this case the sink nodes are $0$, $1$ and $2$ and $S_{\Gamma,3}=\{ \{4,9,20\}, \{5,12,20\}, \{8,13,20\}, \{17,18,19\}\}$. The result follows.
\begin{figure}
\caption{Extremal graph on $21$ vertices with $44$ edges and $(\delta,\Delta)=(3,5)$, as a star with root $0$.}
\label{fig:extremal21A}
\end{figure}
Suppose then that there is no vertex of degree $3$. Then $(\delta,\Delta)=(4,5)$ and $(deg_{4},deg_{5})=(17,4)$. All vertices of degree $5$ are sink nodes. By \cite{Garnick93}, we know that there are $3$ extremal $(21,44)$ graphs, so there are two graphs of this type. They must be isomorphic to the two graphs shown in Figure \ref{fig:extremal21B}, which have been shown to be non-isomorphic. Note that graph (b) can be obtained from graph (a) by replacing edge $(8,20)$ with edge $(11,20)$. In graph (a) the $3$ grandchildren of degree $5$ have the same parent, whereas in graph (b) two of the grandchildren of degree $5$ have the same parent and the other does not. In both cases $S_{\Gamma,3}$ is empty.
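The construction used in this proof (and repeatedly in the sequel) is an extension step: a new vertex is joined to each admissible set in turn, and isomorphic copies are then discarded. The sketch below illustrates the idea only; the paper's actual computations use nauty \cite{nauty} for the isomorph rejection, and the function name \texttt{extend} and the use of the \texttt{networkx} library are purely illustrative (vertices are assumed to be labelled by integers).
\begin{verbatim}
import networkx as nx

def extend(G, candidate_sets):
    """Join a new vertex to each candidate set; keep one copy per class."""
    extensions = []
    new = max(G.nodes) + 1
    for T in candidate_sets:
        H = G.copy()
        H.add_edges_from((new, t) for t in T)
        if all(not nx.is_isomorphic(H, K) for K in extensions):
            extensions.append(H)
    return extensions
\end{verbatim}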
\begin{figure}
\caption{Extremal graphs on $21$ vertices with $44$ edges and $(\delta,\Delta)=(4,5)$, as stars with root $0$}
\label{fig:extremal21B}
\end{figure}
\section{Case $v=22$} \begin{theorem}\label{theorem:g22} If $\Gamma$ is a graph of girth at least $5$ on $22$ vertices with $47$ edges, then either $(\delta,\Delta)=(3,5)$ or $(\delta,\Delta)=(4,5)$. In either case there is an embedded $S_{5,[4,3,3,3,3]}$ star. \end{theorem}
\noindent{\bf Proof} Since $\Gamma$ is extremal, it follows from Table \ref{table:deltaTable} that the values of $(\delta,\Delta)$ are either $ (3,5)$, $(3,6)$, $(3,7)$ or $(4,5)$.
If there is a vertex, $x$, of degree $3$, then since $\delta=f(22)-f(21)$, by Lemma \ref{lem:inductiveExtremal}, $\Gamma$ can be constructed from one of the extremal graphs on $21$ vertices described in Lemma \ref{lemma:g21}, $\Gamma^{\prime}$, by adding a vertex $x$ of degree $3$ and edges from $x$ to all vertices in an element of $S_{\Gamma^{\prime},3}$. Since $S_{\Gamma^{\prime},3}$ is non-empty, it follows that $\Gamma^{\prime}$ is the first graph defined in Lemma \ref{lemma:g21}, which has a sink node of degree $5$ with $4$ children of degree $4$. Since every element of $S_{\Gamma^{\prime},3}$ contains only vertices of degree less than $5$, it follows that, in $\Gamma$, $\Delta=5$. So $(\delta,\Delta)=(3,5)$. Since every element of $S_{\Gamma^{\prime},3}$ contains a child of a sink node of $\Gamma^{\prime}$ of degree $4$, $\Gamma$ has an embedded $S_{5,[4,3,3,3,3]}$ star.
Suppose then that there are no vertices of degree $3$. Thus $(\delta,\Delta)=(4,5)$ and there are $16$ vertices of degree $4$ and
$6$ of degree $5$.
If there are no sink vertices, all vertices of degree $5$ have $5$ neighbours of degree $4$ (and
$20$ grandchildren), and it follows from Lemma \ref{lem:blocks} that there is a linear space on the vertices of degree $4$. This linear space consists of $6$ blocks of size $5$. Let $B$ denote this set of blocks and $\Gamma_{4}$ the graph formed by the vertices of degree $4$ and the edges between them. It follows from Lemma \ref{lemma:16.1} that, since any vertex in $V_{4}$ that is in $i$ blocks in $B$ has degree $4-i$ in $\Gamma_{4}$, if $deg_{i}$ is the number of vertices of degree $i$ in $\Gamma_{4}$, $(deg_{1},deg_{2},deg_{3},deg_{4})=(1,12,3,0)$, $(0,15,0,1)$ or $(0,14,2,0)$. If, in $\Gamma_{4}$, there is a chain of $4$ vertices of degree $2$, $a-b-c-d$ say, then no two of $a$, $b$, $c$ can be in the same block of $B$, or there is a cycle of length at most $4$. Hence two of the blocks of $B$ contain $a$, two contain $b$ and two contain $c$. Now similarly $d$ is not in a block in $B$ with $b$ or $c$, so must be in both blocks of $B$ containing $a$, which is not possible. So there is no such chain and it follows that $(deg_{1},deg_{2},deg_{3},deg_{4})\neq (0,15,0,1)$. By a similar argument, if $a$ is a vertex of degree $1$ then there is no chain $a-b-c$ where $b$ and $c$ have degree $2$. Hence, if $(deg_{1},deg_{2},deg_{3},deg_{4})=(1,12,3,0)$ and $a$ is the vertex of degree $1$, with neighbour $b$, either $b$ has degree $3$, or $b$ has degree $2$ and its other neighbour $c$ has degree $3$. In either case, there is a vertex of regularity $1$ that can be in no block with $a$. Each of the three blocks on $a$ must contain a distinct vertex of regularity $1$ and there are only three such vertices, so one of the blocks on $a$ contains no vertex of regularity $1$, which is not possible (see the proof of Lemma \ref{lemma:16.1}).
So $(deg_{1},deg_{2},deg_{3},deg_{4})=(0,14,2,0)$.
Let $x$ and $y$ be the vertices of degree $3$. Since there can be no component consisting entirely of vertices of degree $2$ (or it would either contain a cycle of length $3$, or a chain of at least $4$ vertices of degree $2$), either $x$ and $y$ are in different components, or $\Gamma_{4}$ consists of a single component. If $x$ and $y$ are in different components of $\Gamma_{4}$ then their components contain a single vertex of odd degree and all other vertices of even degree, which is not possible (the sum of the degrees in a component must be even). So $\Gamma_{4}$ consists of a single component and either there is a single path (of length at most $4$) from $x$ to $y$ and cycles containing $x$ and $y$ respectively, or there are $3$ paths from $x$ to $y$, together containing all of the vertices of degree $2$. In either case, there is a chain of at least $4$ vertices of degree $2$, which is a contradiction, and the result follows.
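The translation between block regularities and degrees in $\Gamma_{4}$ used above can be checked mechanically. The sketch below is for illustration only; it simply applies the rule that a degree-$4$ vertex lying in $i$ blocks has degree $4-i$ in $\Gamma_{4}$ to the three solutions of Lemma \ref{lemma:16.1}.
\begin{verbatim}
# solutions of Lemma 16.1, as (points in 1 block, in 2 blocks, in 3 blocks)
lemma_16_1 = [(3, 12, 1), (0, 15, 0), (2, 14, 0)]
for r1, r2, r3 in lemma_16_1:
    r0 = 16 - r1 - r2 - r3          # points of the 16 that lie in no block
    counts = {4 - i: c for i, c in enumerate((r0, r1, r2, r3))}
    print(tuple(counts[d] for d in (1, 2, 3, 4)))
# prints (1, 12, 3, 0), (0, 15, 0, 1) and (0, 14, 2, 0)
\end{verbatim}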
\begin{lemma}\label{lemma:g22} There are $3$ nonisomorphic graphs of girth at least $5$ on $22$ vertices with $47$ edges. In one, $(\delta,\Delta)=(4,5)$ and $(deg_{4},deg_{5})=(16,6)$. In the other two, $(\delta,\Delta)=(3,5)$ and $(deg_{3},deg_{4},deg_{5})=(2,12,8)$ and $(1,14,7)$ respectively. In all cases, every element of $S_{\Gamma,3}$ contains $3$ vertices of degree less than $5$, and contains a child of a sink node of $\Gamma$. \end{lemma}
\noindent {\bf Proof} By Theorem \ref{theorem:g22} any such graph has $(\delta,\Delta)=(3,5)$ or $(\delta,\Delta)=(4,5)$ and there is an embedded $S_{5,[4,3,3,3,3]}$ star. Restricting search to those cases produced a set of graphs. Isomorph elimination using nauty \cite{nauty} revealed there to be three such graphs, $\Gamma_{0}$ with $(\delta,\Delta)=(4,5)$, and $(deg_{4},deg_{5})=(16,6)$, and $\Gamma_{1}$ and $\Gamma_{2}$ with $(\delta,\Delta)=(3,5)$ and $(deg_{3},deg_{4},deg_{5})=(2,12,8)$ and $(1,14,7)$ respectively.
These graphs are shown in Figures \ref{fig:extremal22A0}-\ref{fig:extremal22A2}. In each case the graph is drawn as a star about a sink node with $4$ children of degree $4$ and one of degree $5$. The vertices adjacent to each of the grandchildren are indicated as sets.
\begin{figure}
\caption{Extremal graph $\Gamma_{0}$ on $22$ vertices with $47$ edges as a star, $(\delta,\Delta)=(4,5)$}
\label{fig:extremal22A0}
\end{figure}
\begin{figure}
\caption{Extremal graph $\Gamma_{1}$ on $22$ vertices with $47$ edges as a star, $(\delta,\Delta)=(3,5)$}
\label{fig:extremal22A1}
\end{figure}
\begin{figure}
\caption{Extremal graph $\Gamma_{2}$ on $22$ vertices with $47$ edges as a star, $(\delta,\Delta)=(3,5)$}
\label{fig:extremal22A2}
\end{figure}
\noindent Examination of these graphs using a computer reveals that in each case every element of $S_{\Gamma,3}$ contains no vertex of degree greater than $4$, and contains at least one vertex that is the child of a sink node. See Appendix C for details.
\begin{lemma}\label{lemma:g22A} If $\Gamma$ is a graph of girth at least $5$ on $22$ vertices with $47$ edges then $S_{\Gamma,3}$ contains at most $4$ non-intersecting sets. If all vertices of degree $3$ (if any) are contained in a single element of $S_{\Gamma,3}$ then $\Gamma$ is $\Gamma_{1}$ or $\Gamma_{2}$ of Lemma \ref{lemma:g22}. If in addition there are two vertices of degree $4$ which have fewer than $3-deg_{3}$ neighbours of degree $4$ between them, $\Gamma$ is $\Gamma_{2}$. In this case there are three vertices of degree $4$ which have fewer than two neighbours of degree $4$, and each has exactly one such neighbour, $p$, $q$ and $r$ respectively. No two of $p$, $q$ and $r$ are in a common element of $S_{\Gamma,3}$ with the vertex of degree $3$.
\begin{comment} If $S_{\Gamma,3}$ contains a set $X$ of $3$ non-intersecting sets and a further triple $t$ that either does not intersect any element of $X$ or does so only at vertices of degree $3$, then it is not possible to arrange the four vertices of degree less than $5$ that are not in $X$ or $t$ into two elements of $S_{\Gamma,2}$ where neither pair has edges to both $t$ and to an element of $X$. \end{comment} \end{lemma}
\noindent {\bf Proof} From examination of the graphs in Appendix C, $S_{\Gamma,3}$ has at most $4$ non-intersecting sets and if $S_{\Gamma,3}$ contains a set $X$ of $3$ non-intersecting sets then $\Gamma$ is graph $\Gamma_{0}$ or $\Gamma_{2}$. Examination of the vertices of degree $4$ shows that all vertices of degree $4$ have at least two neighbours of degree $4$ unless $\Gamma$ is $\Gamma_{2}$ and the vertex is $1$, $16$, or $17$, which have neighbours of degree $4$: $12$, $13$ and $9$ respectively. No two of these vertices are in an element of $S_{\Gamma,3}$ with the vertex of degree $3$ (i.e. vertex $0$).
\begin{comment}
If $\Gamma=\Gamma_{0}$ then, since $|S_{\Gamma_{0},3}|=4$, $t\cup X = S_{\Gamma_{0},3}$. As shown in Appendix C the four vertices of degree less than $5$ that do not appear in $S_{\Gamma_{0},3}$ can be uniquely arranged into two elements of $S_{\Gamma_{0},2}$ and, whichever way we choose $t$, both pairs have edges to both $t$ and at least one element of $X=S_{\Gamma_{0},3}\setminus \{t\}$.
If $\Gamma=\Gamma_{2}$ then, since there are no $4$ parallel blocks, $t$ intersects $X$ at the vertex of degree $3$. As shown in Appendix C, there are $2$ sets of $4$ elements of $S_{\Gamma_{2},3}$ such that the only vertex to appear twice is the vertex of degree $3$ and the remaining $4$ vertices of degree less than $5$ constitute two non-intersecting elements of $S_{\Gamma_{2},2}$. In each case, there are edges from the elements of one of the pairs to both triples containing the vertex of degree $3$ and one of the other triples. The result follows. \end{comment}
\section{Case $v=23$}
\begin{theorem}\label{theorem:g23} If $\Gamma$ is a graph of girth at least $5$ on $23$ vertices with $50$ edges then either $(\delta,\Delta)=(3,5)$ and there is an embedded $S_{5,[4,4,3,3,3]}$ star, or $(\delta,\Delta)=(4,5)$ and there is an embedded $S_{5,[4,4,3,3,3]}$ star or, if there is no such star, an embedded $S_{5,[4,3,3,3,3]}$ star. \end{theorem}
\noindent {\bf Proof} Since $\Gamma$ is extremal, it follows from Table \ref{table:deltaTable} that the values of $(\delta,\Delta)$ are either $ (3,5)$, $(3,6)$, $(3,7)$ or $(4,5)$.
If there is a vertex, $x$, of degree $3$, then since $\delta=f(23)-f(22)$, by Lemma \ref{lem:inductiveExtremal}, $\Gamma$ can be constructed from one of the extremal graphs on $22$ vertices described in Lemma \ref{lemma:g22}, $\Gamma^{\prime}$, by adding a vertex $x$ of degree $3$ and edges from $x$ to all vertices in an element of $S_{\Gamma^{\prime},3}$.
Since in each case $S_{\Gamma^{\prime},3}$ contains no vertex of degree greater than $4$ (by Lemma \ref{lemma:g22}), it follows that $\Delta=5$ in $\Gamma$. Hence $(\delta, \Delta)=(3,5)$. In addition, since every element of $S_{\Gamma^{\prime},3}$ contains a child of a sink node of $\Gamma^{\prime}$, it follows that $\Gamma$ has a sink node of degree $5$ with $2$ children of degree $5$ and $3$ of degree $4$.
If there is no vertex of degree $3$ then $(\delta,\Delta)=(4,5)$ and $(deg_{4},deg_{5})=(15,8)$. If no vertex of degree $5$ has any neighbour of degree $5$ then there is a linear space on $15$ points with $8$ blocks of size $5$, contradicting Lemma \ref{lemma:packingLemma}. Hence some vertex of degree $5$ has at least one neighbour of degree $5$, and the result follows.
\begin{lemma}\label{lemma:g23} There are $7$ non-isomorphic graphs of girth at least $5$ on $23$ vertices with $50$ edges. All of the graphs contain embedded $S_{5,[4,4,3,3,3]}$ stars. For $3$ of the graphs, $S_{\Gamma,4}$ is empty. The remaining $4$ graphs consist of: \begin{itemize} \item $1$ graph with $(\delta,\Delta)=(4,5)$ and $(deg_{4},deg_{5})=(15,8)$. Every element of $S_{\Gamma,4}$ contains $4$ vertices of degree $4$, including at least one child of a sink node. \item $2$ graphs with $(\delta,\Delta)=(3,5)$ and $(deg_{3},deg_{4},deg_{5})=(1,13,9)$. Every element of $S_{\Gamma,4}$ contains the vertex of degree $3$ and $3$ vertices of degree $4$, including a child of a sink node. \item $1$ graph with $(\delta,\Delta)=(3,5)$ and $(deg_{3},deg_{4},deg_{5})=(2,11,10)$. Every element of $S_{\Gamma,4}$ contains the two vertices of degree $3$ and $2$ vertices of degree $4$, including a child of a sink node. \end{itemize} \end{lemma}
\noindent {\bf Proof} (1) By Theorem \ref{theorem:g23} any such graph has $(\delta,\Delta)=(3,5)$ and there is an embedded $S_{5,[4,4,3,3,3]}$ star or $(\delta,\Delta)=(4,5)$ and either there is an embedded $S_{5,[4,4,3,3,3]}$ star or there is no embedded $S_{5,[4,4,3,3,3]}$ star and there is an embedded $S_{5,[4,3,3,3,3]}$ star. Restricting search to those cases produced a set of
graphs. Isomorph elimination using nauty \cite{nauty} revealed there to be $7$ such graphs, two with $(\delta,\Delta)=(4,5)$ and $(deg_{3},deg_{4},deg_{5})=(0,15,8)$, and $5$ with $(\delta,\Delta)=(3,5)$, two of which
have $(deg_{3},deg_{4},deg_{5})=(2,11,10)$ and three of which have $(deg_{3},deg_{4},deg_{5})=(1,13,9)$. In all cases there is an embedded $S_{5,[4,4,3,3,3]}$ star, and using a computer we have verified the structure of $S_{\Gamma,4}$ in each case. See Appendix D for details.
\begin{lemma}\label{lemma:g23A} Suppose that $\Gamma$ is a graph of girth at least $5$ on $23$ vertices with $50$ edges and $(\delta,\Delta)=(4,5)$, and let $\Gamma_{4}=(V_{4},E_{4})$ be the subgraph of $\Gamma$ on the vertices of degree $4$. Then there is at most one vertex in $V_{4}$ that is in no edge in $E_{4}$. If two vertices have degrees $0$ and $1$ in $\Gamma_{4}$, then they have a common neighbour of degree $5$ in $\Gamma$. \end{lemma}
\noindent {\bf Proof} From the proof of Lemma \ref{lemma:g23} there are two such graphs with $(\delta,\Delta)=(4,5)$. Only the first of these contains a vertex of degree $4$ that has no neighbours of degree $4$ (vertex $a$ say). There are two vertices that have degree $1$ in $\Gamma_{4}$, and each of these has a common neighbour of degree $5$ with $a$ in $\Gamma$. See Appendix D for details.
\begin{comment}
(2) There are two graphs with $(deg_{3},deg_{4},deg_{5})=(0,5,18)$. They are shown as graphs $A$ and $B$ in Figure
\ref{fig:graphs23A}. In all cases there is a vertex of degree $5$ that has $2$ children of degree $5$ and $3$ of degree $4$. In each case they are drawn as a star with central node a sink node. The
nodes of degree $5$ are circled, and (where necessary for our proof) for some of the grandchildren the set of
neighbours of the grandchild are indicated as elements of a set, above the label for the grandchild. Consider graph
$A$, and let $S$ be a set of $4$ vertices with the desired property. Then no element of $S$ is a child of vertex $22$
(as vertex $22$ has two neighbours of degree $5$), and no two elements of $S$ can have the same parent (as all
elements of $S$ are at a distance of at least $3$ from each other). It follows that the elements of $S$ consist of a child
of each of the vertices $4$, $5$, $6$ and $7$. Call the corresponding elements of $S$, $x_{1}$, $x_{2}$, $x_{3}$ and
$x_{4}$. As all elements of $S$ have degree $4$, it follows that $x_{1}$ is vertex $3$. Since vertex $3$ is adjacent to
vertex $8$, we must have that $x_{2}$ is vertex $2$. But vertices $3$ and $2$ are both adjacent to vertex $21$, so
are at a distance of $2$ apart, which is a contradiction. Consider graph $B$. Since no element of $S$ can be adjacent
to either vertex $4$ or vertex $2$, two of the elements of $S$ must have the same parent, which is a contradiction.
(3) There are three graphs with $(deg_{3},deg_{4},deg_{5})=(1,13,9)$. They are shown as graphs $C$, $D$ and $E$ in Figure
\ref{fig:graphs23B} in a similar way to the above, but with the vertex of degree $3$ indicated in bold face.
Consider graph $C$. Set $S$ must contain the vertex of degree $3$, namely vertex $6$, and must include a child of at least one of vertices $19$ and $3$, so must contain vertex $9$ or vertex $4$. But vertices $4$ and $9$ both share a neighbour with vertex $3$, so this is impossible.
For graph $D$, by a similar argument $S$ must contain vertex $5$. Since $S$ can contain no child of vertex $22$ it must contain vertex $13$ and one of vertices $4$ and $14$. But vertex $4$ shares a neighbour with vertex $5$, and vertex $14$ shares a neighbour with vertex $13$. For graph $D$, by a similar argument $S$ must contain vertex $3$. Since $S$ can not contain vertex $9$ (as it shares a neighbour with vertex $3$), $S$ can contain no child of vertex $19$. Hence $S$ must contain a child of vertex $18$,
For graph $E$, $S$ must contain vertex $3$. As $S$ can not contain vertex $9$ (which shares a neighbour with vertex $3$), it must contain a child of each of the vertices $17$, $18$ and $20$. The only child of $18$ of degree $4$ is vertex $12$, so $S$ contains $12$. Since $S$ contains a child of vertex $17$, it must contain either vertex $14$ (which is at a distance of $1$ from vertex $12$) or vertex $16$ (which shares a neighbour with vertex $12$). Either way we have a contradiction.
\begin{figure}
\caption{Extremal graphs on $23$ vertices, with $(deg_{3},deg_{4},deg_{5})=(0,5,18)$ }
\label{fig:graphs23A}
\end{figure}
\begin{figure}
\caption{Extremal graphs on $23$ vertices, with $(deg_{3},deg_{4},deg_{5})=(1,13,9)$ }
\label{fig:graphs23B}
\end{figure} \end{comment}
\section{Case $v=24$}
\begin{theorem}\label{theorem:g24} If $\Gamma$ is a graph of girth at least $5$ on $24$ vertices with $54$ edges then $(\delta,\Delta)=(4,5)$ and there is an embedded $S_{5,[4,4,4,3,3]}$ star. \end{theorem}
\noindent {\bf Proof} From Table \ref{table:deltaTable} we know that in this case all vertices have degree $4$ or $5$. There are $12$ vertices of degree
$4$ and $12$ vertices of degree $5$. Since $\delta=f(24)-f(23)$, by Lemma \ref{lem:inductiveExtremal}, $\Gamma$ can be constructed from one of the extremal graphs on $23$ vertices described in Lemma \ref{lemma:g23}, $\Gamma^{\prime}$, by adding a vertex $x$ of degree $4$ and edges from $x$ to all vertices in an element of $S_{\Gamma^{\prime},4}$. In all cases, every element of $S_{\Gamma^{\prime},4}$ includes precisely one child of degree $4$ of any central node of an embedded $S_{5,[4,4,3,3,3]}$ star. It follows that $\Gamma$ contains an embedded $S_{5,[4,4,4,3,3]}$ star.
\begin{lemma}\label{lemma:g24} (1) There is $1$ graph $\Gamma$ of girth at least $5$ on $24$ vertices with $54$ edges. (2) Any element of $S_{\Gamma,3}$ contains only vertices of degree $4$. (3) There are no $4$ non-intersecting sets in $S_{\Gamma,3}$ which can be split into two pairs of sets with no edge between the two sets in either pair. (4) If $X$ is a set of $3$ non-intersecting elements of $S_{\Gamma,3}$ then at least two of the three vertices of degree $4$ that are not in any set in $X$ are in an edge with an element in a set in $X$. \end{lemma}
\begin{comment} (2) There is one set $X$ of $4$ elements of $S_{\Gamma,3}$ that do not intersect. There is exactly one pair $X_{1},X_{2}\in X$ such that for all $x_{1}\in X_{1}$, $x_{2}\in X_{2}$ there is no edge $(x_{1},x_{2}) \in \Gamma$. (3) If $e$ is an edge between two vertices of degree $4$ that are each in one edge only in $\Gamma_{4}$, then $e$ intersects all but $2$ of the elements of $S_{\Gamma,3}$. (4) There are no $4$ non-intersecting elements of $S_{\Gamma,3}$ for which there are are two pairs with no edge between either of the pairs. \end{comment}
\noindent {\bf Proof} (1) By Theorem \ref{theorem:g24} $(\delta,\Delta)=(4,5)$ and there is an embedded $S_{5,[4,4,4,3,3]}$ star. Restricting search to this case produced a set of graphs. Isomorph elimination using nauty \cite{nauty} revealed there to be one such graph, with $2$ sink nodes. This is shown in Figure \ref{fig:graph24} as a star about vertex $13$, together with set $S_{\Gamma,3}$. (2) Clearly all elements of $S_{\Gamma,3}$ contain only vertices of degree $4$. (3) and (4) follow from observation; see Appendix E for details.
\begin{figure}
\caption{Extremal graph on $24$ vertices }
\label{fig:graph24}
\end{figure}
\begin{comment} These sets are (0,7,18), (0,8,18), (1,4,23),(1,16,22),(4,14,17),(4,17,23),(7,14,21),(8,18,22) \end{comment}
\section{Case $v=25$}
\begin{theorem} \label{theorem:g25} If $\Gamma$ is a graph of girth at least $5$ on $25$ vertices with $57$ edges then $(\delta,\Delta)=(3,5)$, $(4,5)$ or $(4,6)$. If $(\delta,\Delta)=(3,5)$ or $(4,5)$ then $\Gamma$ has an embedded $S_{5,[4,4,4,4,3]}$ star or, if there is no such star, an embedded $S_{5,[4,4,4,3,3]}$ star. If $(\delta,\Delta)=(4,6)$ then $1\leq deg_{6}\leq 2$ and there is an embedded $S_{6,[3,3,3,3,3,3]}$ star. \end{theorem}
\noindent {\bf Proof} We know from Table \ref{table:deltaTable} that in this case the possible values for $(\delta,\Delta)$ are $(3,5)$, $(3,6)$, $(3,7)$, $(3,8)$, $(4,5)$ and $(4,6)$. If there is a vertex, $x$, of degree $3$, then since $\delta=f(25)-f(24)$, by Lemma \ref{lem:inductiveExtremal}, $\Gamma$ can be constructed from the extremal graph on $24$ vertices described in Lemma \ref{lemma:g24}, $\Gamma^{\prime}$, by adding a vertex $x$ of degree $3$ and edges from $x$ to all vertices in an element of $S_{\Gamma^{\prime},3}$.
Since $S_{\Gamma^{\prime},3}$ only contains vertices of degree $4$, it follows that $(\delta,\Delta)=(3,5)$. Since $\Gamma^{\prime}$ contains an embedded $S_{5,[4,4,4,3,3]}$ star, $\Gamma$ does too, and the result for $\delta=3$ holds.
If $(\delta,\Delta)=(4,5)$ then $(deg_{4},deg_{5})=(11,14)$ and the sets of vertices of degree $4$ adjacent to each vertex of degree $5$ form a linear space $\Lambda$ (see Lemma \ref{lem:blocks}). If no vertex of degree $5$ has more than $2$ neighbours of degree $5$ then $\Lambda$ consists of $14$ blocks of size at least $3$. It follows from \cite{bettenbetten4} that $\Lambda$ has at least $11$ blocks of size $3$ (and, correspondingly, at least $11$ vertices of degree $5$ have two neighbours of degree $5$).
Let $x$ be such a vertex of degree $5$. Let $S$ be the embedded $S_{5,[4,4,3,3,3]}$ star with $x$ as root (see Figure \ref{fig:star25false}). Note that $A$ and $B$ denote the sets of children of the two children of $x$ of degree $5$, and $R$ the set consisting of the remaining grandchildren of $x$, and the vertices $p_{1}$ and $p_{2}$ that are not contained in the star. Since we have assumed that all vertices of degree $5$ have at most one child of degree $5$, $A$ and $B$ contain at most $1$ vertex each of degree $5$ and $R$ contains at least $9$ vertices of degree $5$ and at most $2$ of degree $4$. Now if $A$ contains a vertex of degree $5$ then it must have at least $2$ neighbours in $R$ that do not have degree $5$. So $R$ contains exactly $9$ vertices of degree $5$ and $2$ of degree $4$. Then $B$ must contain a vertex of degree $5$, which must be adjacent to the same two elements of $R$, and there is a square. It follows that $A$ and $B$ contain no vertices of degree $5$, and every element of $R$ has degree $5$. But then $p_{1}$ must be adjacent to $3$ elements of $A\cup B$ (as it can have at most $2$ neighbours of degree $5$), which is impossible, as there will then be a square. So there is at least one vertex of degree $5$ that has more than $2$ neighbours of degree $5$ and so $\Gamma$ either has an embedded $S_{5,[4,4,4,4,3]}$ star, or has no embedded $S_{5,[4,4,4,4,3]}$ star but has an embedded $S_{5,[4,4,4,3,3]}$ star.
\begin{figure}
\caption{Embedded $S_{5,[4,4,3,3,3]}$ star in the case $v=25$}
\label{fig:star25false}
\end{figure}
If $(\delta,\Delta)=(4,6)$, any vertex $x$ of degree $6$ must have $6$ neighbours of degree $4$: since the girth is at least $5$, the vertex $x$, its $6$ neighbours and their further neighbours are all distinct, so $1+6+\sum_{y\in N(x)}(deg(y)-1)\leq 25$, which forces every neighbour of $x$ to have degree exactly $4$. Moreover, the degree counts give $deg_{5}=14-2deg_{6}$ and $deg_{4}=11+deg_{6}$. If $deg_{6}\geq 3$ then the vertices of degree $6$ are pairwise non-adjacent and any two of them have at most one common neighbour, so their neighbourhoods cover at least $6deg_{6}-\binom{deg_{6}}{2}>11+deg_{6}$ distinct vertices of degree $4$, a contradiction. Hence $1\leq deg_{6}\leq 2$ and the result follows.
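The bound on $deg_{6}$ can also be checked by a short enumeration (for illustration only; the covering bound is the one derived above).
\begin{verbatim}
from math import comb

# 25 vertices, 57 edges, degrees in {4,5,6}: deg5 = 14 - 2*deg6 and
# deg4 = 11 + deg6; each degree-6 vertex has 6 neighbours of degree 4,
# no two degree-6 vertices are adjacent, and any two share at most one
# neighbour, so they need at least 6*deg6 - C(deg6,2) degree-4 vertices.
for d6 in range(1, 26):
    d5 = 14 - 2 * d6
    if d5 < 0:
        break
    d4 = 11 + d6
    needed = 6 * d6 - comb(d6, 2)
    print((d4, d5, d6), "needs", needed, "degree-4 vertices:",
          "ok" if needed <= d4 else "impossible")
# only deg6 = 1 and deg6 = 2 survive
\end{verbatim}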
\begin{lemma}\label{lemma:g25} There are $6$ graphs of girth at least $5$ on $25$ vertices with $57$ edges. Three of the graphs have $(\delta,\Delta)=(3,5)$ and $(deg_{3},deg_{4},deg_{5})=(1,9,15)$. One of these graphs has no sink node and set $S_{\Gamma,4}$ is empty. The other two have an embedded $S_{5,[4,4,4,4,3]}$ star and non-empty sets $S_{\Gamma,4}$ where every element of $S_{\Gamma,4}$ contains a child of degree $4$ of a sink node, the vertex of degree $3$, and $2$ further vertices of degree $4$. In the remaining three graphs, $(\delta,\Delta)=(4,5)$, $(deg_{4},deg_{5})=(11,14)$ and there is an embedded $S_{5,[4,4,4,4,3]}$ star. One of these graphs has empty set $S_{\Gamma,4}$, and for the other two every element of $S_{\Gamma,4}$ contains a child of degree $4$ of a sink node and $3$ other vertices of degree $4$. In addition, for each of $\Gamma_{3},\ldots, \Gamma_{5}$, $S_{\Gamma,3}$ does not contain $3$ non-intersecting elements consisting entirely of vertices of degree $4$. \end{lemma}
\noindent {\bf Proof} Restricting our search to graphs with the values of $(\delta,\Delta)$ and the embedded stars identified in Theorem \ref{theorem:g25} produced a set of graphs. Isomorph elimination using nauty \cite{nauty} revealed there to be six graphs, $\Gamma_{0},\ldots, \Gamma_{5}$. Using a computer the graphs were shown to satisfy the stated properties. For details see Appendix F.
\begin{lemma}\label{lemma:g25A} If $\Gamma$ is a graph of girth at least $5$ on $25$ vertices with $57$ edges and $X_{1}$, $X_{2}$, $X_{3}$ parallel elements of $S_{\Gamma,3}$ then \begin{enumerate} \item if $(\delta,\Delta)=(4,5)$ one of the sets must contain a vertex of degree $5$; \item it is not possible that $X_{1}$ contains all vertices of degree $3$ and there are no edges between $X_{1}$ and $X_{2}$, or between $X_{1}$ and $X_{3}$. \end{enumerate} \end{lemma} \begin{comment} , and any edge of $\Gamma$ consisting of two vertices of degree $4$ intersects at least one of $X_{1}$, $X_{2}$, $X_{3}$. \end{comment}
\noindent {\bf Proof} Follows from examination of graphs $\Gamma_{0},\ldots, \Gamma_{5}$ (and the corresponding sets $S_{\Gamma,3}$) as shown in Appendix F.
\section{Case $v=26$}
\begin{theorem} \label{theorem:g26} If $\Gamma$ is a graph of girth at least $5$ on $26$ vertices with $61$ edges then $(\delta,\Delta)=(4,5)$ and $\Gamma$ has an embedded $S_{5,[4,4,4,4,4]}$ star. \end{theorem}
\noindent {\bf Proof} We know from Table \ref{table:deltaTable} that in this case the possible values for $(\delta,\Delta)$ are $(4,5)$ and $(4,6)$. Since in all cases $\delta= f(26)-f(25)$, by Lemma \ref{lem:inductiveExtremal}, $\Gamma$ can be constructed from one of the extremal graphs on $25$ vertices described in Lemma \ref{lemma:g25}, $\Gamma^{\prime}$, by adding a vertex $x$ of degree $4$ and edges from $x$ to all vertices in an element of $S_{\Gamma^{\prime},4}$.
By Lemma \ref{lemma:g25}, it follows that, in $\Gamma$, $(\delta,\Delta)=(4,5)$ and $\Gamma$ has an embedded $S_{5,[4,4,4,4,4]}$ star.
\begin{lemma}\label{lemma:g26} There are $2$ graphs of girth at least $5$ on $26$ vertices with $61$ edges, both have $(\delta,\Delta)=(4,5)$. In one of these graphs, set $S_{\Gamma,4}$ is empty. In the other, $S_{\Gamma,4}$ contains $1$ set, which consists of $4$ vertices of degree $4$, none of which is a child of a sink node. In one graph there is a unique set $X=\{X_{1},X_{2}\}$ containing two non-intersecting elements of $S_{\Gamma,3}$ that each consist of elements of degree $4$, and, for all $x_{1}\in X_{1}$, $x_{2}\in X_{2}$ there is no edge $(x_{1},x_{2}) \in \Gamma$. In the other graph there is no such set $X$. \end{lemma}
\noindent {\bf Proof} From Theorem \ref{theorem:g26} all extremal graphs on $26$ vertices can be constructed by adding a new vertex $x$, and edges to a set of $4$ vertices that is a set in $S_{\Gamma^{\prime},4}$, where $\Gamma^{\prime}$ is one of the $6$ extremal graphs on $25$ vertices. This produced a set of $6$ graphs. Isomorph elimination using nauty \cite{nauty} revealed there to be two distinct graphs, $A_{0}, A_{1}$. Using a computer the graphs were shown to satisfy the stated properties. See Appendix G for details.
\section{Case $v=27$}
\begin{theorem} \label{theorem:g27} If $\Gamma$ is a graph of girth at least $5$ on $27$ vertices with $65$ edges then $(\delta,\Delta)=(4,5)$ and $\Gamma$ has an embedded $S_{5,[4,4,4,4,4]}$ star. \end{theorem}
\noindent {\bf Proof} We know from Table \ref{table:deltaTable} that in this case the possible values for $(\delta,\Delta)$ are $(4,5)$ and $(4,6)$. Since in all cases $\delta= f(27)-f(26)$, by Lemma \ref{lem:inductiveExtremal}, $\Gamma$ can be constructed from one of the extremal graphs on $26$ vertices described in Lemma \ref{lemma:g26}, $\Gamma^{\prime}$, by adding a vertex $x$ of degree $4$ and edges from $x$ to all vertices in an element of $S_{\Gamma^{\prime},4}$.
By Lemma \ref{lemma:g26} it follows that, in $\Gamma$, $(\delta,\Delta)=(4,5)$ and (since any non-empty set $S_{\Gamma^{\prime},4}$ does {\it not} contain the child of a sink node), $\Gamma$ has an embedded $S_{5,[4,4,4,4,4]}$ star.
\begin{lemma}\label{lemma:g27} There is $1$ graph of girth at least $5$ on $27$ vertices with $65$ edges. In this graph $(\delta,\Delta)=(4,5)$ and $S_{\Gamma,3}$ contains $5$ sets, each of which contains $1$ vertex of degree $4$ and $2$ of degree $5$. All vertices of degree $5$ in elements of $S_{\Gamma,3}$ are roots of an embedded $S_{5,[4,4,4,4,3]}$ star. Set $S_{\Gamma,4}$ is empty. \end{lemma}
\noindent {\bf Proof} From Theorem \ref{theorem:g27} all extremal graphs on $27$ vertices can be constructed by adding a new vertex $x$, and edges to a set of $4$ vertices that is a set in $S_{\Gamma^{\prime},4}$, where $\Gamma^{\prime}$ is one of the $2$ extremal graphs on $26$ vertices. Since, by Lemma \ref{lemma:g26} only one of the extremal graphs on $26$ vertices has a non-empty set $S_{\Gamma^{\prime},4}$, and this contains only one set, this produces a single graph. Using a computer the graph was shown to satisfy the stated properties. See Appendix H for details.
\section{Case $v=28$}
\begin{theorem} \label{theorem:g28} If $\Gamma$ is a graph of girth at least $5$ on $28$ vertices with $68$ edges then $(\delta,\Delta)=(3,6)$, $(deg_{3},deg_{4},deg_{5},deg_{6})=(1,4,21,2)$, and there is an embedded $S_{6,[4,4,4,4,3,2]}$ star, or $(\delta,\Delta)=(4,5)$ or $(4,6)$.
If $(\delta,\Delta)=(4,5)$ then $(deg_{4},deg_{5})=(4,24)$ and there is an embedded $S_{5,[4,4,4,4,4]}$ star. If $(\delta,\Delta)=(4,6)$ then $(deg_{4},deg_{5},deg_{6})\in \{(i, 32-2i ,i-4):5\leq i \leq 13\}$ and there is an embedded $S_{6,[4,4,4,3,3,3]}$ star. \end{theorem}
\noindent {\bf Proof} We know from Table \ref{table:deltaTable} that in this case the possible values for $(\delta,\Delta)$ are $(3,5)$, $(3,6)$, $(3,7)$, $(3,8)$, $(3,9)$, $(4,5)$ or $(4,6)$. If $\delta=3$, by Lemma \ref{lem:inductiveExtremal}, $\Gamma$ can be constructed from the extremal graph on $27$ vertices described in Lemma \ref{lemma:g27}, $\Gamma^{\prime}$, by adding a vertex $x$ of degree $3$ and edges from $x$ to all vertices in an element of $S_{\Gamma^{\prime},3}$.
By Lemma \ref{lemma:g27} it follows that, in $\Gamma$, $(\delta,\Delta)=(3,6)$, $(deg_{3},deg_{4},deg_{5},deg_{6})=(1,4,21,2)$, and there is an embedded $S_{6,[4,4,4,4,3,2]}$ star.
If $(\delta,\Delta)=(4,5)$ then $(deg_{4},deg_{5})=(4,24)$ and there are at most $16$ edges from the vertices of degree $4$ to the vertices of degree $5$. Hence at least one vertex of degree $5$ has no neighbours of degree $4$ and the result follows.
If $(\delta,\Delta)=(4,6)$ then, since $deg_{4}>0$ and $deg_{6}>0$, $(deg_{4},deg_{5},deg_{6}) \in \{(i, 32-2i ,i-4):5\leq i \leq 16\}$. A vertex of degree $6$ can have at most one neighbour of degree $6$, and so has at least $5$ edges to the set $S$ containing the vertices of degrees $4$ and $5$. It follows that there is a linear space on $deg_{4}+deg_{5}$ points containing at least $deg_{6}$ blocks of size $5$. It follows from Lemma \ref{lemma:packingLemma} that this does not hold when $i\geq 14$.
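As a quick illustration (not part of the argument), the admissible degree sequences just derived can be enumerated directly from the two counting constraints.
\begin{verbatim}
# 28 vertices, 68 edges, all degrees in {4,5,6}, deg4 > 0 and deg6 > 0.
sols = []
for d4 in range(1, 28):
    for d6 in range(1, 29 - d4):
        d5 = 28 - d4 - d6
        if 4 * d4 + 5 * d5 + 6 * d6 == 2 * 68:
            sols.append((d4, d5, d6))
print(sols)   # (5,22,1), (6,20,2), ..., (16,0,12), i.e. (i, 32-2i, i-4)
\end{verbatim}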
If there is no embedded $S_{6,[4,4,4,3,3,3]}$ star then every vertex of degree $6$ has at least $4$ neighbours of degree $4$. This is only possible if there is a linear space on $deg_{4}$ points that has at least $deg_{6}$ blocks of size at least $4$. By Lemma \ref{lemma:packingLemma}, this does not hold if $5<deg_{4}\leq 11$. So there is an embedded $S_{6,[4,4,4,3,3,3]}$ star except possibly where $deg_{4}=5$, $12$ or $13$.
Suppose that $deg_{4}=5$ then $(deg_{4},deg_{5},deg_{6})=(5,22,1)$. If there is no embedded $S_{6,[4,4,4,3,3,3]}$ star the vertex $x$ of degree $6$ is
adjacent to at least $4$ of the vertices of degree $4$. There are $3$ cases to consider: $x$ is adjacent to $5$ vertices of degree $4$ and $1$ of degree $5$;
$x$ is adjacent to $4$ vertices of degree $4$ and there is a further edge among the vertices of degree $4$; and $x$ is adjacent to $4$ vertices of degree $4$
and there is no further edge among the vertices of degree $4$. In all cases, let $T$ denote the set of edges from the vertices of degree $5$ to the vertices of degree $4$ and $6$.
In the first two cases, $|T|=16$ and it follows that the graph $\Gamma_{5}$ on the $22$ vertices of degree
$5$ has $47$ edges and is extremal.
In the first case, each of the vertices of degree $4$ is in edges with $3$ of the vertices of degree $5$. Since all of the vertices of degree $4$ are adjacent to $x$, it follows that $S_{\Gamma_{5},3}$ contains $5$ non-intersecting sets. This is not possible by Lemma \ref{lemma:g22A}.
In the second case, let $\Gamma_{5}$ be one of the extremal graphs $\Gamma_{0}$, $\Gamma_{1}$, $\Gamma_{2}$ described in Lemma \ref{lemma:g22}. Let $x$ be the vertex of degree $6$ and $\{a,b,c,d,e\}$ the vertices of degree $4$, where $x$ is adjacent to $a$,
$b$, $c$ and $d$, and $d$ and $e$ are adjacent. Let $A$, $B$, $C$, $D$, $E$ and $X$ be the sets of vertices in
$V_{5}$ that are adjacent to $a$, $b$, $c$, $d$, $e$ and $x$ in $\Gamma$. Note that each of these sets contains
only vertices that have degree at most $4$ in $\Gamma_{5}$, $|A|=|B|=|C|=|E|=3$, and $|D|=|X|=2$. Sets $A$, $B$, $C$, $D$
and $X$ do not intersect, and any element that is in two of the sets is in $E$. It follows that $E$ contains all of the
vertices of degree $3$ in $\Gamma_{5}$ and all of the vertices of degree $4$ in $\Gamma_{5}$ are in precisely one
of the sets. Set $X$ must contain $2$ elements of degree $4$, $p$
and $q$ say, that are not in edges with any element in sets $A$, $B$, $C$ or $D$ (or there is a square). So any
neighbour of degree $4$ of $p$ or $q$ must be in set $E$, with the vertex of degree $3$ in $\Gamma_{5}$, and $p$ and $q$ have at most $2$ neighbours of degree $4$ between them. By
Lemma \ref{lemma:g22A} this is not possible.
\begin{comment} The second case is illustrated in Figure \ref{fig:needToDrawThis}. The vertices of degree $4$ are $a$, $b$, $c$, $d$ and $e$. Vertices $a$, $b$ and $c$ are each adjacent to parallel sets from $S_{\Gamma_{5},3}$. Call this set of triples $X$, and $e$ is adjacent to a further set from $S_{\Gamma_{5},3}$, $t$ say. There are pairs $\pi_{1}$ and $\pi_{2}$ from $S_{\Gamma_{5},2}$ that are adjacent to $d$ and $x$ respectively. All elements in $X$, $t$, $\pi_{1}$ and $\pi_{2}$ have degree less than $5$ in $\Gamma_{5}$ (as they are adjacent to vertices of degree $4$ or $6$ in $\Gamma$). Now the elements of $X$ must not intersect (or there is a square), and any vertex can appear in at most $2$ of $X$, $t$, $\pi_{1}$ or $\pi_{2}$, and at most $1$ unless it has degree $3$ in $\Gamma_{5}$. In addition, there can be no edge from $\pi_{1}$ to $t$ or from $\pi_{2}$ to any element of $X$ (or there is a square in each case). By Lemma \ref{lemma:g22A}, if $X$ and $t$ have the properties required then if the are $4$ additional vertices are allocated to pairs from $S_{\Gamma_{5},2}$, one pair has edges to both $t$ and an element of $X$. So it can be neither $\pi_{1}$ or $\pi_{2}$ and there is a contradiction.
\begin{figure}
\caption{Graph on $28$ vertices with $(deg_{4},deg_{5},deg_{6})=(5,22,1)$ and edge between vertices of degree $4$ }
\label{fig:needToDrawThis}
\end{figure} \end{comment}
If $x$ is adjacent to $4$ vertices of degree $4$ and there is no
further edge among the vertices of degree $4$, then let $\Sigma=\{a,b,c,d\}$ denote the set of vertices of degree $4$ adjacent to $x$, $e$ the remaining vertex of degree
$4$, and $X$ the set of two vertices of degree $5$ that are adjacent to $x$. Let $\Gamma_{5,e}$ denote the graph formed by removing the vertices in $\{x\}\cup \Sigma$ and the edges on them. Then
$\Gamma_{5,e}$ has $23$ vertices and $50$ edges, is extremal and has vertices of degree $4$ or $5$. The vertices that have degree $4$ in $\Gamma_{5,e}$
are those that are in an edge in $\Gamma$ with an element of $\Sigma$, the vertices in $X$, and $e$. By an argument similar to the above, the elements of $X$ must have at most $1$ neighbour of degree $4$ (namely $e$) in $\Gamma_{5,e}$. But by Lemma \ref{lemma:g23A} the elements of $X$ have a common neighbour in
$\Gamma_{5,e}$. Since, in $\Gamma$, both elements of $X$ are adjacent to $x$, there is a cycle of length $4$. This is a contradiction, so if
$(deg_{4},deg_{5},deg_{6})=(5,22,1)$ there is an embedded $S_{6,[4,4,4,3,3,3]}$ star.
Suppose that $(deg_{4},deg_{5},deg_{6})=(12, 8,8)$. If there is no embedded $S_{6,[4,4,4,3,3,3]}$ star, every vertex of degree $6$ is adjacent to at least $4$ vertices of degree $4$. So there is a linear space on $12$ points containing $8$ blocks of size at least $4$. It follows from \cite{bettenbetten4} that the $8$ blocks must all have size exactly $4$. There is one such linear space, and all of the blocks intersect, by Lemma \ref{lemma:12.1}. It follows that no two vertices of degree $6$ can be adjacent to each other, or adjacent to a common vertex of degree $5$. Since every vertex of degree $6$ is then adjacent to two vertices of degree $5$, and no two vertices of degree $6$ share such a neighbour, this would require $16$ distinct vertices of degree $5$, which is impossible. So if $(deg_{4},deg_{5},deg_{6})=(12, 8,8)$ there is an embedded $S_{6,[4,4,4,3,3,3]}$ star.
Suppose that $(deg_{4},deg_{5},deg_{6})=(13, 6,9)$. If there is no embedded $S_{6,[4,4,4,3,3,3]}$ star every vertex of degree $6$ is adjacent to at least $4$ vertices of degree $4$. So there is a linear space $\Lambda$ on $13$ points containing $9$ blocks of size at least $4$. Since there is no linear space on $13$ points with a block of size $5$ and $8$ blocks of size at least $4$, there are exactly $36$ edges from the vertices of degree $6$ to the vertices of degree $4$. By Lemma \ref{lemma:13.1}, $\Lambda$ has no set of $3$ non-intersecting blocks so no vertex of degree $6$ has two neighbours of degree $6$, and so is in at least one edge with a vertex of degree $5$. Hence there are at least $9$ edges from the vertices of degree $6$ to the vertices of degree $5$. If a vertex of degree $5$ is in two edges with vertices of degree $6$, the corresponding blocks in $\Lambda$ must not intersect (or there is a square). By Lemma \ref{lemma:13.1}, $\Lambda$ has at most $3$ pairs of non-intersecting blocks and it follows that there are exactly $9$ edges from the vertices of degree $6$ to the vertices of degree $5$, and every vertex of degree $6$ is in one edge with another vertex of degree $6$. This is not possible (the sum of the degrees in the graph on the vertices of degree $6$ must be even).
\begin{lemma}\label{lemma:g28} There are $4$ graphs of girth at least $5$ on $28$ vertices with $68$ edges. \begin{itemize} \item There is one graph for which $(\delta,\Delta)=(3,6)$. Here $(deg_{3},deg_{4},deg_{5},deg_{6})=(1,4,21,2)$ and every element of $S_{\Gamma,4}$ contains the vertex of degree $3$, $1$ vertex of degree $4$ and $2$ of degree $5$. Both vertices of degree $6$ are sink nodes, and each is the centre of an embedded $S_{6,[4,4,4,4,3,2]}$ star. \item There is one graph for which $(\delta,\Delta)=(4,5)$. Set $S_{\Gamma,4}$ is empty. \item There are $2$ graphs for which $(\delta,\Delta)=(4,6)$. In each case there is an embedded $S_{6,[4,4,4,3,3,3]}$ star. In the first graph $(deg_{4},deg_{5},deg_{6})=(6,20,2)$ and every element of $S_{\Gamma,4}$ contains $2$ vertices of degree $4$ and $2$ of degree $5$, and in the second, $(deg_{4},deg_{5},deg_{6})=(7,18,3)$ and every element of $S_{\Gamma,4}$ contains $3$ vertices of degree $4$ and $1$ of degree $5$. In both cases, for every sink node $p$, every element of $S_{\Gamma,4}$ contains exactly $1$ vertex of degree $4$ adjacent to $p$. \end{itemize} \end{lemma}
\noindent {\bf Proof} (1) From Theorem \ref{theorem:g28} all extremal graphs on $28$ vertices with $\delta=3$ can be constructed by adding a new vertex $x$, and edges to a set of $3$ vertices that is a set in $S_{\Gamma^{\prime},3}$, where $\Gamma^{\prime}$ is the extremal graph on $27$ vertices. Since, by Lemma \ref{lemma:g27} there is one graph $\Gamma^{\prime}$ and $5$ sets $S_{\Gamma^{\prime},3}$, there are $5$ graphs to check. Using nauty it was shown that all of these graphs are isomorphic, so there is only one graph in this case. Using a computer the graph was shown to satisfy the stated property.
\noindent (2) If $(\delta,\Delta)=(4,5)$ then $(deg_{4},deg_{5})=(4,24)$. If there are $e$ edges amongst the vertices of degree $4$ then there are $16-2e$ between vertices of degree $4$ and vertices of degree $5$, and $52+e$ among the vertices of degree $5$. Since a graph of girth at least $5$ on $24$ vertices has at most $f(24)=54$ edges, it follows that $e\leq 2$.
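The bookkeeping behind this bound is summarised in the following short check (for illustration only).
\begin{verbatim}
# 28 vertices, 68 edges, (deg4, deg5) = (4, 24): with e edges among the
# degree-4 vertices there are 16 - 2e edges between the classes and 52 + e
# edges among the 24 degree-5 vertices, which form a graph of girth at
# least 5 and so carry at most f(24) = 54 edges.
for e in range(0, 9):
    print(e, "->", 52 + e, "edges on the degree-5 vertices:",
          "feasible" if 52 + e <= 54 else "impossible")
# only e = 0, 1, 2 are feasible
\end{verbatim}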
If $e=2$ then the graph on the vertices of degree $5$ ($\Gamma^{\prime}$) has $54$ edges and is extremal. If $x$ is a vertex in $\Gamma$ of degree $4$ that is in no edges with other vertices of degree $4$, then the graph on the vertices of degree $5$ and $x$ has $25$ vertices and $58$ edges, which is not possible. So there are two parallel edges on the vertices of degree $4$. The sets of vertices of degree $5$ adjacent to each of the vertices of degree $4$ must not intersect (as $\Gamma^{\prime}$ contains no vertex of degree less than $4$) and for each pair of adjacent vertices of degree $4$ there is no edge between their neighbours of degree $5$ (or there is a square). It follows that $S_{\Gamma^{\prime},3}$ contains $4$ non-intersecting elements which can be split into two pairs with no edge between the two sets in either pair. This is not possible by Lemma \ref{lemma:g24}.
\noindent If $e=0$ then restricting search to this case showed there to be no graphs. The search in this case took $13$ hours. \begin{comment} Mike's email: i did this. i added to the encoding for each pair of vertices, i<j a clause that says: not(deg(i)=4) or not(deg(j)=4) or not(edge(i,j)) here is the result:
?- solve(alice28).
star: (4,5):star(5,[4,4,4,4,4]):alice28([ (4,4)])
0.32517, 50135, 6527, 48738.93751,unsat the (4,5) means that the min and max degrees are (4,5) the [(4,4)] means that there are 4 vetices with degree 4 (i also encoded that there are 28 verticies and 68 edges). the encoding has 50135 clauses with 6527 variables the sat solver required 48738.93751 seconds (~13 hrs) to say "NO" \end{comment}
\noindent \begin{comment} THIS IS TRUE, BUT UNNECESSARY THANKS TO MIKE'S SEARCH let $V^{\prime}=\{v_{1},v_{2},v_{3},v_{4}\}$ be the vertices of degree $4$. If a vertex of degree $5$ is in an edge with all $4$ elements of
$V^{\prime}$ then the graph on $V\setminus V^{\prime} \cup \{x\}$ has $23$ vertices and $51$ edges, which is impossible. Similarly if a vertex $x$ of degree $5$ is
edges with $3$ elements of $V^{\prime}$, and not in an edge with $v_{4}$, then the graph on $V\setminus \{x,v_{1},v_{2},v_{3}\}$ has $24$ vertices and
$54$ edges, so is maximal.
The sets of $3$ additional vertices of degree $5$ in $\Gamma$ that are adjacent to $v_{1}$, $v_{2}$ and $v_{3}$ respectively form non-intersecting elements of $S_{\Gamma^{\prime},3}$ (where $\Gamma^{\prime}$ is the extremal graph on $24$ vertices, as above). The two vertices of
degree $5$ adjacent to $x$ in $\Gamma$ are not in any of these three sets of $S_{\Gamma^{\prime},3}$, and are not in any edge with any element of any
of the sets. This is not possible by Lemma \ref{lemma:g24}.
If a vertex $x$ of degree $5$ is in two edges with vertices of degree $4$, $v_{1}$ and $v_{2}$ say, then the graph $\Gamma^{\prime}$ with vertices
$V\setminus \{x,v_{1},v_{2}\}$ has $25$ vertices and $57$ edges, so is extremal. Since neither of $v_{3}$ or $v_{4}$ is in an edge with $x$, $v_{1}$ or
$v_{2}$ in $\Gamma$, and no vertex of degree $5$ in $\Gamma$ is in two edges with $x$, $v_{1}$ or $v_{2}$, it follows that $\Gamma^{\prime}$ has no
vertices of degree $3$, and so $(deg_{4},deg_{5})=(11,14)$. Now in $\Gamma^{\prime}$, the three sets of $3$ vertices $X_{1}$, $X_{2}$ and $X_{3}$, that were adjacent to $x$, $v_{1}$ and $v_{2}$ in $\Gamma$ are parallel and consist entirely of vertices of degree $4$.
So $S_{\Gamma^{\prime},3}$
contains a set of $3$ non-intersecting sets $X_{1}$, $X_{2}$, $X_{3}$ consisting entirely of vertices of degree $4$. This is not possible by Lemma \ref{lemma:g25A}.
So there are no edges among the vertices of degree $4$, and $16$ vertices that have a neighbour of degree $4$. \end{comment}
If $e=1$ then suppose that $v_{1}$ and $v_{2}$ are adjacent. The graph $\Gamma^{\prime}$ with vertices $V\setminus\{v_{1},v_{2}\}$ has $26$ vertices and $61$ edges, so is extremal. The sets of three vertices of degree $5$ adjacent to $v_{1}$ and $v_{2}$ respectively must not intersect and there must be no edge between these sets. So $S_{\Gamma^{\prime},3}$ contains a pair of non-intersecting sets that have no edges between them. By Lemma \ref{lemma:g26} there is one extremal graph $\Gamma^{\prime}$ for which $S_{\Gamma^{\prime},3}$ contains such a pair, and one suitable pair ($(X_{1},X_{2})$ say). Hence, since $\Gamma$ is constructed from $\Gamma^{\prime}$ by adding a new pair of adjacent vertices $v_{1}$ and $v_{2}$ with edges to the vertices in $X_{1}$ and $X_{2}$, there is one graph $\Gamma$.
\noindent (3) Let $\Gamma$ be a graph with $(\delta,\Delta)=(4,6)$. By the proof of Theorem \ref{theorem:g28} $(deg_{4},deg_{5},deg_{6}) \in \{(i, 32-2i, i-4):5\leq i \leq 16\}$. If there is an edge between two vertices $v_{1}$ and $v_{2}$ of degree $4$, then removing the vertices and all edges on them leaves an extremal graph on $26$ vertices. Since neither extremal graph on $26$ vertices has any vertices of degree $6$, it follows that the sets of vertices adjacent to $v_{1}$ (that are not $v_{2}$) and to $v_{2}$ (that are not $v_{1}$) are elements of $S_{\Gamma^{\prime},3}$ that are parallel, have no edges between them, and contain at least one vertex of degree $5$. Only one of the extremal graphs on $26$ vertices has such a pair of sets, and there are $3$ such pairs. These lead to two non-isomorphic extremal graphs $\Gamma$ which have $(deg_{4},deg_{5},deg_{6})=(6,20,2)$ and $(deg_{4},deg_{5},deg_{6})=(7,18,3)$ respectively.
\begin{comment}If a vertex $v_{1}$ of degree $5$ has $3$ neighbours, $q_{1}$, $q_{2}$ and $q_{3}$ say, of degree $4$, then removing $v_{1}$, $q_{1}$, $q_{2}$ and $q_{3}$ leaves an extremal graph on $24$ vertices (with $54$ edges), $\Gamma^{\prime}$ say. Since $\Gamma^{\prime}$ has no vertices of degree $6$ (see Theorem \ref{theorem:g24}), all of the vertices in $\Gamma$ of degree $6$ are adjacent to one of $v_{1}$, $q_{1}$, $q_{2}$ or $q_{3}$. The sets of neighbours of $q_{1}$, $q_{2}$ and $q_{3}$ are elements of $S_{\Gamma^{\prime},3}$. Since no element of $S_{\Gamma^{\prime},3}$ contains any element of degree $5$ (in $\Gamma^{\prime}$), any vertex of degree $6$ in $\Gamma$ is one of the two remaining neighbours of $v_{1}$. So there are at most $2$ vertices of degree $6$ in $\Gamma$ and $deg_{4}=5$ or $6$. Now every vertex of degree $6$ has $3$ neighbours of degree $4$, and can not be adjacent to $q_{1}$, $q_{2}$ or $q_{3}$ (or there is a triangle), so $deg_{i}=6$. But then the two remaining neighbours of $v_{1}$ are adjacent to the same $3$ vertices of degree $4$, which is impossible. \end{comment}
Let us assume then that there are no edges amongst the vertices of degree $4$. If a vertex $v_{1}$ of degree $5$ has $2$ neighbours, $q_{1}$, $q_{2}$ say, of degree $4$, then removing $v_{1}$, $q_{1}$ and $q_{2}$ leaves an extremal graph on $25$ vertices (with $57$ edges), $\Gamma^{\prime}$ say. Let $X_{1}$, $X_{2}$ and $X_{3}$ be the sets of remaining neighbours of $v_{1}$, $q_{1}$ and $q_{2}$ in $\Gamma$. Then $X_{1}$, $X_{2}$ and $X_{3}$ are parallel elements of $S_{\Gamma^{\prime},3}$ for which there are no edges between $X_{1}$ and $X_{2}$, or between $X_{1}$ and $X_{3}$. In addition, since $\Gamma$ has no vertices of degree $3$, any vertex of degree $3$ in $\Gamma^{\prime}$ must be in $X_{1}$, $X_{2}$ or $X_{3}$. Since there are no edges between vertices of degree $4$ in $\Gamma$, it follows that all of the vertices of degree $3$ in $\Gamma^{\prime}$ are in $X_{1}$. There is no suitable extremal graph $\Gamma^{\prime}$, by Lemma \ref{lemma:g25A}. So in $\Gamma$ no vertex of degree $5$ has more than $1$ neighbour of degree $4$.
Consider the embedded $S_{6,[4,4,4,3,3,3]}$ star, with root $p$. Three of its children have degree $4$ and all other vertices of degree $4$ must be grandchildren, with parents having degree $5$ (as there are no edges between vertices of degree $4$). No two of these grandchildren can have the same parent (as no vertex of degree $5$ has more than one child of degree $4$). It follows that $i\leq 6$.
If $i=5$ then all of the grandchildren have degree $4$ or $5$. Any grandchild $x$ of degree $4$ must have $3$ edges to vertices of degree $5$ which don't have a parent of degree $4$. It follows that $x$ has edges to two vertices with the same parent, and there is a square.
If $i=6$ a similar argument shows that all of the grandchildren of degree $4$ must have edges to the grandchild $p_{2}$ of degree $6$, and $p_{2}$ has a parent of degree $4$ (or a similar argument applies). So we have the situation illustrated in Figure \ref{fig:28star}. Note that $p_{2}$ is adjacent to all of the grandchildren of degree $4$ and two of the grandchildren of degree $5$, $x_{1}$ and $x_{2}$. Now the grandchildren of degree $4$ are each adjacent to $p_{2}$ and two of the grandchildren of degree $5$ that have a parent of degree $5$. Thus the vertices in $\pi_{1}$ and $\pi_{2}$ are at a distance greater than $2$ from $p_{2}$ unless they are adjacent to $x_{1}$ or $x_{2}$. At most two elements of $\pi_{1}\cup \pi_{2}$ can have edges to $x_{1}$ or $x_{2}$, so at least two vertices are at a distance greater than $2$ from $p_{2}$, which is impossible as $p_{2}$ is the root of an embedded $S_{6,[4,4,4,3,3,3]}$ star (which contains $27$ of the vertices).
\begin{figure}
\caption{Embedded star, $i=6$, no edges between vertices of degree $4$ }
\label{fig:28star}
\end{figure}
\section{Case $v=29$}
\begin{theorem} \label{theorem:g29} If $\Gamma$ is a graph of girth at least $5$ on $29$ vertices with $72$ edges then $(\delta,\Delta)=(4,6)$ and $\Gamma$ has an embedded $S_{6,[4,4,4,4,3,3]}$ star. \end{theorem}
\noindent {\bf Proof} We know from Table \ref{table:deltaTable} that in this case the possible values for $(\delta,\Delta)$ are $(4,5)$, $(4,6)$ and $(4,7)$. Since in all cases $\delta= f(29)-f(28)$, $\Gamma$ is constructed from an extremal graph on $28$ vertices, $\Gamma^{\prime}$, by adding a new vertex $x$ of degree $4$ and $4$ edges from $x$ to a set in $S_{\Gamma^{\prime},4}$. By Lemma \ref{lemma:g28} there are $4$ possibilities for $\Gamma^{\prime}$. (i) $\Gamma^{\prime}$ has $(deg_{3},deg_{4},deg_{5},deg_{6})=(1,4,21,2)$. In this case, all elements of $S_{\Gamma^{\prime},4}$ contain the vertex of degree $3$, which is a child of the root of a $S_{6,[4,4,4,4,3,2]}$ star, and no vertex of degree $6$ -- the result follows. (ii) $\Gamma^{\prime}$ has $(deg_{4},deg_{5})=(4,24)$. In this case $S_{\Gamma^{\prime},4}$ is empty, so this case is impossible. (iii) If $\Gamma^{\prime}$ has $(deg_{4},deg_{5},deg_{6})=(6,20,2)$ or $(7,18,3)$ then all elements of $S_{\Gamma^{\prime},4}$ contain a child of degree $4$ of a root of a $S_{6,[4,4,4,3,3,3]}$ star and no vertex of degree $6$, and the result follows.
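The sets $S_{\Gamma,k}$ that drive these constructions can be generated mechanically. The sketch below is ours and is not the code used for the paper; it assumes that $S_{\Gamma,k}$ consists of the $k$-subsets of vertices that are pairwise at distance at least $3$ in $\Gamma$, so that joining a new vertex to such a subset creates no cycle of length less than $5$ (every new cycle has length $d_{\Gamma}(u,v)+2$ for some pair $u,v$ in the subset).
\begin{verbatim}
import itertools
import networkx as nx

def candidate_sets(G, k):
    """k-subsets of V(G) whose vertices are pairwise at distance >= 3.
    (Assumed definition of S_{Gamma,k}: attaching a new vertex to such a
    set keeps the girth at least 5.)"""
    dist = dict(nx.all_pairs_shortest_path_length(G))
    far = lambda u, v: dist[u].get(v, float("inf")) >= 3
    return [S for S in itertools.combinations(G.nodes, k)
            if all(far(u, v) for u, v in itertools.combinations(S, 2))]
\end{verbatim}
Applied to the $(20,41)$ graph of Appendix A, \texttt{candidate\_sets(G, 3)} should return exactly the four triples listed there.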
\begin{lemma}\label{lemma:g29} There is $1$ graph of girth at least $5$ on $29$ vertices with $72$ edges. In this graph $(\delta,\Delta)=(4,6)$ and $(deg_{4},deg_{5},deg_{6})=(5,20,4)$. Set $S_{\Gamma,4}$ contains $2$ sets, each of which contains $2$ vertices of degree $4$ and $2$ of degree $5$. \end{lemma}
\noindent {\bf Proof} From Theorem \ref{theorem:g29} any extremal graph $\Gamma$ on $29$ vertices can be constructed by adding a new vertex $x$, and edges to a set of $4$ vertices that is a set in $S_{\Gamma^{\prime},4}$, where $\Gamma^{\prime}$ is one of the $4$ extremal graphs on $28$ vertices. There are $7$ potential graphs $\Gamma$. All of these graphs were shown to be isomorphic using nauty, and the single resulting graph was shown to satisfy the stated property.
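The counting in this and the following lemmas follows the same mechanical pattern: extend each parent graph by one new vertex joined to a candidate set, then discard isomorphic copies. The authors use nauty for the isomorphism step; the sketch below (ours) is only illustrative and uses the slower isomorphism test of networkx instead. Here \texttt{extremal\_28} and \texttt{candidate\_sets} are assumed to be, respectively, a list of the extremal graphs on $28$ vertices and the routine sketched above.
\begin{verbatim}
import networkx as nx

def extend_by_vertex(G, S):
    """Copy of G with one new vertex joined to every vertex of S."""
    H = G.copy()
    x = max(H.nodes) + 1   # assumes integer vertex labels, as in the appendices
    H.add_edges_from((x, v) for v in S)
    return H

def isomorphism_classes(graphs):
    """One representative per isomorphism class (nauty is used in practice)."""
    reps = []
    for H in graphs:
        if not any(nx.is_isomorphic(H, R) for R in reps):
            reps.append(H)
    return reps

# For v = 29 one would take
#   candidates = [extend_by_vertex(G, S)
#                 for G in extremal_28 for S in candidate_sets(G, 4)]
#   isomorphism_classes(candidates)   # expected to collapse to a single graph
\end{verbatim}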
\section{Case $v=30$}
\begin{theorem} \label{theorem:g30} If $\Gamma$ is a graph of girth at least $5$ on $30$ vertices with $76$ edges then $(\delta,\Delta)=(4,6)$ and $\Gamma$ has an embedded $S_{6,[4,4,4,4,4,3]}$ star and an embedded $S_{6,[5,4,4,4,3,3]}$ star. \end{theorem}
\noindent {\bf Proof} We know from Table \ref{table:deltaTable} that in this case the possible values for $(\delta,\Delta)$ are $(4,6)$ and $(4,7)$. Since in all cases $\delta= f(30)-f(29)$, $\Gamma$ is constructed from the extremal graph on $29$ vertices, $\Gamma^{\prime}$, by adding a new vertex $x$ of degree $4$ and $4$ edges from $x$ to a set in $S_{\Gamma^{\prime},4}$. By Lemma \ref{lemma:g29}, $\Gamma^{\prime}$ has $(deg_{4},deg_{5},deg_{6})=(5,20,4)$. In this case, all elements of $S_{\Gamma^{\prime},4}$ contain two vertices of degree $4$ and two of degree $5$. So clearly, in $\Gamma$, $(\delta,\Delta)=(4,6)$ and $(deg_{4},deg_{5},deg_{6})=(4,20,6)$. Both elements of $S_{\Gamma^{\prime},4}$ contain a child of degree $4$ of an embedded $S_{6,[4,4,4,4,3,3]}$ star of $\Gamma^{\prime}$, and a child of degree $5$ of another embedded $S_{6,[4,4,4,4,3,3]}$ star of $\Gamma^{\prime}$. The result follows.
\begin{lemma}\label{lemma:g30} There is $1$ graph of girth at least $5$ on $30$ vertices with $76$ edges. In this graph $(\delta,\Delta)=(4,6)$ and $(deg_{4},deg_{5},deg_{6})=(4,20,6)$. Set $S_{\Gamma,4}$ contains $2$ sets, each of which contains $2$ vertices of degree $4$ and $2$ of degree $5$, and $S_{\Gamma,5}$ is empty. \end{lemma}
\noindent {\bf Proof} From Theorem \ref{theorem:g30} any extremal graph $\Gamma$ on $30$ vertices can be constructed by adding a new vertex $x$, and edges to a set of $4$ vertices that is a set in $S_{\Gamma^{\prime},4}$, where $\Gamma^{\prime}$ is the extremal graph on $29$ vertices. There are $2$ potential graphs $\Gamma$, which were shown to be isomorphic using nauty, and the single resulting graph was shown to satisfy the stated properties.
\section{Case $v=31$}
\begin{theorem} \label{theorem:g31} If $\Gamma$ is a graph of girth at least $5$ on $31$ vertices then $\Gamma$ has at most $80$ edges. If $\Gamma$ has $80$ edges then either $(\delta,\Delta)=(4,6)$ and $\Gamma$ has an embedded $S_{6,[4,4,4,4,4,4]}$ star and an embedded $S_{6,[5,4,4,4,4,3]}$ star, or $(\delta,\Delta)=(5,6)$ and $\Gamma$ has an embedded $S_{6,[4,4,4,4,4,4]}$ star. \end{theorem}
\noindent {\bf Proof} Suppose that $\Gamma$ has $81$ edges. Then $\delta=5$: removing a vertex of degree at most $4$ would leave a graph of girth at least $5$ on $30$ vertices with at least $77>f(30)$ edges, and the average degree of $\Gamma$ is less than $6$. Removing a vertex of degree $5$ would leave the extremal graph $\Gamma^{\prime}$ on $30$ vertices with $76$ edges, and the neighbourhood of the removed vertex would be an element of $S_{\Gamma^{\prime},5}$, which is empty by Lemma \ref{lemma:g30}; so this is not possible. So there are at most $80$ edges. We know from Table \ref{table:deltaTable} that in this case the possible values for $(\delta,\Delta)$ are $(4,6)$, $(4,7)$ and $(5,6)$.
If $\delta=4$ then since $\delta= f(31)-f(30)$, $\Gamma$ is constructed from the extremal graph on $30$ vertices, $\Gamma^{\prime}$, by adding a new vertex $x$ of degree $4$ and $4$ edges from $x$ to a set from $S_{\Gamma^{\prime},4}$. By Lemma \ref{lemma:g30} $\Gamma^{\prime}$ has $(deg_{4},deg_{5},deg_{6})=(4,20,6)$.
All elements of $S_{\Gamma^{\prime},4}$ contain two vertices of degree $4$ and two of degree $5$. So clearly, in $\Gamma$, $(\delta,\Delta)=(4,6)$ and $(deg_{4},deg_{5},deg_{6})=(3,20,8)$. By Appendix $K$, $\Gamma^{\prime}$ has two embedded $S_{6,[4,4,4,4,4,3]}$ stars, each of which has children from both elements, $s_{1}$ and $s_{2}$ say, of $S_{\Gamma^{\prime},4}$. In the first star the child from $s_{1}$ has degree $4$ and the child from $s_{2}$ has degree $5$, and in the second star, the reverse is true. The result follows.
If $(\delta,\Delta)=(5,6)$ then, counting degrees, $(deg_{5},deg_{6})=(26,5)$. Every vertex of degree $6$ has $6$ neighbours of degree $5$, so every vertex of degree $6$ is the root of an embedded $S_{6,[4,4,4,4,4,4]}$ star, and the result follows.
\begin{lemma}\label{lemma:g31} There are $2$ graphs of girth at least $5$ on $31$ vertices with $80$ edges. In the first graph $(\delta,\Delta)=(4,6)$, $(deg_{4},deg_{5},deg_{6})=(3,20,8)$ and set $S_{\Gamma,5}$ contains one element, which consists of $2$ vertices of degree $5$ and $3$ of degree $4$. In the second, $(\delta,\Delta)=(5,6)$, $(deg_{5},deg_{6})=(26,5)$ and $S_{\Gamma,5}$ contains one element, which consists of $5$ vertices of degree $5$. \end{lemma}
\noindent {\bf Proof} From Theorem \ref{theorem:g31} any extremal graph $\Gamma$ on $31$ vertices can either be constructed by adding a new vertex $x$, and edges to a set of $4$ vertices that is a set in $S_{\Gamma^{\prime},4}$, where $\Gamma^{\prime}$ is the extremal graph on $30$ vertices, or has $(\delta,\Delta)=(5,6)$ and an embedded $S_{6,[4,4,4,4,4,4]}$ star. In the first case there are $2$ potential graphs $\Gamma$, which were shown to be isomorphic using nauty, and the single resulting graph was shown to satisfy the stated property. When $(\delta,\Delta)=(5,6)$, restricting the search to this case and fixing the embedded $S_{6,[4,4,4,4,4,4]}$ star produced $720$ graphs. Applying nauty revealed there to be $1$ unique graph, which satisfies the stated property.
\section{Case $v=32$}
\begin{theorem} \label{theorem:g32} If $\Gamma$ is a graph of girth at least $5$ on $32$ vertices with $85$ edges then there is an embedded $S_{6,[5,4,4,4,4,4]}$ star. \end{theorem}
\noindent{\bf Proof} We know from Table \ref{table:deltaTable} that in this case all vertices have degree $5$ or $6$. Since $deg_{5}+deg_{6}=32$ and $5\,deg_{5}+6\,deg_{6}=170$, there are $22$ vertices of degree $5$ and $10$ vertices of degree $6$.
Suppose that no vertex of degree $6$ has a neighbour of degree $6$. Then every vertex of degree $6$ has $6$ neighbours of degree $5$. The sets of neighbours of the vertices of degree $6$ form a linear space on $22$ points with $10$ blocks of size $6$, contradicting Lemma \ref{lemma:packingLemma}. Hence some vertex $p$ of degree $6$ has a neighbour of degree $6$. If $p$ had two neighbours of degree $6$ then, since the girth is at least $5$, the vertices at distance at most $2$ from $p$ would be distinct and $\Gamma$ would have at least $1+6+2\cdot 5+4\cdot 4=33$ vertices. So $p$ has exactly one neighbour of degree $6$ and is the root of an embedded $S_{6,[5,4,4,4,4,4]}$ star.
\begin{lemma}\label{lemma:g32} There is $1$ graph of girth at least $5$ on $32$ vertices with $85$ edges. In this graph $(\delta,\Delta)=(5,6)$, $(deg_{5},deg_{6})=(22,10)$ and $S_{\Gamma,3}$ is empty. Every element of $S_{\Gamma,2}$ contains $2$ vertices of degree $5$, at least one of which is the child of an embedded $S_{6,[5,4,4,4,4,4]}$ star. \end{lemma}
\noindent {\bf Proof} From Theorem \ref{theorem:g32} every extremal graph on $32$ vertices has $(\delta,\Delta)=(5,6)$ and an embedded $S_{6,[5,4,4,4,4,4]}$ star. Restricting the search to this case and applying nauty revealed there to be $1$ graph. Alternatively, since $\delta=f(v)-f(v-1)$ we can construct all such graphs by adding a new vertex $x$ and edges to a set of $5$ vertices that is a set in $S_{\Gamma^{\prime},5}$, where $\Gamma^{\prime}$ is an extremal graph on $31$ vertices. There are two such graphs, and in each case $S_{\Gamma^{\prime},5}$ contains $1$ element. So there are $2$ potential graphs. These are shown to be isomorphic.
\begin{comment} \section{Case $v=33$}
\begin{theorem} \label{theorem:g33} If $\Gamma$ is a graph of girth at least $5$ on $33$ vertices then $\Gamma$ has at most $87$ edges. If $\Gamma$ has $87$ edges then one of the following is true: $(deg_{2},deg_{5},deg_{6})=(1,20,12)$ and there is an embedded $S_{6,[5,5,4,4,4,4]}$ star, or there is an edge between a vertex of degree $3$ and a vertex of degree $5$, or there is an edge between two vertices of degree $4$, or?? \end{theorem}
\noindent{\bf Proof} Suppose there is a graph $\Gamma$ of girth at least $5$ on $33$ vertices with $88$ edges. Since the extremal graph $\Gamma^{\prime}$ on $32$ vertices has $85$ edges and empty set $S_{\Gamma^{\prime},3}$, clearly $\Gamma$ has $\delta\geq 4$. It follows that $(\delta,\Delta)=(4,6)$, $(4,7)$, $(4,8)$ or $(5,6)$. If there is an edge between two vertices of degree $4$ then removing those vertices and the edges on them gives a graph $\Gamma^{\prime}$ on $31$ vertices with $81$ edges, which is impossible. If there is an edge between a vertex of degree $4$ and a vertex of degree $5$ then removing them and the edges on them gives an extremal graph on $31$ vertices which has a non-intersecting pair of sets from $S_{\Gamma^{\prime},3}$ and $S_{\Gamma^{\prime},4}$ which have no edges between them. There are no such sets. So a vertex of degree $4$ has neighbours of degree at least $6$. Suppose that there are two vertices. $x$ and$y$, of degree $4$. Removing them and their neighbours leaves a graph $\Gamma^{\prime}$ with $31$ vertices and $80$ edges, which is maximal. The sets of neighbours of $x$ and $y$ are elements of $S_{\Gamma^{\prime},4}$ that intersect at most once and contain vertices of degree at least $5$ in $\Gamma^{\prime}$. No such pair of sets exist (see Appendix \ref{appendix31}), hence there is at most $1$ vertex of degree $4$. Any vertex in $\Gamma$ of degree $7$ or $8$ must have at least $2$ neighbours of degree $4$. Hence it follows that $(\delta,\Delta)=(4,6)$ and $(deg_{4},deg_{5},deg_{6})=(1,20,12)$, or $(\delta,\Delta)=(5,6)$.
Suppose that $(deg_{4},deg_{5},deg_{6})=(1,20,12)$
If $(\delta,\Delta)=(5,6)$ then $(deg_{5},deg_{6})=(22,11)$. To be continued...
Hence any extremal graph on $88$ vertices has at most $87$ edges. Since $87$ is a lower bound for $f(33)$ (see \cite{Garnick93}), $f(33)=87$. We know from Table \ref{table:deltaTable} that either $\delta=2$ or $(\delta,\Delta)\in \{(3,6), (3,7), (3,8), (3,9), (3,10), (4,6), (4,7), (4,8), (5,6)\}$.
If $\delta=2$ then, since $\delta= f(33)-f(32)$, $\Gamma$ is constructed from the extremal graph on $32$ vertices, $\Gamma^{\prime}$, by adding a new vertex $x$ of degree $2$ and $2$ edges from $x$ to a set $S_{\Gamma^{\prime},2}$. By Lemma \ref{lemma:g32} $\Gamma^{\prime}$ has $(deg_{5},deg_{6})=(22,10)$. All elements of $S_{\Gamma^{\prime},2}$ contains two vertices of degree $5$ so clearly, in $\Gamma$, $(\delta,\Delta)=(2,6)$. All elements of $S_{\Gamma^{\prime},2}$ contain a child of an embedded $S_{6,[5,4,4,4,4,4]}$ star of $\Gamma^{\prime}$ of degree $4$, and the result follows.
Other things we know: \begin{itemize} \item there are no $3-3$ or $3-4$ edges (or remove $2$ vertices and get a graph on $31$ vertices with $81$ edges, not possible. If $P_{i}(n)$ denotes the minimum number of points for which there are $n$ blocks of size $3$ then $deg_{5}+deg_{6}+\ldots \geq P_{3}(deg_{3})$. \item if there is a $4-4$ edge, then removing vertices have a graph on $31$ vertices with $80$ edges, which is extremal so only have vertices of degree at least $4$. So there are no vertices of degree $3$ in $\Gamma$. Hence, if $\delta=3$ there are no $4-4$ edges. \item If there is a vertex of degree $>=8$ there are no $3-5$ or $4-4$ edges either (as then there is a graph on $31$ vertices with $80$ edges with a vertex of degree at least $7$).Actually, eliminated $3-5$ and $4-4$ except for the few cases identified below. \item Every vertex of degree $5$ is adjacent to at most $1$ vertex of degree $4$ (as extremal on $30$ vertices has no $3$ parallel elements of $S_{\Gamma,3}$). \end{itemize}
\begin{lemma}\label{lemma:g33} There are $?$ graphs of girth at least $5$ on $33$ vertices with $87$ edges. Of these \begin{enumerate} \item $3$ have $(deg_{2},deg_{5},deg_{6})=(1,20,12)$, (add new vertex $x$ and element of $S_{\Gamma,2}$, $\Gamma$ extremal on $32$ vertices. Potentially $41$, $3$ unique. \item $5$ have a $3-5$ edge, and have $(\delta,\Delta)=(3,6)$. Of these $4$ have $(deg_{3},deg_{4},deg_{5},deg_{6})=(1,0,21,11)$ and \item $1$ has $(deg_{3},deg_{4},deg_{5},deg_{6})=(1,1,19,12)$ (add two new vertices $x,y$ of degree $5$ and $3$ to extremal graph $\Gamma^{\prime}$ on $31$ vertices. Add edge $x-y$ and edges to sets $X$ and $Y$ from $S{\Gamma^{\prime},4}$, $S_{\Gamma^{\prime},2}$ that are distinct and have no edges between them. $\Gamma^{\prime}$ is graph $0$ (must have a vertex of degree less than $5$). There are $16$ possibilities, $5$ unique. \item Rest? \end{enumerate} \end{lemma} **
\end{comment}
\section*{Appendix A}\label{appendix20} The extremal graph $(20,41)$. Note that $\Gamma_{0}$ has $(deg_{3},deg_{4},deg_{5})=(1,16,3)$.
\noindent $\Gamma_{0}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{ccccc ccccc ccccc ccccc} 0&0&0&0&1&0&0&1&0&0&1&0&0&1&0&0&0&0&0&1\\ 0&0&0&0&0&1&0&0&1&0&0&1&0&0&1&0&0&0&0&1\\ 0&0&0&1&0&0&1&0&0&1&0&0&1&0&0&0&0&0&0&1\\ 0&0&1&0&1&0&0&0&0&0&0&0&0&0&1&1&0&0&0&0\\ 1&0&0&1&0&1&0&0&0&0&0&0&0&0&0&0&0&0&1&0\\ 0&1&0&0&1&0&1&0&0&0&0&0&0&0&0&0&0&1&0&0\\ 0&0&1&0&0&1&0&1&0&0&0&0&0&0&0&0&1&0&0&0\\ 1&0&0&0&0&0&1&0&1&0&0&0&0&0&0&1&0&0&0&0\\ 0&1&0&0&0&0&0&1&0&1&0&0&0&0&0&0&0&0&1&0\\ 0&0&1&0&0&0&0&0&1&0&1&0&0&0&0&0&0&1&0&0\\ 1&0&0&0&0&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0\\ 0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&1&0&0&0&0\\ 0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&0&0&1&0\\ 1&0&0&0&0&0&0&0&0&0&0&0&1&0&1&0&0&1&0&0\\ 0&1&0&1&0&0&0&0&0&0&0&0&0&1&0&0&1&0&0&0\\ 0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0\\ 0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&1&0\\ 0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&1&0&0&0&0\\ 0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0\\ 1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(5,5,5,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,3)$$ Edge set is: \begin{eqnarray*} (0,4), (0,7), (0,10), (0,13), (0,19), (1,5), (1,8),
(1,11),\\
(1,14), (1,19), (2,3), (2,6), (2,9), (2,12), (2,19),\\ (3,4),
(3,14), (3,15), (4,5), (4,18), (5,6), (5,17),\\ (6,7),
(6,16), (7,8), (7,15), (8,9), (8,18), (9,10), \\ (9,17), (10,11), (10,16), (11,12), (11,15), (12,13), \\ (12,18), (13,14), (13,17), (14,16), (15,17), (16,18) \end{eqnarray*}
\noindent Set $S_{\Gamma_{0},3}$ is given below. All elements contain vertex $19$ of degree $3$, and two other vertices of degree $4$. \begin{eqnarray*} S_{\Gamma_{0},3} &=&\{(15,16,19), (15,18,19), (16,17,19), (17,18,19)\}\\ \end{eqnarray*}
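For completeness, the data in these appendices can be re-checked mechanically. The following sketch (ours, in Python with networkx, not the code used for the paper) rebuilds $\Gamma_{0}$ from the edge set above and confirms the number of edges, the degree sequence and that the girth is at least $5$; the girth is computed with one breadth-first search per start vertex, which is sufficient to detect any triangle or square.
\begin{verbatim}
from collections import deque
import networkx as nx

# Edge set of the (20,41) graph Gamma_0, copied from above.
edges = [(0,4),(0,7),(0,10),(0,13),(0,19),(1,5),(1,8),(1,11),(1,14),(1,19),
         (2,3),(2,6),(2,9),(2,12),(2,19),(3,4),(3,14),(3,15),(4,5),(4,18),
         (5,6),(5,17),(6,7),(6,16),(7,8),(7,15),(8,9),(8,18),(9,10),(9,17),
         (10,11),(10,16),(11,12),(11,15),(12,13),(12,18),(13,14),(13,17),
         (14,16),(15,17),(16,18)]
G = nx.Graph(edges)

def girth(G):
    """Length of a shortest cycle, via one BFS per start vertex."""
    best = float("inf")
    for root in G:
        parent, depth, queue = {root: None}, {root: 0}, deque([root])
        while queue:
            u = queue.popleft()
            for w in G[u]:
                if w not in depth:
                    parent[w], depth[w] = u, depth[u] + 1
                    queue.append(w)
                elif w != parent[u]:      # non-tree edge closes a cycle
                    best = min(best, depth[u] + depth[w] + 1)
    return best

assert G.number_of_nodes() == 20 and G.number_of_edges() == 41
assert sorted(d for _, d in G.degree()) == [3] + [4]*16 + [5]*3
assert girth(G) >= 5
\end{verbatim}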
\section*{Appendix B}\label{appendix21} The $3$ extremal graphs $(21,44)$. Note that graph $\Gamma_{0}$ has $(deg_{3},deg_{4},deg_{5})=(1,15,5)$, and graphs $\Gamma_{1}$ and $\Gamma_{2}$ have $(deg_{4},deg_{5})=(17,4)$.
\noindent $\Gamma_{0}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{ccccc ccccc ccccc ccccc c} 0&0&0&0&1&0&0&1&0&0&1&0&0&1&0&0&0&0&0&1&0\\ 0&0&0&0&0&1&0&0&1&0&0&1&0&0&1&0&0&0&0&1&0\\ 0&0&0&1&0&0&1&0&0&1&0&0&1&0&0&0&0&0&0&1&0\\ 0&0&1&0&1&0&0&0&0&0&0&0&0&0&1&1&0&0&0&0&0\\ 1&0&0&1&0&1&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0\\ 0&1&0&0&1&0&1&0&0&0&0&0&0&0&0&0&0&1&0&0&0\\ 0&0&1&0&0&1&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0\\ 1&0&0&0&0&0&1&0&1&0&0&0&0&0&0&1&0&0&0&0&0\\ 0&1&0&0&0&0&0&1&0&1&0&0&0&0&0&0&0&0&1&0&0\\ 0&0&1&0&0&0&0&0&1&0&1&0&0&0&0&0&0&1&0&0&0\\ 1&0&0&0&0&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0&0\\ 0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&1&0&0&0&0&0\\ 0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&0&0&1&0&0\\ 1&0&0&0&0&0&0&0&0&0&0&0&1&0&1&0&0&1&0&0&0\\ 0&1&0&1&0&0&0&0&0&0&0&0&0&1&0&0&1&0&0&0&0\\ 0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&1\\ 0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&1&0&1\\ 0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&1&0&0&0&0&0\\ 0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0\\ 1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&0&0&1&0\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(5,5,5,4,4,4,4,4,4,4,4,4,4,4,4,5,5,4,4,4,3)$$ Edge set is: \begin{eqnarray*} (0,4), (0,7), (0,10), (0,13), (0,19), (1,5), (1,8), (1,11),\\ (1,14), (1,19), (2,3), (2,6), (2,9), (2,12), (2,19), (3,4),\\ (3,14), (3,15), (4,5), (4,18), (5,6), (5,17), (6,7),\\ (6,16), (7,8), (7,15), (8,9), (8,18), (9,10), (9,17),\\ (10,11), (10,16), (11,12), (11,15), (12,13), (12,18), (13,14),\\ (13,17), (14,16), (15,17), (15,20), (16,18), (16,20), (19,20) \end{eqnarray*}
\noindent Set $S_{\Gamma_{0},3}$ is given below. All elements contain vertices of degree $3$ or $4$. \begin{eqnarray*} S_{\Gamma_{0},3} &=&\{(4,9,20), (5,12,20), (8,13,20), (17,18,19)\}\\ \end{eqnarray*}
\noindent $\Gamma_{1}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{ccccc ccccc ccccc ccccc c} 0&1&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0\\ 1&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0\\ 1&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&0\\ 1&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0\\ 1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0\\ 1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1\\ 0&1&0&0&0&0&0&0&0&1&0&0&1&0&0&1&0&0&1&0&0\\ 0&1&0&0&0&0&0&0&0&0&1&0&0&1&0&0&1&0&0&1&0\\ 0&1&0&0&0&0&0&0&0&0&0&1&0&0&1&0&0&1&0&0&1\\ 0&0&1&0&0&0&1&0&0&0&0&0&0&1&0&0&0&1&0&0&0\\ 0&0&1&0&0&0&0&1&0&0&0&0&0&0&1&1&0&0&0&0&0\\ 0&0&1&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0\\ 0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&1&0\\ 0&0&0&1&0&0&0&1&0&1&0&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&1&0&0&0&0&1&0&1&0&0&0&0&0&0&0&1&0&0\\ 0&0&0&0&1&0&1&0&0&0&1&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&1&0&0&1&0&0&0&1&0&0&0&0&0&0&1&0&0\\ 0&0&0&0&1&0&0&0&1&1&0&0&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&0&1&1&0&0&0&0&0&0&0&1&0&1&0&0&0&0\\ 0&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0&0&1&0&0&0\\ 0&0&0&0&0&1&0&0&1&0&0&0&0&1&0&1&0&0&0&0&0\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(5,4,4,4,4,4,5,5,5,4,4,4,4,4,4,4,4,4,4,4,4)$$ Edge set is: \begin{eqnarray*} (0,1), (0,2), (0,3), (0,4), (0,5), (1,6), (1,7), (1,8),\\ (2,9), (2,10), (2,11), (3,12), (3,13), (3,14), (4,15), (4,16), \\(4,17), (5,18), (5,19), (5,20), (6,9), (6,12), (6,15),\\ (6,18), (7,10), (7,13), (7,16), (7,19), (8,11), (8,14),\\ (8,17), (8,20), (9,13), (9,17), (10,14), (10,15), (11,12),\\ (11,16), (12,19), (13,20), (14,18), (15,20), (16,18), (17,19) \end{eqnarray*}
\noindent Set $S_{\Gamma_{1},3}$ is empty.
\noindent $\Gamma_{2}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{ccccc ccccc ccccc ccccc ccccc} 0&1&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0\\ 1&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0\\ 1&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&0\\ 1&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0\\ 1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0\\ 1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1\\ 0&1&0&0&0&0&0&0&0&1&0&0&1&0&0&1&0&0&1&0&0\\ 0&1&0&0&0&0&0&0&0&0&1&0&0&1&0&0&1&0&0&1&0\\ 0&1&0&0&0&0&0&0&0&0&0&1&0&0&1&0&0&1&0&0&0\\ 0&0&1&0&0&0&1&0&0&0&0&0&0&1&0&0&0&1&0&0&0\\ 0&0&1&0&0&0&0&1&0&0&0&0&0&0&1&1&0&0&0&0&0\\ 0&0&1&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&1\\ 0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&1&0\\ 0&0&0&1&0&0&0&1&0&1&0&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&1&0&0&0&0&1&0&1&0&0&0&0&0&0&0&1&0&0\\ 0&0&0&0&1&0&1&0&0&0&1&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&1&0&0&1&0&0&0&1&0&0&0&0&0&0&1&0&0\\ 0&0&0&0&1&0&0&0&1&1&0&0&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&0&1&1&0&0&0&0&0&0&0&1&0&1&0&0&0&0\\ 0&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0&0&1&0&0&0\\ 0&0&0&0&0&1&0&0&0&0&0&1&0&1&0&1&0&0&0&0&0\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(5,4,4,4,4,4,5,5,4,4,4,5,4,4,4,4,4,4,4,4,4)$$ Edge set is: \begin{eqnarray*} (0,1), (0,2), (0,3), (0,4), (0,5), (1,6), (1,7), (1,8), \\(2,9), (2,10), (2,11), (3,12), (3,13), (3,14), (4,15), (4,16), \\(4,17), (5,18), (5,19), (5,20), (6,9), (6,12), (6,15),\\ (6,18), (7,10), (7,13), (7,16), (7,19), (8,11), (8,14),\\ (8,17), (9,13),(9,17), (10,14), (10,15), (11,12), (11,16),\\ (11,20), (12,19), (13,20), (14,18), (15,20), (16,18), (17,19) \end{eqnarray*}
\noindent Set $S_{\Gamma_{2},3}$ is empty.
\section*{Appendix C}\label{appendix22} The $3$ extremal graphs $(22,47)$. Note that graph $\Gamma_{0}$ has $(deg_{4},deg_{5})=(16,6)$, graph $\Gamma_{1}$ has $(deg_{3},deg_{4},deg_{5})=(2,12,8)$ and graph $\Gamma_{2}$ has $(deg_{3},deg_{4},deg_{5})=(1,14,7)$.
\noindent $\Gamma_{0}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{cccccccccccccccccccccccc} 0&0&0&0&0&0&0&0&0&1&0&0&1&0&0&1&0&0&0&0&1&0\\ 0&0&0&0&0&0&1&0&1&0&0&1&0&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&0&1&0&1&0&0&0&0&0&0&1&0&0&0&0&0&1&0\\ 0&0&0&0&1&0&0&0&0&0&1&0&0&1&0&0&0&0&0&0&1&0\\ 0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&1&0&0\\ 0&0&1&0&0&0&0&0&1&0&0&0&0&1&0&0&0&0&0&1&0&0\\ 0&1&0&0&0&0&0&0&0&0&0&0&1&0&1&0&0&0&0&1&0&0\\ 0&0&1&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0\\ 0&1&0&0&0&1&0&0&0&0&1&0&0&0&0&1&0&0&1&0&0&0\\ 1&0&0&0&0&0&0&0&0&0&0&1&0&0&1&0&0&0&1&0&0&0\\ 0&0&0&1&0&0&0&0&1&0&0&0&0&0&1&0&0&1&0&0&0&0\\ 0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0\\ 1&0&0&0&0&0&1&1&0&0&0&0&0&1&0&0&0&1&0&0&0&0\\ 0&0&0&1&0&1&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0\\ 0&0&1&0&0&0&1&0&0&1&1&0&0&0&0&0&1&0&0&0&0&0\\ 1&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1\\ 1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&1&0\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(4,4,4,4,5,4,4,4,5,4,4,4,5,4,5,4,4,4,4,4,5,5)$$ Edge set is: \begin{eqnarray*} (0,9), (0,12), (0,15), (0,20), (1,6), (1,8), (1,11), (1,20),\\ (2,5), (2,7), (2,14), (2,20), (3,4), (3,10), (3,13), (3,20),\\ (4,7), (4,11), (4,15), (4,19), (5,8), (5,13), (5,19), (6,12),\\ (6,14), (6,19), (7,12), (7,18), (8,10), (8,15), (8,18), (9,11),\\ (9,14), (9,18), (10,14), (10,17), (11,17), (12,13), (12,17), (13,16),\\ (14,16), (15,16), (16,21), (17,21), (18,21), (19,21), (20,21) \end{eqnarray*}
\noindent The sink nodes are vertices $20$ and $21$, which have sets of children $\{0,1,2,3,21\}$ and $\{16,17,18,19,20\}$ respectively.
\noindent Set $S_{\Gamma_{0},3}$ is given below. All elements of $S_{\Gamma_{0},3}$ contain no vertex of degree $5$, and contain at least one child of a sink node.
\begin{comment} S_{\Gamma_{0},2} &=& \{(0,5), (0,10), (0,19), (1,7), (1,13), (1,16), (2,11),\\&&(2,15), (2,17), (3,6), (3,9), (3,18), (4,14), (5,9),\\&& (5,11), (5,17), (6,15), (6,18), (7,10), (7,16), (8,12),\\&& (9,13), (9,19), (10,19), (11,13), (11,16), (13,18), (15,17)\} \\ \end{comment}
\begin{eqnarray*} S_{\Gamma_{0},3} &=&\{(0,10,19), (1,7,16), (2,15,17), (3,6,18)\}\\ \end{eqnarray*}
\noindent Note the elements of $S_{\Gamma_{0},3}$ do not intersect. \begin{comment} The vertices of degree less than $5$ that do not appear in any element of $S_{\Gamma_{0},3}$ can be uniquely arranged into distinct pairs from $S_{\Gamma_{0},2}$ thus: $(5,11)$, $(9,13)$. For any triple $t\in S_{\Gamma_{0},3}$ one of these pairs has edges to both $t$ and at least one of the other triples. \end{comment}
The neighbourhoods of each vertex of degree $4$ are given below, with vertices of degree $5$ shown in bold. It can be seen that every vertex of degree $4$ has at least two neighbours of degree $4$. \begin{eqnarray*} Nb(0) &=& 9, {\bf 12}, 15, {\bf 20}\\ Nb(1) &=& 6, {\bf 8}, 11, {\bf 20}\\ Nb(2) &=& 5, 7, {\bf 14}, {\bf 20}\\ Nb(3) &=& {\bf 4}, 10, 13, {\bf 20}\\ Nb(5) &=& 2, {\bf 8}, 13, 19\\ Nb(6) &=& 1, {\bf 12}, {\bf 14}, 19\\ Nb(7) &=& 2, {\bf 4}, {\bf 12}, 18\\ Nb(9) &=& 0, 11, {\bf 14}, 18\\ Nb(10) &=& 3, {\bf 8}, {\bf 14}, 17\\ Nb(11) &=& 1, {\bf 4}, 9, 17\\ Nb(13) &=& 3,5,{\bf 12}, 16\\ Nb(15) &=& 0, {\bf 4}, {\bf 8}, 16\\ Nb(16) &=&13, {\bf 14}, 15, {\bf 21}\\ Nb(17) &=& 10, 11, {\bf 12}, {\bf 21}\\ Nb(18) &=& 7,{\bf 8},9,{\bf 21}\\ Nb(19) &=&{\bf 4},5,6,{\bf 21} \end{eqnarray*}
\noindent $\Gamma_{1}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{cccccccccccccccccccccccc} 0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&1&0\\ 0&0&0&0&0&0&1&0&0&0&1&0&0&1&0&0&0&0&0&0&1&0\\ 0&0&0&0&0&1&0&0&0&1&0&0&1&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&1&0\\ 0&0&0&1&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0&1&0&0\\ 0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1&0&0&0&1&0&0\\ 0&1&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&0&1&0&0\\ 1&0&0&0&0&0&0&0&0&1&0&0&0&0&1&0&0&0&0&1&0&0\\ 0&0&0&1&0&0&1&0&0&0&0&0&0&0&0&1&0&0&1&0&0&0\\ 0&0&1&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&0&0&0\\ 0&1&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0\\ 1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0\\ 0&0&1&0&0&0&1&0&0&0&0&1&0&0&1&0&0&1&0&0&0&0\\ 0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&1&0&1&0&0&0&0\\ 0&0&0&1&0&0&0&1&0&0&1&0&1&0&0&0&1&0&0&0&0&0\\ 1&0&0&0&0&1&0&0&1&0&0&0&0&1&0&0&1&0&0&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&0&1&1&0&0&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&1\\ 1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&1&0\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(4,4,4,4,4,4,4,4,4,4,4,4,5,5,5,5,3,3,5,5,5,5)$$ Edge set is: \begin{eqnarray*} (0,7), (0,11), (0,15), (0,20), (1,6), (1,10), (1,13), (1,20),\\ (2,5), (2,9), (2,12), (2,20), (3,4), (3,8), (3,14), (3,20),\\ (4,11), (4,13), (4,19), (5,10), (5,15), (5,19), (6,8), (6,12),\\ (6,19), (7,9), (7,14), (7,19), (8,15), (8,18), (9,13), (9,18),\\ (10,14), (10,18), (11,12), (11,18), (12,14), (12,17), (13,15), (13,17),\\ (14,16), (15,16), (16,21), (17,21), (18,21), (19,21), (20,21) \end{eqnarray*}
\noindent The sink nodes are vertices $18$, $19$, $20$ and $21$, which have sets of children $\{8,9,10,11,21\}$, $\{4,5,6,7,21\}$, $\{0,1,2,3,21\}$ and $\{16,17,18,19,20\}$ respectively.
\noindent Set $S_{\Gamma_{1},3}$ is given below. All elements contain no vertex of degree $5$ and contain at least one child of a sink node. No element of $S_{\Gamma_{1},3}$ contains both vertices of degree $3$ ($16$ and $17$). \begin{eqnarray*} S_{\Gamma_{1},3} &=&\{(0,10,17), (1,11,16), (2,4,16), (3,5,17), (6,9,16), (7,8,17)\} \end{eqnarray*}
\noindent $\Gamma_{2}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{cccccccccccccccccccccccc} 0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&1&0&0&0&0&1&0\\ 0&0&0&0&0&0&1&0&0&1&0&1&0&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&0&1&0&0&1&0&0&0&0&0&1&0&0&0&0&0&1&0\\ 0&0&0&0&1&0&0&1&0&0&1&0&0&0&0&0&0&0&0&0&1&0\\ 0&0&0&1&0&0&0&0&0&0&0&1&0&0&1&0&0&0&0&1&0&0\\ 0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1&0&0&0&1&0&0\\ 0&1&0&0&0&0&0&1&0&0&0&0&1&1&0&0&0&0&0&1&0&0\\ 0&0&0&1&0&0&1&0&0&0&0&0&0&0&0&1&0&0&1&0&0&0\\ 0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0\\ 0&1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0\\ 0&0&0&1&0&1&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0\\ 0&1&0&0&1&0&0&0&1&0&0&0&0&0&0&1&0&1&0&0&0&0\\ 1&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&1&0&0&0&0\\ 0&0&0&0&0&0&1&0&1&0&1&0&0&0&0&0&1&0&0&0&0&0\\ 0&0&1&0&1&0&0&0&0&1&0&0&1&0&0&0&1&0&0&0&0&0\\ 1&0&0&0&0&1&0&1&0&0&0&1&0&0&0&0&1&0&0&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1\\ 1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&1&0\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(3,4,4,4,4,4,5,4,4,4,5,5,4,4,5,5,4,4,4,4,5,5)$$ Edge set is: \begin{eqnarray*} (0,12), (0,15), (0,20), (1,6), (1,9), (1,11), (1,20), (2,5), \\(2,8), (2,14), (2,20), (3,4), (3,7), (3,10), (3,20), (4,11), \\(4,14), (4,19), (5,10), (5,15), (5,19), (6,7), (6,12), (6,13), \\(6,19), (7,15), (7,18), (8,11), (8,13), (8,18), (9,10), (9,14),\\ (9,18), (10,13), (10,17), (11,15), (11,17), (12,14), (12,17), (13,16),\\ (14,16), (15,16), (16,21), (17,21), (18,21), (19,21), (20,21) \end{eqnarray*}
\noindent The sink nodes are vertices $11$ and $21$, which have sets of children $\{1,4,8,15,17\}$ and $\{16,17,18,19,20\}$ respectively.
\noindent Set $S_{\Gamma_{2},3}$ is given below. Every element of $S_{\Gamma_{2},3}$ contains no vertex of degree $5$ and contains at least one child of a sink node.
\begin{eqnarray*} S_{\Gamma_{2},3} &=&\{(0,4,13), (0,4,18), (0,8,19), (0,9,19), \\&&(2,7,17), (3,8,12), (5,12,18)\} \end{eqnarray*}
\begin{comment} S_{\Gamma_{2},2} &=& \{(0,4), (0,8), (0,9), (0,10), (0,13), (0,18), (0,19),\\&& (1,5), (1,16), (2,6), (2,7), (2,17), (3,8), (3,12),\\&& (3,16), (4,13), (4,18), (5,12), (5,18), (7,14), (7,17), \\&&(8,12), (8,19), (9,15), (9,19), (12,18), (13,20)\}\\
\noindent There are $4$ sets of $4$ elements of $S_{\Gamma_{2},3}$ such that the only vertex to appear twice is the vertex of degree $3$. Of these, there are only $2$ sets, such that the remaining $4$ vertices of degree less than $5$ constitute two non-intersecting elements of $S_{\Gamma_{2},2}$. The sets are $\{(0,4,13), (0,9,19), (2,7,17), (3,8,12)\}$ and $\{(0,4,13), (0,9,19), (2,7,17), (5,12,17)\}$ and one of the pairs is $(1,16)$ in each case. Note that in each case, there are edges from the elements of pair $(1,16)$ to both triples containing $0$ and one of the other triples. \end{comment}
The neighbourhoods of each vertex of degree $4$ are given below, with vertices of degree $5$ shown in bold. It can be seen that every vertex of degree $4$ has at least two neighbours of degree $4$ or $3$, except vertices $1$, $16$ and $17$. \begin{eqnarray*} Nb(1) &=& {\bf 6}, 9, {\bf 11}, {\bf 20}\\ Nb(2) &=&5,8,{\bf 14},{\bf 20} \\ Nb(3) &=&4,7,{\bf 10},{\bf 20}\\ Nb(4) &=&3,{\bf 11},{\bf 14},19 \\ Nb(5) &=&2,{\bf 10},{\bf 15},19 \\ Nb(7) &=&3,{\bf 6},{\bf 15},18 \\ Nb(8) &=&2,{\bf 11},13,18 \\ Nb(9) &=&1,{\bf 10},{\bf 14},18 \\ Nb(12) &=&0,{\bf 6},{\bf 14},17 \\ Nb(13) &=&{\bf 6},8,{\bf 10},16 \\ Nb(16) &=&13,{\bf 14},{\bf 15},{\bf 21} \\ Nb(17) &=&{\bf 10},{\bf 11},12,{\bf 21}\\ Nb(18) &=&7,8,9,{\bf 21}\\ Nb(19) &=&4,5,{\bf 6},{\bf 21} \end{eqnarray*}
The set of vertices of degree less than $5$ that are adjacent to $1$, $16$ and $17$ is $\{9,13,12\}$. No two of these vertices are in an element of $S_{\Gamma_{2},3}$ with the vertex of degree $3$ ($0$).
\section*{Appendix D}\label{appendix23} The $7$ extremal graphs $(23,50)$. Note that graphs $\Gamma_{0}-\Gamma_{1}$ have $(deg_{3},deg_{4},deg_{5})=(0,15,8)$, graphs $\Gamma_{2}-\Gamma_{4}$ have $(deg_{3},deg_{4},deg_{5})=(1,13,9)$ and graphs $\Gamma_{5}-\Gamma_{6}$ have $(deg_{3},deg_{4},deg_{5})=(2,11,10)$.
\begin{comment} \begin{figure}
\caption{Extremal graph $A_{0}$ }
\label{fig:graph23_0}
\end{figure}
\begin{figure}
\caption{Extremal graph $A_{1}$ }
\label{fig:graph23_1}
\end{figure}
\begin{figure}
\caption{Extremal graphs on $23$ vertices, with $(deg_{3},deg_{4},deg_{5})=(0,5,18)$ }
\label{fig:graphs23A}
\end{figure}
\begin{figure}
\caption{Extremal graphs on $23$ vertices, with $(deg_{3},deg_{4},deg_{5})=(1,13,9)$ }
\label{fig:graphs23B}
\end{figure} \end{comment}
\noindent $\Gamma_{0}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{ccccccccccccccccccccccccc} 0&0&0&0&0&0&0&1&0&0&1&0&0&0&0&0&1&0&0&0&0&1&0\\ 0&0&0&0&0&0&1&0&0&1&0&0&0&0&0&1&0&0&0&0&0&1&0\\ 0&0&0&0&0&1&0&0&0&0&0&0&0&1&1&0&0&0&0&0&0&1&0\\ 0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&0&1&0\\ 0&0&0&1&0&0&0&0&0&0&1&0&0&1&0&0&0&0&0&0&1&0&0\\ 0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&1&0&0\\ 0&1&0&0&0&0&0&0&0&0&0&1&0&0&0&0&1&0&0&0&1&0&0\\ 1&0&0&0&0&0&0&0&0&1&0&0&1&0&1&0&0&0&0&0&1&0&0\\ 0&0&0&1&0&1&0&0&0&0&0&0&0&0&0&0&1&0&0&1&0&0&0\\ 0&1&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0\\ 1&0&0&0&1&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0\\ 0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0\\ 0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&1&0&0&0&0\\ 0&0&1&0&1&0&0&0&0&1&0&0&0&0&0&0&1&0&1&0&0&0&0\\ 0&0&1&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0\\ 0&1&0&0&0&1&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0&0&0\\ 1&0&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0&1&0&0&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1\\ 1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&1&0\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(4,4,4,4,4,4,4,5,4,4,5,4,4,5,4,5,5,4,4,4,5,5,5)$$ Edge set is: \begin{eqnarray*} (0,7), (0,10), (0,16), (0,21), (1,6), (1,9), (1,15), (1,21), (2,5), \\ (2,13), (2,14), (2,21), (3,4), (3,8), (3,12), (3,21), (4,10), (4,13),\\
(4,20), (5,8), (5,15), (5,20), (6,11), (6,16), (6,20), (7,9), (7,12),\\ (7,14), (7,20), (8,16), (8,19), (9,13), (9,19), (10,11), (10,15), (10,19),\\
(11,14), (11,18), (12,15), (12,18), (13,16), (13,18), (14,17),\\
(15,17), (16,17), (17,22), (18,22), (19,22), (20,22), (21,22) \end{eqnarray*}
\noindent The sink nodes are vertices $20$ and $22$, which have sets of children $\{4,5,6,7,22\}$ and $\{17,18,19,20,21\}$ respectively.
\noindent Sets $S_{\Gamma_{0},3}$ and $S_{\Gamma_{0},4}$ are given below.
Neither set has elements containing vertices of degree $5$. Every element of $S_{\Gamma_{0},4}$ contains at least one child of a sink node.
\begin{eqnarray*} S_{\Gamma_{0},3} &=&\{(0,5,18), (1,4,14), (1,8,14), (1,8,18), (2,6,12), (2,6,19),\\&& (2,12,19), (3,9,11), (3,9,17), (5,9,11), (6,12,19)\}\\ S_{\Gamma_{0},4}&= &\{(2,6,12,19)\} \end{eqnarray*}
The subgraph on vertices of degree $4$ has vertex set $\{0,1,2,3,4,5,6,8,9,11,12,14,17,18,19\}$ and degree sequence $$(0,2,2,3,1,2,2,3,2,3,2,3,1,2,2).$$ The only vertex of degree $0$ in this subgraph is vertex $0$. This vertex has a common neighbour of degree $5$ with each of the vertices of degree $1$ in the subgraph (vertices $4$ and $17$), namely $10$ and $16$ respectively.
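These statistics can be re-derived in the same way as the checks in Appendix A. A short sketch (ours) that rebuilds $\Gamma_{0}$ of this appendix from the edge set above and recovers the degree-$4$ subgraph and the common neighbours just described:
\begin{verbatim}
import networkx as nx

# Edge set of Gamma_0 of this appendix, copied from above.
edges = [(0,7),(0,10),(0,16),(0,21),(1,6),(1,9),(1,15),(1,21),(2,5),(2,13),
         (2,14),(2,21),(3,4),(3,8),(3,12),(3,21),(4,10),(4,13),(4,20),(5,8),
         (5,15),(5,20),(6,11),(6,16),(6,20),(7,9),(7,12),(7,14),(7,20),(8,16),
         (8,19),(9,13),(9,19),(10,11),(10,15),(10,19),(11,14),(11,18),(12,15),
         (12,18),(13,16),(13,18),(14,17),(15,17),(16,17),(17,22),(18,22),
         (19,22),(20,22),(21,22)]
G = nx.Graph(edges)

deg4 = sorted(v for v in G if G.degree(v) == 4)
H = G.subgraph(deg4)                    # subgraph on the vertices of degree 4
print([H.degree(v) for v in deg4])      # expected: 0,2,2,3,1,2,2,3,2,3,2,3,1,2,2

isolated = [v for v in deg4 if H.degree(v) == 0]    # expected: [0]
pendant = [v for v in deg4 if H.degree(v) == 1]     # expected: [4, 17]
for v in pendant:
    print(v, set(G[isolated[0]]) & set(G[v]))       # expected: {10} and {16}
\end{verbatim}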
\noindent $\Gamma_{1}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{ccccccccccccccccccccccccc} 0&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&1&0&0&0&0&1&0\\ 0&0&0&0&0&0&1&0&0&0&1&0&1&0&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&1&0\\ 0&0&0&0&1&0&0&0&1&0&0&1&0&0&1&0&0&0&0&0&0&1&0\\ 0&0&0&1&0&0&0&0&0&0&1&0&0&1&0&1&0&0&0&0&1&0&0\\ 0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&1&0&0\\ 0&1&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0&0&0&0&1&0&0\\ 1&0&0&0&0&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0&1&0&0\\ 0&0&0&1&0&1&0&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0\\ 0&0&1&0&0&0&1&0&0&0&0&0&0&1&1&0&0&0&0&1&0&0&0\\ 0&1&0&0&1&0&0&0&0&0&0&0&0&0&0&0&1&0&0&1&0&0&0\\ 0&0&0&1&0&0&1&0&0&0&0&0&0&0&0&0&1&0&1&0&0&0&0\\ 0&1&0&0&0&0&0&1&1&0&0&0&0&0&0&1&0&0&1&0&0&0&0\\ 1&0&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0\\ 0&0&0&1&0&0&0&1&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0\\ 0&0&1&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1&0&0&0&0&0\\ 1&0&0&0&0&1&0&0&0&0&1&1&0&0&0&0&0&1&0&0&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1\\ 1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&1&0\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(4,4,4,5,5,4,4,4,4,5,4,4,5,4,4,4,5,4,4,4,5,5,5)$$ Edge set is: \begin{eqnarray*} (0,7), (0,13), (0,16), (0,21), (1,6), (1,10), (1,12), (1,21), (2,5), \\ (2,9), (2,15), (2,21), (3,4), (3,8), (3,11), (3,14), (3,21), (4,10),\\ (4,13), (4,15), (4,20), (5,8), (5,16), (5,20), (6,9), (6,11), (6,20),\\ (7,12), (7,14), (7,20), (8,12), (8,19), (9,13), (9,14), (9,19), (10,16),\\ (10,19), (11,16), (11,18), (12,15), (12,18), (13,18), (14,17),\\ (15,17), (16,17), (17,22), (18,22), (19,22), (20,22), (21,22) \end{eqnarray*}
\noindent Sets $S_{\Gamma_{1},3}$ and $S_{\Gamma_{1},4}$ are given below. Vertices of degree $5$ are written in bold.
\begin{eqnarray*} S_{\Gamma_{1},3} &=&\{ (0,6,8), (0,6,15), (0,15,19), (1,5,13), (1,5,14), (1,13,17), \\ &&(2,7,10), (2,7,11), (2,10,18), (5,14,18), (6,8,17), (7,11,19),\\ && (8,13,17), ({\bf 9},{\bf 12},{\bf 16}), (10,14,18), (11,15,19), \}\\ S_{\Gamma_{1},4}&= &\{\} \end{eqnarray*}
The subgraph on vertices of degree $4$ has vertex set $\{0,1,2,5,6,7,8,10,11,13,14,15,17,18,19\}$ and degree sequence $(2,2,2,2,2,2,2,2,2,2,2,2,2,2,2)$.
\noindent $\Gamma_{2}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{ccccccccccccccccccccccccc} 0&0&0&0&0&0&0&1&0&0&1&0&0&0&0&0&1&0&0&0&0&1&0\\ 0&0&0&0&0&0&1&0&0&1&0&0&0&1&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&1&0&0&0&0&0&1&0\\ 0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&1&0\\ 0&0&0&1&0&0&0&0&0&0&1&0&1&0&0&0&0&0&0&0&1&0&0\\ 0&0&1&0&0&0&0&0&1&0&0&1&0&0&0&0&0&0&0&0&1&0&0\\ 0&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0\\ 1&0&0&0&0&0&0&0&0&0&0&0&0&1&1&0&0&0&0&0&1&0&0\\ 0&0&0&1&0&1&0&0&0&0&0&0&0&1&0&0&1&0&0&1&0&0&0\\ 0&1&0&0&0&0&0&0&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0\\ 1&0&0&0&1&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0\\ 0&0&0&0&0&1&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0\\ 0&0&1&0&1&0&0&0&0&1&0&0&0&0&0&0&1&0&1&0&0&0&0\\ 0&1&0&0&0&0&0&1&1&0&0&0&0&0&0&1&0&0&1&0&0&0&0\\ 0&0&0&1&0&0&0&1&0&1&0&1&0&0&0&0&0&1&0&0&0&0&0\\ 0&0&1&0&0&0&0&0&0&0&1&0&0&1&0&0&0&1&0&0&0&0&0\\ 1&0&0&0&0&0&1&0&1&0&0&0&1&0&0&0&0&1&0&0&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1\\ 1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&1&0\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(4,4,4,4,4,4,3,4,5,4,5,4,5,5,5,4,5,4,4,4,5,5,5)$$ Edge set is: \begin{eqnarray*} (0,7), (0,10), (0,16), (0,21), (1,6), (1,9), (1,13), (1,21), (2,5), \\ (2,12), (2,15), (2,21), (3,4), (3,8), (3,14), (3,21), (4,10), (4,12),\\ (4,20), (5,8), (5,11), (5,20), (6,16), (6,20), (7,13), (7,14), (7,20),\\ (8,13), (8,16), (8,19), (9,12), (9,14), (9,19), (10,11), (10,15), (10,19),\\ (11,14), (11,18), (12,16), (12,18), (13,15), (13,18), (14,17),\\ (15,17), (16,17), (17,22), (18,22), (19,22), (20,22), (21,22) \end{eqnarray*}
\noindent Sets $S_{\Gamma_{2},3}$ and $S_{\Gamma_{2},4}$ are given below. Vertices of degree $5$ are written in bold.
\begin{eqnarray*} S_{\Gamma_{2},3} &=&\{(0,5,9), (1,4,17), (1,5,17), (2,6,{\bf 14}), (2,6,19), \\&&(2,7,19), (3,6,15), (3,6,18), (9,15,{\bf 20})\}\\ S_{\Gamma_{2},4}&= &\{\} \end{eqnarray*}
\noindent $\Gamma_{3}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{ccccccccccccccccccccccccc} 0&0&0&0&0&0&0&1&0&0&1&0&0&1&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&0&0&1&0&0&1&0&0&0&0&0&0&1&0&0&0&0&1&0\\ 0&0&0&0&0&1&0&0&1&0&0&0&1&0&0&1&0&0&0&0&0&1&0\\ 0&0&0&0&1&0&0&0&0&0&0&1&0&0&1&0&0&0&0&0&0&1&0\\ 0&0&0&1&0&0&0&0&0&0&0&0&0&1&0&0&1&0&0&0&1&0&0\\ 0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&1&0&0\\ 0&1&0&0&0&0&0&0&1&0&0&1&0&0&0&0&0&0&0&0&1&0&0\\ 1&0&0&0&0&0&0&0&0&1&0&0&1&0&1&0&0&0&0&0&1&0&0\\ 0&0&1&0&0&0&1&0&0&0&0&0&0&1&1&0&0&0&0&1&0&0&0\\ 0&1&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0\\ 1&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&0&0&1&0&0&0\\ 0&0&0&1&0&0&1&0&0&0&1&0&0&0&0&1&0&0&1&0&0&0&0\\ 0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&0&0\\ 1&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&0&0&1&0&0&0&0\\ 0&0&0&1&0&0&0&1&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0\\ 0&0&1&0&0&0&0&0&0&1&0&1&0&0&0&0&0&1&0&0&0&0&0\\ 0&1&0&0&1&0&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1\\ 1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&1&0\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(4,4,5,4,4,3,4,5,5,4,5,5,4,4,4,4,5,4,4,4,5,5,5)$$ Edge set is: \begin{eqnarray*} (0,7), (0,10), (0,13), (0,21), (1,6), (1,9), (1,16), (1,21), (2,5),\\ (2,8), (2,12), (2,15), (2,21), (3,4), (3,11), (3,14), (3,21), (4,13),\\ (4,16), (4,20), (5,10), (5,20), (6,8), (6,11), (6,20), (7,9), (7,12),\\ (7,14), (7,20), (8,13), (8,14), (8,19), (9,15), (9,19), (10,11), (10,16),\\ (10,19), (11,15), (11,18), (12,16), (12,18), (13,18), (14,17),\\ (15,17), (16,17), (17,22), (18,22), (19,22), (20,22), (21,22) \end{eqnarray*}
\noindent The sink nodes are vertices $21$ and $22$, which have sets of children $\{0,1,2,3,22\}$ and $\{17,18,19,20,21\}$ respectively.
\noindent Sets $S_{\Gamma_{3},3}$ and $S_{\Gamma_{3},4}$ are given below. Neither set has elements containing vertices of degree $5$. Every element of $S_{\Gamma_{3},4}$ contains the vertex of degree $3$ and at least one vertex of degree $4$ that is the child of a sink node. \begin{eqnarray*} S_{\Gamma_{3},3} &=&\{(0,6,17), (1,5,13), (1,5,14), (1,5,18), (1,14,18), (3,5,9),\\&& (3,12,19), (5,9,13), (5,9,18), (5,13,17), (5,14,18)\}\\ S_{\Gamma_{3},4}&= &\{(1,5,14,18)\} \end{eqnarray*}
\noindent $\Gamma_{4}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{ccccccccccccccccccccccccc} 0&0&0&0&0&0&0&1&0&0&1&0&0&1&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&0&0&1&0&0&1&0&0&1&0&0&0&1&0&0&0&0&1&0\\ 0&0&0&0&0&1&0&0&1&0&0&1&0&0&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&1&0\\ 0&0&0&1&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&0&1&0&0\\ 0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&1&0&0\\ 0&1&0&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&1&0&0\\ 1&0&0&0&0&0&0&0&0&0&0&1&0&0&1&0&0&0&0&0&1&0&0\\ 0&0&1&0&0&0&1&0&0&0&0&0&0&1&1&0&0&0&0&1&0&0&0\\ 0&1&0&0&1&0&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0\\ 1&0&0&0&0&1&0&0&0&0&0&0&1&0&0&1&0&0&0&1&0&0&0\\ 0&0&1&0&0&0&0&1&0&1&0&0&0&0&0&1&0&0&1&0&0&0&0\\ 0&1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0\\ 1&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&1&0&0&0&0\\ 0&0&0&0&0&0&0&1&1&0&0&0&1&0&0&0&0&1&0&0&0&0&0\\ 0&0&0&1&0&0&1&0&0&0&1&1&0&0&0&0&0&1&0&0&0&0&0\\ 0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1\\ 1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&1&0\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(4,5,4,3,4,4,4,4,5,4,5,5,4,5,4,5,4,4,4,4,5,5,5)$$ Edge set is: \begin{eqnarray*} (0,7), (0,10), (0,13), (0,21), (1,6), (1,9), (1,12), (1,16), (1,21), \\ (2,5), (2,8), (2,11), (2,21), (3,4), (3,15), (3,21), (4,9), (4,13),\\ (4,20), (5,10), (5,16), (5,20), (6,8), (6,15), (6,20), (7,11), (7,14),\\ (7,20), (8,13), (8,14), (8,19), (9,11), (9,19), (10,12), (10,15), (10,19),\\ (11,15), (11,18), (12,14), (12,18), (13,16), (13,18), (14,17),\\ (15,17), (16,17), (17,22), (18,22), (19,22), (20,22), (21,22) \end{eqnarray*}
\noindent There is one sink node, vertex $22$, which has set of children $\{17,18,19,20,21\}$.
\noindent Sets $S_{\Gamma_{4},3}$ and $S_{\Gamma_{4},4}$ are given below. Neither set has elements containing vertices of degree $5$. All elements of $S_{\Gamma_{4},4}$ contain the vertex of degree $3$ and at least one vertex of degree $4$ that is the child of a sink node. \begin{eqnarray*} S_{\Gamma_{4},3} &=&\{(0,9,17), (2,4,12), (2,4,17), (3,5,14), (3,5,18),\\&& (3,7,16), (3,7,19), (3,16,19), (5,9,14), (7,16,19)\}\\ S_{\Gamma_{4},4}&= &\{(3,7,16,19)\} \end{eqnarray*}
\noindent $\Gamma_{5}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{ccccccccccccccccccccccccc} 0&0&0&0&0&0&0&0&0&0&1&0&0&1&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&0&0&0&1&0&1&0&0&0&0&0&0&1&0&0&0&0&1&0\\ 0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&1&0&0&0&0&0&1&0\\ 0&0&0&0&0&1&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&1&0\\ 0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&1&0&0&0&0&1&0&0\\ 0&0&0&1&0&0&0&0&0&1&0&0&1&0&0&0&0&0&0&0&1&0&0\\ 0&0&1&0&0&0&0&0&1&0&0&1&0&0&0&0&0&0&0&0&1&0&0\\ 0&1&0&0&0&0&0&0&0&0&0&0&0&1&1&0&0&0&0&0&1&0&0\\ 0&0&0&1&0&0&1&0&0&0&0&0&0&1&0&0&1&0&0&1&0&0&0\\ 0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0\\ 1&0&0&0&1&0&0&0&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0\\ 0&0&0&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&1&0&0&0&0\\ 0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&1&0&1&0&0&0&0\\ 1&0&0&0&0&0&0&1&1&0&0&0&0&0&0&1&0&0&1&0&0&0&0\\ 0&0&0&1&0&0&0&1&0&0&1&1&0&0&0&0&0&1&0&0&0&0&0\\ 0&0&1&0&1&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&0\\ 0&1&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&1&0&0&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1\\ 1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&1&0\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(3,4,4,4,3,4,4,4,5,5,5,4,5,5,5,5,4,4,4,4,5,5,5)$$ Edge set is: \begin{eqnarray*} (0,10), (0,13), (0,21), (1,7), (1,9), (1,16), (1,21), (2,6), (2,12), \\ (2,15), (2,21), (3,5), (3,8), (3,14), (3,21), (4,10), (4,15), (4,20),\\ (5,9), (5,12), (5,20), (6,8), (6,11), (6,20), (7,13), (7,14), (7,20),\\ (8,13), (8,16), (8,19), (9,11), (9,15), (9,19), (10,12), (10,14), (10,19),\\ (11,14), (11,18), (12,16), (12,18), (13,15), (13,18), (14,17),\\ (15,17), (16,17), (17,22), (18,22), (19,22), (20,22), (21,22) \end{eqnarray*}
\noindent Sets $S_{\Gamma_{5},3}$ and $S_{\Gamma_{5},4}$ are given below. Vertices of degree $5$ are written in bold.
\begin{eqnarray*} S_{\Gamma_{5},3} &=&\{(0,5,17), (0,6,17), (0,11,16), (0,16,{\bf 20}), (1,4,18),\\&& (1,6,{\bf 10}), (2,7,19), (3,4,18), (4,11,16), (4,11,{\bf 21})\}\\ S_{\Gamma_{5},4}&= &\{\} \end{eqnarray*}
\noindent $\Gamma_{6}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{ccccccccccccccccccccccccc} 0&0&0&0&0&0&0&1&0&0&0&1&0&0&1&0&0&0&0&0&0&1&0\\ 0&0&0&0&0&0&1&0&0&0&1&0&0&1&0&0&1&0&0&0&0&1&0\\ 0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&1&0\\ 0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&0&1&0\\ 0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1&0&0&0&1&0&0\\ 0&0&1&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0\\ 0&1&0&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&1&0&0\\ 1&0&0&0&0&0&0&0&0&1&0&0&1&0&0&0&0&0&0&0&1&0&0\\ 0&0&0&1&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1&0&0&0\\ 0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&1&0&0&0\\ 0&1&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0\\ 1&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&0&1&0&0&0\\ 0&0&0&1&0&0&0&1&0&0&1&0&0&0&0&1&0&0&1&0&0&0&0\\ 0&1&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0\\ 1&0&0&0&0&1&0&0&1&0&0&0&0&0&0&0&1&0&1&0&0&0&0\\ 0&0&1&0&0&0&1&0&0&0&0&1&1&0&0&0&0&1&0&0&0&0&0\\ 0&1&0&0&1&0&0&0&0&1&0&0&0&0&1&0&0&1&0&0&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1\\ 1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&1&0\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(4,5,4,4,4,4,4,4,4,4,4,5,5,3,5,5,5,3,4,5,5,5,5)$$ Edge set is: \begin{eqnarray*} (0,7), (0,11), (0,14), (0,21), (1,6), (1,10), (1,13), (1,16), (1,21),\\ (2,5), (2,9), (2,15), (2,21), (3,4), (3,8), (3,12), (3,21), (4,11),\\ (4,16), (4,20), (5,10), (5,14), (5,20), (6,8), (6,15), (6,20), (7,9),\\ (7,12), (7,20), (8,14), (8,19), (9,16), (9,19), (10,12), (10,19), (11,13),\\ (11,15), (11,19), (12,15), (12,18), (13,18), (14,16), (14,18),\\ (15,17), (16,17), (17,22), (18,22), (19,22), (20,22), (21,22) \end{eqnarray*}
\noindent The sink nodes are vertices $19$, $21$ and $22$, which have sets of children $\{8,9,10,11,22\}$, $\{0,1,2,3,22\}$ and $\{17,18,19,20,21\}$ respectively.
\noindent Sets $S_{\Gamma_{6},3}$ and $S_{\Gamma_{6},4}$ are given below. Neither set has elements containing vertices of degree $5$. All elements of $S_{\Gamma_{6},4}$ contain the two vertices of degree $3$ and at least one vertex of degree $4$ that is the child of a sink node. \begin{eqnarray*} S_{\Gamma_{6},3} &=&\{(0,10,17), (2,4,18), (2,8,13), (3,5,13), (3,5,17),\\&& (3,9,13), (3,13,17), (5,13,17), (6,9,18), (7,8,13), \\&&(7,8,17), (7,13,17), (8,13,17), \}\\ S_{\Gamma_{6},4}&= &\{(3,5,13,17), (7,8,13,17)\} \end{eqnarray*}
\section*{Appendix E}\label{appendix24} The extremal graph $(24,54)$. Here $(deg_{4},deg_{5}) = (12,12)$.
\noindent $\Gamma_{0}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{cccccccccccccccccccccccccc} 0&0&0&0&0&0&0&0&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&1\\ 0&0&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0\\ 0&0&0&0&1&0&0&1&0&0&1&0&0&0&0&0&0&0&0&1&0&0&1&0\\ 0&0&0&0&0&0&0&0&0&0&1&0&0&1&0&0&0&0&1&0&0&1&0&1\\ 0&0&1&0&0&0&0&0&0&0&0&1&1&0&0&0&0&0&1&0&0&0&0&0\\ 0&1&0&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&1&0&0&0&0&0\\ 0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&1&1&0\\ 0&0&1&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&0&0\\ 0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0&0&0&0\\ 0&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0&1&0&0&0&0&0&0&1\\ 0&0&1&1&0&0&0&0&1&0&0&0&0&0&1&0&1&0&0&0&0&0&0&0\\ 1&0&0&0&1&0&1&0&0&0&0&0&0&1&0&0&1&0&0&0&0&0&0&0\\ 0&0&0&0&1&0&0&0&1&1&0&0&0&0&0&1&0&0&0&0&0&1&0&0\\ 0&1&0&1&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&0\\ 1&0&0&0&0&1&0&0&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&1&0&1&0\\ 0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&1&0&0&0\\ 0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0\\ 0&0&0&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0\\ 1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&1&0&0&0&0\\ 0&0&0&1&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0\\ 0&0&1&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&1\\ 1&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0&0&1&0\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(4,4,5,5,4,5,5,4,4,5,5,5,5,5,4,5,4,4,4,5,5,4,4,4)$$ Edge set is: \begin{eqnarray*} (0,11), (0,14), (0,19), (0,23), (1,5), (1,8), (1,13), (1,19), (2,4), (2,7), \\(2,10), (2,19), (2,22), (3,10), (3,13), (3,18), (3,21), (3,23), (4,11), (4,12), \\(4,18), (5,6), (5,9), (5,14), (5,18), (6,11), (6,17), (6,21), (6,22), (7,9),\\ (7,13), (7,17), (8,10), (8,12), (8,17), (9,12), (9,16), (9,23), (10,14),\\ (10,16),(11,13), (11,16), (12,15), (12,21), (13,15), (14,15), (15,20),\\ (15,22), (16,20), (17,20), (18,20), (19,20), (19,21), (22,23), \end{eqnarray*}
\noindent Set $S_{\Gamma_{0},3}$ is given below. Elements of $S_{\Gamma_{0},3}$ contain only vertices of degree $4$. \begin{eqnarray*} S_{\Gamma_{0},3} &=&\{(0,7,18), (0,8,18), (1,4,23), (1,16,22), (4,14,17), \\ &&(4,17,23), (7,14,21), (8,18,22)\}\\ \end{eqnarray*}
\noindent The subgraph on the vertices of degree $4$, $\Gamma_{4}$, has vertex set $$V_{4}=\{0,1,4,7,8,14,16,17,18,21,22,23\}$$ and edge set $E_{4}=\{(0,14), (0,23), (1,8), (4,18), (7,17), (8,17), (22,23)\}$.
\noindent
The only set of $4$ non-intersecting elements of $S_{\Gamma_{0},3}$ is $$X=\{(0,8,18),(1,16,22),(4,17,23),(7,14,21)\}$$ The only pair $X_{1},X_{2}\in X$ such that for all $x_{1}\in X_{1}$, $x_{2}\in X_{2}$ there is no edge $(x_{1},x_{2}) \in \Gamma$ is $X_{1}=(1,16,22)$ and $X_{2}=(7,14,21)$.
\noindent There are $10$ sets of $3$ non-intersecting elements of $S_{\Gamma_{0},3}$. These are shown in Table \ref{missedTable}. In each case, at least $2$ of the vertices of degree $4$ not contained in any set are in an edge with a vertex that is in one of the sets.
\begin{table}
\begin{tabular}{|cc|} \hline Non-intersecting sets & vertices missed\\ \hline $(0,7,18)$, $(1,16,22)$, $(4,14,17)$ & $8$, $21$, $23$\\ $(0,7,18)$, $(1,16,22)$, $(4,17,23)$ & $8$, $14$, $21$\\ $(0,8,18)$, $(1,4,23)$, $(7,14,21)$ & $16$, $17$, $22$\\ $(0,8,18)$, $(1,16,22)$, $(4,14,17)$ & $7$, $21$, $23 $\\ $(0,8,18)$, $(1,16,22)$, $(4,17,23)$ & $7$, $14$, $21$\\ $(0,8,18)$, $(1,16,22)$, $(7,14,21)$ & $4$, $17$, $23$\\ $(0,8,18)$, $(4,17,23)$, $(7,14,21)$ & $1$, $16$, $22$\\ $(1,4,23)$, $(7,14,21)$, $(8,18,22)$ & $0$, $16$, $17$\\ $(1,16,22)$, $(4,17,23)$, $(7,14,21)$ & $0$, $8$, $18$\\ $(4,17,23)$, $(7,14,21)$, $(8,18,22)$ & $0$, $1$, $16$\\ \hline \end{tabular} \caption{Sets of $3$ non-intersecting elements of $S_{\Gamma,3}$ and the points missed by those elements \label{missedTable}} \end{table}
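The statements about non-intersecting elements can be re-checked from the data listed in this appendix. The sketch below (ours) uses the sets $S_{\Gamma_{0},3}$ and $E_{4}$ given above; since every element of $S_{\Gamma_{0},3}$ consists of vertices of degree $4$, the edge set $E_{4}$ contains every edge of $\Gamma$ between two such elements.
\begin{verbatim}
from itertools import combinations

S3 = [(0,7,18),(0,8,18),(1,4,23),(1,16,22),(4,14,17),
      (4,17,23),(7,14,21),(8,18,22)]
E4 = {frozenset(e) for e in [(0,14),(0,23),(1,8),(4,18),(7,17),(8,17),(22,23)]}

def disjoint(ts):
    return all(set(a).isdisjoint(b) for a, b in combinations(ts, 2))

def no_edge(a, b):
    return all(frozenset((x, y)) not in E4 for x in a for y in b)

quads = [q for q in combinations(S3, 4) if disjoint(q)]
print(quads)                  # expected: only the set X given above

for a, b in combinations(quads[0], 2):
    if no_edge(a, b):
        print(a, b)           # expected: (1,16,22) and (7,14,21)

triples = [t for t in combinations(S3, 3) if disjoint(t)]
print(len(triples))           # expected: 10, the rows of the table above
\end{verbatim}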
\section*{Appendix F}\label{appendix25} The $6$ extremal graphs on $25$ vertices. In each case there are $57$ edges. Graphs $\Gamma_{0},\ldots, \Gamma_{2}$ have $(deg_{3},deg_{4},deg_{5})=(1,9,15)$ and graphs $\Gamma_{3},\ldots,\Gamma_{5}$ have $(deg_{4},deg_{5})=(11,14)$.
\noindent $\Gamma_{0}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{ccccccccccccccccccccccccccccc} 0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&0&1\\ 0&0&0&0&0&0&0&1&0&0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0\\ 0&0&0&0&0&0&1&0&0&1&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0\\ 0&0&0&0&0&1&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0\\ 0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0&1&0&0&1\\ 0&0&1&0&0&0&0&0&0&0&0&1&1&0&0&1&0&0&0&0&0&1&0&0&0\\ 0&1&0&0&0&0&0&0&1&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0\\ 0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1&0&0&0&0\\ 0&0&1&0&0&1&0&0&0&0&0&0&0&0&1&0&0&1&0&0&1&0&0&0&0\\ 0&1&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&1\\ 1&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0\\ 0&0&0&1&0&0&1&0&0&0&1&0&0&0&0&0&0&1&0&1&0&0&0&0&0\\ 0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&1&0&0&0&0&1\\ 0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0\\ 0&0&0&0&0&0&1&0&1&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1\\ 0&0&0&1&1&0&0&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0&0&0&0\\ 1&0&0&0&0&0&0&1&0&1&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&1&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0\\ 1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&1&0&0\\ 1&0&0&0&0&1&0&0&0&0&1&0&0&1&0&1&0&0&0&0&0&0&0&0&0\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(4,4,4,5,3,4,5,5,4,5,4,4,5,5,5,5,5,5,4,4,5,5,5,5,5)$$ Edge set is: \begin{eqnarray*} (0,11), (0,17), (0,22), (0,24), (1,7), (1,10), (1,14), (1,22), (2,6), (2,9), \\ (2,13), (2,22), (3,5), (3,8), (3,12), (3,16), (3,22), (4,14), (4,16), (4,21),\\
(5,9), (5,21), (5,24), (6,11), (6,12), (6,15), (6,21), (7,8), (7,13), (7,17),\\
(7,21), (8,15), (8,20), (9,14), (9,17), (9,20), (10,12), (10,20),
(10,24), \\ (11,16), (11,20), (12,17), (12,19), (13,16), (13,19), (13,24), (14,15), (14,19),\\ (15,18), (15,24), (16,18), (17,18), (18,23), (19,23), (20,23), (21,23), (22,23) \end{eqnarray*}
\noindent Sets $S_{\Gamma_{0},3}$ and $S_{\Gamma_{0},4}$ are given below. Neither set has elements containing vertices of degree $5$. \begin{eqnarray*} S_{\Gamma_{0},3} &=&\{(0,4,8), (0,8,19), (1,5,11), (1,5,18), (2,4,8), \\&&(2,4,10), (2,10,18), (5,11,19)\}\\ S_{\Gamma_{0},4}&= &\{\} \end{eqnarray*}
\noindent Note that the only sets of $3$ parallel elements from $S_{\Gamma_{0},3}$ which contain the vertex of degree $3$ (i.e. $4$) in the first set and for which neither of the other sets contains a neighbour of the vertex of degree $3$ (i.e. $14$, $16$ or $21$) are: $(0,4,8)$, $(1,5,11)$, $(2,10,18)$; $(0,4,8)$, $(2,10,18)$, $(5,11,19)$; $(2,4,10)$, $(0,8,19)$, $(1,5,11)$ and $(2,4,10)$, $(0,8,19)$, $(1,5,18)$. In all cases there is an edge from the first set to one of the other sets (via edge $(0,11)$ or $(1,10)$).
\noindent $\Gamma_{1}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{ccccccccccccccccccccccccccccc} 0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&0&0\\ 0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&1&0&0\\ 0&0&0&0&0&1&0&0&0&1&0&0&0&0&1&1&0&0&0&0&0&0&1&0&0\\ 0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1\\ 0&0&0&1&0&0&0&0&0&0&0&1&0&0&1&0&1&0&0&0&0&1&0&0&0\\ 0&0&1&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0&0&0&0&1&0&0&0\\ 0&1&0&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&1\\ 1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0\\ 0&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0\\ 0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1\\ 0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&1&0&0&1&0&0&0&0\\ 1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0\\ 0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1\\ 0&0&0&1&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&1&0&0&0&0&0\\ 0&0&1&0&1&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0\\ 0&0&1&0&0&0&1&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0&0&0&0\\ 0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0\\ 1&0&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&1&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0\\ 1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&1&0&0\\ 0&0&0&1&0&0&1&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(4,4,5,4,5,4,5,4,3,5,5,5,4,5,5,5,4,5,4,4,5,5,5,5,5)$$ Edge set is: \begin{eqnarray*} (0,7), (0,11), (0,17), (0,22), (1,6), (1,10), (1,16), (1,22), (2,5), (2,9), \\ (2,14), (2,15), (2,22), (3,4), (3,13), (3,22), (3,24), (4,11), (4,14), (4,16),\\
(4,21), (5,10), (5,12), (5,21), (6,8), (6,15), (6,21), (6,24), (7,9), (7,13), \\ (7,21), (8,14), (8,20), (9,16), (9,20), (9,24), (10,13), (10,17), (10,20),\\ (11,12), (11,15), (11,20), (12,19), (12,24), (13,15), (13,19), (14,17), (14,19), \\(15,18), (16,18), (17,18), (17,24), (18,23), (19,23), (20,23), (21,23), (22,23) \end{eqnarray*}
\noindent The sink nodes are vertices $2$ and $15$, which have sets of children $\{5,9,14,15,22\}$ and $\{2,6,11,13,18\}$ respectively.
\noindent Sets $S_{\Gamma_{1},3}$ and $S_{\Gamma_{1},4}$ are given below. Vertices of degree $5$ are written in bold. Every element of $S_{\Gamma_{1},4}$ contains the vertex of degree $3$ ($8$), a child of a sink node, and two other vertices of degree $4$. \begin{eqnarray*} S_{\Gamma_{1},3} &=&\{(0,5,8), (0,5,16), (0,{\bf 6},19), (0,8,16), (0,16,19), (1,7,12), (1,7,{\bf 14}),\\ &&(3,5,8), (3,5,18), (3,8,18), (5,8,16), (5,8,18), (7,8,12), (7,8,18), \\&&(7,12,18), (8,12,16), (8,12,18), (8,12,{\bf 22}), (8,{\bf 13},16)\}\\ S_{\Gamma_{1},4}&= &\{(0,5,8,16), (3,5,8,18), (7,8,12,18)\} \end{eqnarray*}
\noindent Note that the only sets of $3$ parallel elements from $S_{\Gamma_{1},3}$ which contain the vertex of degree $3$ (i.e. $8$) in the first set and for which neither of the other sets contains a neighbour of the vertex of degree $3$ (i.e. $6$, $14$ or $20$) are: $(3,8,18)$, $(0,5,16)$, $(1,7,12)$; $(0,8,16)$, $(1,7,12)$, $(3,5,18)$; $(3,5,8)$, $(0,16,19)$, $(1,7,12)$; $(3,8,18)$, $(0,16,19)$, $(1,7,12)$; $(5,8,18)$, $(0,16,19)$, $(1,7,12)$; $(3,5,8)$, $(0,16,19)$, $(7,12,18)$; $(7,8,12)$, $(0,16,19)$, $(3,5,18)$; $(8,12,22)$, $(0,16,19)$, $(3,5,18)$; and $(8,13,16)$, $(1,7,12)$, $(3,5,18)$.
In all cases there is an edge from the first set to one of the other sets (via edge $(16,18)$ or $(5,12)$).
\noindent $\Gamma_{2}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{ccccccccccccccccccccccccccccc} 0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&0&0\\ 0&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&1\\ 0&0&0&0&0&1&0&0&0&0&1&0&0&1&0&0&1&0&0&0&0&0&1&0&0\\ 0&0&0&0&1&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0\\ 0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&1&0&0&1\\ 0&0&1&0&0&0&0&0&0&0&0&0&1&0&0&1&0&0&0&0&0&1&0&0&0\\ 0&1&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0\\ 1&0&0&0&0&0&0&0&1&0&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0\\ 0&0&0&0&0&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0\\ 0&0&0&1&0&0&1&0&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0\\ 0&0&1&0&1&0&0&0&0&0&0&0&0&0&1&0&0&1&0&0&1&0&0&0&0\\ 1&0&0&0&0&0&0&0&0&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0&1\\ 0&0&0&0&0&1&0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&0&0&1\\ 0&0&1&0&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0\\ 0&1&0&0&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&1&0&0&0&0&0\\ 0&0&0&1&0&1&0&0&0&0&0&1&0&0&1&0&0&0&1&0&0&0&0&0&0\\ 0&0&1&0&0&0&0&1&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0&1\\ 1&0&0&0&0&0&1&0&0&0&1&0&1&0&0&0&0&0&1&0&0&0&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&1&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0\\ 1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&1&0&0\\ 0&1&0&0&1&0&0&0&0&0&0&1&1&0&0&0&1&0&0&0&0&0&0&0&0\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(4,4,5,4,4,4,5,5,3,4,5,5,5,4,5,5,5,5,4,4,5,5,5,5,5)$$ Edge set is: \begin{eqnarray*} (0,7), (0,11), (0,17), (0,22), (1,6), (1,14), (1,22), (1,24), (2,5), (2,10),\\ (2,13), (2,16), (2,22), (3,4), (3,9), (3,15), (3,22), (4,10), (4,21), (4,24), \\(5,12), (5,15), (5,21), (6,9), (6,13), (6,17), (6,21), (7,8), (7,14), (7,16),\\ (7,21), (8,12), (8,20), (9,16), (9,20), (10,14), (10,17), (10,20), (11,13), \\ (11,15), (11,20), (11,24), (12,17), (12,19), (12,24), (13,19), (14,15), (14,19), \\(15,18), (16,18), (16,24), (17,18), (18,23), (19,23), (20,23), (21,23), (22,23) \end{eqnarray*}
\noindent The sink node is vertex $10$ which has set of children $\{2,4,14,17,20\}$.
\noindent Sets $S_{\Gamma_{2},3}$ and $S_{\Gamma_{2},4}$ are given below. Vertices of degree $5$ are written in bold. Every element of $S_{\Gamma_{2},4}$ contains the vertex of degree $3$ ($8$), a child of a sink node, and two other vertices of degree $4$. \begin{eqnarray*} S_{\Gamma_{2},3} &=&\{(0,4,19), (0,5,9), (0,9,19), (1,5,{\bf 20}), (1,8,18), (3,{\bf 7},13),\\&& (3,8,13), (4,8,13), (4,8,18), (4,13,18), (6,8,{\bf 15}), (8,13,18)\}\\ S_{\Gamma_{2},4}&= &\{(4,8,13,18)\} \end{eqnarray*}
\noindent Note that the only sets of $3$ parallel elements from $S_{\Gamma_{2},3}$ which contain the vertex of degree $3$ (i.e. $8$) in the first set and for which neither of the other sets contains a neighbour of the vertex of degree $3$ (i.e. $7$, $12$ or $20$) are: $(6,8,15)$, $(0,5,9)$, $(4,13,18)$ and $(6,8,15)$, $(0,9,19)$, $(4,13,18)$. In both cases there is an edge from the first set to one of the other sets (via edge $(6,13)$).
\noindent $\Gamma_{3}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{ccccccccccccccccccccccccccccc} 0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&0&1\\ 0&0&0&0&0&0&0&1&0&0&1&0&0&0&1&0&1&0&0&0&0&0&1&0&0\\ 0&0&0&0&0&0&1&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0\\ 0&0&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&1&0&0&1&1&0&0&0&0&0&1&0&0&0\\ 0&0&0&1&0&0&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0&1&0&0&0\\ 0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1&0&0&0\\ 0&1&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&1\\ 0&0&0&1&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&1\\ 0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&0&1&0&0&1&0&0&0&0\\ 0&1&0&0&0&1&0&0&0&0&0&0&0&0&0&1&0&0&0&0&1&0&0&0&0\\ 1&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&1&0&0&0&1&0&0&0&0\\ 0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&1&0&0&1&0&0&0&0&1\\ 0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0\\ 0&1&0&0&1&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0\\ 0&0&1&0&1&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&1\\ 0&1&0&0&0&0&1&0&0&0&0&1&1&0&0&0&0&0&1&0&0&0&0&0&0\\ 1&0&0&0&0&1&0&0&0&1&0&0&0&0&1&0&0&0&1&0&0&0&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&1&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0\\ 1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&1&0&0\\ 1&0&0&0&0&0&0&1&1&0&0&0&1&0&0&1&0&0&0&0&0&0&0&0&0\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(4,5,4,4,4,5,4,5,5,4,4,5,4,4,5,5,5,5,4,4,5,5,5,5,5)$$ Edge set is: \begin{eqnarray*} (0,11), (0,17), (0,22), (0,24), (1,7), (1,10), (1,14), (1,16), (1,22), (2,6),\\
(2,9), (2,15), (2,22), (3,5), (3,8), (3,13), (3,22), (4,11), (4,14), (4,15),\\
(4,21), (5,10), (5,12), (5,17), (5,21), (6,8), (6,16), (6,21), (7,9), (7,13),\\
(7,21), (7,24), (8,14), (8,20), (8,24), (9,17), (9,20), (10,15), (10,20), \\ (11,13), (11,16), (11,20), (12,16), (12,19), (12,24), (13,19), (14,17), (14,19),\\
(15,18), (15,24), (16,18), (17,18), (18,23), (19,23), (20,23), (21,23), (22,23) \end{eqnarray*}
\noindent The sink node is vertex $1$ which has set of children $\{7,10,14,16,22\}$.
\noindent Sets $S_{\Gamma_{3},3}$ and $S_{\Gamma_{3},4}$ are given below. Vertices of degree $5$ are written in bold. Every element of $S_{\Gamma_{3},4}$ contains a child of the sink node, and three other vertices of degree $4$. \begin{eqnarray*} S_{\Gamma_{3},3} &=&\{ (0,6,10), (0,6,19), (0,10,19), (2,5,{\bf 11}), (3,4,9), (3,9,{\bf 16}),\\&&
(4,9,12), (4,12,{\bf 22}), (6,10,13), (6,10,19), (6,13,{\bf 17})\}\\
S_{\Gamma_{3},4} &=&\{(0,6,10,19)\} \end{eqnarray*}
\noindent Note that \begin{enumerate} \item $S_{\Gamma_{3},3}$ does not contain $3$ non-intersecting sets consisting entirely of vertices of degree $4$. \item The only pairs of elements of $S_{\Gamma_{3},3}$ that are parallel and have no edges between them are: \begin{eqnarray*} && ((0,6,10),(3,4,9)),\\ && ((0,6,10),(4,9,12)), \\ &&((0,6,19),(3,4,9)), \\ &&((0,10,19),(3,4,9)), \\ &&((0,10,19), (3,9,{\bf 16})),\\ &&((3,4,9),(6,10,19)), \\ &&((4,9,12),(6,10,13)),\\ &&((4,12,{\bf 22}),(6,10,13))\; {\rm and} \\ &&((4,12,{\bf 22}),(6,13,17)). \end{eqnarray*} \end{enumerate} If there is a set $X_{1}$, $X_{2}$, $X_{3}$ of mutually parallel elements of $S_{\Gamma_{3},3}$ where there are no edges from $X_{1}$ to $X_{2}$ or $X_{3}$, then there would be two pairs of triples from the list above which intersect in one triple, and where the other two triples are distinct. This is not the case.
\noindent $\Gamma_{4}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{ccccccccccccccccccccccccccccc} 0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&0&0\\ 0&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0&1&0&0\\ 0&0&0&0&0&1&0&0&0&0&1&0&0&1&0&0&0&0&0&0&0&0&1&0&1\\ 0&0&0&0&1&0&0&0&0&1&0&0&1&0&0&1&0&0&0&0&0&0&1&0&0\\ 0&0&0&1&0&0&0&0&1&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0\\ 0&0&1&0&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&1&0&0&0\\ 0&1&0&0&0&0&0&0&0&0&1&0&0&0&0&1&0&0&0&0&0&1&0&0&0\\ 1&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&1\\ 0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1\\ 0&0&0&1&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0\\ 0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&0&0&1&0&0&0&0\\ 1&0&0&0&0&0&0&0&0&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0&0\\ 0&0&0&1&0&0&0&1&0&0&1&0&0&0&0&0&1&0&0&1&0&0&0&0&0\\ 0&0&1&0&1&0&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0\\ 0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&1&0&0&0&0&1\\ 0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&0&1\\ 0&1&0&0&0&1&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0\\ 1&0&0&0&1&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&1&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0\\ 1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&1&0&0\\ 0&0&1&0&0&0&0&1&1&0&0&0&0&0&1&1&0&0&0&0&0&0&0&0&0\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(4,4,5,5,5,4,4,4,4,4,5,4,5,4,5,5,5,5,4,4,5,5,5,5,5)$$ Edge set is: \begin{eqnarray*} (0,7), (0,11), (0,17), (0,22), (1,6), (1,14), (1,16), (1,22), (2,5), (2,10),\\
(2,13), (2,22), (2,24), (3,4), (3,9), (3,12), (3,15), (3,22), (4,8), (4,13),\\
(4,17), (4,21), (5,9), (5,16), (5,21), (6,10), (6,15), (6,21), (7,12), (7,21),\\
(7,24), (8,16), (8,20), (8,24), (9,14), (9,20), (10,12), (10,17), (10,20), \\ (11,13), (11,15), (11,20), (12,16), (12,19), (13,19), (14,17), (14,19), (14,24), \\(15,18), (15,24), (16,18), (17,18), (18,23), (19,23), (20,23), (21,23), (22,23) \end{eqnarray*}
\noindent The sink nodes are vertices $3$ and $10$ which have sets of children $\{4,9,12,15,22\}$ and $\{2,6,12,17,20\}$ respectively.
\noindent Sets $S_{\Gamma_{4},3}$ and $S_{\Gamma_{4},4}$ are given below. Vertices of degree $5$ are written in bold. Every element of $S_{\Gamma_{4},4}$ contains a child of a sink node, and three other vertices of degree $4$. \begin{eqnarray*} S_{\Gamma_{4},3} &=&\{ (0,5,19), (0,6,8), (0,6,9), (0,6,19), (0,8,19), (1,7,13),\\&& (1,7,{\bf 20}), (5,{\bf 15},19), (6,8,19), (6,9,13), (7,9,13), (7,9,18), \\&&(7,13,18), (9,13,18), (11,{\bf 14},{\bf 21})\}\\
S_{\Gamma_{4},4} &=&\{(0,6,8,19), (7,9,13,18)\} \end{eqnarray*}
Note that \begin{enumerate} \item $S_{\Gamma_{4},3}$ does not contain $3$ non-intersecting sets consisting entirely of vertices of degree $4$.\item The only pairs of elements of $S_{\Gamma_{4},3}$ that are parallel and have no edges between them are: \newline $((0,6,8),(9,13,18))$, $((1,7,20),(5,15,19))$ and $((6,8,19),(7,9,18))$. \end{enumerate}
If there is a set $X_{1}$, $X_{2}$, $X_{3}$ of mutually parallel elements of $S_{\Gamma_{4},3}$ where there are no edges from $X_{1}$ to $X_{2}$ or $X_{3}$, then there would be two pairs of triples from the list above which intersect in one triple, and where the other two triples are distinct. This is not the case.
\noindent $\Gamma_{5}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{ccccccccccccccccccccccccccccc} 0&0&0&0&0&0&0&1&0&0&0&1&0&0&1&0&0&0&0&0&0&0&1&0&0\\ 0&0&0&0&0&0&1&0&0&0&1&0&0&1&0&0&0&1&0&0&0&0&1&0&0\\ 0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&1&0&1\\ 0&0&0&0&1&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0\\ 0&0&0&1&0&0&0&0&0&0&0&1&0&1&0&1&0&0&0&0&0&1&0&0&0\\ 0&0&1&0&0&0&0&0&1&0&0&0&0&0&1&0&0&1&0&0&0&1&0&0&0\\ 0&1&0&0&0&0&0&0&0&1&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0\\ 1&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&0&0&1\\ 0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&1&0&0&0&1&0&0&0&0\\ 0&0&0&1&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&1\\ 0&1&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1&0&0&0&0\\ 1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1&0&0&1&0&0&0&0\\ 0&0&1&0&0&0&1&0&0&0&0&1&0&0&0&0&1&0&0&1&0&0&0&0&0\\ 0&1&0&0&1&0&0&0&1&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&1\\ 1&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0\\ 0&0&0&0&1&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&0&0\\ 0&0&0&1&0&0&0&1&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0\\ 0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&1&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0\\ 1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&1&0&0\\ 0&0&1&0&0&0&0&1&0&1&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(4,5,4,4,5,5,4,5,4,5,4,5,5,5,5,4,5,4,4,4,5,5,5,5,4)$$ Edge set is: \begin{eqnarray*} (0,7), (0,11), (0,14), (0,22), (1,6), (1,10), (1,13), (1,17), (1,22), (2,5),\\
(2,12), (2,22), (2,24), (3,4), (3,9), (3,16), (3,22), (4,11), (4,13), (4,15), \\ (4,21), (5,8), (5,14), (5,17), (5,21), (6,9), (6,12), (6,21), (7,10), (7,16),\\
(7,21), (7,24), (8,13), (8,16), (8,20), (9,14), (9,20), (9,24), (10,15), \\ (10,20), (11,12), (11,17), (11,20), (12,16), (12,19), (13,19), (13,24), (14,15),\\
(14,19), (15,18), (16,18), (17,18), (18,23), (19,23), (20,23), (21,23), (22,23) \end{eqnarray*}
\noindent The sink node is vertex $21$ which has set of children $\{4,5,6,7,23\}$.
\noindent Sets $S_{\Gamma_{5},3}$ and $S_{\Gamma_{5},4}$ are given below. Vertices of degree $5$ are written in bold. \begin{eqnarray*} S_{\Gamma_{5},3} &=&\{(0,6,8), (0,6,18), (0,{\bf 13},18), ({\bf 1},{\bf 14},{\bf 16}), (3,{\bf 5},10),\\&& (3,10,19), (3,17,19), (6,8,15), ({\bf 7},17,19), (8,15,{\bf 22})\}\\
S_{\Gamma_{5},4} &=&\{\} \end{eqnarray*}
Note that \begin{enumerate} \item $S_{\Gamma_{5},3}$ does not contain $3$ non-intersecting sets consisting entirely of vertices of degree $4$. \item The only pairs of elements of $S_{\Gamma_{5},3}$ that are parallel and have no edges between them are: \begin{eqnarray*} && ((0,6,8),(3,10,19)),\\ &&((0,6,8),(3,17,19)),\\ &&((0,6,18),(3,5,10)),\\ &&((0,6,18),(3,10,19)),\\ &&((0,13,18),(3,5,10)),\\ &&((3,17,19),(6,8,15)),\\ &&((6,8,15),(7,17,19))\; {\rm and}\\ &&((7,17,19),(8,15,22)). \end{eqnarray*} \end{enumerate}
If there is a set $X_{1}$, $X_{2}$, $X_{3}$ of mutually parallel elements of $S_{\Gamma_{5},3}$ where there are no edges from $X_{1}$ to $X_{2}$ or $X_{3}$, then there would be two pairs of triples from the list above which intersect in one triple, and where the other two triples are distinct. This is not the case.
\section*{Appendix G}\label{appendix26} The $2$ extremal graphs on $26$ vertices. In each case there are $61$ edges and $(deg_{4},deg_{5}) = (8,18)$.
\noindent $\Gamma_{0}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{cccccccccccccccccccccccccccc} 0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&0&0&1&\\ 0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&\\ 0&0&0&0&0&1&0&0&0&1&0&0&0&0&1&1&0&0&0&0&0&0&1&0&0&0&\\ 0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&\\ 0&0&0&1&0&0&0&0&0&0&0&1&0&0&1&0&1&0&0&0&0&1&0&0&0&0&\\ 0&0&1&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0&0&0&0&1&0&0&0&1&\\ 0&1&0&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&1&0&\\ 1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&\\ 0&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&\\ 0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&\\ 0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&1&0&0&1&0&0&0&0&0&\\ 1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&\\ 0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1&0&\\ 0&0&0&1&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&1&0&0&0&0&0&0&\\ 0&0&1&0&1&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0&0&\\ 0&0&1&0&0&0&1&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0&0&0&0&0&\\ 0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&1&\\ 1&0&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&1&0&0&\\ 0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&1&0&0&\\ 0&0&0&0&0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&1&0&0&\\ 0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&\\ 1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&1&0&0&0&\\ 0&0&0&1&0&0&1&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&\\ 1&0&0&0&0&1&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(5,4,5,4,5,5,5,4,4,5,5,5,4,5,5,5,5,5,4,4,5,5,5,5,5,4)$$ Edge set is: \begin{eqnarray*} (0,7), (0,11), (0,17), (0,22), (0,25), (1,6), (1,10), (1,16), (1,22), (2,5), (2,9),\\ (2,14), (2,15), (2,22), (3,4), (3,13), (3,22), (3,24), (4,11), (4,14), (4,16), (4,21), \\ (5,10), (5,12), (5,21), (5,25), (6,8), (6,15), (6,21), (6,24), (7,9), (7,13), (7,21), \\ (8,14), (8,20), (8,25), (9,16), (9,20), (9,24), (10,13), (10,17), (10,20), (11,12),\\
(11,15), (11,20), (12,19), (12,24), (13,15), (13,19), (14,17), (14,19),(15,18),\\
(16,18), (16,25), (17,18), (17,24), (18,23), (19,23), (20,23), (21,23), (22,23) \end{eqnarray*}
\noindent The sink node is vertex $2$ which has set of children $\{5,9,14,15,22\}$.
\noindent Sets $S_{\Gamma_{0},3}$ and $S_{\Gamma_{0},4}$ are given below. Vertices of degree $5$ are written in bold. Note that no element of $S_{\Gamma_{0},4}$ contains a child of the sink node. \begin{eqnarray*} S_{\Gamma_{0},3} &=&\{({\bf 0}, {\bf 6},19), (1,7,12), (1,7, {\bf 14}), (3, {\bf 5},18), (3,8,18), (7,8,12), (7,8,18),\\ &&\; (7,12,18), (8,12,18), (8,12, {\bf 22})\}\\ S_{\Gamma_{0},4}&= &\{(7,8,12,18)\} \end{eqnarray*}
The pairs of elements from $S_{\Gamma_{0},3}$ that are parallel and have no edges between them are: $(({\bf 0}, {\bf 6},19), (3, {\bf 5},18))$, $( (1,7,12), (3, 8,18))$, $((1,7, {\bf 14}), (3, {\bf 5},18))$ and $((1,7, {\bf 14}), ( {\bf 23}, {\bf 24},25))$. Only one of these pairs consists entirely of vertices of degree $4$, namely $( (1,7,12), (3, 8,18))$.
\noindent $\Gamma_{1}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{cccccccccccccccccccccccccccc} 0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&0&0&0\\ 0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0\\ 0&0&0&0&0&1&0&0&0&1&0&0&0&0&1&1&0&0&0&0&0&0&1&0&0&0\\ 0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1&1\\ 0&0&0&1&0&0&0&0&0&0&0&1&0&0&1&0&1&0&0&0&0&1&0&0&0&0\\ 0&0&1&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0&0&0&0&1&0&0&0&1\\ 0&1&0&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&1&0\\ 1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0\\ 0&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1\\ 0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0\\ 0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&1&0&0&1&0&0&0&0&0\\ 1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0\\ 0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1&0\\ 0&0&0&1&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&1&0&0&0&0&0&0\\ 0&0&1&0&1&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0&0\\ 0&0&1&0&0&0&1&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0&0&0&0&0\\ 0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0\\ 1&0&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&1&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&1&0&0\\ 0&0&0&0&0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&1&0&0\\ 0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0\\ 1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&1&0&0&0\\ 0&0&0&1&0&0&1&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0\\ 0&0&0&1&0&1&0&0&1&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0 \end{array} $ \end{tiny}
\noindent and degree sequence: $$(4,4,5,5,5,5,5,4,4,5,5,5,4,5,5,5,4,5,5,4,5,5,5,5,5,4)$$
Edge set is: \begin{eqnarray*} (0,7), (0,11), (0,17), (0,22), (1,6), (1,10), (1,16), (1,22), (2,5), \\ (2,9), (2,14), (2,15), (2,22), (3,4), (3,13), (3,22), (3,24), (3,25),\\
(4,11), (4,14), (4,16), (4,21), (5,10), (5,12), (5,21), (5,25), (6,8),\\
(6,15), (6,21), (6,24), (7,9), (7,13), (7,21),(8,14), (8,20), (8,25), \\ (9,16), (9,20), (9,24), (10,13), (10,17), (10,20), (11,12), (11,15), (11,20),\\
(12,19), (12,24), (13,15), (13,19), (14,17), (14,19), (15,18), (16,18),\\
(17,18), (17,24), (18,23), (18,25), (19,23), (20,23), (21,23), (22,23) \end{eqnarray*}
\noindent Sets $S_{\Gamma_{1},3}$ and $S_{\Gamma_{1},4}$ are given below. Vertices of degree $5$ are written in bold. \begin{eqnarray*} S_{\Gamma_{1},3} &=&\{(0,{\bf 5},16), (0,{\bf 6},19), (0,8,16), (0,16,19), (0,19,25), (1,7,12), \\ &&\; (1,7,{\bf 14}),(1,7,25), (1,{\bf 11},25), (1,19,25), (7,8,12), (7,12,{\bf 18}),\\&&\; (8,12,16), (8,12,{\bf 22}), (8,13,16), ({\bf 9},19,25) \}\\ S_{\Gamma_{1},4}&= &\{\} \end{eqnarray*}
\noindent Note that $S_{\Gamma_{1},3}$ has no pair of distinct sets that consist of vertices of degree $4$ and have no edges between them.
\section*{Appendix H}\label{appendix27} The $1$ extremal graph on $27$ vertices. There are $65$ edges and $(deg_{4},deg_{5}) = (5,22)$.
\noindent $\Gamma_{0}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{ccccccccccccccccccccccccccccc} 0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&0&0&1&0\\ 0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0\\ 0&0&0&0&0&1&0&0&0&1&0&0&0&0&1&1&0&0&0&0&0&0&1&0&0&0&0\\ 0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&0\\ 0&0&0&1&0&0&0&0&0&0&0&1&0&0&1&0&1&0&0&0&0&1&0&0&0&0&0\\ 0&0&1&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0&0&0&0&1&0&0&0&1&0\\ 0&1&0&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&1&0&0\\ 1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1\\ 0&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&1\\ 0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0\\ 0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&1&0&0&1&0&0&0&0&0&0\\ 1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0\\ 0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1&0&1\\ 0&0&0&1&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0\\ 0&0&1&0&1&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0&0&0\\ 0&0&1&0&0&0&1&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0\\ 0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&1&0\\ 1&0&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&1&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&1&0&0&0\\ 0&0&0&0&0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0\\ 0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0\\ 1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&1&0&0&0&0\\ 0&0&0&1&0&0&1&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&0\\ 1&0&0&0&0&1&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0\\ 0&0&0&0&0&0&0&1&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&0&0 \end{array} $ \end{tiny}
\noindent and degree sequence: $$(5,4,5,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,4,5,5,5,5,5,4,4)$$
Edge set is: \begin{eqnarray*} (0,7), (0,11), (0,17), (0,22), (0,25), (1,6), (1,10), (1,16), (1,22), (2,5),\\ (2,9), (2,14), (2,15), (2,22), (3,4), (3,13), (3,22), (3,24), (4,11), (4,14), \\ (4,16), (4,21), (5,10), (5,12), (5,21), (5,25), (6,8), (6,15), (6,21), (6,24), \\ (7,9), (7,13), (7,21), (7,26), (8,14),(8,20), (8,25),
(8,26), (9,16), (9,20),\\ (9,24), (10,13), (10,17), (10,20), (11,12), (11,15), (11,20), (12,19), (12,24), \\(12,26), (13,15), (13,19), (14,17), (14,19), (15,18),
(16,18), (16,25), \\(17,18), (17,24), (18,23), (18,26), (19,23), (20,23), (21,23), (22,23) \end{eqnarray*}
\noindent Sets $S_{\Gamma_{0},3}$ and $S_{\Gamma_{0},4}$ are given below. Vertices of degree $5$ are written in bold. \begin{eqnarray*} S_{\Gamma_{0},3} &=&\{({\bf 0},{\bf 6},19), (1,{\bf 7},{\bf 14}), (3,{\bf 5},{\bf 18}), ({\bf 4},{\bf 10},26), ({\bf 23},{\bf 24},25)\}\\ S_{\Gamma_{0},4}&= &\{\} \end{eqnarray*}
All of the vertices of degree $5$ in $S_{\Gamma_{0},3}$ have $4$ neighbours of degree $5$ and one of degree $4$.
\section*{Appendix I}\label{appendix28} The $4$ extremal graphs on $28$ vertices. In each case there are $68$ edges. Graph $\Gamma_{0}$ has $(deg_{3},deg_{4},deg_{5},deg_{6}) =(1,4,21,2)$, $\Gamma_{1}$ has $(deg_{4},deg_{5})=(4,24)$, $\Gamma_{2}$ has $(deg_{4},deg_{5},deg_{6}) = (6,20,2)$ and $\Gamma_{3}$ has $(deg_{4},deg_{5},deg_{6}) = (7,18,3)$.
\noindent $\Gamma_{0}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{cccccccccccccccccccccccccccc} 0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&0&0&1&0&1\\ 0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0\\ 0&0&0&0&0&1&0&0&0&1&0&0&0&0&1&1&0&0&0&0&0&0&1&0&0&0&0&0\\ 0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&0\\ 0&0&0&1&0&0&0&0&0&0&0&1&0&0&1&0&1&0&0&0&0&1&0&0&0&0&0&0\\ 0&0&1&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0\\ 0&1&0&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&1&0&0&1\\ 1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1&0\\ 0&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&1&0\\ 0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0\\ 0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&1&0&0&1&0&0&0&0&0&0&0\\ 1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0\\ 0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1&0&1&0\\ 0&0&0&1&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&0\\ 0&0&1&0&1&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0&0&0&0\\ 0&0&1&0&0&0&1&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&0\\ 0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0\\ 1&0&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&1&0&0&1&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&1&0&0&0&1\\ 0&0&0&0&0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0\\ 0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0\\ 1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&1&0&0&0&0&0\\ 0&0&0&1&0&0&1&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0\\ 1&0&0&0&0&1&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0\\ 0&0&0&0&0&0&0&1&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0\\ 1&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(6,4,5,4,5,5,6,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,4,4,3)$$ Edge set is: \begin{eqnarray*} (0,7), (0,11), (0,17), (0,22), (0,25), (0,27), (1,6), (1,10), (1,16), (1,22),(2,5), \\ (2,9), (2,14), (2,15), (2,22), (3,4), (3,13), (3,22), (3,24), (4,11),
(4,14), (4,16),\\ (4,21), (5,10), (5,12), (5,21), (5,25), (6,8), (6,15), (6,21), (6,24),(6,27),\\ (7,9), (7,13), (7,21), (7,26), (8,14), (8,20), (8,25), (8,26),
(9,16),(9,20), \\ (9,24), (10,13), (10,17), (10,20), (11,12), (11,15), (11,20), (12,19),
(12,24),\\ (12,26), (13,15), (13,19), (14,17), (14,19), (15,18), (16,18), (16,25), (17,18),\\
(17,24), (18,23),(18,26), (19,23), (19,27), (20,23), (21,23), (22,23), \end{eqnarray*}
\noindent
All vertices of degree $5$ are the root of an embedded $S_{5,[4,4,4,4,4]}$ star.
\noindent Sets $S_{\Gamma_{0},3}$ and $S_{\Gamma_{0},4}$ are given below. Vertices of degree $5$ are written in bold. Note that both elements of $S_{\Gamma_{0},4}$ contain the vertex of degree $3$ (i.e. $27$), which is adjacent to both vertices of degree $6$ (i.e. $0$ and $6$). \begin{eqnarray*} S_{\Gamma_{0},3} &=&\{(1,{\bf 7},{\bf 14}),({\bf 2},26,27),(3,{\bf 5},{\bf 18}),(3,{\bf 5},27),(3,{\bf 18},27), \\&&(3,{\bf 20},27),(3,26,27),({\bf 4},{\bf 10},26),({\bf 4},{\bf 10},27),({\bf 4},26,27),\\&& ({\bf 5},{\bf 18},27),({\bf 10},26,27),({\bf 23},{\bf 24},25)\}\\ S_{\Gamma_{0},4}&= &\{(3,{\bf 5},{\bf 18},27), ({\bf 4},{\bf 10},26,27)\} \end{eqnarray*}
\noindent $\Gamma_{1}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{cccccccccccccccccccccccccccc} 0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&0&0&1&0&0\\ 0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&1&0\\ 0&0&0&0&0&1&0&0&0&1&0&0&0&0&1&1&0&0&0&0&0&0&1&0&0&0&0&0\\ 0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&1\\ 0&0&0&1&0&0&0&0&0&0&0&1&0&0&1&0&1&0&0&0&0&1&0&0&0&0&0&0\\ 0&0&1&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0\\ 0&1&0&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&1&0&0&0\\ 1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1&0\\ 0&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&0&1\\ 0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0\\ 0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&1&0&0&1&0&0&0&0&0&0&0\\ 1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0\\ 0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1&0&1&0\\ 0&0&0&1&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&0\\ 0&0&1&0&1&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0&0&0&0\\ 0&0&1&0&0&0&1&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&0\\ 0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0\\ 1&0&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&1&0&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&1&0&0&0&0\\ 0&0&0&0&0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0\\ 0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0\\ 1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&1&0&0&0&0&0\\ 0&0&0&1&0&0&1&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0\\ 1&0&0&0&0&1&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0\\ 0&1&0&0&0&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,4,5,5,5,5,5,4,4,4)$$
Edge set is: \begin{eqnarray*} (0,7), (0,11), (0,17), (0,22), (0,25), (1,6), (1,10), (1,16), (1,22), (1,26), (2,5), \\ (2,9), (2,14), (2,15), (2,22), (3,4), (3,13), (3,22), (3,24), (3,27), (4,11), (4,14),\\ (4,16), (4,21), (5,10), (5,12), (5,21), (5,25), (6,8), (6,15),
(6,21), \\(6,24), (7,9), (7,13), (7,21), (7,26), (8,14), (8,20), (8,25), (8,27),
(9,16), \\(9,20), (9,24), (10,13), (10,17), (10,20), (11,12), (11,15), (11,20), (12,19),\\ (12,24), (12,26), (13,15), (13,19), (14,17), (14,19), (15,18), (16,18), (16,25), \\ (17,18), (17,24), (18,23), (18,27), (19,23), (20,23), (21,23), (22,23), (26,27), \end{eqnarray*}
\noindent Sets $S_{\Gamma_{1},3}$ and $S_{\Gamma_{1},4}$ are given below. Vertices of degree $5$ are written in bold. \begin{eqnarray*} S_{\Gamma_{1},3} &=&\{({\bf 0},{\bf 6},19), ({\bf 0},19,27), ({\bf 8},{\bf 12},{\bf 22}), ({\bf 9},19,27),\\&& ({\bf 15},25,26), ({\bf 23},{\bf 24},25), ({\bf 23},25,26)\}\\ S_{\Gamma_{1},4}&= &\{\} \end{eqnarray*}
\noindent $\Gamma_{2}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{cccccccccccccccccccccccccccc} 0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&0&0&1&0&0\\ 0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&1&0\\ 0&0&0&0&0&1&0&0&0&1&0&0&0&0&1&1&0&0&0&0&0&0&1&0&0&0&0&0\\ 0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&1\\ 0&0&0&1&0&0&0&0&0&0&0&1&0&0&1&0&1&0&0&0&0&1&0&0&0&0&0&0\\ 0&0&1&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&1\\ 0&1&0&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&1&0&0&0\\ 1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1&0\\ 0&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&0&0\\ 0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0\\ 0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&1&0&0&1&0&0&0&0&0&0&0\\ 1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0\\ 0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1&0&0&0\\ 0&0&0&1&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&0\\ 0&0&1&0&1&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0&0&1&0\\ 0&0&1&0&0&0&1&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&0\\ 0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0\\ 1&0&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&1&0&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&1&0&0&0&0\\ 0&0&0&0&0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0\\ 0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0\\ 1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&1&0&0&0&0&0\\ 0&0&0&1&0&0&1&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0\\ 1&0&0&0&0&1&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0\\ 0&1&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&1&0&1&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(5,5,5,5,5,6,5,5,4,5,5,5,4,5,6,5,5,5,5,4,5,5,5,5,5,4,4,4)$$ Edge set is: \begin{eqnarray*} (0,7), (0,11), (0,17), (0,22), (0,25), (1,6), (1,10), (1,16), (1,22), (1,26), (2,5),\\
(2,9), (2,14), (2,15), (2,22), (3,4), (3,13), (3,22), (3,24), (3,27), (4,11), (4,14),\\
(4,16), (4,21), (5,10), (5,12), (5,21), (5,25), (5,27), (6,8), (6,15), (6,21), \\ (6,24), (7,9), (7,13), (7,21), (7,26), (8,14), (8,20), (8,25), (9,16), (9,20),\\
(9,24), (10,13), (10,17), (10,20), (11,12), (11,15), (11,20), (12,19), (12,24), \\ (13,15), (13,19), (14,17), (14,19), (14,26), (15,18), (16,18), (16,25), (17,18),\\
(17,24), (18,23), (18,27), (19,23), (20,23), (21,23), (22,23), (26,27) \end{eqnarray*}
\noindent Vertices of degree $6$ are $5$ and $14$. Sets $S_{\Gamma_{2},3}$ and $S_{\Gamma_{2},4}$ are given below. Vertices of degree $5$ are written in bold. Elements of both sets contain only vertices of degree $4$ and $5$. Note that for every vertex $p$ of degree $6$ ($5$ and $14$) all $3$ elements of $S_{\Gamma_{2},4}$ contain precisely one vertex (of degree $4$) adjacent to $p$. (If $p=5$, the vertices adjacent to $p$ in the elements of $S_{\Gamma_{2},4}$ are $27$, $12$ and $25$ respectively, and if $p=14$, the vertices adjacent to $p$ in the elements of $S_{\Gamma_{2},4}$ are $19$, $8$ and $26$ respectively.) \begin{eqnarray*} S_{\Gamma_{2},3} &=&\{({\bf 0},{\bf 6},19),({\bf 0},{\bf 6},27),({\bf 0},19,27),({\bf 6},19,27),({\bf 7},8,12),\\&& ({\bf 7},8,{\bf 18}),({\bf 7},12,{\bf 18}),(8,12,{\bf 18}),(8,12,{\bf 22}),({\bf 9},19,27),\\&& ({\bf 15},25,26),({\bf 23},{\bf 24},25),({\bf 23},{\bf 24},26),({\bf 23},25,26),({\bf 24},25,26)\}\\ S_{\Gamma_{2},4}&= &\{({\bf 0},{\bf 6},19,27), ({\bf 7},8,12,{\bf 18}), ({\bf 23},{\bf 24},25,26)\} \end{eqnarray*}
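\noindent The adjacency claim in the preceding paragraph can be verified mechanically. The following minimal Python sketch (ours; the neighbourhoods of $5$ and $14$ are transcribed from the edge list above) checks that every element of $S_{\Gamma_{2},4}$ meets each of these neighbourhoods in exactly one vertex:
\begin{verbatim}
# Neighbourhoods of the two degree-6 vertices of Gamma_2, read off the
# edge list above, and the three elements of S_{Gamma_2,4}.
neighbours = {
    5:  {2, 10, 12, 21, 25, 27},
    14: {2, 4, 8, 17, 19, 26},
}
S4 = [(0, 6, 19, 27), (7, 8, 12, 18), (23, 24, 25, 26)]

for p, nbrs in neighbours.items():
    for X in S4:
        common = nbrs.intersection(X)
        assert len(common) == 1        # exactly one neighbour of p in X
        print(p, X, sorted(common))
\end{verbatim}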
\noindent $\Gamma_{3}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{cccccccccccccccccccccccccccc} 0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&0&0&1&1&0\\ 0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0\\ 0&0&0&0&0&1&0&0&0&1&0&0&0&0&1&1&0&0&0&0&0&0&1&0&0&0&0&0\\ 0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&1\\ 0&0&0&1&0&0&0&0&0&0&0&1&0&0&1&0&1&0&0&0&0&1&0&0&0&0&0&0\\ 0&0&1&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&1\\ 0&1&0&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&1&0&1&0\\ 1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&0\\ 0&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&0&0\\ 0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0\\ 0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&1&0&0&1&0&0&0&0&0&0&0\\ 1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0\\ 0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1&0&0&0\\ 0&0&0&1&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&0\\ 0&0&1&0&1&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0&0&0&0\\ 0&0&1&0&0&0&1&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&0\\ 0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0\\ 1&0&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&1&0&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&1&0&0&1&0\\ 0&0&0&0&0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0\\ 0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0\\ 1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&1&0&0&0&0&0\\ 0&0&0&1&0&0&1&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0\\ 1&0&0&0&0&1&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0\\ 1&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&1\\ 0&0&0&1&0&1&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(6,4,5,5,5,6,6,4,4,5,5,5,4,5,5,5,5,5,5,5,5,5,5,5,5,4,4,4)$$ Edge set is: \begin{eqnarray*} (0,7), (0,11), (0,17), (0,22), (0,25), (0,26), (1,6), (1,10), (1,16), (1,22), (2,5),\\
(2,9), (2,14), (2,15), (2,22), (3,4), (3,13), (3,22), (3,24), (3,27), (4,11), (4,14),\\
(4,16), (4,21), (5,10), (5,12), (5,21), (5,25), (5,27), (6,8), (6,15), (6,21),\\
(6,24), (6,26), (7,9), (7,13), (7,21), (8,14), (8,20), (8,25), (9,16), (9,20),\\
(9,24), (10,13), (10,17), (10,20), (11,12), (11,15), (11,20), (12,19), (12,24),\\
(13,15), (13,19), (14,17), (14,19), (15,18), (16,18), (16,25), (17,18), (17,24),\\ (18,23), (18,27), (19,23), (19,26), (20,23), (21,23), (22,23), (26,27), \end{eqnarray*}
\noindent Vertices of degree $6$ are $0$, $5$ and $6$. Sets $S_{\Gamma_{3},3}$ and $S_{\Gamma_{3},4}$ are given below. Vertices of degree $5$ are written in bold. Elements of both sets contain only vertices of degree $4$ and $5$. Note that for every vertex $p$ of degree $6$ ($0$, $5$ and $6$) both elements of $S_{\Gamma_{3},4}$ contain precisely one vertex (of degree $4$) adjacent to $p$. (If $p=0$, the vertices adjacent to $p$ in the two elements of $S_{\Gamma_{3},4}$ are $7$ in both cases; if $p=5$, they are $27$ and $12$ respectively; and if $p=6$, they are $1$ and $8$ respectively.)
\begin{eqnarray*} S_{\Gamma_{3},3} &=&\{ (1,7,12),(1,7,{\bf 14}),(1,7,27),(1,{\bf 11},27),(1,{\bf 14},27), ({\bf 4},{\bf 10},26),(7,8,12),\\ &&(7,8,{\bf 18}),(7,8,27),(7,12,{\bf 18}), (7,{\bf 14},27),(8,12,{\bf 18}),(8,12,{\bf 22}),({\bf 23},{\bf 24},25) \}\\ S_{\Gamma_{3},4}&= &\{(1,7,{\bf 14},27), (7,8,12,{\bf 18})\} \end{eqnarray*}
\section*{Appendix J}\label{appendix29} The $1$ extremal graph on $29$ vertices. There are $72$ edges and $(deg_{4},deg_{5},deg_{6}) =(5,20,4)$.
\noindent $\Gamma_{0}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{ccccccccccccccccccccccccccccc} 0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&0&0&1&0&1&0\\ 0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0\\ 0&0&0&0&0&1&0&0&0&1&0&0&0&0&1&1&0&0&0&0&0&0&1&0&0&0&0&0&0\\ 0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&0&1\\ 0&0&0&1&0&0&0&0&0&0&0&1&0&0&1&0&1&0&0&0&0&1&0&0&0&0&0&0&0\\ 0&0&1&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&1\\ 0&1&0&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&1&0&0&1&0\\ 1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1&0&0\\ 0&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&1&0&0\\ 0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0\\ 0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&1&0&0&1&0&0&0&0&0&0&0&0\\ 1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0\\ 0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1&0&1&0&0\\ 0&0&0&1&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&0&0\\ 0&0&1&0&1&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0&0&0&0&0\\ 0&0&1&0&0&0&1&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0\\ 0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0\\ 1&0&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&1&0&0&1&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&1&0&0&0&1&0\\ 0&0&0&0&0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0\\ 0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0\\ 1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&1&0&0&0&0&0&0\\ 0&0&0&1&0&0&1&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0\\ 1&0&0&0&0&1&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0&0\\ 0&0&0&0&0&0&0&1&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0\\ 1&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&1\\ 0&0&0&1&0&1&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(6,4,5,5,5,6,6,5,5,5,5,5,5,5,5,5,5,5,6,5,5,5,5,5,5,4,4,4,4)$$ Edge set is: \begin{eqnarray*} (0,7), (0,11), (0,17), (0,22), (0,25), (0,27), (1,6), (1,10), (1,16), (1,22),\\
(2,5), (2,9), (2,14), (2,15), (2,22), (3,4), (3,13), (3,22), (3,24), (3,28),\\(4,11), (4,14), (4,16), (4,21), (5,10), (5,12), (5,21), (5,25), (5,28),(6,8),\\(6,15), (6,21), (6,24), (6,27), (7,9), (7,13), (7,21), (7,26), (8,14), \\ (8,20), (8,25), (8,26), (9,16), (9,20), (9,24), (10,13), (10,17), (10,20),\\ (11,12), (11,15), (11,20), (12,19), (12,24), (12,26), (13,15), (13,19), \\(14,17), (14,19),(15,18), (16,18), (16,25), (17,18), (17,24), (18,23), \\(18,26), (18,28), (19,23), (19,27), (20,23), (21,23), (22,23), (27,28) \end{eqnarray*}
\noindent Sets $S_{\Gamma_{0},3}$ and $S_{\Gamma_{0},4}$ are given below. Vertices of degree $5$ are written in bold; all other vertices have degree $4$.
\begin{eqnarray*} S_{\Gamma_{0},3} &=&\{(1,{\bf 7},{\bf 14}),(1,{\bf 7},28),(1,{\bf 11},28),(1,{\bf 14},28),\\&&({\bf 2},26,27), ({\bf 4},{\bf 10},26),({\bf 4},{\bf 10},27),({\bf 4},26,27),\\&&({\bf 7},{\bf 14},28),({\bf 10},26,27), ({\bf 23},{\bf 24},25)\}\\ S_{\Gamma_{0},4}&= &\{(1,{\bf 7},{\bf 14},28), ({\bf 4},{\bf 10},26,27)\} \end{eqnarray*}
\section*{Appendix K}\label{appendix30} The $1$ extremal graph on $30$ vertices. There are $76$ edges and $(deg_{4},deg_{5},deg_{6}) =(4,20,6)$.
\noindent $\Gamma_{0}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{ccccc ccccc ccccc ccccc ccccc ccccc} 0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&0&0&1&0&1&0&0\\ 0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&1\\ 0&0&0&0&0&1&0&0&0&1&0&0&0&0&1&1&0&0&0&0&0&0&1&0&0&0&0&0&0&0\\ 0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&0&1&0\\ 0&0&0&1&0&0&0&0&0&0&0&1&0&0&1&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0\\ 0&0&1&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&1&0\\ 0&1&0&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&1&0&0&1&0&0\\ 1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1&0&0&1\\ 0&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&1&0&0&0\\ 0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&0\\ 0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&1&0&0&1&0&0&0&0&0&0&0&0&0\\ 1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&0\\ 0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1&0&1&0&0&0\\ 0&0&0&1&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&0&0&0\\ 0&0&1&0&1&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0&0&0&0&0&1\\ 0&0&1&0&0&0&1&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0\\ 0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0\\ 1&0&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&1&0&0&1&0&1&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0\\ 0&0&0&0&0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0\\ 0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0\\ 1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&1&0&0&0&0&0&0&0\\ 0&0&0&1&0&0&1&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0&0\\ 1&0&0&0&0&1&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0&0&0\\ 0&0&0&0&0&0&0&1&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0\\ 1&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0\\ 0&0&0&1&0&1&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1\\ 0&1&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0\\
\end{array} $ \end{tiny}
\noindent and degree sequence: $$(6,5,5,5,5,6,6,6,5,5,5,5,5,5,6,5,5,5,6,5,5,5,5,5,5,4,4,4,5,4)$$ Edge set is: \begin{eqnarray*} (0,7), (0,11), (0,17), (0,22), (0,25), (0,27), (1,6), (1,10),\\
(1,16), (1,22), (1,29), (2,5), (2,9), (2,14), (2,15), (2,22),\\ (3,4), (3,13), (3,22), (3,24), (3,28), (4,11), (4,14), (4,16), \\ (4,21), (5,10), (5,12), (5,21), (5,25), (5,28), (6,8), (6,15), \\ (6,21), (6,24), (6,27), (7,9), (7,13), (7,21), (7,26), (7,29),\\ (8,14), (8,20), (8,25), (8,26), (9,16), (9,20), (9,24), (10,13), \\ (10,17), (10,20), (11,12), (11,15), (11,20), (12,19), (12,24), \\ (12,26), (13,15), (13,19), (14,17), (14,19), (14,29), (15,18), \\ (16,18), (16,25), (17,18), (17,24), (18,23), (18,26), (18,28), \\ (19,23), (19,27), (20,23), (21,23), (22,23), (27,28), (28,29) \end{eqnarray*}
\noindent Sets $S_{\Gamma_{0},3}$ and $S_{\Gamma_{0},4}$ are given below. Vertices of degree $5$ are written in bold. Elements of $S_{\Gamma_{0},4}$ contain two vertices of degree $4$ and two of degree $5$. \begin{eqnarray*} S_{\Gamma_{0},3} &=&\{({\bf 2},26,27),({\bf 4},{\bf 10},26),({\bf 4},{\bf 10},27),({\bf 4},26,27),({\bf 10},26,27),\\&& ({\bf 15},25,29),({\bf 23},{\bf 24},25),({\bf 23},{\bf 24},29),({\bf 23},25,29),({\bf 24},25,29)\}\\ S_{\Gamma_{0},4}&= &\{({\bf 4},{\bf 10},26,27), ({\bf 23},{\bf 24},25,29)\} \end{eqnarray*}
Let $s_{1}=({\bf 4},{\bf 10},26,27)$ and $s_{2}=({\bf 23},{\bf 24},25,29)$ (the two elements of $S_{\Gamma_{0},4}$ respectively). Consider the embedded $S_{6,[4,4,4,4,4,3]}$ stars with roots $6$ and $14$ respectively. The first star has children $27$ and $24$ which are from $s_{1}$ and $s_{2}$ respectively, with degrees $4$ and $5$, and the second star has children $4$ and $29$ which are from $s_{1}$ and $s_{2}$ respectively, with degrees $5$ and $4$ respectively.
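\noindent The membership and degree claims in the preceding paragraph are likewise easy to check. The sketch below is ours; the neighbourhoods of the roots $6$ and $14$ and the degrees of the relevant children are transcribed from the edge list and degree sequence above:
\begin{verbatim}
# Children of the two star roots that lie in s1 and s2, with their degrees.
degree = {4: 5, 24: 5, 27: 4, 29: 4}   # only the degrees needed here
nbrs = {6: {1, 8, 15, 21, 24, 27}, 14: {2, 4, 8, 17, 19, 29}}
s1, s2 = {4, 10, 26, 27}, {23, 24, 25, 29}

for root in (6, 14):
    in_s1, in_s2 = nbrs[root] & s1, nbrs[root] & s2
    print(root, in_s1, in_s2, {v: degree[v] for v in in_s1 | in_s2})
# root 6 : child 27 from s1 (degree 4) and child 24 from s2 (degree 5)
# root 14: child 4  from s1 (degree 5) and child 29 from s2 (degree 4)
\end{verbatim}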
\section*{Appendix L}\label{appendix31} The $2$ extremal graphs on $31$ vertices. In each case there are $80$ edges. Graph $\Gamma_{0}$ has $(deg_{4},deg_{5},deg_{6}) =(3,20,8)$ and $\Gamma_{1}$ has $(deg_{5},deg_{6})=(26,5)$.
\noindent $\Gamma_{0}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{ccccc ccccc ccccc ccccc ccccc ccccc c} 0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&0&0&1&0&1&0&0&0\\ 0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&1&0\\ 0&0&0&0&0&1&0&0&0&1&0&0&0&0&1&1&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0\\ 0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&0&1&0&0\\ 0&0&0&1&0&0&0&0&0&0&0&1&0&0&1&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1\\ 0&0&1&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&1&0&0\\ 0&1&0&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&1&0&0&1&0&0&0\\ 1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1&0&0&1&0\\ 0&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&1&0&0&0&0\\ 0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&0&0\\ 0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&1&0&0&1&0&0&0&0&0&0&0&0&0&1\\ 1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0\\ 0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1&0&1&0&0&0&0\\ 0&0&0&1&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0\\ 0&0&1&0&1&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0&0&0&0&0&1&0\\ 0&0&1&0&0&0&1&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0&0\\ 0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&0\\ 1&0&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&1&0&0&1&0&1&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0\\ 0&0&0&0&0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0\\ 0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0\\ 1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&1&0&0&0&0&0&0&0&0\\ 0&0&0&1&0&0&1&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0&0&0\\ 1&0&0&0&0&1&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0\\ 0&0&0&0&0&0&0&1&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0&1\\ 1&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1\\ 0&0&0&1&0&1&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1&0\\ 0&1&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0\\ 0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&0&0&0\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(6,5,5,5,6,6,6,6,5,5,6,5,5,5,6,5,5,5,6,5,5,5,5,5,5,4,5,5,5,4,4)$$ Edge set is: \begin{eqnarray*} (0,7), (0,11), (0,17), (0,22), (0,25), (0,27), (1,6), (1,10), (1,16),\\ (1,22), (1,29), (2,5), (2,9), (2,14), (2,15), (2,22), (3,4), (3,13),\\ (3,22), (3,24), (3,28), (4,11), (4,14), (4,16), (4,21), (4,30), (5,10),\\ (5,12), (5,21), (5,25), (5,28), (6,8), (6,15), (6,21), (6,24),\\ (6,27),
(7,9), (7,13), (7,21), (7,26), (7,29), (8,14), (8,20),\\ (8,25), (8,26),
(9,16), (9,20), (9,24), (10,13), (10,17), (10,20),\\ (10,30), (11,12),
(11,15), (11,20), (12,19), (12,24), (12,26), (13,15),\\ (13,19), (14,17), (14,19), (14,29), (15,18), (16,18), (16,25),\\ (17,18), (17,24), (18,23), (18,26), (18,28), (19,23), (19,27), \\(20,23), (21,23), (22,23), (26,30), (27,28), (27,30), (28,29) \end{eqnarray*}
\noindent \begin{comment} Don't need this stuff now: Set $S_{\Gamma_{0},3}$ is given below. Vertices of degree $4$ are written in bold. All sets contain at least one vertex of degree $4$.
\begin{eqnarray*} S_{\Gamma_{0},3} &=&(15,{\bf 25},{\bf 29}) (15,{\bf 25},{\bf 30}) (15,{\bf 29},{\bf 30}) (23,24,{\bf 25}) \\&&(23,24,{\bf 29}) (23,24,{\bf 30}) (23,{\bf 25},{\bf 29}) (23,{\bf 25},{\bf 30}) \\&& (23,{\bf 29},{\bf 30}) (24,{\bf 25},{\bf 29}) (24,{\bf 25},{\bf 30}) (24,{\bf 29},{\bf 30}) ({\bf 25},{\bf 29},{\bf 30}) \end{eqnarray*}
\noindent Set $S_{\Gamma_{0},4}$ is given below. Vertices of degree $4$ are written in bold. All sets contain at least one vertex of degree $4$. \begin{eqnarray*} S_{\Gamma_{0},4} &=&\{(15,{\bf 25},{\bf 29},{\bf 30}) (23,24,{\bf 25},{\bf 29}) (23,24,{\bf 25},{\bf 30})\\&& (23,24,{\bf 29},{\bf 30}) (23,{\bf 25},{\bf 29},{\bf 30}) (24,{\bf 25},{\bf 29},{\bf 30}) \} \end{eqnarray*}
\noindent \end{comment}
Set $S_{\Gamma_{0},5}$ is given below. Vertices of degree $5$ are written in bold, all other vertices have degree $4$. \begin{eqnarray*} S_{\Gamma_{0},5} &=&\{({\bf 23},{\bf 24},25,29,30)\} \end{eqnarray*}
\begin{comment} Don't need this now
There are $16$ pairs of sets, $(X,Y)$ where $X\in S_{\Gamma_{0},4}$, $Y\in S_{\Gamma_{0},2}$, where $X$ and $Y$ do not intersect and there is no edge from an element of $X$ to an element of $Y$. These $16$ sets are:
\noindent $
\begin{array}{c|c} $X$ & $Y$ \\ \hline (15,25,29,30) & (9,19) \\ (15,25,29,30) & (12,22) \\ (15,25,29,30) & (17,21) \\ (15,25,29,30) & (23,24) \\ (23,24,25,29) & (2,26)\\ (23,24,25,29) & (2,27)\\ (23,24,25,29) & (2,30) \\ (23,24,25,29) & (15,30) \\ (23,24,25,30) & (1,11)\\ (23,24,25,30) & (11,28) \\ (23,24,25,30) & (11,29) \\ (23,24,25,30) & (15,29) \\ (23,24,29,30) & (8,13) \\ (23,24,29,30) & (13,16) \\ (23,24,29,30) & (13,25) \\ (23,24,29,30) & (15,25) \\ \hline \end{array} $
There are $3$ distinct pairs of sets, $(X,Y)$ where $X,Y\in S_{\Gamma_{0},3}$, where $X$ and $Y$ do not intersect and there is no edge from an element of $X$ to an element of $Y$. These $5$ sets are:
\noindent $
\begin{array}{c|c} $X$ & $Y$ \\ \hline (15,25,30) & (23,24,29)\\ (15,29,30) & (23,24,25)\\ (23,24,30) & (15,25,29) \\ \hline \end{array} $ \end{comment}
\noindent $\Gamma_{1}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{ccccc ccccc ccccc ccccc ccccc ccccc c} 0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0\\ 0&0&0&0&0&0&0&1&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&0&0&1&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1&0&0&0&0&0&0&1&0\\ 0&0&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0\\ 0&0&0&0&0&0&0&0&0&0&1&0&0&1&0&0&1&0&0&0&0&0&1&0&0&0&0&0&1&0&0\\ 0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&1&0&0&0&0&0&0&0&1&0&0\\ 0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&1&0&0&0&0&0&0&0&0&0&1&0&0\\ 0&1&0&0&0&0&0&0&0&1&0&0&1&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&1&0&0\\ 0&0&0&1&0&0&1&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0&0&1&0&0&0&1&0&0&0\\ 0&0&1&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&1&1&0&0&0&0&0&0&1&0&0&0\\ 0&1&0&0&1&0&0&0&0&0&0&0&0&0&0&1&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0\\ 1&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&1&0&0&0&0&1&0&0&0\\ 0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&1&0&0&0&0&1&0&0&0&0&1&0&0&0&0\\ 0&0&0&1&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0\\ 0&1&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&1&0&0&1&0&0&0&1&0&0&0&0\\ 1&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0\\ 0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1&0&0&0&0&0\\ 0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0\\ 0&1&0&0&0&0&1&0&0&0&0&1&0&1&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0\\ 1&0&0&0&0&0&0&0&0&1&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0\\ 0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&1&0&0&0&0&0&0&0&1&0&0&0&0&0&0\\ 0&0&0&1&0&0&0&0&0&0&1&0&1&0&0&0&0&0&0&1&0&0&0&0&1&0&0&0&0&0&0\\ 0&0&1&0&1&0&0&0&0&0&0&1&0&0&1&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0\\ 1&0&0&0&0&0&0&1&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&0&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1\\ 1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&1&1&0\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(5,5,5,5,5,5,5,5,6,6,6,6,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,6)$$ Edge set is: \begin{eqnarray*} (0,11), (0,15), (0,19), (0,23), (0,29), (1,7), (1,10), (1,14), (1,18), \\ (1,29), (2,6), (2,9), (2,17), (2,22), (2,29), (3,5), (3,8), (3,13),\\
(3,21), (3,29), (4,10), (4,13), (4,16), (4,22), (4,28), (5,11), (5,17),\\
(5,20), (5,28), (6,8), (6,15), (6,18), (6,28), (7,9), (7,12),\\ (7,23),(7,28), (8,14), (8,16), (8,23), (8,27), (9,13), (9,19),\\
(9,20), (9,27), (10,15), (10,17), (10,21), (10,27), (11,12), (11,18),\\
(11,22), (11,27), (12,16), (12,21), (12,26), (13,18), (13,26), (14,19),\\ (14,22), (14,26), (15,20), (15,26), (16,20), (16,25), (17,23), \\ (17,25), (18,25), (19,21), (19,25), (20,24), (21,24), (22,24),\\
(23,24), (24,30), (25,30), (26,30), (27,30), (28,30), (29,30) \end{eqnarray*}
\begin{comment} Don't need this now
\noindent Set $S_{\Gamma_{1},4}$ is given below. All pairs of sets intersect at least twice. \begin{eqnarray*} S_{\Gamma_{1},4} &=&\{(3,7,15,22) (3,7,15,25) (3,7,22,25) \\&&(3,15,22,25) (7,15,22,25) \} \end{eqnarray*} \end{comment}
\noindent Set $S_{\Gamma_{1},5}$ is given below. Vertices of degree $5$ are written in bold. \begin{eqnarray*} S_{\Gamma_{1},5} &=&\{({\bf 3},{\bf 7},{\bf 15},{\bf 22},{\bf 25})\} \end{eqnarray*}
\begin{comment} Don't need this now:
\noindent There are no pairs of sets, $(X,Y)$ where $X\in S_{\Gamma_{1},4}$, $Y\in S_{\Gamma_{1},2}$, where $X$ and $Y$ do not intersect and there is no edge from an element of $X$ to an element of $Y$. Similarly there are no such sets where $X,Y\in S_{\Gamma_{1},3}$. \end{comment}
\section*{Appendix M}\label{appendix32} The $1$ extremal graph on $32$ vertices. There are $85$ edges and $(deg_{5},deg_{6})=(22 ,10)$.
\noindent $\Gamma_{0}$ has incidence array:
\noindent \begin{tiny} $ \begin{array}{ccccc ccccc ccccc ccccc ccccc ccccc cc} 0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&0&0&1&0&1&0&0&0&0\\ 0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0\\ 0&0&0&0&0&1&0&0&0&1&0&0&0&0&1&1&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0\\ 0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&0&1&0&0&0\\ 0&0&0&1&0&0&0&0&0&0&0&1&0&0&1&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0\\ 0&0&1&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&1&0&0&0\\ 0&1&0&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&1&0&0&1&0&0&0&0\\ 1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1&0&0&1&0&0\\ 0&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&1&1&0&0&0&0&0\\ 0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0\\ 0&1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&1&0&0&1&0&0&0&0&0&0&0&0&0&1&0\\ 1&0&0&0&1&0&0&0&0&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0\\ 0&0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&1&0&1&0&0&0&0&0\\ 0&0&0&1&0&0&0&1&0&0&1&0&0&0&0&1&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0&0\\ 0&0&1&0&1&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&0&0&0&0&0&0&0&0&1&0&0\\ 0&0&1&0&0&0&1&0&0&0&0&1&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0&0&0\\ 0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&0&0\\ 1&0&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&1&0&0&1&0&1&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&0&0&0&0&0&1&0&0&0&1&0&0&0&0\\ 0&0&0&0&0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0\\ 0&0&0&0&1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0\\ 1&1&1&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&1&1&0&0&0&0&0&0&0&0&1\\ 0&0&0&1&0&0&1&0&0&1&0&0&1&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0&0&0&1\\ 1&0&0&0&0&1&0&0&1&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1\\ 0&0&0&0&0&0&0&1&1&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0&1&0\\ 1&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1&0\\ 0&0&0&1&0&1&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&1&0&1&0&0\\ 0&1&0&0&0&0&0&1&0&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0&0&0&1&0&0&1\\ 0&0&0&0&1&0&0&0&0&0&1&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&0&0&0&1\\ 0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&0&1&1&1&0&0&0&1&1&0\\ \end{array} $ \end{tiny}
\noindent and degree sequence: $$(6,5,5,5,6,6,6,6,5,5,6,5,5,5,6,5,5,5,6,5,5,5,5,6,6,5,5,5,5,5,5,5)$$ Edge set is: \begin{eqnarray*} (0,7), (0,11), (0,17), (0,22), (0,25), (0,27), (1,6), (1,10), (1,16), \\ (1,22), (1,29), (2,5), (2,9), (2,14), (2,15), (2,22), (3,4), (3,13), \\ (3,22), (3,24), (3,28), (4,11), (4,14), (4,16), (4,21), (4,30), (5,10),\\
(5,12), (5,21), (5,25), (5,28), (6,8), (6,15), (6,21), (6,24), (6,27),\\
(7,9), (7,13), (7,21), (7,26), (7,29), (8,14), (8,20),\\ (8,25), (8,26), (9,16), (9,20), (9,24), (10,13), (10,17),\\ (10,20), (10,30), (11,12), (11,15), (11,20), (12,19), (12,24), \\(12,26), (13,15), (13,19),
(14,17), (14,19), (14,29), (15,18),\\ (16,18), (16,25), (17,18), (17,24),
(18,23), (18,26),(18,28),\\ (19,23), (19,27), (20,23), (21,23), (22,23),
(23,31), (24,31),\\ (25,31), (26,30), (27,28), (27,30), (28,29), (29,31), (30,31) \end{eqnarray*}
\noindent Sets $S_{\Gamma_{0},j}$ for $j>2$ are empty.
\begin{eqnarray*} S_{\Gamma_{0},2} &=&\{ (1,11), (1,12), (1,19), (1,26), (2,26), (2,27),\\ && (2,30), (2,31), (3,8), (3,20), (3,25), (3,26),\\&& (8,13), (8,22), (8,28), (9,19), (9,27), (9,28), \\&&(9,30), (11,28), (11,29), (11,31), (12,16), (12,22),\\&& (12,29), (13,16), (13,25), (13,31), (15,25), (15,29), \\&& (15,30), (15,31), (16,19), (16,27), (17,21), (19,25), \\&&(20,27), (20,28), (20,29), (22,26), (22,30)\} \end{eqnarray*}
\end{document} |
\begin{document}
\title{Quasi exact solution of the Rabi Hamiltonian} \date{\today} \author{Ramazan Ko\c{c}} \email{[email protected]} \affiliation{Department of Physics, Faculty of Engineering University of Gaziantep, 27310 Gaziantep, Turkey} \author{Mehmet Koca} \email{[email protected]} \affiliation{Department of Physics, College of Science, Sultan Qaboos University, PO Box 36, Al-Khod 123, Muscat, Sultanate of Oman} \author{Hayriye T\"{u}t\"{u}nc\"{u}ler} \email{[email protected]} \affiliation{Department of Physics, Faculty of Engineering University of Gaziantep, 27310 Gaziantep, Turkey}
\begin{abstract} A method is suggested to obtain the quasi exact solution of the Rabi Hamiltonian. It is conceptually simple and can easily be extended to other systems. Analytical expressions are obtained for the eigenstates and eigenvalues in terms of orthogonal polynomials. \end{abstract}
\maketitle
Considerable attention has been paid over the years to the solution of the Rabi and Jahn-Teller (JT) Hamiltonians\cite{judd1, judd2, loor}. The $E\otimes \epsilon $ JT problem was solved by Judd when certain relations between the parameters of the Hamiltonian are invoked\cite{judd2}. Such solutions are known as Juddian isolated exact solutions. The problem has been studied in the Bargmann-Fock space by Reik \textit{et al.}\cite{reik1} and its canonical form has been obtained by Szopa \textit{et al.}\cite{szopa1}. It has been proven \cite{reik2, szopa2} that the Rabi Hamiltonian, i.e. the $E\otimes \beta $ JT system, and the $E\otimes \epsilon $ JT system are mathematically identical.
In this letter we take a new look at the solution of the Rabi Hamiltonian through the method of quasi exact solvability. The $E\otimes \beta $ Jahn-Teller system coupled to a system executing harmonic oscillation, separated in energy by $2\mu $, is characterized by the Rabi Hamiltonian \cite{reik2}: \begin{equation} H=a^{+}a+\kappa \sigma _{3}(a^{+}+a)+\mu (\sigma ^{+}+\sigma ^{-}) \label{eq:1} \end{equation} where $\sigma ^{\pm }=\frac{1}{2}\left( \sigma _{1}\pm i\sigma _{2}\right) $, $\sigma _{1},\sigma _{2},\sigma _{3}$ are Pauli matrices and the parameter $\kappa $ is the linear coupling constant. The Hamiltonian (\ref{eq:1}) can be expressed as a differential equation in the Bargmann-Fock space by using the realizations of the bosonic operators, \begin{equation} a^{+}=z,\quad a=d/dz. \label{eq:2} \end{equation} Substituting (\ref{eq:2}) into (\ref{eq:1}) we obtain a system of two linear differential equations for the functions $\psi _{1}(z)$ and $\psi _{2}(z)$: \begin{subequations} \begin{eqnarray} (z+\kappa )\frac{d\psi _{1}(z)}{dz}+(\kappa z-E)\psi _{1}(z)+\mu \psi _{2}(z) &=&0 \label{eq:1a} \\ (z-\kappa )\frac{d\psi _{2}(z)}{dz}-(\kappa z+E)\psi _{2}(z)+\mu \psi _{1}(z) &=&0 \label{eq:1b} \end{eqnarray} \end{subequations} where $E$ is the eigenvalue of the Rabi Hamiltonian. We eliminate $\psi _{2}(z)$ between the two equations and, substituting \begin{equation} z=\kappa (2x-1),\quad \psi _{1}(z)=e^{-2\kappa ^{2}x}\Re (x), \end{equation} we obtain a second order differential equation \begin{eqnarray} x(1-x)\frac{d^{2}\Re (x)}{dx^{2}}+\left[ \kappa ^{2}(4x^{2}-2x-1)+E(2x-1)-x+1 \right] \frac{d\Re (x)}{dx} && \nonumber \\ +\left[ \kappa ^{4}(-4x+3)-E^{2}+2E\kappa ^{2}(-2x+1)+\mu ^{2}\right] \Re (x) &=&0. \label{eq:3} \end{eqnarray} In order to solve (\ref{eq:3}) we first introduce the following linear and bilinear combination of the operators of the $sl(2,R)$ Lie algebra, \begin{equation} J_{+}J_{-}+J_{-}J_{0}-jJ_{-}-4\kappa ^{2}J_{+}+(4\kappa ^{2}+2j-1)J_{0}+(j(4\kappa ^{2}-1)+\mu ^{2}-2j)=0, \label{eq:4} \end{equation} which is quasi exactly solvable (QES)\cite{turb, koc}. The differential realizations of the generators of the algebra are given by \begin{equation} J_{-}=\frac{d}{dx},\quad J_{0}=x\frac{d}{dx}-j,\quad J_{+}=-x^{2}\frac{d}{dx}+2j. \label{eq:5} \end{equation} The insertion of (\ref{eq:5}) into (\ref{eq:4}) leads to the following differential equation, \begin{eqnarray} x(1-x)\frac{d^{2}\Re (x)}{dx^{2}}+\left[ 2j(2x-1)+(x-1)(4\kappa ^{2}x-1) \right] \frac{d\Re (x)}{dx} && \nonumber \\ +\left[ 8j\kappa ^{2}(1-x)+\mu ^{2}-4j^{2}\right] \Re (x) &=&0. \label{eq:6} \end{eqnarray} The function $\Re (x)$ is a polynomial of degree $2j$. Equations (\ref{eq:3}) and (\ref{eq:6}) are identical under the condition \begin{equation} E=2j-\kappa ^{2}. \label{eq:7} \end{equation} The resulting differential equation (\ref{eq:6}) is identical to the equation we have discussed in a previous paper\cite{koc}, if some parameters are redefined. Now we can easily obtain the results given in the paper\cite{koc} by defining the parameters \begin{eqnarray} \alpha &=&\frac{1}{2},\quad \lambda =-4j(2\kappa ^{2}-j)-\mu ^{2},\quad L=-2j-\frac{1}{2},\quad A=-S-\frac{1}{2} \nonumber \\ q &=&\frac{16\kappa ^{2}}{(2S+1)^{2}},\quad S=\left[ 4j(j+1)+(4\kappa ^{2}+1)^{2}\right] ^{1/2}. 
\label{eq:8} \end{eqnarray} Then $\Re (x)$ can be expressed in terms of the polynomial $P_{m}(\kappa )$: \begin{equation} \Re (x)=\sum\limits_{m=0}^{2j}\frac{4^{4j+m}j\Gamma (1-4j)\Gamma (2j)\sin \left[ (2j-m)\pi \right] P_{m}(\kappa )(-\kappa ^{2}x)^{m}}{\sqrt{\pi } (2j-m)\Gamma (m+1)}. \label{eq:9} \end{equation} Here $P_{m}(\kappa )$ satisfies the recurrence relation \begin{eqnarray} 4\kappa ^{2}(m-2j)P_{m+1}(\kappa )-\left[ (2j-m)(2j-4\kappa ^{2}-m)+\mu ^{2} \right] P_{m}(\kappa ) && \nonumber \\ +4\kappa ^{2}(m-2j)P_{m-1}(\kappa ) &=&0 \end{eqnarray} with the normalization $P_{0}(\kappa )=1.$ Certain properties of the polynomial $P_{m}(\kappa )$ have been discussed in some recent works\cite{finkel}. The polynomial $P_{m}(\kappa )$ must vanish for $m\eqslantgtr 2j+1$, and the condition $P_{2j}(\kappa )=0$ leads to the relations between the parameters of the Hamiltonian. The first three of them are given by \begin{align} P_{1}(\kappa )& =4\kappa ^{2}+\mu ^{2}-1 \nonumber \\ P_{2}(\kappa )& =32\kappa ^{4}+4(3\mu ^{2}-8)\kappa ^{2}+\mu ^{2}(\mu ^{2}-5)+4 \nonumber \\ P_{3}(\kappa )& =384\kappa ^{6}+16(11\mu ^{2}-54)\kappa ^{4}+8(3\mu ^{4}-29\mu ^{2}+54)\kappa ^{2}+\mu ^{2}(\mu ^{2}-7)^{2}-36. \label{eq:11} \end{align} These relations are exactly the same as the results obtained by the method of Juddian isolated exact solution\cite{loor, kus}. The solution obtained for the eigenfunction $\psi _{1}(x)$ can be substituted in (\ref{eq:1a}) to determine the other component $\psi _{2}(x)$ of the wave function.
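As a simple consistency check (the ansatz below is introduced only for illustration), one may substitute $\psi _{1}(z)=(a+bz)e^{-\kappa z}$ into Eqs. (\ref{eq:1a})--(\ref{eq:1b}): a nontrivial solution with $E=1-\kappa ^{2}$, i.e. $j=1/2$ in (\ref{eq:7}), exists precisely when
\[
\mu ^{2}+4\kappa ^{2}=1 ,
\]
which is the first condition $P_{1}(\kappa )=0$ of Eq. (\ref{eq:11}).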
In conclusion, we have shown that there exists a quasi exact solution of the Rabi Hamiltonian, implying that the $E\otimes \epsilon $ JT system also has a quasi exact solution. The method given here can be extended to other JT or multi-dimensional atomic system problems. Another interesting implication of the method is the existence of a relation between the QES P\"{o}schl-Teller family of potentials and Rabi systems. Details of this work are under investigation.
\end{document} |
\begin{document}
\title{
A genuine reinterpretation of Heisenberg's (``uncertainty'') relations } \author{Spiridon Dumitru} \address{Department of Physics, ``Transilvania'' University, Bd. Eroilor 29,\\ R-2200, Brasov, Romania, e-mail: [email protected]} \maketitle
\begin{abstract} In spite
of their popularity, the {\bf H}eisenberg (``uncertainty'') {\bf R}elations (HR) still generate controversies. The {\bf T}raditional {\bf I}nterpretation of HR (TIHR) dominates present-day science, although over the years many of its defects have been pointed out. These facts justify a reinvestigation of the questions connected with the interpretation and significance of HR. Here such a reinvestigation is developed, starting with a re-evaluation of the main elements of TIHR. One finds that all the respective elements are troubled by insurmountable defects. This reveals the indubitable failure of TIHR and the necessity of its abandonment. Consequently the HR must be deprived of their quality of crucial physical formulae. Moreover, the HR are shown to be nothing but simple fluctuation formulae, with natural analogues in classical (non-quantum) physics. The description of the measuring uncertainties (traditionally associated with HR) is approached from a new informational perspective. Planck's constant $\hbar $ (also associated with HR) is revealed to have the significance of a generic indicator of quantum stochasticity, similar to the role of Boltzmann's constant $k$ with respect to thermal stochasticity. Some other adjacent questions are also briefly discussed at the end. \end{abstract}
\tableofcontents
Motto: {\it ``uncertainty principle: it has to do with the uncertainty in predictions rather than the accuracy of measurement. I think in fact that the word ``measurement'' has been so abused in quantum mechanics that it would be good to avoid it altogether''}
John S. Bell, 1985.
\section{INTRODUCTION}
\label{sec:introduc}
The {\bf H}eisenberg (or uncertainty) {\bf R}elations (HR) have a large popularity, being frequently regarded as crucial formulae of physics or (Martens 1991) even as an expression of ``the most important principle of the twentieth century physics''. Nevertheless today one knows (Bunge 1977) that HR ``are probably the most controverted formulae in the whole of the theoretical physics''. The controversies originate in the association of the (supposedly special) characteristics of measurements at atomic scale with HR and, correspondingly, with the foundation and interpretation of quantum theory. The respective association was initiated and especially sophisticated within the {\bf T}raditional (conventional or orthodox) {\bf I}nterpretation of {\bf HR} (TIHR). Very often TIHR is amalgamated with the so-called Copenhagen interpretation of quantum mechanics.
Elements of the alluded association were preserved one way or another in almost all investigations of HR subsequent to TIHR. It is notable that, in spite of their number and variety, the mentioned investigations have not yet solved in essence the controversies connected with TIHR. But, curiously, today large classes of publications and scientists seem to omit (or even to ignore) discussions about the controversies and defects characterizing TIHR. So, tacitly, nowadays TIHR seems to remain a largely adopted doctrine which dominates the questions regarding the foundation and interpretation of quantum theory. For all that (Piron 1982) ``the idea that there are defects in the foundations of orthodox quantum theory is unquestionable present in the conscience of many physicists''.
No doubt, first of all, the above quoted idea regards questions connected with TIHR. Then the respective questions require further studies and probably new views. We believe that a promising strategy to satisfy such requirements is to develop an investigation guided by the goals presented under the following {\bf P}oints ({\bf P}):
${\bf \underline{P-1.1}}$ : From the vague multitude of sophisticated statements of TIHR to identify its main elements (hypotheses, arguments/motivations and assertions).$\blacktriangle $
${\bf \underline{P-1.2}}$ : To add together the significant defects of TIHR located in connection with the above mentioned elements.$\blacktriangle $
${\bf \underline{P-1.3}}$ : To examine the verity of the respective defects as well as their significance with respect to TIHR.$\blacktriangle $
${\bf \underline{P-1.4}}$ : To see if such an examination defends TIHR or irrefutably pleads against it.$\blacktriangle $
${\bf \underline{P-1.5}}$ : In the latter case to admit the failure of TIHR and to abandon it as an incorrect and useless doctrine.$\blacktriangle $
${\bf \underline{P-1.6}}$ : To see if HR are veritable physical formulae.$ \blacktriangle $
${\bf \underline{P-1.7}}$ : To search for a genuine reinterpretation of HR.$ \blacktriangle $
${\bf \underline{P-1.8}}$ : To give a (first) evaluation of the direct consequences of the mentioned reinterpretation.$\blacktriangle $
${\bf \underline{P-1.9}}$ : To note a few remarks on some adjacent questions. $\blacktriangle $
In this paper we wish to develop an investigation of the HR problematic in the spirit of the above mentioned points ${\bf P-1.1}${\bf \ --- }${\bf P-1.8}${\bf .} For such a purpose we will appeal to some elements (ideas and results) from our works published in last two decades (Dumitru 1974 a, 1974 b, 1977, 1980, 1984, 1987, 1988, 1989, 1991, 1993, 1996, 1999; Dumitru and Verriets 1995). But here we strive to incorporate the respective elements into a more argued and elaborated approach. Also we try to make our exposition as self-contained as possible so that the reader should find it sufficiently meaningful and persuasive without any important appeals to other texts.
Through the announced investigation we shall find that all the main elements of TIHR are affected by insurmountable defects. Therefore we shall reveal the indubitable failure of TIHR and the necessity of its abandonment. Then it directly follows that in fact HR do not have any significance connected with the (measuring) uncertainties. That is why in this paper we do not use for the respective relations the widespread denomination of ``uncertainty relations''.
A consequence of the above alluded revelations is the fact that HR must be deprived of their quality of crucial physical formulae. So we come in consonance with the guess (Dirac 1963) that: ``uncertainty relations in their present form will not survive in the physics of future''.
The failure of TIHR leaves open a conceptual space which firstly requires justified answers to the questions from ${\bf P-1.6}$ and ${\bf P-1.7}$. The respective answers must be incorporated in a concordant view about the subjects of the following points:
${\bf \underline{P-1.10}}$ : The genuine description of the measurements.$ \blacktriangle $
${\bf \underline{P-1.11}}$ : The foundation and the interpretation of the actually known quantum theory.$\blacktriangle $
The above mentioned subjects were amalgamated by TIHR through a lot of assertions/assumptions which now appear as fallacious. That is why we suggest that a useful view should be built on a natural differentiation of the respective subjects.
In such a view the present-day quantum theory must be considered as regarding only intrinsic properties of the entities (particles and fields) from the microworld. The aspects of the respective properties included in the theoretical version of HR refer to the stochastic characteristics of the considered entities. But note that stochastic attributes are specific also in the case of some macroscopic physical systems (e.g. thermodynamical ones), characterized by a class of macroscopic formulae similar to HR. Also Planck's constant $\hbar $ (involved in quantum HR) proves itself to be similar to Boltzmann's constant $k$ (involved in the mentioned macroscopic formulae). Both mentioned constants appear as generic indicators of stochasticity.
In the spirit of the above suggested view the description of the measurements remains a question which is extrinsic as regards the properties of the considered physical systems. Also it must be additional and independent from the actually known branches of theoretical physics (including the quantum mechanics). The respective branches refer only to the intrinsic properties of the considered systems. Then the measurements appear as processes which supply out-coming (received) information/data about the intrinsic properties of the measured systems. So regarded the measurements can be described through some mathematical models. In such models the measuring uncertainties can be described by means of various estimators.
The above announced views about the HR problematic facilitate reconsiderations and (we think) nontrivial comments about some questions regarding the foundations of quantum mechanics.
For developing our exposition in the next sections we will quote directly only a restricted number of references. This is because our goal is not to give an exhaustive review of the literature dealing with TIHR. The readers interested in such reviews are invited to consult the known monographical and bibliographical publications (e.g.: Jammer, 1966; De Witt and Graham, 1971; Jammer, 1974; Nilson, 1976; Yanase {\it et al.} 1978; Primas, 1981; Ballentine, 1982; Cramer, 1986; Dodonov and Man'ko, 1987; Martens, 1991; Braginski and Khalili, 1992; Omnes 1992, 1994; Bush {\it et al.}, 1996).
\section{THE MAIN ELEMENTS OF T I H R}
In spite of its popularity, in its promoting literature TIHR is reported rather as a vague multitude of sophisticated statements and not as a systematized ensemble of clearly defined main elements (hypotheses, arguments/motivations and assertions). However, from the respective publications such an ensemble can be identified and sorted out which, in our opinion, can be presented as follows:
On the best authority (Heisenberg, 1977) today it is known that the TIHR story originates in the search of general answers to the primary questions mentioned under the following points:
\underline{${\bf P-2.1}$} ${\bf :}$Are all measurements affected by measuring uncertainties ?$\blacktriangle $
\underline{${\bf P-2.2}$} ${\bf :}$How can the respective uncertainties be represented quantitatively in a mathematical scheme ?$\blacktriangle $
In connection with ${\bf P-2.1}$, TIHR adopted the following hypotheses:
\underline{${\bf P-2.3}$} ${\bf :}$The measuring uncertainties are due to the perturbations of the measured system as a result of its interactions with the measuring instrument.$\blacktriangle $
\underline{${\bf P-2.4}$} ${\bf :}$In the case of macroscopic systems the mentioned perturbations can be made arbitrarily small and, consequently, the corresponding uncertainties can always be considered as negligible.$\blacktriangle $
\underline{${\bf P-2.5}$} ${\bf :}$In the case of quantum systems (microparticles of atomic size) the alluded perturbations are essentially unavoidable and consequently for certain measurements (see below ${\bf P-2.12 }$) the corresponding uncertainties are non-negligible.$\blacktriangle $
In the shadow of the hypotheses mentioned in ${\bf P-2.4}$ and ${\bf P-2.5}$ the TIHR attention was limited only to the quantum cases. For approaching such cases with respect to ${\bf P-2.4}$ TIHR resorted to the following motivation resources:
${\bf \underline{P-2.6}:}$Analysis of some thought (gedanken) measuring experiments.$\blacktriangle $
${\bf \underline{P-2.7}}$ : Appeal to some theoretical formulae from the existing quantum mechanics.$\blacktriangle $
The two resources were used in undisguised association. So, from the very start, in TIHR the questions regarding the description of the measurements, respectively the foundation and interpretation of the existing quantum theory, were explicitly amalgamated.
For accuracy of the discussions, in the following we shall use the term {\it variable} in order to denote a physical quantity which describes a specific property/characteristic of a physical system. With adequate delimitations the respective term will be used in both theoretical and experimental senses. In the former case it is connected with the theoretical modeling of the system. In the latter case it is related to the data given by measurements on the system.
In connection with ${\bf P-2.6}$ there was considered (Heisenberg, 1927, 1930) the case of simultaneous measurements of two (canonically) conjugated quantum variables A and B (such as coordinate q and momentum p, or time t and energy E). The corresponding Thought Experimental (TE) uncertainties are $\Delta _{TE}A$ and $\Delta _{TE}B.$ They were found to be interconnected through the following A-B formula
\begin{equation} \Delta _{TE}A\cdot \Delta _{TE}B\approx \hbar \eqnum{2.1} \end{equation} where $\hbar $ is the quantum Planck's constant.
As regards ${\bf P-2.7}$ firstly there was introduced (Heisenberg 1927, 1930) the following q-p theoretical formula:
\begin{equation} \Delta _\Psi q\cdot \Delta _\Psi p\geq \frac \hbar 2 \eqnum{2.2} \end{equation} (with equality only for Gaussian wave function $\Psi ).$ Afterwards, TIHR partisans replaced Eq. (2.2) by the more general A-B theoretical formula
\begin{equation}
\Delta _\Psi A\cdot \Delta _\Psi B\geq \frac 12\cdot \left| \left\langle
\left[ \widehat{A},\widehat{B}\right] _{-}\right\rangle _\Psi \right| \eqnum{2.3} \end{equation} Here $\left[ \widehat{A},\widehat{B}\right] _{-}$ denotes the commutator of the quantum operators $\widehat{A}$ and $\widehat{B}$, with $\left[ \widehat{A},\widehat{B}\right] _{-}=\pm i\hbar $ in the case of conjugated variables. (For further details about the quantum notations, in their currently used form, see Sec. V below).
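As a quick illustration of the connection between the two theoretical formulae: for the conjugated pair $A=q$, $B=p$ one has $\left[ \widehat{q},\widehat{p}\right] _{-}=i\hbar $, so that Eq. (2.3) reduces to
\[
\Delta _\Psi q\cdot \Delta _\Psi p\geq \frac \hbar 2 ,
\]
i.e. exactly the particular relation (2.2).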
Equations (2.1) and (2.2)/(2.3) were taken by TIHR as motivation supports. Based on such supports TIHR partisans promoted a whole doctrine (vision). The main (essential) elements of the respective doctrine come down to the following points, grouped in pairs of Assertions (A) and Motivations (M):
${\bf \underline{P-2.8/A}}$ : The quantities $\Delta _{TE}A$ and $\Delta _\Psi A$ from Eqs. (2.1) and (2.2)/(2.3), denoted by a unique symbol $\Delta A$, have an identical significance of measuring {\it uncertainty} for the quantum variable A.$\blacktriangle $
${\bf \underline{P-2.8/M}}$ : The above mentioned TIHR presumptions about $ \Delta _{TE}A$ and the (formal) resemblance between Eqs. (2.1) and (2.2).$ \blacktriangle $
${\bf \underline{P-2.9/A}}$ : Equations (2.1) and (2.2)/(2.3) admit the same generic interpretation of uncertainty relations for simultaneous measurements of the variables $A$ and $B$.$\blacktriangle $
$\underline{{\bf P-2.9/M}}$ : The presumed significance for $\Delta _{TE}A$ and $\Delta _{TE}B$ from Eq. (2.1) and the resemblance between Eqs. (2.1) and (2.2)/(2.3).$\blacktriangle $
${\bf \underline{P-2.10/A}}$ : A solitary quantum variable $A$ can be measured without any uncertainty (with unlimited accuracy).$\blacktriangle $
${\bf \underline{P-2.10/M}}$ : For such a variable, considered independently from other variables, the Eqs. (2.1) - (2.3) do not impose a lower bound for the uncertainty $\Delta A$.$\blacktriangle $
${\bf \underline{P-2.11/A}}$ : Two commutable variables $A$ and $B$ can be measured simultaneously with arbitrarily small (even null) uncertainties $ \Delta A$ and $\Delta B$.$\blacktriangle $
${\bf \underline{P-2.11/M}}$ : For such variables $\left[ \widehat{A}, \widehat{B}\right] _{-}=0$ and in Eq. (2.3) the product $\Delta A\cdot \Delta B$ has no non-null lower bound.$\blacktriangle $
${\bf \underline{P-2.12/A}}$ : Two non-commutable variables $A$ and $B$ can be measured simultaneously only with non-null and interdependent uncertainties $\Delta A$ and $\Delta B$.$\blacktriangle $
${\bf \underline{P-2.12/M}}$ : In such a case $\left[ \widehat{A},\widehat{B} \right] _{-}\neq 0$ and in Eq. (2.3) as well as in Eq. (2.1) the product $ \Delta A\cdot \Delta B$ of the corresponding simultaneous uncertainties has as a lower bound a non-null quantity.$\blacktriangle $
${\bf \underline{P-2.13/A}}$ : The HR defined by Eqs. (2.1) - (2.3) (and named uncertainty relations) are typically quantum formulae and they have no similar in classical (non-quantum) physics.$\blacktriangle $
${\bf \underline{P-2.13/M}}$ : Presence of the quantum (Planck) constant $ \hbar $ in Eqs. (2.1) - (2.3) and its absence in all known formulae of classical physics.$\blacktriangle $
The above mentioned points ${\bf P-2.8}$ -- ${\bf P-2.13}$ can be regarded as the main elements of TIHR. This is because any piece of the variety of TIHR statements is obtained and advocated by means of some combination of the respective elements.
Among the alluded pieces we mention here the ones regarding the mutual relations of quantum variables. TIHR adopted the idea:
${\bf \underline{P-2.14}}$ : A variable exists (can be defined) only when it is measurable with absolute accuracy (without uncertainty).$\blacktriangle $
By combining this idea with ${\bf P-2.11}$ and ${\bf P-2.12}$, in the TIHR literature the following statement is often promoted:
${\bf \underline{P-2.15}}$ : Two quantum variables are compatible, respectively incompatible, according as their operators are or are not commutable. Consequently a complete description of a quantum system must be made in terms of a set of mutually compatible variables.${\bf \blacktriangle }$
In the same literature one finds also the opinion that:
${\bf \underline{P-2.16}}$ : Two incompatible variables (especially the canonically conjugated ones) are complementary (i.e. mutually exclusive) - similarly as in the complementarity relation for the corpuscular and wave characteristics of microparticles of atomic size.${\bf \blacktriangle }$
\section{A FEW REMARKS ON T I H R HISTORY}
TIHR was initiated by Heisenberg but later on it was developed and especially promoted by the Copenhagen School guided by N. Bohr. In a first stage TIHR had a relatively modest motivation, based only on the Eqs. (2.1) and (2.2). However it was largely accepted in scientific and academic communities, partly due to the authority of its promoters. So, the establishing of TIHR as a doctrine started.
In a second stage TIHR partisans introduced a multitude of thought-experimental or theoretical formulae which resemble more or less the Eqs. (2.1)-(2.3). In spite of their (conceptual and/or mathematical) diversity the respective formulae were declared as {\it uncertainty relations } and their existence and interpretation were regarded as supports for an extended motivation of TIHR. So, for its partisans, TIHR was viewed as a well established and irrefutable doctrine. Such a view was widely promoted in leading publications (especially in textbooks).
In the meantime the alluded view was confronted with the notification of some defects of TIHR. But, as a rule, the respective notifications appeared disparately, sometimes in marginal publications and from non-leading authors. So the mentioned defects were not presented as a systematized ensemble, and TIHR was criticized on certain points but not in its totality. An appreciation viewing somehow the alluded totality was noted altogether solitarily (Primas 1981). Referring to the post-Copenhagen interpretation of quantum mechanics it says: ``Heisenberg's uncertainty relations are no longer at the heart of the interpretation but are simple consequences of the basic mathematical formalism''. Here one should remark that, as far as we know, such an appreciation has never been used in order to elucidate the shortcomings of the TIHR doctrine. Moreover it seems that, even in present-day publications regarding the interpretation of quantum mechanics, the respective appreciation is not taken properly into account.
In the presented circumstances the TIHR partisans ignored or even denied the alluded defects. Such an attitude was sustained mainly by putting forward thought experiments and/or the authority of the mentioned partisans. But note that, in this way, for most of the cases, the notifications of TIHR defects were not really counteracted and the corresponding controversies were not elucidated. For all that TIHR survived over the decades with the appearance of an uncontroversial doctrine and became a veritable myth. Undoubted signs of the respective myth are present even today in publications (especially in textbooks) and in the thinking of peoples (particularly in academic communities).
Here it is interesting to observe Heisenberg's own attitude towards the TIHR story. It is surprising to see in the afferent literature that, although he was the initiator of the TIHR doctrine, Heisenberg was not involved in the subsequent history of the respective doctrine. So he did not develop mathematical generalizations or interpreting extensions/sophistications of the Eqs. (2.1)-(2.3). Also he did not participate in the controversies regarding the TIHR defects. Probably that was the reason why in one of his last publications on HR (Heisenberg, 1977) he did not refer to such developments and controversies but recalled only his thoughts connected with the beginning of the TIHR history. Can the alluded attitude be regarded as evidence for the supposition that in fact Heisenberg was conscious of the insurmountable defects of TIHR? A pertinent answer to such a question is expected to be (eventually) known upon the publication of all volumes of a planned monograph (Mehra and Rechenberg, 1982) due, in part, to one of Heisenberg's last collaborators.
With the Heisenberg case one discloses a particularity in the attitude of many scientists who promoted TIHR. As individuals, each of the respective scientists did not regard the TIHR as a whole doctrine but argued for only a few of its elements and ignored almost all of the defects. Often their considerations were amalgamated with ideas which do not pertain strictly to TIHR. That is why, probably, by the term {\it TIHR-partisans} it is more adequate to understand a fuzzy class of people rather than a rigorously delimited group of scientists.
Now, looking back over time, we believe that the verity and true significance of the TIHR defects still remain open questions which require elucidation. Such a requirement implies the necessity of an argued and complete revaluation of TIHR. Then there directly appears the need for a search for a genuine reinterpretation of HR. The alluded beliefs will guide our investigations in the following sections.
\section{STARTING CONSIDERATIONS OF T I H R}
TIHR introduced its main elements presented in ${\bf P-2.8}$ {\bf --- }${\bf P-2.13}$ by appealing to some starting considerations about Eqs. (2.1)-(2.3). The appeals viewed the scientific achievements from the first years of quantum mechanics. Here, for a correct (re)evaluation of TIHR, it is appropriate to recall briefly the respective considerations.
Firstly it must be noted that the Eqs. (2.1) were introduced by using the wave characteristics of quantum microparticles. Consequently the quantum measurements were regarded by similitude with the optical ones. But in the mentioned years the performances of the optical measurements were restricted by the classical limitative criteria of resolution (due to Abbe and Rayleigh). Then TIHR promoted as starting consideration the following point:
${\bf \underline{P-4.1}}$: The estimation of performances, respectively of uncertainties, for the quantum measurements must be done by using the alluded limitative criteria, transposed into the quantum framework through the de Broglie formula $\lambda =h/p$ ($\lambda $ = wavelength).$\blacktriangle $
By means of this consideration TIHR partisans obtained some relations similar with Eqs. (2.1), for all the thought-experiments promoted by them.
Referring to Eqs. (2.2)-(2.3), the starting considerations promoted by TIHR can be summarized as follows. The state of a quantum microparticle is described by the wave function $\Psi =\Psi (q)$ regarded as a vector in a Hilbert space (q denotes the set of specific orbital variables). In the respective vectorial space the scalar product ($\Psi _a,\Psi _b)$ of two functions $\Psi _a$ and $\Psi _b$ is given by
\begin{equation} \left( \Psi _a,\Psi _b\right) =
\displaystyle \int
\limits_{\Omega _q}\Psi _a^{*}\Psi _bd\Omega _q \eqnum{4.1} \end{equation}
where $\Psi _a^{*}$ is the complex conjugate of $\Psi _a$, whereas $\Omega _q$ and $d\Omega _q$ denote the accessible and the infinitesimal domains in $q$-space, respectively. A quantum variable $A$ is described by the operator $\widehat{A}$ and its expected (mean) value $\left\langle A\right\rangle _\Psi $ is defined by
\begin{equation} \left\langle A\right\rangle _\Psi =\left( \Psi ,\widehat{A}\Psi \right) \eqnum{4.2} \end{equation} The quantity $\Delta _\Psi A$ from Eqs. (2.2)-(2.3) is defined as follows:
\[ D_\Psi A=\left( \delta _\Psi \widehat{A}\Psi ,\delta _\Psi \widehat{A}\Psi \right) \,\,\,\,\,,\,\,\,\,\,\,\delta _\Psi \widehat{A}=\widehat{A} -\left\langle A\right\rangle _\Psi \] \begin{equation} \Delta _\Psi A=\sqrt{D_\Psi A} \eqnum{4.3} \end{equation} Then for two variables $A$ and $B$ the following evident relation is invoked
\begin{equation} \left( \left( \alpha \delta _\Psi \widehat{A}-i\delta _\Psi \widehat{B} \right) \Psi ,\left( \alpha \delta _\Psi \widehat{A}-i\delta _\Psi \widehat{B }\right) \Psi \right) \geq 0 \eqnum{4.4} \end{equation} with $\alpha $ an arbitrary and real parameter. In the TIHR literature this relation is transcribed into the formula
\begin{equation} \left( \Psi ,\left( \alpha \delta _\Psi \widehat{A}+i\delta _\Psi \widehat{B}\right) \left( \alpha \delta _\Psi \widehat{A}-i\delta _\Psi \widehat{B}\right) \Psi \right) \geq 0 \eqnum{4.5} \end{equation} which is equivalent to the relation
\begin{equation} \alpha ^2\left( \Delta _\Psi A\right) ^2-\alpha \left\langle i\left[ \widehat{A},\widehat{B}\right] _{-}\right\rangle _\Psi +\left( \Delta _\Psi B\right) ^2\geq 0 \eqnum{4.6} \end{equation} where $\left[ \widehat{A},\widehat{B}\right] _{-}=\widehat{A}\widehat{B}- \widehat{B}\widehat{A}$ is the commutator of the operators $\widehat{A}$ and $\widehat{B}.$ As Eq. (4.6) is satisfied for any real value of $\alpha $, it directly results that
\begin{equation} \left( \Delta _\Psi A\right) ^2\cdot \left( \Delta _\Psi B\right) ^2\geq \frac 14\left\langle i\left[ \widehat{A},\widehat{B}\right] _{-}\right\rangle _\Psi ^2 \eqnum{4.7} \end{equation} or
\begin{equation}
\Delta _\Psi A\cdot \Delta _\Psi B\geq \frac 12\left| \left\langle \left[
\widehat{A},\widehat{B}\right] _{-}\right\rangle _\Psi \right| \eqnum{4.8} \end{equation} This latter formula is just the Eq. (2.3).
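For completeness we note that the passage from Eq. (4.6) to Eq. (4.7) is the standard discriminant argument: a quadratic $u\alpha ^2-v\alpha +w$ with $u\geq 0$, which is non-negative for every real $\alpha $, must have a non-positive discriminant,
\[
v^2-4uw\leq 0 ,
\]
and Eq. (4.7) follows upon taking $u=\left( \Delta _\Psi A\right) ^2$, $v=\left\langle i\left[ \widehat{A},\widehat{B}\right] _{-}\right\rangle _\Psi $ and $w=\left( \Delta _\Psi B\right) ^2$.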
Now we point out the following notable facts. The above mentioned starting considerations were founded on old scientific achievements. For all that, they are preserved and promoted in an unchanged form even in today's literature (especially in textbooks).
\section{SUBSEQUENT SCIENTIFIC ACHIEVEMENTS}
For an up-to-date re-evaluation of TIHR the above mentioned starting considerations must be supplemented with elements regarding some subsequent scientific achievements. The respective elements were reported in the decades after the debut of quantum mechanics but, surprisingly, even today they seem to have little popularity. The supplement we have in mind regards the following facts.
Firstly let us refer to the thought-experimental Eqs. (2.1) and, correspondingly, to the limitative criteria involved in the starting consideration mentioned in ${\bf P-4.1}${\bf .} In the last decades, in optical measurements, some super-resolution techniques have been achieved (Roychoudhuri, 1978; Scheer {\it et. al.}, 1989; Croca {\it et al}., 1996). The performances of the respective techniques overstep the alluded limitative criteria. Then it seems to be possible that instead of ${\bf P-4.1 }$ one should operate with the following up-to-date consideration:
$\underline{{\bf P-5.1}}$ : The accuracies and uncertainties of the quantum measurements can be estimated by transposition in adequate terms (by means of the de Broglie formula $\lambda =h/p$) of the mentioned super-resolution performances.$\blacktriangle $
Based on this consideration, it is easy to imagine some {\bf S}uper-{\bf R}esolution {\bf T}hought {\bf E}xperiments (SRTE). Then for the measurement of two variables $A$ and $B$ the corresponding uncertainties are $\Delta _{SRTE}A$ and $\Delta _{SRTE}B.$ By reasoning similarly to the case of Eq. (2.1) one finds \begin{equation} \Delta _{SRTE}A\cdot \Delta _{SRTE}B<\hbar \eqnum{5.1} \end{equation} This SRTE relation must be taken into account for an up-to-date re-evaluation of TIHR.
Now let us refer to some subsequent achievements connected with the theoretical Eqs. (2.2)-(2.3) or (4.8). The respective achievements regard mathematical generalizations of Eq. (4.8). Note that a large variety of such generalized relations is known (Dodonov and Man'ko, 1987; Dumitru, 1988). But here we shall consider only a few of the respective relations which are of direct significance for the questions approached in this paper.
Then we consider a quantum microparticle for which the orbital state and variables are described by the wave function $\Psi $ respectively by the operators $\widehat{A}_k(k=1,2,.....n).$ With the same significance of notations as in Eqs. (4.1)-(4.4), we can define the correlations:
\[ C_\Psi \left( A_jA_k\right) =\left( \delta _\Psi \widehat{A}_j\Psi ,\delta _\Psi \widehat{A}_k\Psi \right) \] \begin{equation} D_\Psi A_j=C_\Psi \left( A_jA_j\right) \,\,\,\,,\,\,\,\Delta _\Psi A_j=\sqrt{ D_\Psi A_j} \eqnum{5.2} \end{equation} If $\alpha _k\left( k=1,2,.....,n\right) $ are a set of arbitrary and complex parameters we can write the evident relation
\begin{equation} \left( \sum_{k=1}^n\alpha _k\delta _\Psi \widehat{A}_k\Psi ,\sum_{l=1}^n\alpha _l\delta _\Psi \widehat{A}_l\Psi \right) \geq 0 \eqnum{5.3} \end{equation} which can be transcribed directly as: \begin{equation} \sum_k\sum_l\alpha _k^{*}\alpha _l\left( \delta _\Psi \widehat{A}_k\Psi ,\delta _\Psi \widehat{A}_l\Psi \right) \geq 0 \eqnum{5.4} \end{equation} The quantities $\left( \delta _\Psi \widehat{A}_k\Psi ,\delta _\Psi \widehat{A}_l\Psi \right) (k;l=1,2,...,n)$ represent the {\it correlation matrix} of the set of variables $A_k$. It is obvious that
\begin{equation} \left( \delta _\Psi \widehat{A}_k\Psi ,\delta _\Psi \widehat{A}_l\Psi \right) ^{*}=\left( \delta _\Psi \widehat{A}_l\Psi ,\delta _\Psi \widehat{A}_k\Psi \right) \eqnum{5.5} \end{equation} i.e. the correlation matrix is Hermitian. Equation (5.4) shows that the respective matrix is also non-negative definite. Then from matrix algebra (see Korn and Korn, 1968) it results that \begin{equation} \det \left[ \left( \delta _\Psi \widehat{A}_k\Psi ,\delta _\Psi \widehat{A}_l\Psi \right) \right] \geq 0 \eqnum{5.6 $_{CR}$} \end{equation} where $\det \left[ a_{kl}\right] $ denotes the determinant with the elements $a_{kl}.$ Here, and in the following notations, the index CR added to the number of a formula indicates that the respective formula belongs to a general family of similar {\it correlation relations} (CR).
For two operators $\widehat{A}_1=\widehat{A}$ and $\widehat{A}_2=\widehat{B}$ from Eq. (5.6) one obtains
\begin{equation} \left( \delta _\Psi \widehat{A}\Psi ,\delta _\Psi \widehat{A}\Psi \right) \cdot \left( \delta _\Psi \widehat{B}\Psi ,\delta _\Psi \widehat{B}\Psi
\right) \geq \left| \left( \delta _\Psi \widehat{A}\Psi ,\delta _\Psi
\widehat{B}\Psi \right) \right| ^2 \eqnum{5.7 $_{CR}$} \end{equation} If the two operators satisfy the conditions
\begin{equation} \left( \widehat{A}_k\Psi ,\widehat{A}_l\Psi \right) =\left( \Psi ,\widehat{A}_k\widehat{A}_l\Psi \right) \quad (k=1,2;\;l=1,2) \eqnum{5.8} \end{equation} equation (5.7) gives directly
\begin{equation}
\Delta _\Psi A\cdot \Delta _\Psi B\geq \left| \left\langle \delta _\Psi
\widehat{A}\cdot \delta _\Psi \widehat{B}\right\rangle _\Psi \right| \eqnum{5.9 $_{CR}$} \end{equation} When Eq. (5.8) is satisfied we have also
\begin{equation} \left\langle \delta _\Psi \widehat{A}\delta _\Psi \widehat{B}\right\rangle _\Psi =\frac 12\left\langle \left[ \delta _\Psi \widehat{A},\delta _\Psi \widehat{B}\right] _{+}\right\rangle _\Psi -\frac i2\left\langle i\left[ \widehat{A},\widehat{B}\right] _{-}\right\rangle _\Psi \eqnum{5.10} \end{equation} where $\left[ \widehat{A},\widehat{B}\right] _{\pm }=\widehat{A}\widehat{B}\pm \widehat{B}\widehat{A}$ (i.e. the anticommutator, respectively the commutator, of $\widehat{A}$ and $\widehat{B}$) while $\left\langle \left[ \delta _\Psi \widehat{A},\delta _\Psi \widehat{B}\right] _{+}\right\rangle _\Psi $ and $\left\langle i\left[ \widehat{A},\widehat{B}\right] _{-}\right\rangle _\Psi $ are real quantities. Then Eq. (5.9) can be transcribed as
\begin{equation} \Delta _\Psi A\cdot \Delta _\Psi B\geq \sqrt{\frac 14\left\langle \left[ \delta _\Psi \widehat{A},\delta _\Psi \widehat{B}\right] _{+}\right\rangle _\Psi ^2+\frac 14\left\langle i\left[ \widehat{A},\widehat{B}\right] _{-}\right\rangle _\Psi ^2} \eqnum{5.11 $_{CR}$} \end{equation} This formula implies the following two less restrictive relations
\begin{equation} \Delta _\Psi A\cdot \Delta _\Psi B\geq \frac 12\left\langle \left[ \delta _\Psi \widehat{A},\delta _\Psi \widehat{B}\right] _{+}\right\rangle _\Psi \eqnum{5.12 $_{CR}$} \end{equation} and \begin{equation}
\Delta _\Psi A\cdot \Delta _\Psi B\geq \frac 12\left| \left\langle \left[
\widehat{A},\widehat{B}\right] _{-}\right\rangle _\Psi \right| \eqnum{5.13 $_{CR}$} \end{equation} One can see that the latter relation is exactly the theoretical version from Eqs.(2.3)/(4.8) of HR.
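The passage from Eq. (5.11) to the less restrictive Eqs. (5.12) and (5.13) simply uses the elementary inequalities $\sqrt{a^2+b^2}\geq \left| a\right| $ and $\sqrt{a^2+b^2}\geq \left| b\right| $, applied to the two real mean values appearing under the square root in Eq. (5.11).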
Note now that there are situations when the Eq. (5.8) is satisfied for $k=l$ but not for $k\neq l$. In such situations from Eq. (5.7) instead of Eq. (5.9) one obtains the relation
\begin{equation}
\Delta _\Psi A\cdot \Delta _\Psi B\geq \left| \left( \delta _\Psi \widehat{A}
\Psi ,\delta _\Psi \widehat{B}\Psi \right) \right| \eqnum{5.14 $_{CR}$} \end{equation}
From the above presented considerations it results that the true generalizations of the theoretical HR given by Eqs. (2.2)/(2.3)/(4.8)/(5.13) are exactly Eqs. (5.7) and (5.6).
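To illustrate the content of Eq. (5.6) beyond the pair case (the expansion below is written only as an illustration), for three operators $\widehat{A}_1=\widehat{A}$, $\widehat{A}_2=\widehat{B}$, $\widehat{A}_3=\widehat{C}$ the determinant condition reads
\[
D_\Psi A\cdot D_\Psi B\cdot D_\Psi C-D_\Psi A\left| C_\Psi \left( BC\right) \right| ^2-D_\Psi B\left| C_\Psi \left( CA\right) \right| ^2-D_\Psi C\left| C_\Psi \left( AB\right) \right| ^2+2\,{\rm Re}\left[ C_\Psi \left( AB\right) C_\Psi \left( BC\right) C_\Psi \left( CA\right) \right] \geq 0 ,
\]
a relation which couples all three pairwise correlations; the two-operator relation (5.7) corresponds in the same way to the $2\times 2$ principal minors.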
The above discussed relations refer to the orbital variables of a quantum microparticle. But such a microparticle also has spin variables, characterized by similar relations. So if for an electron the spin state and the spin variables are described by the spinor $\chi $ (spin wave function), respectively by the matrices (operators) $\widehat{A}_j$, the alluded relations can be introduced as follows. With the usual notations (see Bransden and Joachain, 1994) the expected values are $\left\langle A_j\right\rangle _\chi =\chi ^{+}\widehat{A}_j\chi $ while the correlations $C_\chi \left( A_jA_l\right) ,$ dispersions $D_\chi A_j$, and standard deviations $\Delta _\chi A_j$ are given by
\[ C_\chi \left( A_jA_l\right) =\left( \delta _\chi \widehat{A}_j\chi \right) ^{+}\cdot \left( \delta _\chi \widehat{A}_l\chi \right) ,\qquad \delta _\chi \widehat{A}_j=\widehat{A}_j-\left\langle A_j\right\rangle _\chi \] \begin{equation} D_\chi A_j=C_\chi \left( A_jA_j\right) ,\qquad \Delta _\chi A_j=\sqrt{D_\chi A_j} \eqnum{5.15} \end{equation} Similarly to the orbital Eqs. (5.3)-(5.6), it is easy to see that the spin correlations $C_\chi \left( A_jA_l\right) $ satisfy the relation \begin{equation} \det \left[ C_\chi \left( A_jA_l\right) \right] \geq 0 \eqnum{5.16 $_{CR}$} \end{equation} For two variables $A_1=A$ and $A_2=B$, which satisfy conditions similar to (5.8), from (5.16) one obtains
\begin{equation}
\Delta _\chi A\cdot \Delta _\chi B\geq \left| \left\langle \delta _\chi
\widehat{A}\delta _\chi \widehat{B}\right\rangle _\chi \right| \eqnum{5.17$_{CR}$} \end{equation} \begin{equation}
\Delta _\chi A\cdot \Delta _\chi B\geq \frac 12\left| \left\langle \left[
\widehat{A},\widehat{B}\right] _{-}\right\rangle _\chi \right| \eqnum{5.18 $_{CR}$} \end{equation} Equations (5.18) and (5.16) are nothing but spin similars of orbital HR and of their generalizations given by Eqs. (5.13) respectively (5.6).
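A standard illustration of Eq. (5.18): for the spin components $\widehat{A}=\widehat{s}_x$ and $\widehat{B}=\widehat{s}_y$ of an electron one has $\left[ \widehat{s}_x,\widehat{s}_y\right] _{-}=i\hbar \widehat{s}_z$, so that
\[
\Delta _\chi s_x\cdot \Delta _\chi s_y\geq \frac \hbar 2\left| \left\langle s_z\right\rangle _\chi \right| .
\]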
Theoretical versions of HR (as well as their generalizations) imply, for the variables of the quantum microparticles, a lot of probabilistic parameters such as: expected/mean values, dispersions, standard deviations and correlations. This means that the respective variables have stochastic (or random) characteristics. Then there follows directly the question: are there similars of HR for other physical systems, different from quantum microparticles, which also have variables with stochastic characteristics? The answer to the mentioned question is affirmative and it regards macroscopic systems studied in both classical and quantum statistical physics. Here we shall illustrate the respective answer by taking over some ideas from our earlier works (Dumitru, 1974a, 1977, 1988, 1993).
Firstly let us refer to a macroscopic system, consisting of a large number of microparticles, considered in a thermodynamic equilibrium state. In the framework of classical (nonquantum) statistical physics such a system can be approached in terms of: (a) phenomenological (quasithermodynamic) fluctuations theory respectively (b) classical statistical mechanics. In the mentioned approaches the state of the system is described (Landau and Lifchitz, 1984; Zubarev, 1971; Ruppeiner, 1995) by the distribution function $w=w(x)$. The variable $x$ denotes in the two cases: (a) the set of independent macroscopic variables of the system as a whole, respectively (b) the phase space coordinates of the microparticles constituting the system. In both cases a specific variable $A$ characterizing the system is a real stochastic (random) quantity with a continuous spectrum of values which depends on $x$, i.e. $A=A(x)$. The mean (or expected) value of $A(x)$ is given by
\begin{equation} \left\langle A\right\rangle _w=\int_{\Omega _x}A\left( x\right) w\left( x\right) d\Omega _x \eqnum{5.19} \end{equation} where $\Omega _x$ and $d\Omega _x$ denote the accessible, respectively infinitesimal, domains in the $x$-space. Then, in the case of such a macroscopic system, for a set $A_j(j=1,2,...,n)$ of specific variables, the {\it thermal fluctuations} are described by the correlations $C_w\left( A_jA_l\right) $, dispersions $D_wA_j$ and standard deviations $\Delta _wA_j$ given by
\[ C_w\left( A_jA_l\right) =\left\langle \delta _wA_j\delta _wA_l\right\rangle _w,\,\,\,\,\,\,\,\,\,\,\,\,\,\delta _wA_j=A_j-\left\langle A_j\right\rangle _w \] \begin{equation} D_wA_j=C_w\left( A_jA_j\right) ,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\Delta _wA_j= \sqrt{D_wA_j} \eqnum{5.20 } \end{equation} By relations similar to Eqs.(5.3)-(5.5) it is easy to see that the correlations $\left\langle \delta _wA_j\delta _wA_l\right\rangle $ are the elements of a non-negative real matrix. Then similarly with Eq. (5.6) one can write
\begin{equation} \det \left[ \left\langle \delta _wA_j\delta _wA_l\right\rangle _w\right] \geq 0 \eqnum{5.21$_{CR}$} \end{equation} Particularly for two variables $A_1=A$ and $A_2=B$ one obtains
\begin{equation} D_wA\cdot D_wB\geq \left\langle \delta _wA\cdot \delta _wB\right\rangle _w^2 \eqnum{5.22 $_{CR}$} \end{equation} \begin{equation}
\Delta _wA\cdot \Delta _wB\geq \left| \left\langle \delta _wA\cdot \delta _wB\right\rangle _w\right| \eqnum{5.23 $_{CR}$} \end{equation} Equations (5.21)-(5.23) can be called {\it thermal correlation relations}. Some examples of such relations are given below in Sec. VI.K (see also Dumitru, 1974a, 1988, 1993).
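A familiar example of the content of such thermal fluctuation parameters (quoted here only as an illustration, not as one of the relations announced for Sec. VI.K): for the energy $U$ of a system in contact with a thermostat at temperature $T$, classical statistical mechanics gives
\[
D_wU=k\,T^2C_V ,
\]
with $C_V$ the heat capacity, so that the Boltzmann constant $k$ sets the scale of the thermal fluctuations.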
One observes that Eqs. (5.23) and (5.21) are the macroscopic similars of the microscopic HR and of their generalizations defined by Eqs. (2.3)/(4.8)/(5.13), respectively (5.6). Here it must be noted that there are also other macroscopic similars of HR, namely relations from the framework of quantum statistical mechanics. Such relations can be obtained as follows. The state, respectively the specific variables, of a macroscopic system in the mentioned framework are described by the statistical operator (density matrix) $\widehat{\rho }$, respectively by the operators $\widehat{A}_j\,\,(j=1,2,...,n).$ With the expected values defined as $\left\langle A_j\right\rangle _\rho ={\rm Tr}\left( \widehat{A}_j\widehat{\rho }\right) $ the macroscopic correlations $C_\rho \left( A_jA_l\right) ,$ dispersions $D_\rho A_j$ and standard deviations $\Delta _\rho A_j$ are given by
\[ C_\rho \left( A_jA_l\right) =\left\langle \delta _\rho \widehat{A}_j\cdot \delta _\rho \widehat{A}_l\right\rangle _\rho ,\qquad \delta _\rho \widehat{A}_j=\widehat{A}_j-\left\langle A_j\right\rangle _\rho \] \begin{equation} D_\rho A_j=C_\rho \left( A_jA_j\right) ,\qquad \Delta _\rho A_j=\sqrt{D_\rho A_j} \eqnum{5.24} \end{equation} In sufficiently general circumstances (among them the most important being some conditions similar to Eq. (5.8)) the quantities from Eqs. (5.24) satisfy the relations
\begin{equation} \det \left[ \left\langle \delta _\rho \widehat{A}_j\delta _\rho \widehat{A_l} \right\rangle _\rho \right] \geq 0 \eqnum{5.25} \end{equation} \begin{equation}
D_\rho A\cdot D_\rho B\geq \left| \left\langle \delta _\rho A\cdot \delta _\rho B\right\rangle _\rho \right| ^2 \eqnum{5.26 $_{CR}$} \end{equation} \begin{equation}
\Delta _\rho A\cdot \Delta _\rho B\geq \left| \left\langle \delta _\rho A\cdot \delta _\rho B\right\rangle _\rho \right| \eqnum{5.27$_{CR}$} \end{equation} with $\widehat{A}=\widehat{A}_1$, $\widehat{B}=\widehat{A}_2$. From Eq.(5.27) one obtains also the following truncated (less restrictive) relations
\begin{equation} \Delta _\rho A\cdot \Delta _\rho B\geq \frac 12\left\langle \left[ \delta _\rho \widehat{A},\delta _\rho \widehat{B}\right] _{+}\right\rangle _\rho \eqnum{5.28$_{CR}$} \end{equation} \begin{equation}
\Delta _\rho A\cdot \Delta _\rho B\geq \frac 12\left| \left\langle \left[
\widehat{A},\widehat{B}\right] _{-}\right\rangle _\rho \right| \eqnum{5.29 $_{CR}$} \end{equation} The latter relation is exactly a macroscopic similar of HR defined by Eqs. (2.3)/(4.8)/(5.13).
The above discussed relations are unitemporal in the sense that the implied probabilistic parameters (correlations, dispersions, standard deviations) of the stochastic variables $A_j$ are considered at the same moment of time. But it is easy to see that similar relations can be written if the mentioned parameters are taken into account for different time moments. So, if the orbital quantum state of a microparticle is described by the time-dependent wave function $\Psi \left( q,t\right) $, instead of the unitemporal Eq. (5.14) one can write the following bitemporal relation:
\begin{equation}
\Delta _{\Psi _1}A\cdot \Delta _{\Psi _2}B\geq \left| \left( \delta _{\Psi _1}\widehat{A}\Psi _1,\delta _{\Psi _2}\widehat{B}\Psi _2\right) \right| \eqnum{5.30} \end{equation} where $\Psi _1=\Psi \left( q,t_1\right) $ and $\Psi _2=\Psi \left( q,t_2\right) $ with $t_1\neq t_2$.
Another well-known way of introducing the theoretical HR for orbital variables is based on Fourier analysis, as follows. Let $f(x)$ be a continuous and quadratically integrable function in the range $x\in \left( -\infty ,\infty \right) $. Then its Fourier transform $\widetilde{f}\left( k\right) $ is defined by: \begin{equation} \widetilde{f}\left( k\right) =\frac 1{\sqrt{2\pi }}\int_{-\infty }^\infty f\left( x\right) e^{-ikx}dx \eqnum{5.31} \end{equation}
If $\left| f\left( x\right) \right| ^2$ is normalized to unity then, by using the Parseval formula, one can write
\int_{-\infty }^\infty \left| f\left( x\right) \right| ^2dx=\int_{-\infty
}^\infty \left| \widetilde{f}\left( k\right) \right| ^2dk=1 \eqnum{5.32} \end{equation}
Then $\left| f\left( x\right) \right| ^2$ and $\left| \widetilde{f}\left(
k\right) \right| ^2$ can be interpreted as probability densities for the variables $x$ respectively $k$. Consequently the mean (expected) values of functions like $A(x)$ or $B(k)$ are given by \begin{equation} \left\langle A\left( x\right) \right\rangle =\int_{-\infty }^\infty A\left(
x\right) \left| f\left( x\right) \right| ^2dx \eqnum{5.33} \end{equation} \begin{equation} \left\langle B\left( k\right) \right\rangle =\int_{-\infty }^\infty B\left(
k\right) \left| \widetilde{f}\left( k\right) \right| ^2dk \eqnum{5.34} \end{equation} With the above mentioned elements one can demonstrate (De Bruijn, 1967) the following relation \begin{equation} \left\langle \left( x-a\right) ^2\right\rangle \cdot \left\langle \left( k-b\right) ^2\right\rangle \geq \frac 14 \eqnum{5.35} \end{equation} with {\it a} and {\it b} as two arbitrary constants.
One observes that from Eq. (5.35) the HR defined by Eq. (2.2), for the Cartesian coordinate $x$ and momentum $p$, results directly. For such a result one must take $f\left( x\right) $ as the wave function $\Psi \left( x\right) $ and, respectively, $a=\left\langle x\right\rangle $, $b=\left\langle k\right\rangle $ and $k=p/\hbar $.
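The bound in Eq. (5.35) is attained, for instance, by a Gaussian (the explicit profile below is given only as an illustration): taking $f\left( x\right) =\left( \pi \sigma ^2\right) ^{-1/4}\exp \left( -x^2/2\sigma ^2\right) $ one finds
\[
\left\langle x^2\right\rangle =\frac{\sigma ^2}2\,,\qquad \left\langle k^2\right\rangle =\frac 1{2\sigma ^2}\,,
\]
so that the product equals exactly $1/4$; this corresponds to the well-known fact that Gaussian wave functions saturate Eq. (2.2).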
If $f\left( x\right) $ is periodic in $x$, with the period $\Lambda $, or is defined in the range $x\in \left[ x_0,x_0+\Lambda \right] $ with the same values on the boundaries, it satisfies the relation \begin{equation} f\left( x_0\right) =f\left( x_0+\Lambda \right) \eqnum{5.36} \end{equation} Then instead of $\widetilde{f}\left( k\right) $ from Eq. (5.31) we have the Fourier coefficients \begin{equation} \widetilde{f}_n=\frac 1{\sqrt{\Lambda }}\int_{x_0}^{x_0+\Lambda }f\left( x\right) e^{-ik_nx}dx \eqnum{5.37} \end{equation} with $k_n=n\cdot 2\pi /\Lambda $ and $n=0,\pm 1,\pm 2,\dots $ Similarly to Eq. (5.32) we can take: \begin{equation}
\int_{x_0}^{x_0+\Lambda }\left| f\left( x\right) \right|
^2dx=\sum_{n=-\infty }^\infty \left| \widetilde{f}_n\right| ^2=1 \eqnum{5.38} \end{equation}
This means that $\left| f\left( x\right) \right| ^2$ can be interpreted as probability density for the continuous variable $x$ while $\left| \widetilde{
f}_n\right| ^2$ signify the probabilities associated with the discrete variable $k_n.$ In such a case instead of Eqs. (5.33) and (5.34) one must write \begin{equation} \left\langle A\left( x\right) \right\rangle =\int_{x_0}^{x_0+\Lambda
}A\left( x\right) \left| f\left( x\right) \right| ^2dx \eqnum{5.39} \end{equation} \begin{equation}
\left\langle B\left( k\right) \right\rangle =\sum_{n=-\infty }^\infty B\left( k_n\right) \left| \widetilde{f}_n\right| ^2,\,\,\,\,\,\,\,\,k_n=n \frac{2\pi }\Lambda \eqnum{5.40} \end{equation} and instead of Eq. (5.35) one obtains \begin{equation} \left\langle \left( x-a\right) ^2\right\rangle \cdot \left\langle \left(
k-b\right) ^2\right\rangle =\frac 14\left| \left( \Lambda f\left( x_0\right)
-1\right) \right| ^2 \eqnum{5.41$_{CR}$} \end{equation} This latter formula is applicable in some cases for the variables azimuthal angle $\varphi $ and angular momentum $L_z$ (see below the Sec. VI.F). In such cases $f\left( x\right) $ is the periodic wave function $\Psi \left( \varphi \right) $and respectively $a=\left\langle x\right\rangle ,\,b=\left\langle k\right\rangle $ with $x=\varphi ,\,k=L_z/\hbar $ and $ \Lambda =2\pi .$
We end this section with a notification regarding the relations expressed by Eqs.: (5.6), (5.9), (5.11)-(5.14), (5.16)-(5.18), (5.21)-(5.23), (5.25)-(5.29), (5.30), (5.35) and (5.41). From a mathematical viewpoint all the respective relations refer to variables with stochastic characteristics. Also, by their mathematical significances, they belong to the same family of similar formulae which can be called {\it correlation relations} (CR). That is why we added the index CR to the numbers of all the respective relations.
\section{DEFECTS OF\ T I H R}
With the above mentioned facts we can now proceed to present the defects of TIHR. Note that the respective defects appear not as a systematized ensemble but rather as a dispersed set of (relatively) distinct cases. That is why our approach does not aim at a precisely motivated order of exposition. We mostly wish to show that, taken together, the set of the alluded defects irrefutably incriminates all the main elements of TIHR reviewed in Sec. II. Our presentation includes the defects revealed in the following sub-sections:
\subsection{Groundlessness of the term ''uncertainty''}
Through the assertion ${\bf P-2.8/A}$ of TIHR the thought-experimental and theoretical quantities $\Delta _{TE}A$ and $\Delta _\Psi A$ from HR are termed measuring uncertainties. But the respective term appears groundless when confronted with the facts which we present here.
Firstly, note that a minute examination of all thought experiments invoked in connection with Eq. (2.1) does not justify the mentioned term for either of the implied quantities $\Delta _{TE}A$ and $\Delta _{TE}B.$ Thus $\Delta _{TE}p $ (in the coordinate $q$ - momentum $p$ case) and $\Delta _{TE}E$ (in the time $t$ - energy $E$ case) represent the jumps of the respective variable from an initial value (before the measurement) to a final value (after the measurement). Then it results that $\Delta _{TE}p$ and $\Delta _{TE}E$ can not be regarded as uncertainties (i.e. measuring parameters) with respect to the measured state, which is the initial one. This is because (Albertson, 1963): ''it seems essential to the notion of a measurement that it answer a question about the given situation existing before measurement. Whether the measurement leaves the measured system unchanged or brings about a new and different state of that system is a second and independent question''.
The remaining quantities $\Delta _{TE}q$ and $\Delta _{TE}t$ from Eq. (2.1) also fail to justify the term ''measuring uncertainties'' in the sense attributed by TIHR. The respective situation is generated by the facts which will be presented below in Sec. VI.B.
As regards the theoretical quantity $\Delta _\Psi A$ the following observations must be taken into account. $\Delta _\Psi A$ depends only on the theoretical model (wave function $\Psi )$ of the considered microparticle but not on the characteristics of the measurements on the respective microparticle. Particularly note that the value of $\Delta _\Psi A $ can be modified only by changing the microparticle state (i.e. its wave function $\Psi ).$ Comparatively the measuring uncertainties can be modified by improving (or worsening) the performances of the experimental techniques, even if the state of the measured microparticle remains unchanged.
In connection with the term ''uncertainty'' it is also the place here to point out the following remarks. In quantum mechanics a variable $A$ is described by an adequate operator $\widehat{A}$ which (Gudder, 1979) is a generalized stochastic (or random) quantity. The probabilistic (stochastic) characteristics of the considered microparticle are incorporated in its wave function $\Psi .$ Then the expected value $\left\langle A\right\rangle _\Psi $ and the standard deviation $\Delta _\Psi A$ appear as quantities which are exclusively of intrinsic nature for the respective microparticle. In such a situation a measurement must consist of a {\it statistical} {\it sampling} and not of a {\it solitary detection} (determination). The respective sampling gives an output set of data on the recorder of the measuring instrument. From the mentioned data one obtains the out-mean $ \left\langle A\right\rangle _{OUT}$ and the out-deviation $\Delta _{OUT}A=\sqrt{ \left\langle \left( A-\left\langle A\right\rangle _{OUT}\right) ^2\right\rangle _{OUT}}$. Then it results that in fact the measuring uncertainties must be described by means of the differences $\left\langle A\right\rangle _{OUT}-\left\langle A\right\rangle _\Psi $ and $\Delta _{OUT}A-\Delta _\Psi A$, and not through the quantity $\Delta _\Psi A$ alone. (For other comments about measurements regarded in this way see below Sec. IX).
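The comparison of the out-quantities with the intrinsic ones can be illustrated by the following purely schematic sketch (the Gaussian form of the output data, the sample size and the instrumental noise are our own assumptions, introduced only for illustration and not part of the argument):
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)

# Assumed intrinsic characteristics of the measured state (illustrative only)
mean_psi, sigma_psi = 1.0, 0.5          # <A>_Psi and Delta_Psi A
instrument_noise = 0.1                  # extra spread added by the apparatus
n_samples = 10_000                      # size of the statistical sampling

# Output data set recorded by the instrument
data = (rng.normal(mean_psi, sigma_psi, n_samples)
        + rng.normal(0.0, instrument_noise, n_samples))

mean_out = data.mean()                  # <A>_OUT
sigma_out = data.std()                  # Delta_OUT A

# Measuring uncertainties in the sense discussed above: the differences,
# not the intrinsic quantity Delta_Psi A itself
print(mean_out - mean_psi)              # <A>_OUT - <A>_Psi
print(sigma_out - sigma_psi)            # Delta_OUT A - Delta_Psi A
\end{verbatim}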
The above mentioned facts prove the groundlessness of the term ''uncertainty'' in connection with the quantities $\Delta _{TE}A$ and $ \Delta _\Psi A.$ But such a proof must be reported as a defect of TIHR.
{\it Observation}: Sometimes, particularly in older texts, the quantities $ \Delta _{TE}A$ and $\Delta _\Psi A$ are termed the ''indeterminacy'' of the quantum variable $A.$ If such a term is viewed as denoting the ''non-deterministic'' or ''random'' character of $A$, it can be accepted for a natural interpretation of $\Delta _\Psi A.$ So the HR given by Eqs. (2.3)/(4.8)/(5.13) and their generalizations from Eq. (5.6) appear to be proper for a denomination of ''indeterminacy relations''. But then it seems strange for the respective relations to be considered as ''crucial and fundamental formulae'' (as in the TIHR conception). This is because in non-quantum branches of probabilistic sciences an entirely similar ''indeterminacy relation'' is regarded only as a modest and by no means fundamental formula. The alluded non-quantum ''indeterminacy relation'' expresses simply the fact that the correlation coefficient $\left( \gamma _{AB}=\left\langle \delta A\cdot \delta B\right\rangle /\Delta A\cdot \Delta B\right) $ takes values within the range $\left[ -1,1\right] $ (Schilling, 1972; Gellert {\it et al.}, 1975). One can see that such a non-quantum relation is also Eq. (5.23).
\subsection{The nature and the performances of the referred experiments}
As it was shown in Sec. II, one of the major supports of TIHR is the reference to experiments of a ''thought'' nature. In such a reference, by means of ''thought motivations'', the results (ideas) from known real experiments were transplanted into a new context. But such a transplantation seems inadequate for a true scientific acceptance, mainly if the new context is practically and conceptually different from the old one. Also it is known that the alluded acceptance must be founded only on two pillars: (a) concrete data from real and specially designed experiments, and (b) correct, rigorous mathematical (logical) reasoning.
One must add another, less known, observation about the nature of the experiments invoked by TIHR. Practically all the respective experiments are of the ''thought'' type and (Jammer, 1974, p.81) no real experiments capable of attesting (verifying) TIHR with a convincing accuracy are known.
The above presented facts reveal the uselessness, respectively the incorrectness, of the main elements ${\bf P-2.6}$ and ${\bf P-2.8}$ of TIHR. This means that, by their existence, the mentioned facts evidence a defect of TIHR.
The thought experiments invoked by TIHR operate with the limitative criteria included in ${\bf P-4.1}$ and consequently with Eq. (2.1). But, as it was mentioned in Sec. V, today real experiments with super-resolutions which overstep the respective criteria are known. Then it is easy to imagine Super-Resolution Thought Experiments (SRTE) for which, instead of Eq. (2.1), the SRTE relation given by Eq. (5.1) is satisfied. One now observes directly that the existence of the respective SRTE relation incriminates the assertion ${\bf P-2.12/A}$ of TIHR. Such an incrimination must also be reported as a defect of TIHR.
\subsection{Inaccuracy of the referred theoretical formulae}
Among the main supports of TIHR we find the theoretical Eqs. (2.3)/(4.8). But, mathematically, the respective formula is incompletely accurate because it fails if the condition expressed by Eq. (5.8) is not satisfied. The complete accuracy is given by the more general Eqs. (5.7) and (5.14).
The mentioned incompleteness seems to be regarded as entirely unnatural by the TIHR partisans - e.g. when they consider the case of the variables angular momentum $\widehat{L}_z$ and azimuthal angle $\varphi $ (see also Sec. VI.F below). So, instead of the failing Eqs. (2.3)/(4.8), in order to preserve at any price elements like ${\bf P-2.12}$ and ${\bf P-2.13}$, the respective partisans use a strange lot of unnaturally ''adjusted'' relations. But one can easily see that the natural attitude in the alluded case is to refer to Eqs. (5.7)/(5.14) or, equivalently, to Eq. (5.41). Of course, in the discussed case the respective equations degenerate into the trivial equality 0=0, which is evidently in contradiction with the elements ${\bf P-2.12}$ and ${\bf P-2.13}$ of TIHR.
Then one finds that, one way or another, TIHR is incompatible with the complete accuracy of the invoked theoretical equations. This fact constitutes a notable defect of TIHR.
\subsection{Solitary variables}
By ${\bf P-2.10/A;M}$ TIHR states that in a measurement of a solitary variable the quantity $\Delta A$ can be taken as unlimitedly small. But if $ \Delta A$ is identified with $\Delta _\Psi A$ the respective quantity has a precisely defined value (dependent on the wave function $\Psi $ describing the state of the considered microparticle) which can not be diminished in a measurement. Then it results that ${\bf P-2.10/A;M}$ is incorrect. The respective result points to another defect of TIHR.
\subsection{Commutable variables}
The main idea of TIHR about commutable variables is asserted in ${\bf P-2.11/A;M.}$ It is based on the fact that in Eq. (2.3) the product of the corresponding quantities $\Delta _\Psi A$ and $\Delta _\Psi B$ does not have a non-null lower bound. But besides Eq. (2.3), the respective product also satisfies Eq. (5.12), where the term $\left| \left\langle \left[ \delta _\Psi \widehat{A},\delta _\Psi \widehat{B}\right] _{+}\right\rangle _\Psi \right| $ can be a non-null quantity. In this respect we quote the following example (Dumitru, 1988):
Consider a quantum microparticle moving in a two-dimensional potential well, characterized by the potential energy $V=0$ for $0<x<a,$ $0<y<b$ and $V=\infty $ otherwise. The corresponding wave functions are
\begin{equation} \Psi _{n_1n_2}=\frac 2{\sqrt{ab}}\sin \left( \frac{n_1\pi }ax\right) \sin \left( \frac{n_2\pi }by\right) \left( n_1,n_2=1,2,3.....\right) \eqnum{6.1} \end{equation}
As two commutable variables $A$ and $B$ with $\left| \left\langle \left[ \delta _\Psi \widehat{A},\delta _\Psi \widehat{B}\right] _{+}\right\rangle _\Psi \right| \neq 0$ we consider the Cartesian coordinates $x\acute{}$ and $y\acute{}$ given by
\[ \widehat{A}=x\acute{}=x\cos \varphi +y\sin \varphi \] \begin{equation} \widehat{B}=y\acute{}=x\sin \varphi -y\cos \varphi \eqnum{6.2} \end{equation} with $0<\varphi <\frac \pi 2.$ For the case specified by Eqs. (6.1) and (6.2) one obtains:
\[ \Delta _\Psi A=\left[ \frac{a^2}{12}\left( 1-\frac 6{\pi ^2n_1^2}\right) \cos ^2\varphi +\frac{b^2}{12}\left( 1-\frac 6{\pi ^2n_2^2}\right) \sin ^2\varphi \right] ^{1/2} \] \begin{equation} \Delta _\Psi B=\left[ \frac{a^2}{12}\left( 1-\frac 6{\pi ^2n_1^2}\right) \sin ^2\varphi +\frac{b^2}{12}\left( 1-\frac 6{\pi ^2n_2^2}\right) \cos ^2\varphi \right] ^{1/2} \eqnum{6.3} \end{equation} \[ \left| \left\langle \left[ \delta _\Psi \widehat{A},\delta _\Psi \widehat{B}\right] _{+}\right\rangle _\Psi \right| =\left[ 2\sin 2\varphi \left| \frac{a^2-b^2}{12}-\frac 6{\pi ^2}\left( \frac 1{n_1^2}-\frac 1{n_2^2}\right) \right| \right] ^{1/2} \] Then it results that in the mentioned case Eq. (5.12) is satisfied with a non-null quantity in the right-hand term. But such a result shows that the main idea of TIHR about commutable variables is doubtful. So we find another defect of TIHR.
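For orientation, the spreads of Eqs. (6.3) and the non-null correlation term invoked above can be cross-checked numerically. In the sketch below the well dimensions, the quantum numbers and the rotation angle are illustrative choices of ours; since $x\acute{}$ and $y\acute{}$ are commuting multiplicative operators, their correlation reduces to an ordinary covariance:
\begin{verbatim}
import numpy as np

a, b = 1.0, 2.0          # illustrative well dimensions
n1, n2 = 1, 2            # illustrative quantum numbers
phi = np.pi / 6.0        # illustrative rotation angle

def moments(L, n):
    """<x> and Var(x) for the 1D well state sqrt(2/L)*sin(n*pi*x/L)."""
    x = np.linspace(0.0, L, 20001)
    rho = (2.0 / L) * np.sin(n * np.pi * x / L) ** 2
    m1 = np.trapz(x * rho, x)
    m2 = np.trapz(x ** 2 * rho, x)
    return m1, m2 - m1 ** 2

_, var_x = moments(a, n1)
_, var_y = moments(b, n2)

# Rotated coordinates of Eq. (6.2); x and y are statistically independent here
var_A = np.cos(phi) ** 2 * var_x + np.sin(phi) ** 2 * var_y
var_B = np.sin(phi) ** 2 * var_x + np.cos(phi) ** 2 * var_y
cov_AB = np.sin(phi) * np.cos(phi) * (var_x - var_y)   # covariance of A and B

# Comparison with the first closed form of Eq. (6.3)
closed_var_A = ((a**2 / 12) * (1 - 6 / (np.pi**2 * n1**2)) * np.cos(phi)**2
                + (b**2 / 12) * (1 - 6 / (np.pi**2 * n2**2)) * np.sin(phi)**2)
print(np.isclose(var_A, closed_var_A))                 # True

# Product of spreads exceeds the non-null covariance (Cauchy-Schwarz),
# in the spirit of the non-null right-hand term of Eq. (5.12)
print(np.sqrt(var_A * var_B) >= abs(cov_AB), cov_AB != 0.0)
\end{verbatim}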
\subsection{The ${\bf L}_z{\bf -\varphi }$ case}
The situation of the variables $L_z$ and $\varphi $ (z-component of angular momentum and azimuthal angle) represents one of the most controversial cases in connection with TIHR.
Firstly, it was noted that the respective variables are canonically conjugated, their quantum operators being $\widehat{L}_z=-i\hbar \frac \partial {\partial \varphi }$ and $\widehat{\varphi }=\varphi \cdot .$ Then it was supposed that for the $L_z{\bf -\varphi }$ case TIHR must be applicable in the same way as for other pairs of conjugated variables - e.g. for $ p_x $ and $x$ (Cartesian momentum and coordinate). Consequently the following ordinary $L_z{\bf -\varphi }$ theoretical HR was expected
\begin{equation} \Delta _\Psi L_z\cdot \Delta _\Psi \varphi \geq \frac \hbar 2 \eqnum{6.4} \end{equation} There are also known attempts (Kompaneyets, 1966) to introduce a thought-experimental $L_z{\bf -\varphi }$ relation of the form
\begin{equation} \Delta _{TE}L_z\cdot \Delta _{TE}\varphi \thickapprox \hbar \eqnum{6.5} \end{equation} which is similar to the $p_x-x$ version of Eq. (2.1). But note that in fact Eq. (6.5) is only an undisguised conversion of Eq. (2.1).
Secondly, for the $L_z{\bf -\varphi }$ case the inapplicability of TIHR in its usual form (presented in Sec. II) was remarked. Such remarks were signalled in a lot of debating works (Judge, 1963; Judge and Lewis, 1963; Judge, 1964; Bouten {\it et al}, 1965; Evett and Mahmoud, 1965; Krauss, 1965, 1968; Carruthers and Nieto, 1968; Levy-Leblond, 1976; Harris and Strauss, 1978; Hasse, 1980; Holevo, 1981; Yamada, 1982; Galitski {\it et al., } 1985). The mentioned inapplicability regards mainly the HR from Eq. (6.4) applied to some known systems such as: an atomic electron, a rotator or a bead sliding on a ring. Note that by their properties all the respective systems are $\varphi $-circular (or azimuthally finite). This means that for them: (i) $\varphi \in \left[ 0,2\pi \right] ,$ (ii) the states with $ \varphi =0$ and $\varphi =2\pi $ coincide and (iii) the states with $\varphi \gtrsim 0$ and $\varphi \lesssim 2\pi $ are closely adjacent. Moreover, for the mentioned systems one considers only the states which are nondegenerate with respect to $L_z$. This means that each such state corresponds to a distinct (eigen-)value of $L_z.$ The alluded states are described by the wave functions: \begin{equation} \Psi _m\left( \varphi \right) =\frac 1{\sqrt{2\pi }}e^{im\varphi }\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,(m=0,\pm 1,\pm 2,\dots ) \eqnum{6.6} \end{equation} Then, for the demarcated states, one finds $\Delta _\Psi L_z=0$, $\Delta _\Psi \varphi =\pi /\sqrt{3}$ and (6.4) gives the absurd result $0\geq \hbar /2.$ Such a result drives TIHR into an evident deadlock.
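The values just quoted can be checked by the following short numerical sketch (the grid and the particular value of $m$ are our illustrative choices):
\begin{verbatim}
import numpy as np

phi = np.linspace(0.0, 2.0 * np.pi, 20001)
m = 3                                        # any integer gives the same spread
psi = np.exp(1j * m * phi) / np.sqrt(2.0 * np.pi)
rho = np.abs(psi) ** 2                       # = 1/(2*pi), independent of m

mean_phi = np.trapz(phi * rho, phi)
var_phi = np.trapz(phi ** 2 * rho, phi) - mean_phi ** 2

print(np.sqrt(var_phi), np.pi / np.sqrt(3.0))   # Delta_Psi(phi) = pi/sqrt(3)
# Psi_m is an eigenfunction of L_z with eigenvalue m*hbar, so Delta_Psi(L_z) = 0
# and the ordinary relation (6.4) would demand the absurd result 0 >= hbar/2.
\end{verbatim}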
For avoiding the alluded deadlock the TIHR partisans advocated the following idea: in the $L_z{\bf -\varphi }$ case the theoretical HR must not have the ordinary form of Eq. (6.4) but an ''adjusted version'', concordant with the TIHR vision. Thus the following lot of ''adjusted $L_z{\bf -\varphi }$ HR'' was invented:
\begin{equation} \frac{\Delta _\Psi L_z\cdot \Delta _\Psi \varphi }{1-3\left( \Delta _\Psi \varphi /\pi \right) ^2}\geq 0.16\hbar \eqnum{6.7} \end{equation} \begin{equation} \frac{\left( \Delta _\Psi L_z\right) ^2\cdot \left( \Delta _\Psi \varphi \right) ^2}{1-\left( \Delta _\Psi \varphi \right) ^2}\geq \frac{\hbar ^2}4 \eqnum{6.8} \end{equation} \begin{equation} \left( \Delta _\Psi L_z\right) ^2+\left( \frac{\hbar \omega }2\right) ^2\left( \Delta _\Psi \varphi \right) ^2\geq \frac{\hbar ^2}2\left[ \left( \frac 9{\pi ^2}+\omega ^2\right) ^{1/2}-\frac 3{\pi ^2}\right] \eqnum{6.9} \end{equation} \begin{equation} \frac{\Delta _\Psi L_z\cdot \Delta _\Psi \varphi }{1-3\left( \Delta _\Psi \varphi /\pi \right) ^2}\geq \hbar \frac 23\left( \frac{V_{\min }}{V_{\max }} \right) \eqnum{6.10} \end{equation} \begin{equation} \left( \Delta _\Psi L_z\right) ^2\cdot \left\langle \left( \delta _\Psi f\left( \varphi \right) \right) ^2\right\rangle _\Psi \geq \frac{\hbar ^2}4 \left\langle \frac{df}{d\varphi }\right\rangle _\Psi ^2 \eqnum{6.11} \end{equation} \begin{equation} \Delta _\Psi L_z\cdot \Delta _\Psi \varphi _1\geq \frac \hbar 2 \eqnum{6.12} \end{equation} \begin{equation}
\Delta _\Psi L_z\cdot \Delta _\Psi \varphi \geq \frac \hbar 2\left|
\left\langle E\left( \varphi \right) \right\rangle _\Psi \right| \eqnum{6.13} \end{equation} \begin{equation}
\Delta _\Psi L_z\cdot \Delta _\Psi \varphi \geq \frac \hbar 2\left| 1-2\pi
\left| \Psi \left( 2\pi \right) \right| ^2\right| \eqnum{6.14} \end{equation} In Eq. (6.9) $\omega $ is a real nonnegative parameter. In Eq. (6.10) $V_{\min }$ and $V_{\max }$ represent the minimum, respectively the maximum, values of $V\left( \theta \right) =\int_{-\pi }^\pi \theta \left| \Psi \left( \varphi +\theta \right) \right| ^2d\varphi ,$ where $\theta \in \left[ -\pi ,\pi \right] .$ In Eq. (6.11) $f\left( \varphi \right) $ is a real periodic function of $\varphi $, e.g. $f\left( \varphi \right) =\sin \varphi $ or $f\left( \varphi \right) =\cos \varphi $, and $\delta _\Psi f=f-\left\langle f\right\rangle _\Psi .$ In Eq. (6.12) $\varphi _1=\varphi +2\pi N,\,\Delta _\Psi \varphi _1=\left[ 2\pi ^2\left( \frac 1{12} +N^2-N_1^2+N-N_1\right) \right] ^{1/2}$ and $N,N_1$ are two arbitrary integer numbers with $N\neq N_1$. In Eq. (6.13) $E\left( \varphi \right) $ is a complicated expression of $\varphi .$
Connected with Eqs. (6.7)-(6.14) and the afferent TIHR debates, the following facts are easily observed. From a subjective view, in the TIHR literature, none of the Eqs. (6.7)-(6.14) is unanimously accepted as the true version of the theoretical $L_z{\bf -\varphi }$ HR. From an objective view Eqs. (6.7)-(6.14) appear as a set of completely dissimilar formulae. This is because they are not mutually equivalent and each of them is applicable only in particular and different ''circumstances''. Moreover it is doubtful that in the cases of Eqs. (6.7)-(6.13) the respective ''circumstances'' have in fact real physical significance. Another objective aspect is the fact that Eqs. (6.7)-(6.13) have no correct support in the natural (non-adjusted) mathematical formalism of quantum mechanics. Only Eq. (6.14) has such a support, through Eq. (5.41). The alluded observations evince clearly the persistence of the TIHR deadlock as regards the $L_z{\bf -\varphi }$ case. But in spite of the mentioned evidence, nowadays almost all of the publications seem to cultivate the belief that the problems of the $L_z{\bf -\varphi }$ case are solved by the adjusted Eqs. (6.7)-(6.14). In the TIHR literature the mentioned belief is often associated with a more inciting opinion. According to the respective opinion the ordinary theoretical HR expressed by Eq. (6.4) is incorrect for any physical system and, consequently, the respective relation must be prohibited. Curiously, through the alluded association, TIHR partisans seem to ignore the thought-experimental Eq. (6.5). But note that the simple removal of the respective ignorance can not solve the TIHR deadlock regarding the $L_z{\bf -\varphi }$ case. Moreover, such a removal is detrimental for TIHR because Eq. (6.5) is only a conversion of Eq. (2.1), which is an unjustified relation (see Sec. VI.B).
As regards the TIHR attitude towards Eq. (6.4) there is another curious ignorance/omission. In the afferent literature any discussion of the $L_z$-degenerate states of the circular systems is omitted. Such a state is associated with a set of eigenvalues of $\widehat{L}_Z$ and it is described by a linear superposition of eigenfunctions of $\widehat{L}_Z$. As an example one can take the state of a free rigid rotator with a given energy $ E_l=\hbar ^2l(l+1)/2J$ ($l$ = orbital quantum number, $J$ = moment of inertia). The respective state is described by the wave function \begin{equation} \Psi _l\left( \theta ,\varphi \right) =\sum_{m=-l}^lC_mY_{lm}\left( \theta ,\varphi \right) \eqnum{6.15} \end{equation} where $Y_{lm}\left( \theta ,\varphi \right) $ are the spherical harmonics, $l$ and $m$ denote the orbital, respectively magnetic, quantum numbers while $C_m$ are arbitrary complex constants which satisfy the condition $\sum_m\left| C_m\right| ^2=1.$ With respect to the wave function given in Eq. (6.15), for the operators $\widehat{L}_Z=-i\hbar \frac \partial {\partial \varphi }$ and $\widehat{\varphi }=\varphi $ one obtains \begin{equation}
\left( \Delta _\Psi L_Z\right) ^2=\sum_m\left| C_m\right| ^2\hbar ^2m^2-\left( \sum_m\left| C_m\right| ^2\hbar m\right) ^2 \eqnum{6.16} \end{equation} \[ \left( \Delta _\Psi \varphi \right) ^2=\sum_m\sum_{m\acute{}}C_m^{*}C_{m \acute{}}\left( Y_{lm},\varphi ^2Y_{lm\acute{}}\right) \] \begin{equation} -\left[ \sum_m\sum_{m\acute{}}C_m^{*}C_{m\acute{}}\left( Y_{lm},\varphi Y_{lm \acute{}}\right) \right] ^2 \eqnum{6.17} \end{equation} With the expressions for $\Delta _\Psi L_Z$ and $\Delta _\Psi \varphi $ given by Eqs. (6.16) and (6.17) it is possible for the HR from Eq. (6.4) to be satisfied (for more details see the discussion of Eq. (8.3) in Sec. VIII below). But surprisingly such a possibility was not examined by the TIHR partisans, who persist in the opinion that Eq. (6.4) must be prohibited as incorrect for any physical situation. We think that such an attitude has to be considered as a defect of the TIHR doctrine.
Contrary to the opinion of the TIHR partisans about the HR given by Eq. (6.4), it is easy to see (Dumitru, 1988, 1991) that the respective relation remains rigorously valid at least in the case of one quantum system. The respective system is a Quantum Torsion Pendulum (QTP) oscillating around the z-axis. Such a QTP is completely analogous to the well-known (recti)linear oscillator.
The states of the QTP are described by the wave functions: \begin{equation} \Psi _n\left( \varphi \right) =\Psi _n\left( \xi \right) =\left( \frac{ J\omega }{\pi \hbar }\right) ^{1/4}\frac 1{\sqrt{2^nn!\sqrt{\pi }}}e^{-\frac{ \xi ^2}2}H_n\left( \xi \right) \eqnum{6.18} \end{equation} with $\xi =\varphi \cdot \sqrt{J\omega /\hbar }$, $\varphi $ = azimuthal angle, $J$ = moment of inertia, $\omega $ = angular frequency, $n=0,1,2,\dots $ = the oscillation quantum number and $H_n\left( \xi \right) =\left( -1\right) ^n\left( e^{\xi ^2}\right) \cdot \left( d^ne^{-\xi ^2}/d\xi ^n\right) $ are the Hermite polynomials. By its properties, the QTP is $\varphi $-torsional ($ \varphi $-non-circular or azimuthally infinite). This means that for it: (i) $\varphi \in \left( -\infty ,\infty \right) $, (ii) the states with $ \varphi =0$ and $\varphi =2\pi $ do not coincide and (iii) the states with $ \varphi \gtrsim 0$ and $\varphi \lesssim 2\pi $ are not closely adjacent.
In the case of the QTP, for the variables $L_z$ and $\varphi $, described by the operators $\widehat{L}_z=-i\hbar \frac \partial {\partial \varphi }$ and $ \widehat{\varphi }=\varphi \cdot $, one obtains the expressions:
\begin{equation} \Delta _\Psi L_z=\sqrt{\hbar J\omega \left( n+\frac 12\right) } \,\,\,\,,\,\,\,\,\,\,\Delta _\Psi \varphi =\sqrt{\frac \hbar {J\omega } \left( n+\frac 12\right) } \eqnum{6.19} \end{equation} With these expressions one finds that for the QTP the $L_z{\bf -\varphi }$ theoretical HR is satisfied in the ordinary/common form given by Eq. (6.4). So the existence of the QTP example invalidates the above mentioned opinion of the TIHR partisans about the HR from Eq. (6.4). The alluded invalidation makes the deadlock of TIHR with respect to the $L_z{\bf -\varphi }$ case even deeper.
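Explicitly (a one-line check which we add for clarity), multiplying the two expressions of Eq. (6.19) gives
\[
\Delta _\Psi L_z\cdot \Delta _\Psi \varphi =\sqrt{\hbar J\omega \left( n+\frac 12\right) }\cdot \sqrt{\frac \hbar {J\omega }\left( n+\frac 12\right) }=\hbar \left( n+\frac 12\right) \geq \frac \hbar 2
\]
for every $n=0,1,2,\dots $, so that Eq. (6.4) is indeed satisfied in its ordinary form.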
All the above mentioned deadlocks of the TIHR doctrine in connection with the pair $L_z{\bf -\varphi }$ must be reported as indubitable defects of the respective doctrine.
\subsection{The N-${\bf \Phi }$ case}
Another case which drove TIHR into deadlock is that of the pair $N-\Phi $ (number-phase) connected with the study of the quantum oscillator. $N$ represents the quantum oscillation number, described by the operator $ \widehat{N}=\widehat{a}^{+}\widehat{a}$ (where $\widehat{a}^{+}$ and $ \widehat{a}$ are the known ladder operators), while $\Phi $ is taken as the variable conjugated with $N.$ Often, if the oscillator is considered as radiative, $N$ and $\Phi $ are regarded as the number, respectively the phase, of the radiated particles (photons or phonons). In the $\Phi $-representation we have
\begin{equation} \widehat{N}=i\frac \partial {\partial \Phi }\,\,\,\,,\,\,\,\widehat{\Phi } =\Phi \cdot \,,\,\,\,\,\,\,\,\,\,\,\left[ \widehat{N},\widehat{\Phi }\right] _{-}=i \eqnum{6.20} \end{equation} Note that in the mentioned representation the states under consideration are $\Phi $-circular (in a similar way to the $\varphi $-circular states discussed above in connection with the $L_z{\bf -\varphi }$ case). The respective states are described by the wave functions
\begin{equation} \Psi _N\left( \Phi \right) =\frac 1{\sqrt{2\pi }}e^{iN\Phi }\,\,\,\,\,\,\,\,\,\,\,\,\,\,(N=0,1,2,\dots ) \eqnum{6.21} \end{equation} For the $N-\Phi $ case the TIHR doctrine requires that, through Eq. (2.3), one should have the ordinary relation:
\begin{equation} \Delta _\Psi N\cdot \Delta _\Psi \Phi \geq \frac 12 \eqnum{6.22} \end{equation} But it is easy to see that this relation is incorrect because with (6.20) and (6.21) one obtains $\Delta _\Psi N=0$ and $\Delta _\Psi \Phi =\pi /\sqrt{ 3}.$
The incorrectness of Eq. (6.22) drives TIHR into another deadlock. With the aim of avoiding the respective deadlock the TIHR partisans promoted the idea of replacing Eq. (6.22) with some adjusted relations, concordant with the TIHR doctrine. So in the literature (Fain and Khanin, 1965; Carruthers and Nieto, 1968; Davydov, 1973; Opatrny, 1995; Lindner et al., 1996) a few adjusted relations were promoted, such as:
\begin{equation} \Delta _\Psi N\cdot \Delta _\Psi C\geq \frac 12\left\langle S\right\rangle \eqnum{6.23} \end{equation} \begin{equation} \Delta _\Psi N\cdot \Delta _\Psi S\geq \frac 12\left\langle C\right\rangle \eqnum{6.24} \end{equation} \begin{equation} \left( \Delta _\Psi \Phi \right) ^2\cdot \left[ \left( \Delta _\Psi N\acute{} \right) ^2+\left( \left\langle \widehat{N}\acute{}\right\rangle _\Psi +\frac 12\right) -\left\langle \widehat{L}\acute{}_z\right\rangle \right] \geq \frac{\hbar ^2}4\left[ 1-\frac 3{\pi ^2}\left( \Delta _\Psi \Phi \right) ^2\right] \eqnum{6.25} \end{equation} The new quantities appearing in Eqs.(6.23)-(6.25) are defined through the relations:
\begin{equation} \widehat{C}=\frac 12\left( \widehat{E}_{-}+\widehat{E}_{+}\right) \,\,\,\,,\,\,\,\,\,\,\,\,\,\,\,\,\widehat{S}=\frac 1{2i}\left( \widehat{E} _{-}-\widehat{E}_{+}\right) \eqnum{6.26.a} \end{equation} \begin{equation} \widehat{E}_{-}=\left( N+1\right) ^{-1/2}\widehat{a}\,\,\,\,\,\,\,\,\,\,,\,\, \,\,\,\,\,\widehat{E}_{+}=\widehat{a}^{+}\left( N+1\right) ^{-1/2} \eqnum{6.26.b} \end{equation} \begin{equation} \widehat{N}\acute{}=\widehat{I}\widehat{L}\acute{}_z-\frac 12 \,\,\,\,\,\,\,\,\,\,,\,\,\,\,\,\,\,\,\widehat{L}\acute{}_z=-i\hbar \frac \partial {\partial \Phi } \eqnum{6.27} \end{equation} Here it is worth noting the following observations. The replacement of the ordinary Eq. (6.22) with the adjusted Eqs. (6.23)-(6.25) is only a redundant mathematical operation without any true utility for physics. This happens because for the interests of physics the quantities of real utility are the observables $N$ and $\Phi $, not the above mentioned adjusted quantities $ N\acute{},C$ or $S$. So if the interests of physics are connected with the particles (photons or phonons) radiated by quantum oscillators, the real measuring instruments are counters, respectively polarizers. Such instruments measure directly $N$ and $\Phi $, not $N\acute{},C$ or $S.$ So the measuring uncertainties, appealed to by TIHR as corner-stone pieces, must regard $N$ and $\Phi $, not $N\acute{},C$ or $S$.
The above noted observations show that relations like Eqs. (6.23)-(6.24) (or other adjusted formulae) do not solve the nonconformity of the pair $ N-\Phi $ with the TIHR doctrine. The respective nonconformity remains an open question which can not be solved by means of the inner elements of TIHR. This means that the $N-\Phi $ case appears as an irrefutable defect of TIHR.
\subsection{The energy - time case}
The pair energy E - time t was and still is the subject of many debates in the literature (Aharonov and Bohm, 1961, 1964; Fock, 1962; Alcook, 1969; Bunge, 1970; Fujivara, 1970; Surdin, 1973; Kijovski, 1974; Bauer and Mello, 1978; Voronstov, 1981; Kobe and Aquilera-Navaro, 1994). The respective debates originate in the following facts:
On the one hand $E$ and $t$ are considered as (canonically) conjugated variables whose ordinary operators $\widehat{E}=i\hbar \frac \partial { \partial t}$ and $\widehat{t}=t\cdot $ satisfy the commutation relation $ \left[ \widehat{E},\widehat{t}\right] =i\hbar .$ Then for these variables the theoretical HR expressed by Eq. (2.3) should take the ordinary form \begin{equation} \Delta _\Psi E\cdot \Delta _\Psi t\geq \frac \hbar 2 \eqnum{6.28} \end{equation}
On the other hand, as $t$ is not a random but a deterministic variable, $ \Delta _\Psi t\equiv 0$ for any state (wave function). Moreover the energy is described by the Hamiltonian operator $\widehat{H}$ and then $\Delta _\Psi E=\Delta _\Psi H.$ Now $\Delta _\Psi H$ is either a null quantity (in the case of stationary states which are pure eigenstates of $\widehat{H}$) or a non-null but finite quantity (in the case of nonstationary states or of stationary ones which are mixtures of eigenstates of $\widehat{H}$). Then one finds that in fact for the pair $E-t$ the theoretical HR given by Eqs. (2.3)/(6.28) degenerates into the absurd result $0\geq \left( \hbar /2\right) .$ With such a result the $E-t$ case radically deviates from the TIHR stipulations. For avoiding the respective deviation the TIHR partisans invented a lot of adjusted $E-t$ relations destined to replace Eq. (6.28) and to remain concordant with the alluded stipulations. Most of the mentioned relations have the following generic form: \begin{equation} \Delta _vE\cdot \Delta _vt\geq \frac \hbar 2 \eqnum{6.29} \end{equation} where $\Delta _vE$ and $\Delta _vt$ have various significances such as: (i) $ \Delta _1E=$ the line breadth characterizing the decay of an excited quantum state and $\Delta _1t=$ the lifetime of the respective state, (ii) $ \Delta _2E=\hbar \Delta _2\omega $ = the energetic width of a wave packet and $\Delta _2t$ = the temporal width of the packet (here Eq. (6.29) is introduced by means of Eq. (5.35) with $x=t$ and $k=\omega =E/\hbar $), (iii) $\Delta _3E=\Delta _\Psi H$ and $\Delta _3t=\Delta _\Psi A\cdot \left( d\left\langle A\right\rangle _\Psi /dt\right) ^{-1}$ with $A$ an arbitrary variable.
Other substitutions of Eq. (6.28) were adjusted by means of some strange ideas such as
(i) to take
\begin{equation} \Delta t=\left[ \left\langle t^2\right\rangle -\left\langle t\right\rangle ^2\right] ^{1/2} \eqnum{6.30} \end{equation} with
\begin{equation}
\left\langle t^n\right\rangle =\frac{\int_{-\infty }^\infty t^n\left| \Psi
\left( q,t\right) \right| ^2dt}{\int_{-\infty }^\infty \left| \Psi \left(
q,t\right) \right| ^2dt} \eqnum{6.31} \end{equation}
(ii) to fabricate a time (or tempus) operator $\widehat{T}$ capable of satisfying the commutation relation:
\begin{equation} \left[ \widehat{T},\widehat{E}\right] =i\hbar \eqnum{6.32} \end{equation} In connection with the above mentioned substitutes for Eq. (6.28) the following major shortcomings must be noted: (i) Eqs. (6.29)-(6.32) do not result from the standard mathematical procedures specific to the true theoretical HR (as presented in Sections III, IV and V), (ii) the variety of significances for $\Delta _vE$ and $\Delta _vt$ from Eq. (6.29) still generates persistent disputes among TIHR partisans, (iii) none of the substitution alternatives for Eq. (6.28) has been ratified so far by natural and credible arguments.
The above notifications together with the presented observations about Eq. (6.28) clearly disclose an important and persistent defect of TIHR.
\subsection{Bitemporal relations}
The bitemporal relation given by Eq. (5.30) facilitates the detection of another defect of TIHR. If in the respective relation one takes $\left| t_2-t_1\right| \rightarrow \infty $, in the TIHR vision the quantities $ \Delta _{\Psi 1}A$ and $\Delta _{\Psi 2}B$ refer to two variables $A$ and $B$, each of them considered as solitary. In such a case the TIHR assertion ${\bf P-2.10/A}$ claims that both $\Delta _{\Psi 1}A$ and $\Delta _{\Psi 2}B$ should be boundlessly small quantities. But from Eq. (5.30) one can see that such a claim is refuted in the cases when $\left( \delta _{\Psi _1}\widehat{A }\Psi _1,\,\delta _{\Psi _2}\widehat{B}\Psi _2\right) \neq 0$. An example of such a case can be provided as follows.
For a QTP the time-dependent wave functions describing its states are
\begin{equation} \Psi _{nt}=\Psi _n\left( \xi ,t\right) =\exp \left( \frac{-iE_n}\hbar t\right) \Psi _n\left( \xi \right) \eqnum{6.33} \end{equation} where $\Psi _n\left( \xi \right) $ are given by Eq. (6.18) and $E_n=\hbar \omega \left( n+\frac 12\right) .$ Then with $\widehat{A}=\widehat{L}_z$ and $\widehat{B}=\widehat{\varphi }$ Eq. (5.30) becomes
\begin{equation}
\Delta _{\Psi _{nt_1}}L_z\cdot \Delta _{\Psi _{nt_2}}\varphi \geq \left|
\left( \delta _{\Psi _{nt_1}}\widehat{L}_z\Psi _{nt_1},\,\delta _{\Psi _{nt_2}}\widehat{\varphi }\Psi _{nt_2}\right) \right| \eqnum{6.34} \end{equation} with
\begin{equation} \Delta _{\Psi _{nt_1}}L_z=\sqrt{\hbar J\omega \left( n+\frac 12\right) } ,\,\,\,\,\Delta _{\Psi _{nt_2}}\varphi =\sqrt{\frac \hbar {J\omega }\left( n+ \frac 12\right) }\, \eqnum{6.35} \end{equation} \begin{equation} \left( \delta _{\Psi _{nt_1}}\widehat{L}_z\Psi _{nt_1},\delta _{\Psi _{nt_2}} \widehat{\varphi }\Psi _{nt_2}\right) =-\frac{i\hbar }2\exp \left\{ -i\omega \left( n+\frac 12\right) \left( t_2-t_1\right) \right\} \eqnum{6.36} \end{equation}
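Combining Eqs. (6.35) and (6.36) (an explicit evaluation added here for clarity), the product on the left of Eq. (6.34) and the modulus on its right are
\[
\Delta _{\Psi _{nt_1}}L_z\cdot \Delta _{\Psi _{nt_2}}\varphi =\hbar \left( n+\frac 12\right) ,\,\,\,\,\,\,\,\,\left| \left( \delta _{\Psi _{nt_1}}\widehat{L}_z\Psi _{nt_1},\delta _{\Psi _{nt_2}}\widehat{\varphi }\Psi _{nt_2}\right) \right| =\frac \hbar 2
\]
so Eq. (6.34) is satisfied with a non-null right-hand term for any $\left| t_2-t_1\right| $, contrary to the claim of ${\bf P-2.10/A}$.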
The above mentioned refutation of TIHR implied by the bitemporal relations given by Eqs. (5.30)/(6.34) must be noted as a clear defect of TIHR. Various attempts (Levy-Leblond, 1967; Ghanapragasam and Srinivas, 1979) at extrapolating the TIHR vision onto relations of the Eq. (5.30) type seem to us to be without any real physical foundation.
\subsection{Multivariable relations}
In Sec. V we have shown that the multivariable relations from Eqs. (5.6) belong to the same family of theoretical formulae as the primary HR given by Eqs. (2.2)/(2.3). For an example of such a multivariable relation we can consider the QTP described by (6.18) and the following set of three variables: $A_1=L_z$,\thinspace $A_2=\varphi $ and $A_3=T=L_z^2/2J$ = the kinetic energy. Then from Eq. (5.6) one obtains:
\[ \left( \Delta _\Psi L_z\right) ^2\cdot \left( \Delta _\Psi \varphi \right) ^2\cdot \left( \Delta _\Psi T\right) ^2\geq \left( \Delta _\Psi L_z\right)
^2\cdot \left| \left( \delta _\Psi \widehat{\varphi }\Psi ,\delta _\Psi
\widehat{T}\Psi \right) \right| ^2+ \]
\begin{equation}
+\left( \Delta _\Psi \varphi \right) ^2\cdot \left| \left( \delta _\Psi
\widehat{T}\Psi ,\delta _\Psi \widehat{L}_z\Psi \right) \right| ^2+\left(
\Delta _\Psi T\right) ^2\cdot \left| \left( \delta _\Psi \widehat{L}_z\Psi
,\delta _\Psi \widehat{\varphi }\Psi \right) \right| ^2- \eqnum{6.37} \end{equation}
\[ -2
\mathop{\rm Re}
\left\{ \left( \delta _\Psi \widehat{L}_z\Psi ,\delta _\Psi \widehat{\varphi }\Psi \right) \cdot \left( \delta _\Psi \widehat{\varphi }\Psi ,\delta _\Psi \widehat{T}\Psi \right) \cdot \left( \delta _\Psi \widehat{T}\Psi ,\delta _\Psi \widehat{L}_z\Psi \right) \right\} \]
where $\mathop{\rm Re} f$ denotes the real part of $f$, $\Delta _\Psi L_z$ and $\Delta _\Psi \varphi $ are given by Eq. (6.19), and
\[ \Delta _\Psi T=\frac{\hbar \omega }2\sqrt{\frac{n^2+n+1}2}, \,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\left( \delta _\Psi \widehat{L}_z\Psi ,\delta _\Psi \widehat{\varphi }\Psi \right) =-i\frac \hbar 2 \] \begin{equation} \left( \delta _\Psi \widehat{\varphi }\Psi ,\delta _\Psi \widehat{T}\Psi \,\right) =0,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\, \left( \delta _\Psi \widehat{T}\Psi ,\delta _\Psi \widehat{L}_z\Psi \right) =0 \eqnum{6.38} \end{equation}
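Inserting the values from Eqs. (6.19) and (6.38) (a verification we add for clarity), only the last non-null correlation term on the right-hand side of Eq. (6.37) survives and the relation reduces to
\[
\hbar ^2\left( n+\frac 12\right) ^2\left( \Delta _\Psi T\right) ^2\geq \left( \Delta _\Psi T\right) ^2\frac{\hbar ^2}4
\]
i.e. to $\left( n+\frac 12\right) ^2\geq \frac 14$, which holds for every $n$.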
The alluded relationship of the multivariable Eqs. (5.6), (6.37) with the HR from Eqs. (2.9)/(2.10) naturally requires an argued answer to the question: how should the (physical) interpretation of the respective relations relate to TIHR? The question was approached (Synge, 1971) from the viewpoint of extrapolating the TIHR vision. In the spirit of such a view the idea was promoted that a 3-variable relation of the Eq. (5.6) type (obtained by Synge in a different way) must be interpreted as describing a fundamental interconnection among the uncertainties characterizing the simultaneous measurements of the corresponding variables. Other scientists (Dodonov and Man'ko, 1987), who investigated from a mathematical viewpoint the generalizations of the HR given by Eqs. (2.2)/(2.3), have deliberately (and declaredly) omitted any approach to the above mentioned question.
A careful examination of the facts shows that the above mentioned extrapolation of TIHR is unjustifiable, at least for the following reasons: (i) it cannot be sustained by real (nonfictitious) arguments regarding the true characteristics of the measurements, (ii) as it was pointed out in Sec. VI.A, the theoretical quantities implied in Eqs. (5.6) and (6.37) (like $\Delta _\Psi A_j$ or $(\delta _\Psi \widehat{A}_j\Psi ,\delta _\Psi \widehat{A}_e\Psi )$) do not have the significance of measuring uncertainties. Then it results that, although mathematically the multivariable relations are closely related to the HR, they cannot be interpreted in a manner consonant with TIHR. But such a result must be noted as another defect of TIHR regarded as a comprehensive doctrine.
\subsection{Thermal relations}
TIHR was promoted in connection with the quantum HR given by Eqs. (2.2)/(2.3). But we have shown that, mathematically, the respective HR are completely similar to the thermal relations from Eqs. (5.21)-(5.23). Firstly, we shall present here some concrete examples of such thermal relations.
So from the phenomenological (quasithermodynamic) theory of fluctuations we can quote (Dumitru, 1974a, 1988) the following P-V (pressure-volume) formula
\begin{equation}
\Delta _wV\cdot \Delta _wP\geq \left| \left\langle \delta _wV\delta _wP\right\rangle _w\right| \eqnum{6.39} \end{equation} where
\[ \Delta _wV=\sqrt{-k\overline{T}\left( \frac{\partial \overline{V}}{\partial \overline{p}}\right) _{\overline{T}}},\,\,\,\,\,\,\,\,\,\,\,\Delta _wP=\sqrt{ -k\overline{T}\left( \frac{\partial \overline{p}}{\partial \overline{V}} \right) _{\overline{S}}}\,\,\, \] \begin{equation} \left\langle \delta _wV\delta _wP\right\rangle _w=-k\overline{T} \eqnum{6.40} \end{equation} with $\overline{A}=\left\langle A\right\rangle _w$, $k$ = Boltzmann's constant and $S$ = entropy.
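For orientation we add the following illustration (it is ours, not taken from the quoted works): using the known thermodynamic identity $\left( \partial \overline{p}/\partial \overline{V}\right) _{\overline{S}}=\gamma \left( \partial \overline{p}/\partial \overline{V}\right) _{\overline{T}}$, with $\gamma =c_p/c_v\geq 1$, the quantities of Eq. (6.40) give
\[
\Delta _wV\cdot \Delta _wP=k\overline{T}\sqrt{\left( -\frac{\partial \overline{V}}{\partial \overline{p}}\right) _{\overline{T}}\left( -\frac{\partial \overline{p}}{\partial \overline{V}}\right) _{\overline{S}}}=k\overline{T}\sqrt{\gamma }\geq k\overline{T}=\left| \left\langle \delta _wV\delta _wP\right\rangle _w\right|
\]
so that Eq. (6.39) is satisfied with a non-null right-hand side.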
Also we can quote a formula of the Eq. (5.23) type from classical statistical physics. For this let us consider an ideal gas situated in a cylindrical vessel of height $b$ on the Earth's surface. The gas is supposed to be in a thermodynamic equilibrium state described by the canonical distribution $ w\sim \exp \left\{ -H/kT\right\} $, with $H$ = the Hamiltonian of the molecules. If the molecules are considered as identical point particles, we can take $H$ of the form $H=\sum_{i=1}^{3N}\left( p_i^2/2m\right) +\sum_{i=1}^Nmgz_i$ ($N$, $p_i$ and $z_i$ represent respectively the number, linear momenta and altitudes of the molecules; $m$ denotes the mass of a molecule and $g$ is the gravitational acceleration). As stochastic variables for the gas, regarded as a statistical system, we consider $A=H$ = the Hamiltonian and $B=Z_c$ = the altitude of the centre of mass. For such variables Eq. (5.23) transcribes as
\begin{equation}
\Delta _wH\cdot \Delta _wZ_c\geq \left| \left\langle \delta _wH\delta _wZ_c\right\rangle _w\right| \cdot \eqnum{6.41} \end{equation} The terms from this relation are given by
\begin{equation} \left( \Delta _wH\right) ^2=k\nu RT^2\left[ \frac 52-a^2f(a)\right] \eqnum{6.42a} \end{equation} \begin{equation} \left( \Delta _wZ_c\right) =k\frac{b^2}{\nu R}\left[ \frac 1{a^2}-f(a)\right] \eqnum{6.42b} \end{equation} \begin{equation} \left\langle \delta _wH\cdot \delta Z_c\right\rangle =kTb\left[ \left( e^a-1\right) ^{-1}-f(a)\right] \eqnum{6.42c} \end{equation} where $\nu =$ number of moles, $f(a)=e^a\left( e^a-1\right) ^{-2}$ and $ a=\left( \mu gb/RT\right) $with $\mu =$ molar mass of the gas, $R=$ universal gas constant.
Another nontrivial exemplification of Eq. (5.22) is given by the long-known F\"{u}rth formula from the theory of Brownian motion (F\"{u}rth, 1933). In the respective formula $A=x$ and $B=v$, i.e. the coordinate and velocity of a Brownian particle, whereas $\left\langle \delta _wA\delta _wB\right\rangle _w=\left\langle \delta _wx\cdot \delta _wv\right\rangle _w=D$ = the diffusion coefficient of the particle. Then F\"{u}rth's formula is: \begin{equation} \Delta _wx\cdot \Delta _wv>D \eqnum{6.43} \end{equation} Now let us return to the mentioned mathematical similarity between the thermal relations given by Eqs. (5.21)-(5.23), (6.39), (6.41), (6.43) and the HR from Eqs. (2.2)/(2.3). The respective similarity suggests an investigation of the possible connections between the interpretation of the alluded thermal relations and TIHR. Such an investigation was approached by some traditionalist scientists (Frank-Kamenetsky, 1940; Bohm, 1957; Rosenfeld, 1961 and 1962; Terletsky, 1974). Mainly, they promoted the idea that the mentioned thermal relations must be interpreted in terms of a macroscopic complementarity. Consequently, by extrapolation of the TIHR statements ${\bf P-2.15}$ and ${\bf P-2.16}$, in Eq. (5.23) the variables $A$ and $B$ must be regarded as complementary (i.e. mutually exclusive) while $ \Delta _wA$ and $\Delta _wB$ have to be interpreted as uncertainties in simultaneous measurements. The mentioned traditionalist idea was partially reviewed from various perspectives (Shaposhnikov, 1947; Bazarov, 1979; Uffink and van Lith, 1999). But in the alluded reviews it is not pointed out explicitly that quantities like $\Delta _wA$ and $\Delta _wB$ describe the thermal fluctuations, and that such fluctuations are intrinsic properties of the thermodynamic systems.
Our opinion is that the alluded idea of macroscopic complementarity must be rejected for the following reasons: (i) The quantity $\Delta _wA$ is not a measuring uncertainty but a parameter (standard deviation) characterizing the thermal fluctuations. (ii) The value of $\Delta _wA$ can be modified only by changing the inner state (i.e. the distribution function {\it w}) of the considered system, not by means of improvements of the measuring precision. (iii) Regarded as a fluctuation parameter, $\Delta _wA$ can be measured without restrictions of principle. So in noise spectroscopy (Weissmann, 1981) it is possible to measure even the ''constitutive'' (i.e. spectral) components of $\Delta _wA.$ (iv) In classical physics the variables characterizing a macroscopic system are not mutually exclusive (i.e. complementary). (v) The true conception of macroscopic measurements does not include any reference to the mutual unavoidability of simultaneous uncertainties for two (or more) variables.
The above mentioned reasons clearly guide us to the following conclusion. In spite of the mathematical similarity between the HR and the discussed thermal relations, TIHR can not be extended (by similitude) to the interpretation of the respective relations. But as the mentioned mathematical similarity is of a conceptually fundamental nature, the concluded inextensibility must be noted as a true defect of TIHR.
\subsection{The so-called macroscopic operators}
Controversies about TIHR also included several discussions regarding the macroscopic relation given by Eq. (5.29) (see Jancel, 1973, and references therein). The respective discussions were generated by the following conflicting findings: (i) On the one hand the respective relation appears within quantum theory and, mathematically, it is completely similar to the HR expressed by Eqs. (2.2)-(2.3). Then, by extrapolating TIHR, Eq. (5.29) should be interpreted as an interconnection of the macroscopic uncertainties $\Delta _\rho A$ and $\Delta _\rho B$ regarding the simultaneous measurements of the variables $A$ and $B$ afferent to a macroscopic system.
(ii) On the other hand, according to ${\bf P-2.4}$, TIHR operates with the hypothesis that a macroscopic variable can be measured without any uncertainty (i.e. with unbounded accuracy), irrespective of whether it is measured alone or simultaneously with another variable. Then, of course, it is useless to speak of an interconnection between the uncertainties of two macroscopic variables - even if, in the theoretical framework, they are described by quantum statistical operators.
To elude the mentioned conflict the TIHR partisans promoted the strange idea of abrogating Eq. (5.29) and replacing it by an adjusted macroscopic formula concordant with the TIHR vision. With this aim, the common operators $ \widehat{A}$ and $\widehat{B}$ from Eq. (5.29) were substituted by the so-called ''macroscopic operators'' $\widehat{A}_M$ and $\widehat{B} _M $ which (in any representation?) can be pictured as quasi-diagonal matrices. Then one supposes that $\left[ \widehat{A}_M,\widehat{B}_M\right] _{-}=0$ and instead of Eq. (5.29) one obtains \begin{equation} \Delta _\rho A_M\cdot \Delta _\rho B_M\geq 0 \eqnum{6.44} \end{equation} In this relation the TIHR partisans see the fact that, simultaneously, the uncertainties $\Delta _\rho A$ and $\Delta _\rho B$ can be arbitrarily small. Such a view is concordant with the main concept of TIHR. Today many scientists believe that the adjusted Eq. (6.44) solves all the troubles of TIHR generated by Eq. (5.29).
It is easy to remark that the mentioned belief proves to be unfounded if one takes into account the following observations:
(i) Equation (5.29) can not be abrogated unless the entire mathematical apparatus of quantum statistical physics is abrogated too. More exactly, the substitution of the common operators $\widehat{A}$ and $\widehat{B}$ with the macroscopic operators $\widehat{A}_M$ and $\widehat{B}_M$ is a useless invention. This is because in the practical domain of quantum statistical physics (see for example Tyablikov, 1975) the common operators, not the macroscopic ones, are used.
(ii) The above mentioned substitution of operators does not automatically metamorphose Eq. (5.29) into Eq. (6.44). This is because, if two operators are quasi-diagonal (in the sense required by the TIHR partisans), they can still be non-commutable. As an example in this sense we refer to a macroscopic system formed by a large number $N$ of independent $\frac 12$-spins (Dumitru, 1988, 1989). The macroscopic variables in view are the components $M_j$ $\left( j=x,y,z\right) $ of the magnetization $\overrightarrow{M}$. The corresponding operators are \begin{equation} \widehat{A}_j=\widehat{M}_j=\frac{\gamma \hbar }2\widehat{\sigma }_j^{\left( 1\right) }\oplus \frac{\gamma \hbar }2\widehat{\sigma }_j^{\left( 2\right) }\oplus \dots \oplus \frac{\gamma \hbar }2\widehat{\sigma }_j^{\left( N\right) } \eqnum{6.45} \end{equation} where $\gamma $ = magneto-mechanical factor and $\widehat{\sigma }_j^{\left( n\right) }$ = Pauli matrices associated with the ''n-th'' spin (microparticle). One can see that the operators defined by Eqs. (6.45) are quasi-diagonal in the sense required for ''macroscopic operators'', but they are not commutable among themselves, since one has for example $\left[ \widehat{M_x}, \widehat{M_y}\right] _{-}=i\hbar \gamma \widehat{M_z}.$ Consequently one can say that by the mentioned substitution of operators Eq. (5.29) is transposed in fact not into (6.44) but into the formula \begin{equation}
\Delta _\rho A_M\cdot \Delta _\rho B_M\geq \left| \left\langle \left[ \widehat{A}_M,\widehat{B}_M\right] _{-}\right\rangle _\rho \right| \eqnum{6.46} \end{equation} But such a formula is not helpful for TIHR if $\left\langle \left[ \widehat{A }_M,\widehat{B}_M\right] _{-}\right\rangle _\rho \neq 0$, as in the case of the operators defined by Eqs. (6.45).
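The non-commutability invoked here can be checked directly at the single-spin level by the small numerical sketch below (added by us for illustration; it uses only the standard Pauli matrices and does not attempt to reproduce the full N-spin construction of Eq. (6.45)):
\begin{verbatim}
import numpy as np

hbar, gamma = 1.0, 1.0                      # illustrative units

sx = np.array([[0, 1], [1, 0]], dtype=complex)
sy = np.array([[0, -1j], [1j, 0]], dtype=complex)
sz = np.array([[1, 0], [0, -1]], dtype=complex)

Mx = (gamma * hbar / 2) * sx
My = (gamma * hbar / 2) * sy
Mz = (gamma * hbar / 2) * sz

comm = Mx @ My - My @ Mx                    # [M_x, M_y]_-
print(np.allclose(comm, 1j * hbar * gamma * Mz))   # True: the components do not commute
\end{verbatim}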
(iii) The alluded substitution of operators does not solve the troubles of TIHR even if the macroscopic operators are commutable. This is because Eq. (5.29) is only a truncated version of the more general Eq. (5.27). Then by the mentioned substitution, in fact, one must consider the metamorphosis of Eq. (5.27), which gives \begin{equation}
\Delta _\rho A_M\cdot \Delta _\rho B_M\geq \left| \left\langle \delta _\rho \widehat{A}_M\cdot \delta _\rho \widehat{B}_M\right\rangle _\rho \right| \eqnum{6.47} \end{equation} In this formula, if $\left[ \widehat{A}_M,\widehat{B}_M\right] _{-}=0$, one obtains $\left\langle \delta _\rho \widehat{A}_M\cdot \delta _\rho \widehat{B }_M\right\rangle _\rho =\frac 12\left\langle \left[ \delta _\rho \widehat{A} _M,\delta _\rho \widehat{B}_M\right] _{+}\right\rangle _\rho $, i.e. a quantity which can have a non-null value. Then it results that the macroscopic product $\Delta _\rho A_M\cdot \Delta _\rho B_M$ can have a non-null lower bound. But such a result opposes the stipulations of TIHR.
So we conclude that in fact the mentioned macroscopic operators cannot solve the TIHR deficiencies connected with the Eq. (5.29). This means that the respective deficiencies remain unsolved and they must be reported as another insurmountable defect of TIHR.
\section{INDUBITABLE FAILURE OF TIHR}
A mindful examination of all the details of the facts discussed in the previous section guides us to the following remarks:
$\underline{{\bf P-7.1}}$ : Taken together, in an ensemble, the above presented defects incriminate and invalidate each of the main elements of TIHR.$\blacktriangle $
\underline{${\bf P-7.2}$} : The mentioned defects are insurmountable for the TIHR doctrine, because they cannot be avoided or refuted by means of credible arguments from the framework of the respective doctrine.$ \blacktriangle $
The two remarks reveal directly the indubitable failure of TIHR, which now appears as an unjustified doctrine. Then the sole reasonable attitude is to abandon TIHR and, as a first urgency, to search for the genuine significance (interpretation) of the HR. As a second urgency, a re-evaluation is probably necessary of those problems in which, by its implications, TIHR persists as a source of misconceptions and confusions.
\section{THE GENUINE SIGNIFICANCE OF HR}
A veritable search regarding the genuine significance of HR must be founded on the true meaning of the elements implied in the introduction of the respective relations. Then we have to take into account the following considerations.
Firstly, we opine that thought-experimental HR of the type given by Eq. (2.1) must be omitted from the discussion. This is because, as it was pointed out (see Sec. VI.B and the comments about Eq. (5.1)), such relations have a circumstantial character dependent on the performances of the supposed measuring experiment. Also, in the respective relations, the involved variables are not regarded as stochastic quantities, as the true quantum variables are. So equations of the (2.1) type have no noticeable importance for the conceptual foundation of quantum mechanics. Moreover, the usages of such relations in various pseudo-demonstrations (Tarasov, 1980) have no real scientific value. That is why we opine that the thought-experimental HR of the type given by Eq. (2.1) must be completely excluded from physics.
We summarize the above opinions in the following point:
$\underline{{\bf P-8.1}}$: The thought-experimental HR like Eq. (2.1) must be disregarded being fictitious formulae without a true physical significance.$\blacktriangle $
As regards the theoretical HR of the kind illustrated by Eqs. (2.2)/(2.3), the situation is completely different. The respective HR are mathematically justified, for precisely defined conditions, within the theoretical framework of quantum mechanics. This means that the physical significance (interpretation) of the theoretical HR is a question of notable importance. Now note that the mentioned HR belong to the large family of correlation relations reviewed in Sec. V. This fact suggests that the genuine significance (interpretation) of the theoretical HR must be completely similar to that of the mentioned correlation relations.
We opine that the alluded suggestion must be taken into account with maximum consideration. Then, firstly, we remark that all of the mentioned correlation relations refer to variables with stochastic characteristics. Such variables are specific both to quantum and to non-quantum (i.e. classical) physical systems. Secondly, let us regard the quantities like $\Delta _wA$ or $\left\langle \delta _wA\delta _wB\right\rangle _w$ which appear in the corresponding correlation relations from classical statistical physics. In present-day science the respective quantities are unanimously interpreted as {\it fluctuation parameters} of the considered variables $A$ and $B$. It is also clearly accepted that the respective parameters describe intrinsic properties of the considered systems, not characteristics (i.e. uncertainties) of the measurements on the respective properties. In this sense, in some scientific domains, such as noise spectroscopy (Weissman, 1981), the evaluation of quantities like $\Delta _wA$ is regarded as a tool for the investigation of the intrinsic properties of physical systems. In the classical conception the description of the intrinsic properties of physical systems is not amalgamated with elements regarding the measuring uncertainties. The alluded description is made, in terms of the corresponding physical variables, within the framework of the known chapters of classical physics (i.e. mechanics, electrodynamics, optics, thermodynamics and statistical physics). For the mentioned variables, when it is the case, the description also approaches the fluctuation characteristics. By contrast, in the classical conception the measuring uncertainties/errors are studied within the framework of error analysis. Note that the respective analysis is a scientific branch which is independent of and additional to the mentioned chapters of physics (Worthing and Geffner, 1955).
The above mentioned aspects about the classical quantities $\Delta _wA$ and $ \left\langle \delta _wA\delta _wB\right\rangle _w$ must be taken into account for the specification of the genuine significance (interpretation) of the quantum quantities $\Delta _\Psi A$ and $\left\langle \delta _\Psi A\delta _\Psi B\right\rangle _\Psi $, as well as of the theoretical HR. We think that the respective specification can be structured through the following points:
\underline{${\bf P-8.2}$} : The quantum variables (operators) must be regarded as stochastic quantities which admit fluctuations.$\blacktriangle $
\underline{${\bf P-8.3}$} : According to usual quantum mechanics, time is not a stochastic variable but a deterministic quantity which does not admit fluctuations.$\blacktriangle $
\underline{${\bf P-8.4}$} : The theoretical quantities $\Delta _\Psi A,\,\Delta _\Psi B$, $\left( \delta _\Psi \widehat{A}\Psi ,\delta _\Psi \widehat{B}\Psi \,\right) $ or $\left\langle \left[ \widehat{A}, \widehat{B}\right] _{-}\right\rangle _\Psi $ must be interpreted as parameters referring to the fluctuations regarded as intrinsic properties of quantum microparticles.$\blacktriangle $
\underline{${\bf P-8.5}$} : The theoretical HR of the kind illustrated by Eqs (2.2)/(2.3) must be considered through their accurate and complete forms presented in Sec.V. In such forms they have to be interpreted as fluctuation formulae regarding intrinsic characteristics of microparticles.$ \blacktriangle $
\underline{${\bf P-8.6}$} : The quantities mentioned in ${\bf P-8.4}$ as well as the theoretical HR have no connection with the description of measuring uncertainties.$\blacktriangle $
\underline{${\bf P-8.7}$} : The description of measurement characteristics (e.g. of measuring uncertainties) for quantum microparticles must be made within the framework of a scientific branch which is independent of and additional to usual quantum mechanics.$\blacktriangle $
A persuasive argumentation of these points results directly from the above presented considerations. For ${\bf P-8.4}$ and ${\bf P-8.6}$ the respective argumentation can be strengthened by the following observations: (i) the values of the quantities discussed in ${\bf P-8.4}$ depend (through the wave function $\Psi $) only on the considered microparticle state, (ii) the respective values are independent of the measuring uncertainties which, for the same measured state, can be modified (by changing the accuracy of the experimental instruments).
The above presented points ${\bf P-8.1}$ --- ${\bf P-8.6}$ can be regarded as the main elements of a genuine reinterpretation of HR. It is easy to see that the respective reinterpretation is completely concordant with the working procedures of usual quantum mechanics (which, of course, include the theoretical HR as particular formulae).
The mentioned reinterpretation assures a vision where all the defects of TIHR presented in Secs. VI.A--VI.L are eliminated as inadequate and unfounded statements. So the defect discussed in Sec. VI.A loses any sense if one takes into account ${\bf P-8.1,\,P-8.4}$ and ${\bf P-8.6}$. Also the defect revealed in Sec. VI.B has no value if we accept ${\bf P-8.1}$. The facts presented in Sec. VI.C can be elucidated by considering ${\bf P-8.6.}$ For the case of solitary variables approached in Sec. VI.D a pertinent answer is given by ${\bf P-8.4}$ and ${\bf P-8.6}$. The same points ${\bf P-8.4}$ and ${\bf P-8.6}$ offer a natural vision on the case of commutable variables approached in Sec. VI.E.
The cases of the pairs $L_Z-\varphi $ and $N-\Phi $ discussed in Secs. VI. F and VI. G can be brought in concordance with the proposed reinterpretation of HR as follows.
In the situations described by the wave functions given by Eqs.(6.6) and (6.21) the conditions expressed by Eqs. (5.8) are not satisfied. Then, according to ${\bf P-8.5}$, in such situations the discussions must refer to the complete and general correlation relations given by Eqs. (5.7) or (5.14) but not to the restrictive formulae like Eqs. (2.3)/(5.13)/(6.4).
Note that, from a general perspective, the situations described by the wave functions $\Psi $ noted in Eqs. (6.6) or (6.21) refer to the cases of uni-variable eigenstates. In such a state, for two variables $A$ and $B$, we have $\widehat{A}\psi =a\psi $ and $\widehat{B}\psi \neq b\psi $, respectively $\Delta _\psi A=0$ and $0<\Delta _\psi B\neq \infty $. But if $\left\langle \left[ \widehat{A},\widehat{B}\right] _{-}\right\rangle _\Psi \neq 0$ for the pair $A-B$, the HR from Eq. (2.3) and the related TIHR assertions are not applicable. It must be reminded that an early modest notification (Davidson, 1965) of the TIHR shortcomings with respect to the uni-variable eigenstates seems to have been ignored by the TIHR partisans.
Now we can see that the alluded inapplicability of Eq. (2.3) is generated by the fact that for the mentioned uni-variable eigenstates the Eqs. (5.8) are not satisfied. This is because, if for such states Eqs. (5.8) were satisfied, we would have to admit the following chain of relations:
\[ a\left\langle B\right\rangle _\Psi =a\left( \Psi ,\widehat{B}\Psi \right) =\left( \widehat{A}\Psi ,\widehat{B}\Psi \right) =\left( \Psi ,\widehat{A}\widehat{B}\Psi \right) = \]
\begin{equation} =\left( \Psi ,\left[ \widehat{A},\widehat{B}\right] _{-}\Psi \right) +\left( \Psi ,\widehat{B}\widehat{A}\Psi \right) =\left\langle \left[ \widehat{A},\widehat{B}\right] _{-}\right\rangle _\Psi +a\left\langle B\right\rangle _\Psi \eqnum{8.1} \end{equation} i.e. the absurd result $a\left\langle B\right\rangle _\Psi =\left\langle \left[ \widehat{A},\widehat{B}\right] _{-}\right\rangle _\Psi +a\left\langle B\right\rangle _\Psi $ with $\left\langle \left[ \widehat{A},\widehat{B}\right] _{-}\right\rangle _\Psi \neq 0$.
We add here the fact that for the discussed states, instead of Eq. (2.3), the more general Eq. (5.7) remains valid (in the trivial form $0=0$).
It is quite evident that the situations of uni-variable eigenstates come into normality if $\Delta _\Psi A$ and $\Delta _\Psi B$ are regarded as parameters which describe the quantum fluctuations. Then in such situations $A$ and $B$ respectively do not have and do have fluctuations (i.e. stochastic characteristics).
The situation described by the wave function given by Eq. (6.15) must be discussed separately. Firstly it necessitates a confrontation with the conditions expressed by Eqs. (5.8). In this sense we note that for the respective situation one obtains the relation \[ \left( \hat{L}_Z\Psi _l,\hat{\varphi}\Psi _l\right) -\left( \Psi _l,\hat{L}_Z\hat{\varphi}\Psi _l\right) = \] \begin{equation} =i\hbar \left\{ 1+2\mathop{\rm Im}\left[ \sum_m\sum_{m^{\prime }}C_m^{*}C_{m^{\prime }}\,m\left( Y_{lm},\hat{\varphi}Y_{lm^{\prime }}\right) \right] \right\} \eqnum{8.2} \end{equation} where $\mathop{\rm Im}\alpha $ denotes the imaginary part of the complex quantity $\alpha $. Then one observes that in the cases when the right hand term in Eq. (8.2) is null the variables $L_Z$ and $\varphi $ satisfy the Eqs. (5.8). In such cases the Eqs. (2.3)/(5.13)/(6.4) are applicable. In other cases (when the mentioned term from Eq. (8.2) is non-null) the Eqs. (5.8) are infringed and for the pair $L_Z-\varphi $ only the Eqs. (5.7) or (5.14) must be applied.
Note that the $L_Z-\varphi $ case in the situation described by the wave functions from Eq. (6.15) can also be approached by using the Fourier analysis procedures. So for the mentioned case and situations, similarly to Eq. (5.41), one obtains \begin{equation} \Delta _\Psi L_Z\cdot \Delta _\Psi \varphi \geq \left| \mathop{\rm Im}\left\{ \sum_m\sum_{m^{\prime }}C_m^{*}C_{m^{\prime }}\cdot m\left( Y_{lm},\varphi Y_{lm^{\prime }}\right) \right\} \right| \eqnum{8.3} \end{equation}
As regards the case of QTP described by the wave functions given by Eqs. (6.18) we note the following observations. In such a case the variables $L_Z$ and $\varphi $ satisfy the conditions expressed by Eqs. (5.8). Consequently for the respective variables the Eqs. (2.3)/(5.13)/(6.4) are applicable. But the mentioned equations must be considered as resulting from the more general Eqs. (5.7) or (5.14), which refer to the quantum fluctuations.
The problems with the pair energy-time mentioned in Sec. VI. H become senseless if ${\bf P-8.3}$ is accepted. According to ${\bf P-8.5}$ the relations mentioned in Secs. VI. I and VI. J become simple generalizations of the theoretical HR without interpretational shortcomings. The relations discussed in Secs. VI. K and VI. L are nothing but macroscopic analogues of the quantum theoretical HR. Also the respective relations do not imply any interpretational shortcoming. Moreover, the so-called macroscopic operators discussed in Sec. VI. L appear as pure inventions without any physical utility or significance.
${\bf \blacklozenge }$ {\bf A reply addendum regarding the $L_z-\varphi $ pair}
Our first opinions about the $L_z-\varphi $ pair in connection with TIHR were presented in earlier works (Dumitru 1977, 1980). Perhaps the respective presentations were more modest and less complete - e.g. we did not use at all the arguments resulting from the above mentioned examples of $L_z$-degenerate states or of QTP. Nevertheless, we think that the alluded opinions were correct in their essence. However, in a review (Schroeck Jr., 1982) the respective opinions were judged as being erroneous. In this addendum, by using some of the above discussed facts, we wish to reply to the mentioned judgements.
The main error Prof. Schroeck reproaches us with is: ''most of the results stated concerning angular momentum and angle operators (including the supposed canonical commutation relations) are false, this being a consequence of not using Riemann-Stieltjes integration theory which is necessitated since the angle function has a jump discontinuity''. In order to answer this reproach we resort to the following specifications: (i) One can see that the respective reproach is founded, in fact, on the idea that the variable $\varphi $ has a jump (of magnitude $2\pi $ at $\varphi =0$ or, equivalently, at $\varphi =2\pi )$ and, consequently, on the commutation relation $\left[ \widehat{L}_z,\widehat{\varphi }\right] _{-}=-i\hbar +i\hbar 2\pi \delta $ (where $\delta $ = Dirac delta function at the boundary $\varphi =0$ or $\varphi =2\pi $). Note that the respective idea (confessed explicitly to us by Prof. Schroeck in two letters dated September 16, 1981 and April 2, 1982) can also be found in most TIHR publications. (ii) The mentioned idea refers, in fact, only to the systems which are $\varphi $-circular and non-degenerate with respect to $L_z$ (defined in the sense specified above in Sec. VI. F). But, strangely, it is associated with the supposition that the range of $\varphi $ is the whole domain $\left( -\infty ,\infty \right) $, not the finite interval $\left[ 0,2\pi \right] $. (iii) Here one must also add the cases presented in Sec. VI. F of QTP and of the $L_z$-degenerate states (the latter for the situations with a non-null term in the right hand side of Eq. (8.2)). For the respective cases we must consider another commutation relation, namely $\left[ \widehat{L}_z,\widehat{\varphi }\right] _{-}=-i\hbar $. (iv) Then, in the spirit of the mentioned idea, for the same pair of variables $L_z-\varphi $ one must tolerate two completely dissimilar commutation relations: $\left[ \widehat{L}_z,\widehat{\varphi }\right] _{-}=-i\hbar +i\hbar 2\pi \delta $ and $\left[ \widehat{L}_z,\widehat{\varphi }\right] _{-}=-i\hbar .$ But such a toleration seems to be senseless and without any real (physical) substantiation. (v) Our opinion about the $\widehat{L}_z-\widehat{\varphi }$ pair remains as it was announced in previous works and as argued in more detail in the present paper. It is founded on the necessity to approach in a unique manner all the alluded cases. In essence we think that, in all the respective cases, for the $L_z-\varphi $ pair we must have a unique commutation relation, namely $\left[ \widehat{L}_z,\widehat{\varphi }\right] _{-}=-i\hbar .$ The implementation of the respective relation in the mentioned cases for obtaining theoretical formulae of HR-type must be made by taking into account the natural (physical) range of $\varphi $ as well as the fulfillment of the Eqs. (5.8).
The ensemble of the above noted specifications shows that the reproaches of Professor F.E. Schroeck Jr. regarding our opinions about the $L_z-\varphi $ pair are unfounded.$\blacklozenge $
\[ \ast \ast \ast \]
The facts presented in this section show that all the problems directly connected with the interpretation of HR can be solved by the here-proposed genuine reinterpretation of the respective relations. But, as it is known, TIHR also generated disputes about topics which are adjacent or additional with respect to the alluded problems. Several such topics will be briefly approached in the next sections.
\section{A RECONSIDERATION REGARDING THE MEASUREMENTS}
As it was mentioned in Sec. II the story of HR started with the primary questions regarding the measuring uncertainties. During the years the respective questions and, more generally, the description of the measurements generated a large number of studies (a good list of references, in this sense, can be obtained from the works: Yanase et al., 1978; Braginsky and Khalili, 1992; Bush et al., 1996; Hay and Peres, 1998; Sturzu, 1999 and surely from the bibliographical publications indicated at the end of Sec. I). It is surprising to see that many of the above alluded studies are contaminated one way or another by ideas pertaining to the TIHR doctrine. After the above exposed argumentation against TIHR, here we wish to present a few elements of a somewhat new reconsideration of the problems regarding the description of measurements (including of measuring uncertainties). We think that, even if modestly limited, such a reconsideration can be of non-trivial interest for present-day science. This is because we agree with the opinion (Primas and M\"{u}ller-Herold, 1978) that, in fact, ''there exists not yet a fundamental theory of actual measuring instruments''.
Firstly, we note that, in our opinion, the questions ${\bf P-2.1-P-2.2}$ are of real significance for the studies of the physical systems. This fact is due to the essential role of measurements (i.e. of quantitatively evaluated experiments) for the mentioned studies. Moreover, we think that, in principle, the alluded role appears both in quantum and non-quantum physics.
Then, in our announced reconsideration, we must try to search for natural answers to the questions ${\bf P-2.1-P-2.2}$ as well as to some other (more or less) directly connected problems. For such a purpose we shall note the remarks under the following points:
\underline{${\bf P-9.1}$} : As a rule, all the measurements, both from macroscopic and microscopic physics, are confronted with measuring uncertainties.$\blacktriangle $
\underline{${\bf P-9.2}$} : The respective uncertainties are generated by various factors. Among such factors the most important ones seem to be the intrinsic fluctuations within the experimental devices and the measuring perturbations (due to the interactions of the respective devices with the measured systems).$\blacktriangle $
\underline{${\bf P-9.3}$} : A quantitative description of the measuring uncertainties must be made in the framework of an authentic theory of measurements. The respective theory must be independent and additional with respect to the traditional chapters of physics (which describe the intrinsic properties of physical systems).$\blacktriangle $
\underline{${\bf P-9.4}$} : The measurement of a stochastic variable should not be reduced to a sole detection. It must be regarded and managed as a {\it statistical sampling} (i.e. as a statistical ensemble of detections). Therefore, for such a variable, the finding of a single value from a sole detection does not mean the collapse of the corresponding stochastic characteristics (described by a wave function or by a probability density).$ \blacktriangle $
\underline{${\bf P-9.5}$} : In the spirit of the above remark the measuring uncertainties of stochastic variables must be described in terms of quantities connected with the afferent statistical sampling but not with solitary detections.$\blacktriangle $
\underline{${\bf P-9.6}$} : As regards the above alluded theory of measurements we agree with the idea (Bunge, 1977b) that it must include some specific elements, but not only generic-universal aspects. This is because every experimental apparatus used in real measurements has a well-defined level of performance and a restricted class of utilizations - i.e. it is not a generic-universal (all-purpose) device.$\blacktriangle $
\underline{${\bf P-9.7}$} : Together with the mentioned agreement we opine that the measurements theory can also include some elements/ideas with generic-universal characteristics. One such characteristic is connected with the fact that, in essence, every measurement can be regarded as an acquisition of some information about the measured system.$\blacktriangle $
In the spirit of the latter remark we think that from a generic-universal viewpoint a measurement can be described as a process of information transmission, from the measured system to the receiver (recorder or observer). In such a view, the measuring apparatus can be represented as a channel for information transmission, whereas the measuring uncertainties can be pictured as an alteration of the processed information. Such an informational approach is applicable to measurements on both macroscopic and microscopic (quantum) systems. Also the mentioned approach does not contradict the idea of specificity as regards the measurements theory. The respective specificity is implemented in the theory by the concrete models/descriptions of the measured system (information source), of the measuring apparatus (transmission channel), and of the recorder/observer (information receiver).
For an illustration of the above ideas let us refer to the description of the measurement of a single stochastic variable $x$ having a continuous spectrum of values within the range $\left( -\infty ,\infty \right) $. The respective variable can be of classical kind (such are the macroscopic quantities discussed in connection with Eqs. (5.19)-(5.23)) or of a quantum nature (e.g. a Cartesian coordinate of a microparticle).
Since the alluded measurement is regarded as a statistical sampling, its usual task is to find certain global probabilistic parameters of $x$ such as: mean/expected value, standard deviation, or even higher order moments. But the respective parameters are evaluated by means of the elementary probability $dP=w\left( x\right) dx$ of finding the value of $x$ within the infinitesimal range $\left( x,x+dx\right) .$ Here $w\left( x\right) $ denotes the corresponding probability density. Then the mentioned task can be connected directly to $w\left( x\right) .$
In relation to the measured system's own properties, $w\left( x\right) $ has an IN (input) expression $w_{IN}\left( x\right) $. So viewed, $w_{IN}\left( x\right) $ is assimilable with: (a) a usual distribution from classical statistical physics - in the case of a macroscopic system, respectively (b) the quantity $\left| \Psi \left( x\right) \right| ^2$ ($\Psi \left( x\right) $ = the corresponding wave function) - in the case of a quantum microparticle.
The fact that the measuring apparatus distorts (alters) the information about the values of $x$ means that the respective apparatus records an OUT (output) probability density $w_{OUT}\left( x\right) $ which generally differs from $w_{IN}\left( x\right) $. So, with respect to the measuring process, $w_{IN}\left( x\right) $ and $w_{OUT}\left( x\right) $ describe the input, respectively the output, information. Then it results that the measuring uncertainties (alterations of the processed information) must be described in terms of quantities depending on both $w_{OUT}\left( x\right) $ and $w_{IN}\left( x\right) .$
A description of the mentioned kind can be obtained, for instance, if one uses the following mean values: \begin{equation} \left\langle f\right\rangle _A=\int_{-\infty }^\infty f\left( x\right) \,w_A\left( x\right) dx,\,\,\,\,\left( A=IN;\,OUT\right) \eqnum{9.1} \end{equation} with $f\left( x\right) $ = an arbitrary function of $x$. Then a possible quantitative evaluation of the measuring disturbances can be made in terms of both the following parameters:
(i) the {\it mean value uncertainty }given by \begin{equation} \delta \left( \left\langle x\right\rangle \right) =\left\langle x\right\rangle _{OUT}-\left\langle x\right\rangle _{IN} \eqnum{9.2} \end{equation}
(ii) the {\it standard deviation uncertainty }defined as \begin{equation} \delta \left( \Delta x\right) =\Delta _{OUT}x-\Delta _{IN}x \eqnum{9.3} \end{equation} where $\Delta _Ax=\left[ \left\langle x^2\right\rangle _A-\left\langle x\right\rangle _A^2\right] ^{1/2},\,\,\,\,\,\,(A=IN;OUT).$
The mentioned evaluation can be enriched by also using the higher order probabilistic moments/correlations (in the sense discussed by Dumitru (1999)).
Another evaluation of the measuring uncertainties can be made by means of the {\it informational entropy uncertainty } \begin{equation} \delta H=H_{OUT}-H_{IN} \eqnum{9.4} \end{equation} Here the informational entropies $H_A\,\,(A=IN;\,OUT)$ are defined by \begin{equation} H_A=-\int_{-\infty }^\infty w_A\left( x\right) \ln \left[ 1_xw_A\left( x\right) \right] dx \eqnum{9.5} \end{equation} where $1_x$ = the unit of the physical variable $x.$
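It may be useful to note, as a simple worked special case of Eq. (9.5) (added here only for orientation), that for a Gaussian density $w_A\left( x\right) =\left( \sqrt{2\pi }\sigma _A\right) ^{-1}\exp \left\{ -\left( x-a\right) ^2/2\sigma _A^2\right\} $ one obtains \[ H_A=\ln \left( \frac{\sqrt{2\pi e}\,\sigma _A}{1_x}\right) \] so that for two Gaussian densities $\delta H=H_{OUT}-H_{IN}=\ln \left( \sigma _{OUT}/\sigma _{IN}\right) $, a form which will reappear below in Eq. (9.20).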
Due to the fact that, in the present considerations, a measurement is regarded as a statistical sampling, the parameters defined by Eqs. (9.2)-(9.4) can be called {\it statistical uncertainties.}
The uncertainty parameters introduced by Eqs. (9.2)-(9.4) can be detailed if one takes into account more elements regarding the characteristics of the measuring apparatus and/or of the measured system. So we can refer to an apparatus modeled as an (information) transmission channel with stationary and linear characteristics. Then we can write \begin{equation} w_{OUT}\left( x\right) =\int_{-\infty }^\infty G\left( x,x^{\prime }\right) \,w_{IN}\left( x^{\prime }\right) dx^{\prime } \eqnum{9.6} \end{equation} where the kernel $G\left( x,x^{\prime }\right) $ must satisfy the normalization conditions \begin{equation} \int_{-\infty }^\infty G\left( x,x^{\prime }\right) dx=\int_{-\infty }^\infty G\left( x,x^{\prime }\right) dx^{\prime }=1 \eqnum{9.7} \end{equation} The measurement is ideal or real (non-ideal) in the cases when $w_{OUT}\left( x\right) =w_{IN}\left( x\right) $ respectively $w_{OUT}\left( x\right) \neq w_{IN}\left( x\right) $. In the above model such cases appear if we take $G\left( x,x^{\prime }\right) =\delta \left( x-x^{\prime }\right) $ respectively $G\left( x,x^{\prime }\right) \neq \delta \left( x-x^{\prime }\right) $, with $\delta \left( x-x^{\prime }\right) $ as the Dirac $\delta $ function of $x-x^{\prime }.$ Then by using Eqs. (9.4)-(9.6) and by taking into account the relation $\ln y\leq y-1,\left( y>0\right) ,$ one obtains \[ \delta H=-\int_{-\infty }^\infty dx\int_{-\infty }^\infty dx^{\prime }\,G\left( x,x^{\prime }\right) \,w_{IN}\left( x^{\prime }\right) \ln \left[ \frac{w_{OUT}\left( x\right) }{w_{IN}\left( x^{\prime }\right) }\right] \geq \] \begin{equation} \geq -\int_{-\infty }^\infty dx\int_{-\infty }^\infty dx^{\prime }\,G\left( x,x^{\prime }\right) \,w_{IN}\left( x^{\prime }\right) \left[ \frac{w_{OUT}\left( x\right) }{w_{IN}\left( x^{\prime }\right) }-1\right] =0 \eqnum{9.8} \end{equation} So we find \begin{equation} \delta H=H_{OUT}-H_{IN}\geq 0 \eqnum{9.9} \end{equation} This relation shows that for the processed information (measurement of $x$) the {\it entropy }$H$ at the recorder is greater than (or at least equal to) that at the measured system. In terms of the entropy uncertainty we can speak of an ideal respectively real (non-ideal) measurement as $\delta H=0$ or $\delta H\neq 0.$
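For readers who wish to verify the above relations numerically, the following minimal sketch (added here as an illustration only; the input density, the grid and the device parameters are assumed, purely illustrative values) propagates an arbitrary non-Gaussian input density through a stationary linear channel with a Gaussian kernel of the type (9.13) and evaluates the statistical uncertainties (9.2)-(9.4), confirming in particular the non-negativity of $\delta H$ stated in Eq. (9.9):
\begin{verbatim}
import numpy as np

x = np.linspace(-15.0, 15.0, 2001)
dx = x[1] - x[0]

# bimodal (non-Gaussian) input density w_IN(x), normalized on the grid
w_in = np.exp(-(x - 2.0) ** 2 / 2.0) + 0.5 * np.exp(-(x + 3.0) ** 2 / 0.5)
w_in /= w_in.sum() * dx

# Gaussian channel kernel G(x, x') with illustrative precision indices
eps, sigma_d = 0.3, 1.2
G = np.exp(-(x[:, None] - eps - x[None, :]) ** 2 / (2.0 * sigma_d ** 2))
G /= np.sqrt(2.0 * np.pi) * sigma_d

# output density, Eq. (9.6): w_OUT(x) = int G(x, x') w_IN(x') dx'
w_out = (G * w_in[None, :]).sum(axis=1) * dx

def mean(w):
    return (x * w).sum() * dx

def std(w):
    return np.sqrt((x ** 2 * w).sum() * dx - mean(w) ** 2)

def entropy(w):
    # 1_x is taken as the grid unit, so only the difference delta H is meaningful
    return -(w * np.log(np.clip(w, 1e-300, None))).sum() * dx

print("delta(<x>)     =", mean(w_out) - mean(w_in))        # Eq. (9.2)
print("delta(Delta x) =", std(w_out) - std(w_in))          # Eq. (9.3)
print("delta H        =", entropy(w_out) - entropy(w_in))  # Eq. (9.4); >= 0, cf. Eq. (9.9)
\end{verbatim}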
We can detail the above ideas even more by the following example. Let us consider $x$ as the coordinate of a rectilinear quantum oscillator situated in its lowest energy state. Such a state is described by the Gaussian wave function \begin{equation} \Psi \left( x\right) =\left( \sqrt{2\pi }\sigma _{IN}\right) ^{-1/2}\exp \left\{ -\frac{x^2}{4\sigma _{IN}^2}\right\} \eqnum{9.10} \end{equation} where $\sigma _{IN}=\sqrt{\hbar /2m\omega }$, with $m$ and $\omega $ denoting the mass and the angular frequency of the oscillator, respectively. Then $w_{IN}\left( x\right) $ is given by \begin{equation}
w_{IN}\left( x\right) =\left| \Psi \left( x\right) \right| ^2=\left( \sqrt{2\pi }\sigma _{IN}\right) ^{-1}\exp \left\{ -\frac{x^2}{2\sigma _{IN}^2}\right\} \eqnum{9.11} \end{equation} The $IN$-values (calculated with $w_{IN}\left( x\right) $) of the mean and standard deviation of $x$ are \begin{equation} \left\langle x\right\rangle _{IN}=0,\,\,\,\,\,\,\,\,\Delta _{IN}x=\sigma _{IN}=\sqrt{\frac \hbar {2m\omega }} \eqnum{9.12} \end{equation} If the errors induced by the measuring device are supposed to be small, the kernel $G\left( x,x^{\prime }\right) $ can be taken of the Gaussian form: \begin{equation} G\left( x,x^{\prime }\right) =\frac 1{\sigma _D\sqrt{2\pi }}\exp \left\{ -\frac{\left( x-\varepsilon -x^{\prime }\right) ^2}{2\sigma _D^2}\right\} \eqnum{9.13} \end{equation} Here $\varepsilon $ and $\sigma _D$ denote the {\it precision indices} of the device. So an ideal, respectively a real measurement correspond to $\varepsilon \rightarrow 0$ and $\sigma _D\rightarrow 0$ (when $G\left( x,x^{\prime }\right) \rightarrow \delta \left( x-x^{\prime }\right) )$ respectively to $\varepsilon \neq 0$ and $\sigma _D\neq 0.$ By introducing Eq. (9.13) into Eq. (9.6) one finds \begin{equation} w_{OUT}\left( x\right) =\frac 1{\sigma _{OUT}\sqrt{2\pi }}\exp \left\{ -\frac{\left( x-\varepsilon \right) ^2}{2\sigma _{OUT}^2}\right\} \eqnum{9.14} \end{equation} with \begin{equation} \sigma _{OUT}^2=\sigma _{IN}^2+\sigma _D^2 \eqnum{9.15} \end{equation} For the OUT-expressions (calculated with $w_{OUT}\left( x\right) $) of the mean value respectively of the standard deviation of $x$ one obtains \begin{equation} \left\langle x\right\rangle _{OUT}=\varepsilon \eqnum{9.16} \end{equation} \begin{equation} \Delta _{OUT}x=\sigma _{OUT}=\sqrt{\left( \Delta _{IN}x\right) ^2+\sigma _D^2} \eqnum{9.17} \end{equation} Then the uncertainties defined by Eqs. (9.2) and (9.3) become \begin{equation} \delta \left( \left\langle x\right\rangle \right) =\varepsilon \eqnum{9.18} \end{equation} \begin{equation} \delta \left( \Delta x\right) =\Delta _{IN}x\left\{ \sqrt{1+\left( \frac{\sigma _D}{\Delta _{IN}x}\right) ^2}-1\right\} \eqnum{9.19} \end{equation} In the same circumstances, for the uncertainty given by Eq. (9.4) of the informational entropy regarding $x$ one finds \begin{equation} \delta H_x=\ln \left( \frac{\sigma _{OUT}}{\sigma _{IN}}\right) =\ln \sqrt{1+\left( \frac{\sigma _D}{\Delta _{IN}x}\right) ^2} \eqnum{9.20} \end{equation} Now we can extend our discussion to the measurement of the momentum $p$ in the case of the same quantum oscillator described by the wave function from Eq. (9.10). As it is known the respective wave function is given in the $x$-representation. But it can be transcribed in the $p$-representation in the form: \begin{equation} \Psi \left( p\right) =\left( \sqrt{2\pi }\mu _{IN}\right) ^{-1/2}\exp \left\{ -\frac{p^2}{4\mu _{IN}^2}\right\} \eqnum{9.21} \end{equation} with $\mu _{IN}=\left( \hbar m\omega /2\right) ^{1/2}.$ So all the above considerations can be transcribed from the $x$-variable version into a $p$-variable form.
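As a small numerical illustration of the closed forms (9.12) and (9.15)-(9.20) (the oscillator parameters and the precision indices below are assumed, purely illustrative values), one may evaluate:
\begin{verbatim}
import numpy as np

hbar = 1.054571817e-34          # J s
m, omega = 9.11e-31, 1.0e15     # illustrative oscillator mass and angular frequency

sigma_in = np.sqrt(hbar / (2.0 * m * omega))      # Delta_IN x, Eq. (9.12)
eps, sigma_d = 0.2 * sigma_in, 0.5 * sigma_in     # assumed precision indices of the device

sigma_out = np.sqrt(sigma_in ** 2 + sigma_d ** 2)                        # Eq. (9.15)
delta_mean = eps                                                         # Eq. (9.18)
delta_std = sigma_in * (np.sqrt(1.0 + (sigma_d / sigma_in) ** 2) - 1.0)  # Eq. (9.19)
delta_H = np.log(sigma_out / sigma_in)                                   # Eq. (9.20)

print(sigma_in, sigma_out, delta_mean, delta_std, delta_H)
# for an ideal device (eps -> 0, sigma_d -> 0) all three uncertainties vanish
\end{verbatim}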
Then the $IN$-expressions for the probability distribution, the mean value and the standard deviation of $p$ are \begin{equation}
w_{IN}\left( p\right) =\left| \Psi \left( p\right) \right| ^2=\left( \sqrt{2\pi }\mu _{IN}\right) ^{-1}\exp \left\{ -\frac{p^2}{2\mu _{IN}^2}\right\} \eqnum{9.22} \end{equation} \begin{equation} \left\langle p\right\rangle _{IN}=0 \eqnum{9.23} \end{equation} \begin{equation} \Delta _{IN}p=\mu _{IN}=\sqrt{\frac{\hbar m\omega }2} \eqnum{9.24} \end{equation} If the $p$-measuring device is also characterized by small errors it can be described by the kernel \begin{equation} G\left( p,p^{\prime }\right) =\left( \sqrt{2\pi }\mu _D\right) ^{-1}\exp \left\{ -\frac{\left( p-\eta -p^{\prime }\right) ^2}{2\mu _D^2}\right\} \eqnum{9.25} \end{equation} with $\eta $ and $\mu _D$ as precision indices of the device. Similarly to the $x$-variable case, for the momentum $p$ one finds that the distribution $w_{OUT}\left( p\right) $ characterizing the output of the measuring process is given by \begin{equation} w_{OUT}\left( p\right) =\left( \sqrt{2\pi }\cdot \mu _{OUT}\right) ^{-1}\exp \left\{ -\frac{\left( p-\eta \right) ^2}{2\mu _{OUT}^2}\right\} \eqnum{9.26} \end{equation} where \begin{equation} \mu _{OUT}^2=\mu _{IN}^2+\mu _D^2 \eqnum{9.28} \end{equation}
Then the $OUT$-expressions of the mean value and standard deviation for $p$ are \begin{equation} \left\langle p\right\rangle _{OUT}=\eta \eqnum{9.29} \end{equation} \begin{equation} \Delta _{OUT}p=\mu _{OUT}=\sqrt{\left( \Delta _{IN}p\right) ^2+\mu _D^2} \eqnum{9.30} \end{equation}
Correspondingly, the uncertainties of the mean value and standard deviation of $p$ are \begin{equation} \delta \left( \left\langle p\right\rangle \right) =\eta \eqnum{9.31} \end{equation} \begin{equation} \delta \left( \Delta p\right) =\Delta _{IN}p\left\{ \sqrt{1+\left( \frac{\mu _D}{\Delta _{IN}p}\right) ^2}-1\right\} \eqnum{9.32} \end{equation} Also for the uncertainty regarding the informational entropy for the $p$ variable one obtains \begin{equation} \delta H_p=\ln \left( \frac{\mu _{OUT}}{\mu _{IN}}\right) =\ln \sqrt{ 1+\left( \frac{\mu _D}{\Delta _{IN}p}\right) ^2} \eqnum{9.33} \end{equation}
Now, the above presented statistical uncertainties for $x$ and $p$ can be regarded together for a possible comparison with some supposed ideas from the TIHR doctrine. For such a purpose we consider the following products: \begin{equation} \delta \left( \left\langle x\right\rangle \right) \cdot \delta \left( \left\langle p\right\rangle \right) =\varepsilon \cdot \eta \eqnum{9.34} \end{equation} \[ \delta \left( \Delta x\right) \cdot \delta \left( \Delta p\right) =\Delta _{IN}x\cdot \Delta _{IN}p\,\times \] \begin{equation} \left\{ \sqrt{1+\left( \frac{\sigma _D}{\Delta _{IN}x}\right) ^2}-1\right\} \cdot \left\{ \sqrt{1+\left( \frac{\mu _D}{\Delta _{IN}p}\right) ^2}-1\right\} \eqnum{9.35} \end{equation} \begin{equation} \delta H_x\cdot \delta H_p=\ln \sqrt{1+\left( \frac{\sigma _D}{\Delta _{IN}x}\right) ^2}\cdot \ln \sqrt{1+\left( \frac{\mu _D}{\Delta _{IN}p}\right) ^2} \eqnum{9.36} \end{equation}
As we have discussed in Secs. II and VI, TIHR supposes that the product of the uncertainties for $x$ and $p$ has a non-null inferior limit. The respective limit is expressed only in terms of the fundamental constant $\hbar $, and it is completely independent of the characteristics of the measuring devices. Comparatively, from Eqs. (9.34)-(9.36) it results that the products of the statistical uncertainties for $x$ and $p$ depend directly on the precision parameters $\varepsilon ,\eta ,\sigma _D$ and $\mu _D$ of the measuring devices. If all the respective parameters are null (the case of ideal measurements) the mentioned products are also null. This means that the products of the mentioned statistical uncertainties for $x$ and $p$ do not have a non-null inferior limit.
Now note that, in the here-proposed model for describing the measurements, the $x$- and $p$-devices are considered as completely independent. In principle, the respective devices can be coupled in a more complex $x$ \& $p$ instrument. A theoretical model for the description of measurements with such an instrument can be obtained only by using a set of adequately justified hypotheses. In such a model, probably, the above presented kernels $G\left( x,x^{\prime }\right) $ and $G\left( p,p^{\prime }\right) $ must be regarded as resulting from a more complex quantity dependent on both pairs $x-x^{\prime }$ and $p-p^{\prime }$. But an $x-p$ coupling of the mentioned kind still requires further investigations. Then, probably, the problem of the product of adequate $x$- and $p$-uncertainties will also be discussed.
\section{THE SIMILAR STOCHASTIC SIGNIFICANCES OF PLANCK'S AND BOLTZMANN'S CONSTANTS}
In sections V, VI, and VII we have argued that the theoretical HR from quantum mechanics given by Eqs. (2.2)-(2.3) have authentic nonquantum analogs. But the mentioned HR are commonly associated with Planck's constant $\hbar .$ Then there naturally arises the question whether $\hbar $ also has an authentic analog in nonquantum physics. In this section we shall present a number of elements which reveal that the answer to the above question is affirmative. The alluded analog of $\hbar $ is shown to be Boltzmann's constant $k$. The viewed analogy is given by the fact that $\hbar $ and $k$ have similar roles of generic indicators of the onefold stochasticity (randomness) for well-specified classes of physical systems (i.e. for individual quantum microparticles and macroscopic nonquantum systems, respectively).
A physical system is considered to have stochastic respectively nonstochastic characteristics depending on the probabilistic nature of its specific variables. For a system, the level (degree) of stochasticity depends on the frame (approach) in which it is studied. So, for a macroscopic system consisting of a large ensemble of molecules, the stochasticity is significant in the statistical physics approach but it is completely unimportant in the frame of continuum mechanics or of thermodynamics. Also, in the case of a microparticle of atomic size, the stochastic characteristics are essential from a quantum mechanics view, but they are negligible from a classical mechanics perspective. Of course, the level of stochasticity can be described by means of certain fluctuation quantities such as the ones defined/implied in Eqs. (4.3), (5.4), (5.6), (5.15), (5.20) and (5.24). But the respective quantities take particular expressions (and values) for diverse variables or various systems. Therefore, they cannot be considered as generic indicators of stochasticity, i.e. as parameters indicating generically the stochasticity level for a set of variables or for a whole class of systems. Below we shall show that the roles of such generic indicators of stochasticity are played, in similar ways, by the constants $k$ and $\hbar $ for the macroscopic nonquantum systems and individual quantum microparticles, respectively.
Firstly, let us discuss the alluded role of $k$ with respect to the macroscopic systems. If such a system is studied in the framework of the phenomenological (quasithermodynamic) theory of fluctuations (Munster, 1960, 1969; Dumitru, 1974a; Landau and Lifschitz, 1984), its microscopic-molecular structure is completely neglected. Also, its specific variables are global macroscopic quantities regarded as real stochastic variables with continuous spectra of values. For such a system, in the mentioned approach, the fluctuations of variables such as the pressure $P$ and volume $V$ are described by the quantities $\Delta _wV,\,\Delta _wP$ and $\left\langle \delta _wV\delta _wP\right\rangle _w$ given in Eq. (6.40). More generally, for the same system we can consider two arbitrary variables $A=A\left( X_j\right) $ and $B=B\left( X_j\right) $ regarded as functions of certain independent variables $X_j\,\left( j=1,2,....,n\right) .$ Then the correlation $\left\langle \delta _wA\delta _wB\right\rangle _w$ describing the fluctuations of $A$ and $B$ is given by \begin{equation} \left\langle \delta _wA\delta _wB\right\rangle _w=k\sum_{j=1}^n\sum_{l=1}^n \frac{\partial \overline{A}}{\partial \overline{X_j}}\frac{\partial \overline{B}}{\partial \overline{X_l}}\left[ \frac{\partial ^2\overline{S}}{\partial \overline{X_j}\partial \overline{X_l}}\right] ^{-1} \eqnum{10.1} \end{equation} where $\overline{A}=\left\langle A\right\rangle _w$, $\left[ a_{jl}\right] ^{-1}$ denotes the inverse of the matrix $a_{jl}$, and $S=S\left( X_j\right) $ is the entropy of the system.
As an example from classical statistical physics we can consider the system referred to in connection with Eqs. (6.41)-(6.42). The corresponding stochastic variables are $H$ and $Z_c$. Their fluctuations are described by the quantities $\Delta _wH,\,\Delta _wZ_c$ and $\left\langle \delta _wH\delta _wZ_c\right\rangle _w$ whose expressions are given by Eqs. (6.42).
Now we can proceed to a direct examination of the expressions from Eqs. (6.40), (10.1) and (6.42) of the quantities $\Delta _wA$ and $\left\langle \delta _wA\delta _wB\right\rangle _w$ which describe the thermal fluctuations in macroscopic systems. One can observe that all the respective expressions are structured as products of $k$ with terms which are independent of $k$. The alluded independence is ensured by the fact that the mentioned terms are expressed only by means of macroscopic non-stochastic quantities. (Note that the mean values $\overline{A}$ from the respective terms must coincide with deterministic (i.e. nonstochastic) quantities from usual thermodynamics.) Due to the above observed structure, the examined fluctuation quantities depend directly on $k$. So they are significant respectively negligible as we take $k\neq 0$ or $k\rightarrow 0$. Because $k$ is a constant, the limit $k\rightarrow 0$ must be regarded in the sense that the quantities directly proportional to $k$ are negligible compared with other terms of the same dimensionality which do not contain $k.$ Now, the fluctuations reveal the stochastic characteristics of the physical systems. Then we can conclude that the thermal stochasticity, for the systems studied in nonquantum statistical physics, is an important respectively insignificant property as we consider $k\neq 0$ or $k\rightarrow 0.$
The mentioned features vis-a-vis the values of $k$ are specific for all the macroscopic systems (e.g. gases, liquids and solids of various inner compositions) and for all their specific global variables. But such a remark reveals the fact that $k$ has the qualities of an authentic {\it generic indicator for thermal stochasticity} (i.e. for the stochasticity evidenced through the thermal fluctuations).
Now let us approach questions connected with the {\it quantum stochasticity} which is specific to the individual, nonrelativistic microparticles of atomic size. Such a kind of stochasticity is revealed by the specific quantum fluctuations of the corresponding variables (of orbital and spin nature). The respective fluctuations are described by means of quantities like the standard deviations and correlations defined in Eqs. (5.2) and (5.15). Some expressions, e.g. those given by Eqs. (6.19) and (6.38), for the mentioned fluctuation quantities show the direct dependence of the respective quantities on Planck's constant $\hbar $. Then it results that $\hbar $ can play the role of generic indicator for quantum stochasticity. Correspondingly, as $\hbar \neq 0$ or $\hbar \rightarrow 0$ the mentioned stochasticity appears as a significant respectively negligible property.
The above mentioned connection between the quantum stochasticity and $\hbar $ must be complemented with certain deeper considerations. Such considerations regard (Dumitru and Veriest, 1995) the different behaviour patterns of various physical variables in the limit $\hbar \rightarrow 0$, usually called the {\bf Q}uantum $\rightarrow $ {\bf C}lassical {\bf L}imit (QCL).
Firstly, let us refer to the spin variables. We consider an electron whose spin state is described by the function (spinor) $\chi $ given by \begin{equation} \chi =\left( \begin{array}{c} \cos \alpha \\ \sin \alpha \end{array} \right) \,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\alpha \in \left[ 0,\frac \pi 2 \right] \eqnum{10.2} \end{equation} For a specific variable we take the $z$-component of the spin angular momentum $\widehat{S}_z=\left( \hbar /2\right) \widehat{\sigma }_z$ ($\widehat{\sigma }_z$ being the corresponding Pauli matrix). For the respective variable in the mentioned state we find \begin{equation} \Delta _\chi S_z=\frac \hbar 2\sin 2\alpha \eqnum{10.3} \end{equation} The quantity $\Delta _\chi S_z$ describes the quantum fluctuations of spin kind, i.e. the spin quantum stochasticity. The presence of $\hbar $ in Eq. (10.3) shows that the respective stochasticity is significant or not as $\hbar \neq 0$ or $\hbar \rightarrow 0$. This means that $\hbar $ plays the role of generic indicator for the respective stochasticity. But in the state described by Eq. (10.2) one also finds $\left\langle S_z\right\rangle _\chi =\left( \hbar /2\right) \cos 2\alpha $. This additional result shows that, in fact, for $\hbar \rightarrow 0$ the variable $S_z$ disappears completely. Then we can note that for spin variables the behaviour pattern in the quantum $\rightarrow $ classical limit consists in the annulment of both stochastic characteristics and mean values (i.e. in a complete disappearance).
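The value of $\left\langle S_z\right\rangle _\chi $ and Eq. (10.3) can be checked with the following minimal numerical sketch (the angle $\alpha $ is an arbitrary illustrative value and we work in units with $\hbar =1$):
\begin{verbatim}
import numpy as np

hbar = 1.0
alpha = 0.4                                   # any angle in [0, pi/2]
chi = np.array([np.cos(alpha), np.sin(alpha)])
Sz = 0.5 * hbar * np.diag([1.0, -1.0])        # (hbar/2) * sigma_z

mean = chi @ Sz @ chi                         # <S_z>_chi
mean_sq = chi @ (Sz @ Sz) @ chi               # <S_z^2>_chi
delta = np.sqrt(mean_sq - mean ** 2)          # Delta_chi S_z

print(mean,  0.5 * hbar * np.cos(2 * alpha))        # both equal
print(delta, 0.5 * hbar * abs(np.sin(2 * alpha)))   # both equal, Eq. (10.3)
\end{verbatim}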
In the case of orbital quantum variables the quantum $\rightarrow $ classical limit implies not only the condition $\hbar \rightarrow 0$ but also the requirement that certain quantum numbers grow unboundedly. The mentioned requirement is due to the fact that certain significant variables connected with the orbital motion (e.g. the energy) must pass from their quantum values to adequate classical values. So, with respect to the mentioned limit, the orbital variables have two kinds of behaviour patterns.
As an example of the first kind we refer to the coordinate $x$ of a harmonic rectilinear oscillator considered in its $n$-th energy level. Then, similarly to $\Delta _\Psi \varphi $ from Eq. (6.19), we have: \begin{equation} \Delta _\Psi x=\left[ \frac \hbar {m\omega }\left( n+\frac 12\right) \right] ^{1/2} \eqnum{10.4} \end{equation} where $m,\omega $ and $n$ denote the mass, the angular frequency and the oscillation quantum number, respectively. For the mentioned example the quantum $\rightarrow $ classical limit means not only $\hbar \rightarrow 0$ but also $n\rightarrow \infty $. This is because the energy must pass from the quantum expression $E=\hbar \omega \left( n+\frac 12\right) $ to the corresponding classical expression $E_{cl}=\frac 12m\omega ^2x_0^2$, where $x_0$ is the coordinate amplitude. Then the standard deviation of $x$ passes from the quantum value given by Eq. (10.4) to the classical value \begin{equation} \Delta _{cl}x=\frac{x_0}{\sqrt{2}} \eqnum{10.5} \end{equation} But $\Delta _\Psi x$ and $\Delta _{cl}x$ are fluctuation parameters which describe the stochastic characteristics of $x$ in the quantum respectively classical contexts. Then one can say that in the quantum $\rightarrow $ classical limit the above considered coordinate $x$ preserves both its role of significant variable and its stochasticity.
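The classical value (10.5) can be recovered from a plain time average over the classical trajectory $x(t)=x_0\cos \omega t$, while Eq. (10.4) gives $\Delta _\Psi x=\left[ E/m\omega ^2\right] ^{1/2}$, which indeed equals $x_0/\sqrt{2}$ once $E=E_{cl}=\frac 12m\omega ^2x_0^2$. A brief numerical sketch (with assumed, purely illustrative values of $x_0$ and $\omega $) is:
\begin{verbatim}
import numpy as np

x0, omega = 1.0, 2.0
t = np.linspace(0.0, 2.0 * np.pi / omega, 200001)   # one full period
xt = x0 * np.cos(omega * t)                          # classical trajectory

mean = xt.mean()
delta_cl = np.sqrt((xt ** 2).mean() - mean ** 2)     # time-averaged standard deviation

print(delta_cl, x0 / np.sqrt(2.0))                   # both approx 0.7071, Eq. (10.5)
\end{verbatim}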
As an example of the second kind of orbital variable we consider the distance $r$ between the electron and the nucleus in a hydrogen atom. We refer to an electron in a state described by the wave function $\Psi _{nlm}$ with $l=n-1$ (where $n,l$ and $m$ are respectively the principal, orbital and magnetic quantum numbers). Then for $\Delta _\Psi r=\Delta r$ we can use the expression given by Schwabl (1995), rewritten in the form \begin{equation} \Delta r=\frac{2\pi \varepsilon _0}{m_0e}\hbar ^2n\left( n+1\right) ^{1/2} \eqnum{10.6} \end{equation} with $m_0$ and $e$ denoting the mass respectively the charge of the electron. The energy of the electron is \begin{equation} E_n=-\frac{m_0e^4}{32\pi ^2\varepsilon _0^2\hbar ^2n^2} \eqnum{10.7} \end{equation} The quantum $\rightarrow $ classical limit requires that $E_n\rightarrow E_{cl}$ with $E_{cl}$ denoting the classical value of the energy. Then from Eqs. (10.6) and (10.7) it results that in the respective limit we have \begin{equation} \Delta r\rightarrow \left( \frac{\hbar e^4}{16\pi \varepsilon _0}\right) ^{1/2}\left( -2m_0E_{cl}\right) ^{-1/4} \eqnum{10.8} \end{equation} In the same circumstances we obtain \begin{equation} \left\langle r\right\rangle _\Psi \rightarrow r_{cl}=-\frac{e^2}{8\pi \varepsilon _0E_{cl}} \eqnum{10.9} \end{equation} So it results that in the quantum $\rightarrow $ classical limit (when $\hbar \rightarrow 0$ and $E_n\rightarrow E_{cl}$) we have $\Delta r\rightarrow 0$ and $\left\langle r\right\rangle _\Psi \rightarrow r_{cl}\neq 0.$ This means that $r$ preserves its role of significant variable but loses its stochasticity.
The above considerations can be concluded with the following remark:
\underline{${\bf P-10.1}$} : In the quantum$\rightarrow $classical limit the physical variables display the following different behaviour patterns:
(i) The complete disappearance of both stochastic characteristics and mean values, as in the case of spin variables.
(ii) The preservation of both the role of significant variable and of the stochastic characteristics, as in the case of the oscillator coordinate.
(iii) The preservation of the role of significant variable but the loss of the stochastic characteristics, as in the case of the electron-nucleus distance.$ \blacktriangle $
It is clear that the above remark corrects the traditional belief of a unique behaviour pattern compulsorily associated with the disappearance of the ''uncertainties'' (i.e. of the standard deviations $\Delta _\Psi A$).
Now let us return to the quantum stochasticity, specific to the variables of individual microparticles. We think that, in spite of the peculiarities mentioned in ${\bf P-10.1}$, the Planck constant $\hbar $ can be considered as a generic indicator of such a stochasticity. Moreover, we consider that the respective role of $\hbar $ is completely similar to that of the Boltzmann constant $k$ with respect to the macroscopic thermal stochasticity (see above).
Regarding the mentioned roles of $\hbar $ and $k$ another observation must be added. In the discussed cases, $\hbar $ and $k$ appear independently and singly. That is why one can say that the stochasticity of the corresponding systems (microparticles and classical macroscopic systems) has a onefold character. But there are also physical systems endowed with a twofold stochasticity characterized by a simultaneous connection with both $\hbar $ and $k$. Such systems are those studied in quantum statistical physics, i.e., the bodies of macroscopic size considered as statistical ensembles of quantum microparticles. The stochasticity of the respective systems is revealed by corresponding fluctuations described by the quantities given by Eqs. (5.24) which depend simultaneously on both $\hbar $ and $k$. The respective dependence is revealed by the so-called fluctuation-dissipation theorem. According to the respective theorem (Kubo 1957; Zubarev 1971; Balescu 1975) one can write \[ \left\langle \delta _\rho \widehat{A}\delta _\rho \widehat{B}\right\rangle _\rho +\left\langle \delta _\rho \widehat{B}\delta _\rho \widehat{A} \right\rangle _\rho = \] \begin{equation} =\frac i{2\pi }\int_{-\infty }^\infty \hbar \coth \left( \frac{\hbar \omega }{2kT}\right) \left[ \chi _{AB}^{*}\left( \omega \right) -\chi _{BA}\left( \omega \right) \right] d\omega \eqnum{10.11} \end{equation} with $\chi _{AB}^{*}\left( \omega \right) $ as complex conjugate of $\chi _{AB}\left( \omega \right) .$
In Eq. (10.11) $\chi _{AB}\left( \omega \right) $ represent the generalized susceptibilities which appear also in the deterministic framework of nonequilibrium thermodynamics (De Groot and Mazur, 1962). But it is a known fact that in the respective framework all the stochastic characteristics of physical variables are neglected and no microscopic (i.e. atomic or molecular) structure of the systems is taken into account. Another fact is that $\chi _{AB}\left( \omega \right) $ are directly connected (Landau and Lifschitz, 1984) with the macroscopic nonstochastic expression of the energy dissipated inside the thermodynamic systems acted on by external deterministic and macroscopic perturbations. The mentioned facts show that the susceptibilities $\chi _{AB}\left( \omega \right) $ do not depend on the constants $\hbar $ and $k.$
The above mentioned property of $\chi _{AB}\left( \omega \right) $ combined with Eq. (10.11) shows that the sole significant dependence of the fluctuation quantities given by Eq. (5.24) on the constants $k$ and $\hbar $ is given by the factor $\hbar \coth \left( \hbar \omega /2kT\right) $. For the respective factor one can write \begin{equation} \lim_{k\rightarrow 0}\left\{ \lim_{\hbar \rightarrow 0}\left[ \hbar \coth \left( \frac{\hbar \omega }{2kT}\right) \right] \right\} =0 \eqnum{10.12} \end{equation} This means that when both $\hbar $ and $k$ tend to zero the fluctuation quantities defined by Eq. (5.24) become null. So it results that in the mentioned limit the fluctuations in quantum statistical systems cease to manifest themselves. Consequently, for such a limit the respective systems lose their stochastic characteristics. Then, in the spirit of the above presented opinions, one can state that quantum statistical systems can be considered as endowed with a twofold stochasticity of the thermal and quantum type, revealed respectively by $k$ and $\hbar $ as generic indicators.
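A small numerical check of the iterated limit in Eq. (10.12) (with assumed, purely illustrative values of $\omega $ and $T$) is immediate: as $\hbar \rightarrow 0$ the factor approaches $2kT/\omega $, which in turn tends to zero as $k\rightarrow 0$.
\begin{verbatim}
import math

omega, T = 1.0, 300.0

def factor(hbar, k):
    # hbar * coth(hbar*omega / (2*k*T)), the factor appearing in Eqs. (10.11)-(10.12)
    return hbar / math.tanh(hbar * omega / (2.0 * k * T))

for k in (1.0, 1e-3, 1e-6):
    values = [factor(h, k) for h in (1e-2, 1e-4, 1e-6)]
    print(k, values, "-> limit", 2.0 * k * T / omega)
\end{verbatim}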
In the above considerations the stochasticity appears as a property of exclusively intrinsic type. This means that it is connected only with the internal (inner) characteristics of the considered systems and does not depend on external (outside) factors. Moreover, the mentioned stochasticity is directly and strongly associated with $\hbar $ and $k$ as generic indicators. But the stochasticity can also be of extrinsic type. In such cases it is essentially connected with factors from the outside (surroundings) of the considered physical systems. Also, the extrinsic stochasticity is not (necessarily) associated with $\hbar $ and $k.$ As examples of systems with stochasticity of exclusively extrinsic type one can consider an empty bottle floating on a stormy sea or a die in a game.
In practice one also finds systems endowed with stochasticity of both the intrinsic and extrinsic types. Such are, for example, the electric and electronic circuits. For a circuit the intrinsic stochasticity is caused by the thermal agitation of the charge carriers and/or of the elementary (electric or magnetic) dipoles inside its constitutive elements (i.e. inside resistors, inductances, condensers, transistors, integrated circuits, etc.). The respective agitation is responsible for fluctuations of the macroscopic voltages and currents. Such fluctuations are known (Robinson, 1974) as thermal (or Nyquist) noises. Note that in the case of circuits the intrinsic stochasticity is characterized by generic indicators. Such indicators are $k$ alone, if the circuit is considered as a classical (nonquantum) statistical system, and $k$ together with $\hbar $ when the circuit is viewed as a quantum statistical system. Otherwise, the stochasticity of a circuit can also be of extrinsic type when it is under the influence of a large variety of factors. Such factors can be: thermal fluctuations in the surrounding medium, accidental outside discharges and inductions, atmospheric (or even cosmic) electrical phenomena. The mentioned extrinsic stochasticity is also responsible for noises in the macroscopic currents and voltages in the circuits. But it must be noted that, even for circuits, the extrinsic stochasticity is not connected in principle with generic indicators dependent on fundamental physical constants (such as $\hbar $ and $k$). As an interesting case implying stochasticity of both intrinsic and extrinsic types one can consider a measuring process viewed as in Sec. IX. In such a case the intrinsic stochasticity regards the inner properties of the measured system while the extrinsic one is due to the measuring apparatus. The corresponding intrinsic stochasticity is connected with $\hbar $ and $k$ as generic indicators in the above discussed sense. However, the extrinsic stochasticity, due to the apparatus, seems not to be connected with certain generic indicators. This is because of the large diversity of apparatuses as regards their own structure and accuracy.
\section{A FEW REMARKS ON SEVERAL ADJACENT QUESTIONS}
With respect to the problematics of HR proper, the literature (see the bibliographical publications mentioned at the end of Sec. I) contains a large variety of adjacent questions which, one way or another, are allied with the subjects discussed in the previous sections of this paper. Now, we wish to note a few remarks on several such questions.
$\blacklozenge $ Firstly, let us refer to the consequences of the here-proposed reconsideration of HR for both the working procedures and the interpretational frame of quantum mechanics. Note that our reconsideration does not prejudice in any way the authentic version of the mentioned working procedures, which, in fact, have met with unquestionable successes in both basic and applicative researches. As regards the alluded interpretational frame, our reconsideration, mainly by the abandonment of TIHR, generates major and important changes. But such changes must be regarded as beneficial, since they can offer a genuine elucidation to the controversial questions introduced into the frame of science by TIHR.$ \blacksquare $
$\blacklozenge $ By reinterpreting HR in the sense presented in Sec. VIII the respective relations lose their quality of crucial physical formulae. So, one can find a consonance with the prediction (Dirac, 1963): ''I think one can make a safe guess that uncertainty relations in their present form will not survive in the physics of future''. Note that the above prediction was founded not on some considerations about the essence of HR but on a supposition about the future role of $\hbar $ in physics. So it was supposed that $\hbar $ would be a derived quantity while $c$ and $e$ (the speed of light and the elementary charge) would remain fundamental constants. That is why we wish to add here that our view about HR does not affect the actual position of $\hbar $ as a physical constant. More precisely, our findings cannot answer the question whether $\hbar $ will be a fundamental constant or a derived quantity (e.g. expressed in terms of $c$ and $e$).$\blacksquare $
$\blacklozenge $ As it was pointed out in Sec. X, Planck's constant $\hbar $ has also the significance of an estimator for the spin of microparticles (like the electron). So the spin appears as a notable respectively absent property as $\hbar \neq 0$ or $\hbar \rightarrow 0$. On the other hand, with reference to the spin there are also some intriguing questions related to its relativistic justification. Usually (Dirac, 1958; Blochintsev, 1981) for electrons the spin is regarded as essentially explicable as a consequence of relativistic theory. But, as it is known, the relativistic characteristics of a particle are evidenced by the relative value $v/c$ of its velocity $v$ compared with the light velocity $c$. Particularly, the respective characteristics must be insignificant when $v/c\ll 1$ or $c\rightarrow \infty $. Then the absence of the factor $v/c$ (or of some other equivalent factors) in the description of the electron spin variable appears as an intriguing fact, to say the least. Is such a fact a sufficient reason to consider $\hbar $ as a derived quantity in the sense guessed by Dirac (1963)? In such a sense $\hbar =\frac{e^2}{4\pi \cdot \varepsilon _0\cdot c\cdot \alpha }$ ($\varepsilon _0$ = the permittivity of vacuum and $\alpha \approx \frac 1{137}$ = the fine structure constant) and the situations with $\hbar \rightarrow 0$ appear when $c\rightarrow \infty $. So the significance of $\hbar $ as spin estimator can apparently be related with some aspects of relativity. But here it must be noted the surprising fact that even in the nonrelativistic limit (i.e. when $v/c\ll 1$ or $c\rightarrow \infty $) the spin remains a significant variable of the electron. It is known (Ivanov, 1989) that the electron spin plays a decisive role (as a fourth quantum variable/number) in the electronic configuration of many-electron atoms, in spite of the fact that for atomic electrons $v/c\ll 1$. Due to the here mentioned features we think that the relativistic justification of the spin appears as an intriguing question which requires further investigations.$\blacksquare $
$\blacklozenge $ Our findings also facilitate a remark in connection with another supposition about $\hbar $. The respective supposition regards the possible existence of multiple Planck constants associated with various kinds of microparticles (e.g. with electrons, protons, neutrons). Currently (Wichmann, 1971; Fischbach {\it et al.}, 1991), the tendency is to contest such a possibility and to promote the idea of a unique Planck constant. For this one appeals either to experimental data or to some connection with the fundamental conservation laws. We think that our view about $\hbar $ pleads somewhat for the alluded idea of uniqueness. So, regarding $\hbar $ as a generic indicator of quantum stochasticity, it must have the same value for various kinds of microparticles. This is because, similarly, the Boltzmann constant $k$ in its role of generic indicator for thermal stochasticity has a unique value for various kinds of macroscopic systems (e.g. hydrogen gas, liquid water or crystalline germanium).$\blacksquare $
$\blacklozenge $ The revealed stochastic similarity among quantum microparticles and macroscopic systems facilitates another remark. In the macroscopic case the stochastic characteristics of {\it an individual system} are incorporated in the probability distribution $w$ (see sections V and VI. K). As we have shown, the quantum analog of $w(x)$ is the wave function $\Psi $ (or the square $\left| \Psi \right| ^2$ of its modulus). Such a $w-\Psi $ similarity motivates us to agree with the idea (Van Kampen, 1978) that $\Psi $ refers to a single system (microparticle). Simultaneously, we incline to regard with circumspection the opinions that $\Psi $ belongs to an ''ensemble of equally prepared systems'' (Tschudi, 1987) or to an ''abstract physical object'' (Mayants, 1984). Moreover, our agreement and opinion are also motivated by the observation that in practical applications both $\Psi $ and $w$ are calculated for individual systems (e.g. for an electron in a hydrogen atom or, respectively, for an ideal gas).$\blacksquare $
$\blacklozenge $ A distinct group of remarks regards the reduction of stochasticity to subjacent elements of deterministic nature, for both cases of thermodynamic systems and quantum microparticles. In the first case the stochasticity refers to the macroscopic variables which characterize each system as a whole. But according to classical statistical mechanics the respective variables are expressible in terms of subjacent molecular quantities (coordinates and momenta) which are considered as deterministic elements. In the case of quantum microparticles a similar problem was taken into account. So the idea was promoted that the stochastic quantum variables (characterizing each microparticle as a whole) would be expressible in terms of some subjacent elements of deterministic nature, called ''hidden variables''. Viewing comparatively the two mentioned cases we think that it is of nontrivial interest to note the following observations:
(i) In the case of thermodynamic systems the subjacent molecular quantities can be justified in essence only by adequate experimental facts.
(ii) The mentioned molecular quantities are deterministic (i.e. dispersion-free) only from a microscopic perspective, connected with the characteristics of the molecules. From a macroscopic perspective, connected with a thermodynamic system as a whole, they are stochastic variables. That is why, for example, with respect to a thermodynamic system like an ideal gas one speaks about the mean value and non-null dispersion of the molecular velocity.
(iii) Even by taking into account the existence of the subjacent molecular quantities, the macroscopic variables characterizing a thermodynamic system as a whole keep their stochastic characteristics. Particularly, the mentioned existence does not influence the verity or the significance of the macroscopic relations from the family of Eqs. (5.21)-(5.23).
(iv) The above observations (i)-(iii) reveal as unfounded the idea (Uffink and van Lith, 1999) that the mere examination of some theoretical formulas from the mentioned family can shed light on the problem of reducing thermodynamic stochasticity to subjacent deterministic elements.
(v) By analogy with the fact noted in (i), in the case of quantum microparticles the existence of the ``hidden variables'' must be proved first of all by indubitable experimental facts. But, as far as we know, no such experimental proof has been established by scientific research until now.
(vi) The existence of the mentioned ``hidden variables'' cannot be asserted only by means of considerations on some theoretical formulas regarding the global stochasticity of quantum microparticles, such as the HR.
(vii) The global description of a quantum microparticle remains equally probabilistic in both cases, with or without ``hidden variables''. More exactly, in both cases, for a variable referring to a quantum microparticle as a whole the theoretical predictions must be made in probabilistic terms, while the experimental information can be obtained only from measurements consisting of statistical samplings.
$\blacklozenge $ The discussions from Sec. X about stochasticity suggest a remark connected with Boltzmann's constant $k$. As we have shown, $k$ plays a major role in the evaluation of the level of thermal stochasticity. But the respective stochasticity must be regarded as an important property of macroscopic systems. So one finds unfounded the idea, promoted in some publications (Wichman, 1971; Landau and Lifschitz, 1984; Storm, 1986), that in physics $k$ has only the minor role of a conversion factor between temperature scales (from energetic units into Kelvin degrees). $\blacksquare $
\pagebreak
\section{CONCLUSIONS}
We started the paper by recalling the fact that even in our days TIHR persists as a source of unelucidated controversies about its defects. Motivated by this fact we proposed an investigation into the very core of the alluded controversies and defects. For such a purpose we first identified the main elements (assertions and arguments) of TIHR. Then, with reference to the mentioned elements, we localized the best known and most critical defects of TIHR.
In such a reference frame we analyzed the reality of the respective defects. We found that all of them are veridical. Moreover, for TIHR, they are insurmountable and incriminate each of its main elements. So we can conclude that the sole reasonable attitude is to abandon TIHR as an unjustified doctrine.
The mentioned abandonment must be accompanied by a search for a new and genuine reinterpretation of HR. In this direction we opine that the HR of thought-experimental nature must be disregarded, because they are fictitious formulae without a true physical significance. On the other hand, we think that the theoretical HR are authentic physical formulae regarding the quantum fluctuations. So regarded, the theoretical HR belong to a large class of formulae specific to systems, of both quantum and non-quantum nature, endowed with stochastic characteristics. By adopting the mentioned view of HR, all the controversies connected with the TIHR are elucidated in a natural way.
In this view, HR lose their traditional role of crucial physical formulae connected with the description of measurement characteristics (uncertainties). In the view promoted here, the respective description must be done in terms (and formulae) which do not belong to the traditional chapters of physics (including quantum mechanics). We suggested that a promising version for the description of measurements can be formulated in terms of information theory. So a measurement can be considered as an information transmission, from the measured system (information source) through the measuring device (transmission channel) to the device recorder (information receiver). Then the measuring uncertainties appear as alterations of the processed information. In Sec. IX we illustrated the alluded informational model with some concrete considerations.
In our opinion the theoretical HR and their classical (non-quantum) analogues are connected with stochasticity regarded as an important property of physical systems. We showed that the respective property is characterized by the following generic indicators:
(i) Planck's constant $\hbar $ (for quantum microparticles),
(ii) Boltzmann's constant $k$ (for classical thermodynamic systems), and
(iii) both $\hbar $ and $k$ (for quantum statistical systems).
In the end, in Sec. XI, we presented remarks on some questions which are adjacent to the subjects discussed in the other parts of the paper.
\section*{ACKNOWLEDGMENTS}
$\blacklozenge $ Several publications studied by me in connection with this and other previous papers of mine were put at my disposal by their authors (often in a preprint or amended-reprint form). To all the respective authors I express my sincere thanks.
$\blacklozenge $ Referring to my own views, over the years I have received many comments which stimulated my work. I remain profoundly grateful to the corresponding commentators (referees and readers of my papers, colleagues). But, of course, for all the shortcomings of my views I assume the entire responsibility.
$\blacklozenge $ The work involved in the long-standing studies reported here took me away from some family duties. For their understanding as well as for their permanent support I am deeply indebted to all my family.
$\blacklozenge $ Finally, I mention that the research reported here was finalized with partial support from the Romanian Ministry of National Education under a grant.
\pagebreak
\section*{LIST OF ACRONYMS}
\begin{tabbing}
CRCRCRCRCR \= correlation relation(s) \kill\\
CR\> correlation relation(s)\\
HR\> Heisenberg's relation(s)\\
P\> point\\
P.../A\> assertion point\\
P.../M\> motivation point\\
QCL\> quantum$\longrightarrow $classical limit\\
QTP\> quantum torsion pendulum\\
SRTE\> super-resolution thought experiment(s/al)\\
TE\> thought experiment(s/al)\\
TIHR\> traditional interpretation of Heisenberg's relations\\
\end{tabbing}
\begin{references} \bibitem{} Aharonov Y. and Bohm D., 1961, Phys. Rev. {\bf 122}, 1649.
\bibitem{} Aharonov Y. and Bohm D., 1964, Phys. Rev. {\bf 134B}, 1417.
\bibitem{} Albertson J., 1963, Phys. Rev. {\bf 129}, 940.
\bibitem{} Alcook G.R. 1969 a, Ann. Phys. (N.Y.) {\bf 53}, 253.
\bibitem{} Alcook G.R. 1969 b, Ann. Phys. (N.Y.) {\bf 53}, 286.
\bibitem{} Alcook G.R. 1969 c, Ann. Phys. (N.Y.) {\bf 53}, 311.
\bibitem{} Balescu R., 1975 {\it Equilibrium and Nonequilibrium Statistical Mechanics (Wiley, New York).}
\bibitem{} Ballentine L. E., 1987, Am. J. Phys. {\bf 55, 785.}
\bibitem{} Bauer M. and P.A.Mello, 1978, Ann. Phys. (New York) {\bf 111, } 38.
\bibitem{} Bazarov I. P., 1979, {\it Methodological Problems of Statistical Physics and Thermodynamics} (Moscow University Press, Moscow) (in Russian).
\bibitem{} Bell J. S.,1985, Private letter to the author, dated January 29.
\bibitem{} Blokhintsev D., 1981, {\it Principes de m\'{e}canique quantique} (Mir, Moscou).
\bibitem{} Bohm D., 1957, {\it Causality and Chance in Modern Physics } (Routlege Kegan Paul, London).
\bibitem{} Bouten M., N. Maene, P.Van Leuven, 1965, Nuovo Cimento {\bf 37}, 119.
\bibitem{} Braginsky V. B. and Khalili, 1992, {\it Quantum Measurement} (Cambridge University Press, Cambridge).
\bibitem{} Bransden B. H., C.J. Joachain, 1994, {\it Introduction to Quantum Mechanics (Longman, Essex).}
\bibitem{} Bunge M., 1970, Can. J. Phys. {\bf 8}, 183.
\bibitem{} Bunge M., 1977 a, in: {\it Denken und Umdenken (zu Werk und Werkung von Werner Heisenberg)}, edited by H. Pfepfer (Piper R., M\"{u}nchen).
\bibitem{} Bunge M., 1977b, Int. J. Quantum Chem. (Suppl 1), {\bf 12}, 1.
\bibitem{} Bush P., P.J. Lathi and P. Mittelstaedt, 1996, {\it The Quantum Theory of Measurement}, (Cambridge University Press, Cambridge).
\bibitem{} Carruthers P. and M. Nietto, 1968, Rev. Mod Phys. {\bf 40}, 411.
\bibitem{} Cramer J.C., 1986, Rev. Mod Phys. {\bf 58, }647.
\bibitem{} Croca J.R., A. Rica da Silva and J.S. Ramos, 1996, {\it ``Experimental Violation of Heisenberg's Uncertainty Relations by the Scanning Near-Field Optical Microscope''}, preprint, University of Lisboa, Portugal. (This work discusses the potential implications of performances attained in optical experiments such as those reported by Pohl et al. 1984, and by Heiselmann and Pohl 1984).
\bibitem{} Davidson R.E., 1968, J. Chem. Phys. {\bf 42}, 1491.
\bibitem{} Davydov A.S., 1973, {\it Quantum Mechanics} (Nauka, Moscow), (in Russian)
\bibitem{} De Brujin N.G., 1996, in; {\it Inequalities,} edited by O. Sisha (Academic, New York).
\bibitem{} De Groot S.R. and P. Mazur, 1962, {\it Nonequilibrium Thermodynamics} (North Holland, Amsterdam).
\bibitem{} De Witt B.S.,Graham R.N., 1971, Am. J. Phys., {\bf 39}, 724.
\bibitem{} Dirac P.A.M., 1958, {\it The Principles of Quantum Mechanics (Clarendon Press, Oxford).}
\bibitem{} Dirac P.A.M., 1963, Sci. Am. {\bf 208}({\it 5}) 45
\bibitem{} Dodonov V.V. and V.I. Man'ko, 1987, Proceedings of the Lebedev Physics Institute {\bf 183}, 5 (in Russian; English version Nova Science Pub., Commack, New York, 1989).
\bibitem{} Dumitru S., 1974 a, Physica Scripta {\bf 10}, 101.
\bibitem{} Dumitru S., 1974 b, Phys. Lett \ {\bf A 48}, 109.
\bibitem{} Dumitru S., 1977, Epistemological Letters {\bf 15},1.
\bibitem{} Dumitru S., 1980, {\it What are in fact the Uncertainty Relations? (An Analysis of the Insufficiencies of the Conventional Interpretations of Uncertainty Relations)} Preprint 120p (University of Brasov).
\bibitem{} Dumitru S., 1984 {\it Microphysics - Solved Problems and a Critical Examination of the Question of Uncertainty Relations} (Dacia, Cluj-Napoca), (in Romanian).
\bibitem{} Dumitru S., 1987, in: {\it Recent Advances in Statistical Physics } (Proceedings of International Bose Symposium on Statistical Physics, Calcutta, India, 28-31 dec. 1984) edited by B. Datta and M. Dutta (World Scientific, Singapore)
\bibitem{} Dumitru S., 1989, Rev. Roum. Phys. {\bf 34,} 329.
\bibitem{} Dumitru S., 1991, in: {\it Quantum Field Theory, Quantum Mechanics and Quantum Optics. Part 1. Symmetries} {\it and Algebric Structures. }(Proceedings 18th International Colloquium on Group Theoretical Methods in Physics, Moscow - June 4-9, 1990) edited by V.V. Dodonov and V.I. Man'ko (Nova Science, New York).
\bibitem{} Dumitru S., 1993, Physics Essays {\bf 6}, 5.
\bibitem{} Dumitru S., 1996, Romanian Reports in Physics {\bf 48}, 891.
\bibitem{} Dumitru S., 1999, Optik (Stuttgart) {\bf 110}, 110.
\bibitem{} Dumitru S. and E.I. Verriest, 1995, Int. J. Theor.Phys. {\bf 34} , 1785.
\bibitem{} Evett A. A. and H.M. Mahmoud, 1965, Nuovo Cimento, {\bf 38}, 295.
\bibitem{} Fain V.M. and A.I. Khainnin, 1965, {\it Quantum Radiophysics } (Sov. Radio, Moscow) (in Russian)
\bibitem{} Fischbach E., G.J. Green and R.J. Hughes, 1991, Phys. Rev. Lett. {\bf 66}, 256.
\bibitem{} Fock V., 1962, Zh. Eksp. Theor. Phys. {\bf 42}, 1135.
\bibitem{} Frank - Kamenetsky D.A. 1940, Zh. Eksp. Theor. Phys. {\bf 10}, 700.
\bibitem{} Fujiwara I., 1970, Progr. Theor. Phys. {\bf 44}, 1701.
\bibitem{} F\"{u}rth R., 1933, Z. Phys. {\bf 81}, 143.
\bibitem{} Galinski V., B. Karnakov and V. Kogan 1985, {\it Problemes de Mecanique Quantique (}Mir, Moscou{\it )}
\bibitem{} Gellert W., et al (Ed), 1975, {\it Kleine Enzyklopadie der Mathematik} (VEB Bibliogr., Leipzig).
\bibitem{} Ghanapragasam B. and M. D. Srinivas, 1979, Pranama, {\bf 12}, 699.
\bibitem{} Gudder S. P., 1979, {\it Stochastic Methods in Quantum Mechanics} (North Holland, Amsterdam)
\bibitem{} Harris R.A., and H. Strauss, 1978, J. Chem. Education {\bf 55}, 374.
\bibitem{} Hasse R.W., 1980, J. Phys. A, {\bf 13}, 307.
\bibitem{} Hay O., and A. Peres 1998, Phys. Rev. A, {\bf 58}, 116.
\bibitem{} Heiselman H. and D. W. Pohl, 1994, Appl. Phys. A, {\bf 58}, 89.
\bibitem{} Heisenberg W., 1927, Z. Phys. {\bf 43}, 172.
\bibitem{} Heisenberg W., 1930, {\it The Physical Principles of Quantum Theory }(First German Edition, Leipzig 1930; English version, Dover Pub., New York 1949)
\bibitem{} Heisenberg W., 1977, in: {\it The Uncertainty Principle and Foundations of Quantum Mechanics,} edited by W. C. Price and S.S. Chissick (Wiley, New York)
\bibitem{} Holevo A.S., 1981, {\it Probabilistic and Statistical Aspects of Quantum Theory} (Nauka, Moscow) (in Russian)
\bibitem{} Ivanov B.N., 1989, {\it Fundamentals of Physics} (Mir, Moscou)
\bibitem{} Jammer M., 1966, {\it The Conceptual Development of Quantum Mechanics} (Mc Graw Hill, New York)
\bibitem{} Jammer M., 1974, {\it The Philosophy of Quantum Mechanics } (Wiley, New York)
\bibitem{} Jancel R., 1973, {\it Foundations of Classical and Quantum Statistical Mechanics }(Pergamon, New York)
\bibitem{} Judge D., 1963, Phys. Lett. {\bf 5}, 189.
\bibitem{} Judge D., 1964, Nuovo Cimento{\bf \ 31}, 322.
\bibitem{} Judge D., and J.T. Levis, 1963, Phys. Lett. {\bf 5}, 190.
\bibitem{} Kijowskii J., 1974, Rep. Math. Phys. {\bf 6}, 361.
\bibitem{} Kobe D.H. and V.C. Aquilera-Navaro, 1994, Phys. Rev. A. {\bf 50} , 933.
\bibitem{} Kompaneyets A.S., 1966, {\it Basic Concepts in Quantum Mechanics }(Reinhold, New York).
\bibitem{} Korn G.A. and T.M. Korn, 1968, {\it Mathematical Handbook }({\it For Scientists and Engineers}) (Mc Graw Hill, New York).
\bibitem{} Krauss K., 1965, Z. Phys. {\bf 188}, 374.
\bibitem{} Krauss K., 1968, Z. Phys. {\bf 201}, 134.
\bibitem{} Kubo R. 1957, J. Phys. Soc. Japan {\bf 12}, 570.
\bibitem{} Landau L. and E. Lifchitz, 1984, {\it Physique Statistique} (Mir, Moscou).
\bibitem{} Levy-Leblond J.-M., 1972, Am. J.Phys. {\bf 40}, 899.
\bibitem{} Levy-Leblond J.-M., 1976, Ann. Phys. {\bf 101}, 319.
\bibitem{} Linder A., Rei$\beta $., Wassiliadis G. and Freese H., 1996, Phys. Lett. A. {\bf 218}, 1
\bibitem{} Martens H., 1991, {\it Uncertainty Principle, }Ph. D. Thesis (Tehnical University, Eidhoven).
\bibitem{} Mayants L.S., 1984, {\it The Enigma of Probability and Physics } (D. Reidel, Dordrecht).
\bibitem{} Mehra J. and H. Rechenberg, 1982, {\it The Historical Development of Quantum Theory}, vol. 1-9 (Springer, Berlin) (1982 for vol. 1-4 and to be published for vol. 5-9).
\bibitem{} M\"{u}nster A., 1960, in {\it Thermodinamica dei processi irreversibili,} Redinconti della Scuola Internazionale de fisica ``E. Fermi'' corso X (Soc. Italiana de Fisica, Bologna).
\bibitem{} M\"{u}nster A., 1969, {\it Statistical Thermodinamics, }vol. I, (Springer, Berlin).
\bibitem{} Nilson D.R., 1976, in: {\it Logic and Probability in Quantum Mechanics,} edited by P. Suppes (D. Reidel, Dordrecht).
\bibitem{} Omnes R., 1992, Rev. Mod. Phys. {\bf 64}, 339.
\bibitem{} Omnes R., 1994, {\it The Interpretation of Quantum Mechanics (Princeton Univerity Press, Princeton).}
\bibitem{} Opatrny T., 1995, J. Phys. A {\bf 28,} 6961.
\bibitem{} Piron C., 1982, Lect. Notes. Phys. (Springer) {\bf 153}, 179.
\bibitem{} Pohl D.W., W. Denk and M. Lanz, 1984, Appl. Phys. Lett. {\bf 44} , 651.
\bibitem{} Primas H., 1981, {\it Chemistry, Quantum Mechanics and Reductionsm, }Lecture Notes in Chemistry, vol. 24 (Springer, Berlin).
\bibitem{} Primas H. and U. M\"{u}ller-Herold, 1978, Adv.Chem.Phys. {\bf 38} , 1. pg.1-107.
\bibitem{} Robinson F.N.H., 1974, {\it Noises and Fluctuations in Electronic Devices and Circuits }(Clarendon, Oxford).
\bibitem{} Ror C.L. and A.B. Samigrahi, 1979, Am. J. Phys. {\bf 47}, 965.
\bibitem{} Rosenfeld L., 1961, Nature, (London) {\bf 190}, 384.
\bibitem{} Rosenfeld L., 1962, in: {\it Questions of Irreversibility and Ergodicity in Ergodic Theories} edited by P. Caldirola (Zanichelli, Bologna).
\bibitem{} Roychoudhuri C., 1978, Found. Phys. {\bf 8}, 845.
\bibitem{} Ruppeiner G., 1995, Rev. Mod. Phys.{\bf \ 67}, 605.
\bibitem{} Schaposhnikov I.G., 1947, Zh. Eksp. Theor. Phys. {\bf 17}, 485.
\bibitem{} Scheer J., K. G\"{o}tsch, T. Koch, G. L\"{u}ning, M. Schmidt and H. Ziggel, 1989, Found. Phys. Lett. {\bf 2}, 71.
\bibitem{} Schilling H., 1972 {\it Statistische Physik in Beispielen} (Veb Fechbuchverlang, Leipzig).
\bibitem{} Schroeck F.E. Jr., 1982, Mathematical Reviews 82d:81007.
\bibitem{} Schwabl F., 1995, {\it Quantum Mechanics, }2nd. rev., ed. (Springer, Berlin).
\bibitem{} Storm L., 1986 in: {\it Noise in Physical Systems and 1/f Noise-1985 }(North Holland, Amsterdam).
\bibitem{} Sturzu I., 1999, Revista de Filosofie, {\bf 410}, 3-4 (in press).
\bibitem{} Surdin M., 1973, Int. J. Theor. Phys.{\bf \ 8}, 183.
\bibitem{} Synge J.L., 1971, Proc. R. Soc. London {\bf A325,} 151.
\bibitem{} Tarascov L., 1980, {\it Physique Quantique et Operateurs Lineaires (}Mir, Moscou{\it ).}
\bibitem{} Terletsky Ya. P., 1974, Proc. Univ. ``P. Lumumba'' - Theor. Phys. {\bf 70/8}, 3.
\bibitem{} Tschudi H.R., 1987, Helv. Phys. Acta {\bf 60}, 363.
\bibitem{} Tyablikov S.V., 1975, {\it Methods of Quantum Theory of Magnetism }(Nauka, Moscow)(in Russian).
\bibitem{} Uffink J. and van Lith J., 1999, Found. Phys. {\bf 29}, 655.
\bibitem{} Van Kampen N.G., 1988, Physica A (Utrecht) {\bf 153}, 97.
\bibitem{} Vorontsov Yn. I., 1981, Uspekhi Fiz. Nauk {\bf 133}, 351.
\bibitem{} Weissman M., 1981, Ann. Rev. Phys. Chem. {\bf 32}, 205.
\bibitem{} Wichman E.H., 1971, {\it Quantum Physics - Berkeley Physics Course} (Mc Graw Hill, New York).
\bibitem{} Worthing A.G. and J. Geffner, 1955, {\it Treatment of Experimental Data (Wiley, New York).}
\bibitem{} Yamada K., 1982, Phys. Rev.D {\bf 25}, 3256.
\bibitem{} Yanase M.M., M. Namiki and S. Makida (editors) 1978 {\it Selected Papers on the Theory of Measurement in Quantum Mechanics }(Phys. Soc. Japan, Tokyo).
\bibitem{} Zubarev D.N., 1971, {\it Nonequilibrium Statistical Thermodynamics }(in Russian: Nauka, Moscow; English version: Consultants Bureau, New York, 1974). \end{references}
\begin{figure}
\caption{Private letter from J.S. Bell to the author (dated January 29, 1985)}
\label{}
\end{figure}
\end{document}
\begin{document}
\title[On a characterization of finite-dimensional vector spaces] {On a characterization\\ of finite-dimensional vector spaces}
\author[Marat V. Markin]{Marat V. Markin}
\address{ Department of Mathematics\newline California State University, Fresno\newline 5245 N. Backer Avenue, M/S PB 108\newline Fresno, CA 93740-8001, USA }
\email{[email protected]}
\subjclass{Primary 15A03, 15A04; Secondary 15A09, 15A15}
\keywords{Linear operator, vector space, Hamel basis}
\begin{abstract} We provide a characterization of the finite dimensionality of vector spaces in terms of the right-sided invertibility of linear operators on them. \end{abstract}
\maketitle
\section{Introduction}
In the paper \cite{Markin2005}, a characterization of one-dimensional (real or complex) normed algebras is given in terms of the bounded linear operators on them, echoing the celebrated \textit{Gelfand-Mazur theorem} characterizing complex one-dimensional Banach algebras (see, e.g., \cite{Bach-Nar,Gelfand39,Gelfand41,Naimark,Rickart1958}).
Here, continuing along this path, we provide a simple characterization of the finite dimensionality of vector spaces in terms of the right-sided invertibility of linear operators on them.
\section{Preliminaries}
As is well-known (see, e.g., \cite{Horn-Johnson,ONan}), a square matrix $A$ with complex entries is invertible \textit{iff} it is one-sided invertible, i.e., there exists a square matrix $C$ of the same order as $A$ such that \[ AC = I\ \text{(\textit{right inverse})}\quad \text{or}\quad CA=I\ \text{(\textit{left inverse})}, \] where $I$ is the \textit{identity matrix} of an appropriate size, in which case $C$ is the (two-sided) inverse of $A$, i.e., \[ AC=CA=I. \]
Generally, for a linear operator on a (real or complex) vector space, the existence of a \textit{left inverse} implies being \textit{invertible}, i.e., \textit{injective}. Indeed, let $A:X\to X$ be a linear operator on a (real or complex) vector space $X$ and a linear operator $C:X\to X$ be its \textit{left inverse}, i.e., \begin{equation}\label{cfdvs2} CA=I, \end{equation} where $I$ is the \textit{identity operator} on $X$. Equality \eqref{cfdvs2}, obviously, implies that \[ \ker A=\left\{0\right\}, \] and hence, there exists an inverse $A^{-1}:R(A)\to X$ for the operator $A$, where $R(A)$ is its range (see, e.g., \cite{Markin2020EOT}). Equality \eqref{cfdvs2} also implies that the inverse operator $A^{-1}$ is the restriction of $C$ to $R(A)$.
Further, as is easily seen, for a linear operator on a (real or complex) vector space, the existence of a \textit{right inverse}, i.e., a linear operator $C:X\to X$ such that \begin{equation*} AC=I, \end{equation*} immediately implies being \textit{surjective}, which, provided the underlying vector space is \textit{finite-dimensional}, by the \textit{rank-nullity theorem} (see, e.g., \cite{Markin2018EFA,Markin2020EOT}), is equivalent to being \textit{injective}, i.e., being \textit{invertible}.
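For convenience, we make the finite-dimensional step explicit (a standard computation recalled here, not taken from the cited sources): if $\dim X=n<\infty$, the \textit{rank-nullity theorem} gives
\[
\dim\ker A+\dim R(A)=n,
\]
so that $R(A)=X$ holds precisely when $\ker A=\left\{0\right\}$, i.e., \textit{surjectivity} and \textit{injectivity} coincide for linear operators on a finite-dimensional space.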
With the underlying space being \textit{infinite-dimensional}, the arithmetic of infinite cardinals does not allow one to infer directly from the \textit{rank-nullity theorem} that the \textit{surjectivity} of a linear operator on the space is equivalent to its \textit{injectivity}. In this case the right-sided invertibility of linear operators need not imply invertibility. For instance, on the (real or complex) \textit{infinite-dimensional} vector space $l_\infty$ of bounded sequences, the left shift linear operator \[ l_\infty\ni x:=(x_1,x_2,x_3,\dots) \mapsto Lx:=(x_2,x_3,x_4,\dots)\in l_\infty \] is \textit{non-invertible} since \[ \ker L=\left\{(x_1,0,0,\dots) \right\}\neq \left\{0\right\} \] (see, e.g., \cite{Markin2018EFA,Markin2020EOT}), but the right shift linear operator \[ l_\infty\ni x:=(x_1,x_2,x_3,\dots) \mapsto Rx:=(0,x_1,x_2,\dots)\in l_\infty \] is its \textit{right inverse}, i.e., \[ LR=I, \] where $I$ is the \textit{identity operator} on $l_\infty$.
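The following small sketch (ours, purely illustrative and not part of the argument) mimics the shift operators numerically; genuine elements of $l_\infty$ are infinite sequences, so we truncate to finitely many entries padded with trailing zeros.
\begin{verbatim}
# Minimal illustrative sketch (assumption: finite lists padded with
# zeros stand in for elements of l_infty).
def left_shift(x):
    """L(x1, x2, x3, ...) = (x2, x3, x4, ...)."""
    return x[1:] + [0.0]              # pad to keep the truncated length fixed

def right_shift(x):
    """R(x1, x2, x3, ...) = (0, x1, x2, ...)."""
    return [0.0] + x[:-1]

x = [3.0, 1.0, 4.0, 1.0, 5.0, 0.0]    # trailing zero mimics the truncation

print(left_shift(right_shift(x)) == x)   # True: LR = I (right inverse)
print(right_shift(left_shift(x)) == x)   # False: RL != I, R is no left inverse
print(left_shift([1.0, 0.0, 0.0]))       # [0.0, 0.0, 0.0], so ker L != {0}
\end{verbatim}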
The above example not only gives rise to the natural question of whether the underlying space is necessarily \textit{finite-dimensional} whenever right-sided invertibility of linear operators on a (real or complex) vector space implies their invertibility, i.e., \textit{injectivity}, but also serves as an inspiration for proving the \textit{``if''} part of the subsequent characterization.
\section{Characterization}
\begin{thm}[Characterization of Finite-Dimensional Vector Spaces]\ \\ A (real or complex) vector space $X$ is finite-dimensional iff, for linear operators on $X$, right-sided invertibility implies invertibility. \end{thm}
\begin{proof}\
\textit{``Only if''} part. Suppose that the vector space $X$ is \textit{finite-dimensional} with $\dim X=n$ ($n\in {\mathbb N}$) and let $B:=\left\{x_1,\dots,x_n\right\}$ be an ordered basis for $X$.
For an arbitrary linear operator $A:X\to X$ on $X$, which has a \textit{right inverse}, i.e., a linear operator $C:X\to X$ such that \begin{equation*} AC=I, \end{equation*} where $I$ is the \textit{identity operator} on $X$, let $[A]_B$ and $[C]_B$ be the \textit{matrix representations} of the operators $A$ and $C$ relative to the basis $B$, respectively (see, e.g., \cite{Horn-Johnson,ONan}).
Then \begin{equation}\label{cfdvs1} [A]_B[C]_B=I_n, \end{equation} where $I_n$ is the \textit{identity matrix} of size $n$ (see, e.g., \cite{Horn-Johnson,ONan}).
By the \textit{multiplicativity of the determinant} (see, e.g., \cite{Horn-Johnson,ONan}), equality \eqref{cfdvs1} implies that \[ \det\left([A]_B\right)\det\left([C]_B\right)=\det\left([A]_B[C]_B\right)=\det(I_n)=1. \]
Whence, we conclude that \[ \det\left([A]_B\right)\neq 0, \] which, by the \textit{determinant characterization of invertibility}, implies that the matrix $[A]_B$ is invertible, and hence, so is the operator $A$ (see, e.g., \cite{Horn-Johnson,ONan}).
\textit{``If''} part. Let us prove this part \textit{by contrapositive}, assuming that the vector space $X$ is \textit{infinite-dimensional}. Suppose that $B:=\left\{x_i\right\}_{i\in I}$ is a (Hamel) basis for $X$ (see, e.g., \cite{Markin2018EFA,Markin2020EOT}), where $I$ is an infinite indexing set, and that $J:=\left\{i(n)\right\}_{n\in {\mathbb N}}$ is a \textit{countably infinite} subset of $I$.
Let us define a linear operator $A:X\to X$ as follows: \[ Ax_{i(1)}:=0,\ Ax_{i(n)}:=x_{i(n-1)},\ n\ge 2,\quad Ax_i:=x_i,\ i\in I\setminus J, \] and \[ X\ni x=\sum_{i\in I}c_ix_i\mapsto Ax:=\sum_{i\in I}c_iAx_i, \] where \[ \sum_{i\in I}c_ix_i \] is the \textit{basis representation} of a vector $x\in X$ relative to $B$, in which all but a finite number of the coefficients $c_i$, $i\in I$, called the \textit{coordinates} of $x$ relative to $B$, are zero (see, e.g., \cite{Markin2018EFA,Markin2020EOT}).
As is easily seen, $A$ is a linear operator on $X$, which is \textit{non-invertible}, i.e., \textit{non-injective}, since \[ \ker A=\spa\left(\left\{x_{i(1)}\right\}\right)\neq \left\{0\right\}. \]
The linear operator $C:X\to X$ on $X$ defined as follows: \[ Cx_{i(n)}:=x_{i(n+1)},\ n\in{\mathbb N},\quad Cx_i:=x_i,\ i\in I\setminus J, \] and \[ X\ni x=\sum_{i\in I}c_ix_i\mapsto Cx:=\sum_{i\in I}c_iCx_i, \] is a \textit{right inverse} for $A$ since \[ ACx_{i(n)}=Ax_{i(n+1)}=x_{i(n)},\ n\in {\mathbb N},\quad ACx_i=Ax_i=x_i,\ i\in I\setminus J. \]
Thus, on a (real or complex) infinite-dimensional vector space, there exists a non-invertible linear operator with a right inverse, which completes the proof of the \textit{``if''} part, and hence, of the entire statement. \end{proof}
\end{document}
\begin{document}
\begin{abstract} We investigate stationary solutions of a non-local aggregation equation with degenerate power-law diffusion and bounded attractive potential in arbitrary dimensions. Compact stationary solutions are characterized and compactness considerations are used to derive the existence of global minimizers of the corresponding energy depending on the prefactor of the degenerate diffusion for all exponents of the degenerate diffusion greater than one. We show that a global minimizer is compactly supported and, in case of quadratic diffusion, we prove that it is the unique stationary solution up to a translation. The existence of stationary solutions being only local minimizers is discussed. \end{abstract}
\maketitle
\section{Introduction} In this paper, we investigate stationary solutions of the non-local aggregation equation \begin{equation} \label{eq1} \partial_t\rho =\nabla\cdot(\rho\nabla(\varepsilon\rho^{m-1}-G\ast\rho)) \end{equation}
in $\mathbb{R}^N$ where $G$ is a bounded purely attractive and integrable interaction potential and $\rho\in L^1(\mathbb{R}^N)\cap L^m(\mathbb{R}^N)$ a non-negative function satisfying $\|\rho\|_{L^1}=1$. Since we can rescale time in \eqref{eq1}, we assume the potential $G$ to be normalized, i.e. $\|G\|_{L^1}=1$, and state all results depending on the coefficient $\varepsilon>0$.
Due to the theory of gradient flows in \cite{ambrosiogiglisavare}, we can consider the aggregation equation \eqref{eq1} for probability density functions $\rho\in L^m(\mathbb{R}^N)$ formally as a gradient flow in the Wasserstein metric of the energy functional \begin{equation} \label{eq2} E[\rho]=\int_{\mathbb{R}^N}\frac{\varepsilon}{m}\rho^m(x)\,dx-\frac{1}{2}\int_{\mathbb{R}^N}\int_{\mathbb{R}^N}G(x-y)\rho(y)\rho(x)\,dydx. \end{equation}
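For orientation, a short formal computation (ours, following the standard gradient-flow formalism rather than any particular derivation in the works cited here) identifies the first variation of $E$: using the symmetry $G(x)=G(-x)$, which holds for the radially symmetric potentials considered in this paper,
\begin{equation*}
\frac{\delta E}{\delta\rho}(x)=\varepsilon\rho^{m-1}(x)-(G\ast\rho)(x),
\end{equation*}
so that \eqref{eq1} formally reads $\partial_t\rho=\nabla\cdot\big(\rho\nabla\frac{\delta E}{\delta\rho}\big)$, which is the Wasserstein gradient flow structure alluded to above.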
Equation \eqref{eq1} is discussed in \cite{topazbertozzilewis} for $m=3$ to model biological aggregation. The non-linear diffusion models an anti-crowding motion acting locally repulsively, while the attractive potential models a non-local aggregative behaviour. By means of weakly non-linear and asymptotic analysis, so-called clump solutions with sharp edges are obtained in \cite{topazbertozzilewis} as stationary solutions of \eqref{eq1}, a behaviour which is reasonable for biological swarming.
In \cite{burgerdifrancescofranek}, it is shown for $N=1$ and $m=2$ that a unique stationary solution of \eqref{eq1} exists if the coefficient $\varepsilon$ satisfies $0<\varepsilon<1$ and that a stationary solution of \eqref{eq1} is compactly supported. In \cite{burgerfetecauhuang}, a detailed discussion of stationary states of \eqref{eq1} for $N=1$ and any $m>1$ can be found, partly based on numerical experiments. These observations are the starting point for this work.
Aggregation equations with a Newtonian or Bessel potential, which do not fall into the class of potentials considered in this paper, are widely studied due to the Keller-Segel model for chemotaxis \cite{kellersegel}. This model reads in a simplified form as \begin{align} \begin{split} \label{kellersegel} \partial_t\rho & =\Delta\rho-\nabla\cdot(\rho\nabla\phi),\\ -\Delta\phi+\alpha\phi & =\rho \end{split} \end{align} with $\alpha\geq0$. For an extensive summary about results of the Keller-Segel system until 2003 see e.g.\ the review \cite{horstmann} and the references therein. Here, we only focus on thresholds which allow us to characterize properties of solutions of system \eqref{kellersegel} in $\mathbb{R}^N$.
In \cite{dolbeaultperthame,blanchetdolbeaultperthame}, it is shown for $N=2$ and $\alpha=0$, depending on the conserved total mass $M=\|\rho\|_{L^1}$, that for $M>8\pi$ solutions of system \eqref{kellersegel} blow up in finite time and that for $M<8\pi$ a global in time weak solution of \eqref{kellersegel} exists which converges with exponential rate \cite{camposdolbeault} to the self-similar solution of \eqref{kellersegel}. The critical case $M=8\pi$ is considered in \cite{blanchetcarrillomasmoudi} where the existence of a global in time solution with a finite second moment blowing up in infinite time is proved whereas the existence of a stationary solution with an infinite second moment is shown in the critical case in \cite{blanchetcarlencarrillo}.
Considering the Keller-Segel system \eqref{kellersegel} with a homogeneous non-linear diffusion \begin{align} \begin{split} \label{kellersegeldegenerate} \partial_t\rho & =\Delta\rho^m-\nabla\cdot(\rho\nabla\phi),\\ -\Delta\phi+\alpha\phi & =\rho, \end{split} \end{align} we basically end up with equation \eqref{eq1} with a Newtonian or Bessel potential since we can rewrite \eqref{kellersegeldegenerate} as \begin{equation} \label{kellersegeldegeneraterewritten} \partial_t\rho=\nabla\cdot(\nabla\rho^m-\rho\nabla(G\ast\rho)) \end{equation} with a Newtonian or Bessel potential $G$. In \cite{bedrossianrodriguezbertozzi}, existence and uniqueness of weak solutions of \eqref{kellersegeldegeneraterewritten} are proved in $\mathbb{R}^N$ for $N\geq3$ for a wide class of attractive interaction potentials $G$ including the Newtonian and Bessel potentials. This result covers the well-posedness of weak solutions obtained in \cite{bertozzislepcev} for smooth attractive potentials and extends the existence theory for the Bessel potential in \cite{sugiyama2}. Existence and uniqueness of solutions to \eqref{kellersegeldegeneraterewritten} with respect to entropy solution are considered in \cite{burgercapassomorale} and for $N=1$ the uniqueness of solutions to \eqref{kellersegeldegeneraterewritten} is shown in \cite{burgerdifrancesco} using the pseudo-inverse of the Wasserstein distance.
Moreover, in \cite{bedrossianrodriguezbertozzi} a critical exponent of the degenerate diffusion and a critical mass are determined for $N\geq3$ such that results similar to those for the Keller-Segel system \eqref{kellersegel} with $N=2$ are obtained. In addition, for subcritical exponents it is shown that a global in time solution exists, whereas in the supercritical case a finite time blow-up can occur for a certain class of problems. In particular, for the Newtonian and Bessel potentials these results are also partly obtained in \cite{blanchetcarrillolaurencot} and \cite{sugiyama1,sugiyama2}, respectively, where the critical exponent is $m=2-\frac{2}{N}$. Some of the results in \cite{bedrossianrodriguezbertozzi} are extended in \cite{bedrossianrodriguez} to $\mathbb{R}^2$.
Regarding stationary solutions, equation \eqref{kellersegeldegeneraterewritten} is considered with a Newtonian or regularized Newtonian potential in \cite{kimyao}. Among other results, it is shown there via a mass comparison argument that for $m>2-\frac{2}{N}$ in dimensions $N\geq3$ there is a unique radially symmetric stationary solution, which is monotonically decreasing and compactly supported. See also \cite{liebyau} for radial uniqueness results for stationary solutions of \eqref{kellersegeldegeneraterewritten} with a Newtonian potential. In addition, it is derived in \cite{kimyao} that in the subcritical case continuous, radially symmetric and compactly supported solutions of equation \eqref{kellersegeldegeneraterewritten} with a Newtonian or regularized Newtonian potential converge to the unique radial stationary solution as $t\rightarrow\infty$. The asymptotic behaviour towards stationary solutions in the critical case is considered in \cite{yao}.
The symmetry of stationary solutions of equation \eqref{kellersegeldegeneraterewritten} with a Newtonian potential is investigated in \cite{stroehmer} in three dimensions. It is shown via some variant of the moving plane method that compact stationary solutions are radially symmetric.
Combining the results in \cite{kimyao} and \cite{stroehmer}, one can conclude that compact stationary solutions of equation \eqref{kellersegeldegeneraterewritten} with a Newtonian interaction are unique in the subcritical case in three dimensions. The ideas of \cite{kimyao} and \cite{stroehmer} were recently used in \cite{carrillocastorinavolzone} to show that there is a unique compact stationary solution of equation \eqref{kellersegeldegeneraterewritten} with a Newtonian potential for $N=2$. This unique compact stationary solution coincides with the global minimizer of the corresponding energy. For results in the supercritical case $0<m<2-\frac{2}{N}$ and $N\geq3$ see e.g.\ \cite{bianliu}.
Recently, using continuous Steiner symmetrization techniques it was shown in \cite{carrillohittmeirvolzoneyao} for $N\geq1$ that all stationary solutions of equation \eqref{kellersegeldegeneraterewritten} are radially symmetric and monotonically decreasing if the interaction potential $G$ is radially symmetric, purely attractive and satisfies some growth assumptions. In particular, in case of a Newtonian interaction it is proved that there is a unique stationary solution. Moreover, for $N=2$ it is shown that a weak solution is converging towards this unique stationary solution as $t\rightarrow\infty$.
Furthermore, in \cite{chayeskimyao} the non-local aggregation equation \eqref{kellersegeldegeneraterewritten} with a symmetric attractive $C^2$-potential is considered on a torus. For $m=2$, it is shown in the subcritical case $\varepsilon>\max_{k\neq0}\big\{|\hat{G}(k)|\,\big|\,\hat{G}(k)>0\big\}$, where $\hat{G}$ denotes the Fourier transform of $G$, that the global minimizer is the constant solution and the energy of a weak solution converges exponentially fast to the energy of the constant solution with a rate depending on the initial data. In the supercritical case, it is found that a strictly positive solution, in particular the constant solution, is not a local minimizer of the energy. Similar results regarding such a phase transition were obtained before in \cite{chayespanferov} for an aggregation equation with linear diffusion.
Recently, many results were achieved considering non-local aggregation equations without diffusion in the space of probability measures. Instead of the diffusion and a purely attractive potential, a potential which acts locally repulsive and non-locally attractive is considered. Particularly, power-law potentials are widely studied. Depending on the potential, a rich variety of pattern formation is observed, see e.g.\ \cite{bertozzikolokolnikovsunuminskyvonbrecht}. In \cite{balaguecarrillolaurentraoul1}, the dimensionality of local minimizers of the corresponding energy is determined out of the strength of the local repulsive behaviour and in \cite{balaguecarrillolaurentraoul2}, the stability of radially symmetric solutions is investigated. Aggregation equations of this type do allow for stationary solutions not being radially symmetric, in contrast to aggregation equations with a degenerate diffusion and a suitable attractive potential as we consider here. This is due to the symmetry result in \cite{carrillohittmeirvolzoneyao}.
Regarding power-law potentials, the regularity of local minimizers is studied in \cite{carrillodelgadinomellet} and, if the repulsive part behaves like the Newtonian potential, the existence of unique radially symmetric and compactly supported stationary solutions is derived in \cite{fetecauhuangkolokolnikov,fetecauhuang}. Recently, for certain parameters of the power-law potential, explicit radially symmetric stationary solutions are obtained in \cite{carrillohuang}.
Especially for power-law potentials, the existence of a global minimizer of the corresponding energy is shown in \cite{choksifetecautopaloglu} via the concentration-compactness principle of Lions \cite{lions} and in \cite{carrillochipothuang} via the discrete setting and some compactness observation. In \cite{canizocarrillopatacchini,simioneslepcevtopaloglu}, the existence of global minimizers is obtained for more general potentials. Additionally, it is shown in \cite{canizocarrillopatacchini} that global minimizers are compactly supported. The strategy in \cite{canizocarrillopatacchini} is to consider the problem first in a given ball instead of the whole space because this setting allows to derive the existence of a global minimizer. If there exists a probability measure such that its energy is less than the limit of the interaction potential at infinity, then it is shown that the diameter of the support of the global minimizer for the problem restricted to the ball is bounded from above independently of the size of the ball. This result is used to conclude that a compact global minimizer of the energy exists in the whole space. The approach in \cite{canizocarrillopatacchini} is similar to the one in \cite{auchmutybeals}.
In this paper, assuming that the interaction potential $G$ is bounded and radially strictly monotonically decreasing, we derive a sharp condition for stationary solutions of \eqref{eq1} being compactly supported which we then use to show that global minimizers of the energy functional \eqref{eq2}, if they exist, have compact support.
In \cite{bedrossian}, it is proved that a global minimizer of the energy functional \eqref{eq2} exists for $m=2$ and $0<\varepsilon<1$ as well as for $m>2$ and all $\varepsilon>0$. In \cite{burgerdifrancescofranek}, it is shown for $m=2$ that the threshold for the coefficient $\varepsilon$ is sharp. For a bounded, radially symmetric and purely attractive potential, we also characterize for $1<m<2$ a threshold $\varepsilon_0$ such that a global minimizer exists for all coefficients $0<\varepsilon\leq\varepsilon_0$. We prove this statement by adapting the compactness considerations of \cite{canizocarrillopatacchini} to our problem instead of following the approach in \cite{bedrossian} via the concentration-compactness principle of Lions \cite{lions} which only applies to $m\geq2$.
Moreover, for $m=2$ and $0<\varepsilon<1$ we show in arbitrary dimensions that the global minimizer of the energy functional \eqref{eq2} is unique up to a translation and that it is the unique stationary solution of \eqref{eq1} up to a translation. This stationary solution is radially symmetric and monotonically decreasing. We use several ideas from the proof for $N=1$ in \cite{burgerdifrancescofranek} as well as the symmetry result in \cite{carrillohittmeirvolzoneyao} and compactness considerations from \cite{canizocarrillopatacchini} to prove the uniqueness in higher dimensions.
For $m>2$, we prove that a compact stationary solution of \eqref{eq1} exists for all $\varepsilon>0$ if the interaction potential $G$ is bounded and strictly radially monotonically decreasing. For $1<m<2$, we show the existence of a constant $\varepsilon_0>0$ depending on the interaction potential and the exponent $m$ of the degenerate diffusion such that a compact stationary solution of \eqref{eq1} exists for all $0<\varepsilon\leq\varepsilon_0$. The results for $m\neq2$ are conjectured in \cite{burgerfetecauhuang}. Furthermore, our results complement some findings in \cite{burgerfetecauhuang} where it is shown for $N=1$ that for all $L>0$ there is some coefficient $\varepsilon>0$ such that a radially symmetric and monotonically decreasing stationary solution of \eqref{eq1} with support $[-L,L]$ exists. We extend this statement to arbitrary dimensions.
In addition, under the assumptions that for $m\neq2$ there is a unique radially symmetric and monotonically decreasing stationary solution of \eqref{eq1} with the coefficient $\varepsilon$ strictly increasing with the size of the support (which we prove for $m=2$ and which is indicated by numerical results in \cite{burgerfetecauhuang} for $m\neq2$), we show the following. For $1<m<2$, this unique stationary solution coincides with the global minimizer of the energy \eqref{eq2} for coefficients $\varepsilon>0$ not greater than $\varepsilon_0$ but loses this property for coefficients with a larger value. In particular, under these assumptions we show that $\varepsilon_0<\varepsilon_1$ where $\varepsilon_1$ denotes the threshold for the existence of a compact stationary solution of \eqref{eq1}. Numerically, it is also observed in \cite{burgerfetecauhuang} that stationary solutions of \eqref{eq1} being only local minimizers may exist.
Due to the additional non-linearity arising for $m\neq2$, it seems to be rather difficult to prove the uniqueness result which holds for $m=2$. However, numerical observations in \cite{burgerfetecauhuang} indicate that uniqueness should also be valid for $m\neq2$. So, this question remains an open problem for now. Besides, another interesting open problem not considered in the present paper concerns the long-time asymptotics of solutions to the aggregation equation \eqref{eq1} with a bounded attractive potential.
The paper is organized as follows: we state some assumptions of the interaction potential and the main result of the paper in Section \ref{notationandmainresult}. In Section \ref{preliminaries}, we show that under these assumptions stationary solutions of \eqref{eq1} are continuous in $\mathbb{R}^N$ as well as radially symmetric and monotonically decreasing. In Section \ref{compactness}, we derive a condition to characterize compact stationary solutions of \eqref{eq1} and prove that global minimizers of the energy functional \eqref{eq2} are compactly supported. The existence of a global minimizer of the energy given in \eqref{eq2} is shown in Section \ref{globalminimizers}. In Section \ref{stationarysolutions} we prove in arbitrary dimensions for $m=2$ uniqueness of stationary solutions of \eqref{eq1} up to a translation and consequently we show the uniqueness of the global minimizer of the corresponding energy. Finally, we discuss the existence of stationary solutions of \eqref{eq1} with positive energy in Section \ref{discussion}.
\section{Notation and main result} \label{notationandmainresult} In the entire paper, we suppose the interaction potential $G$ to satisfy that \begin{enumerate}[label=(G\arabic*)] \item\label{(G1)} $G$ is non-negative and $\mathrm{supp}\,G=\mathbb{R}^N$,
\item\label{(G2)} $G\in\,W^{1,1}(\mathbb{R}^N)\cap L^{\infty}(\mathbb{R}^N)\cap C^2(\mathbb{R}^N)$ with $\|G\|_{L^1}=1$,
\item\label{(G3)} $G$ is radially symmetric and strictly monotonically decreasing, i.e.\ $G(x)=g(|x|)$ and $g^\prime(r)<0$ for all $r>0$, \item\label{(G4)} $g^{\prime\prime}(0)<0$ and $\lim_{r\rightarrow +\infty}g(r)=0$. \end{enumerate} Especially, $G$ being bounded plays an important role in our further considerations.
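For instance (an illustrative example of ours, not taken from the works cited above), the normalized Gaussian
\begin{equation*}
G(x)=(2\pi)^{-N/2}e^{-|x|^2/2},\qquad g(r)=(2\pi)^{-N/2}e^{-r^2/2},
\end{equation*}
satisfies \ref{(G1)}-\ref{(G4)}: it is positive on all of $\mathbb{R}^N$ with $\|G\|_{L^1}=1$, it is smooth, bounded and integrable together with its gradient, and $g^\prime(r)=-(2\pi)^{-N/2}re^{-r^2/2}<0$ for $r>0$, $g^{\prime\prime}(0)=-(2\pi)^{-N/2}<0$ and $g(r)\rightarrow0$ as $r\rightarrow+\infty$.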
We define \begin{equation}
\mathcal{P}^{M}(\mathbb{R}^N)\coloneqq\Big\{f\in L_+^1(\mathbb{R}^N)\,\Big|\,\int_{\mathbb{R}^N}f(x)\,dx=M\Big\} \end{equation} and for $M=1$ we use the abbreviated notation $\mathcal{P}(\mathbb{R}^N)$. Moreover, we write \begin{equation}
\mathcal{P}_R(\mathbb{R}^N)\coloneqq\Big\{f\in L_+^1(\mathbb{R}^N)\,\Big|\,\int_{\mathbb{R}^N}f(x)\,dx=1,\,\mathrm{supp}\,f\subset\overline{B_R(0)}\Big\}. \end{equation} We consider stationary solutions of \begin{equation} \label{eq1b} \partial_t\rho =\nabla\cdot(\rho\nabla(\varepsilon\rho^{m-1}-G\ast\rho)) \end{equation} in $\mathbb{R}^N$ and minimizers of the corresponding energy \begin{equation} \label{eq2b} E[\rho]=\int_{\mathbb{R}^N}\frac{\varepsilon}{m}\rho^m(x)\,dx-\frac{1}{2}\int_{\mathbb{R}^N}\int_{\mathbb{R}^N}G(x-y)\rho(y)\rho(x)\,dydx \end{equation} in $L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$.
As a main result of this paper, we will prove that for $m=2$ a stationary solution of \eqref{eq1b} is unique up to a translation: \begin{thm} \label{uniquenesstheoremformequals2} Let $G$ satisfy \ref{(G1)}-\ref{(G4)}, $m=2$ and $0<\varepsilon<1$, then there exists up to a translation a unique stationary solution $\rho\in L^2(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ of \eqref{eq1b}. Moreover, $\rho$ has the following properties: \begin{itemize} \item $\rho\in C^2(\mathrm{supp}\,\rho)\cap C(\mathbb{R}^N)$. \item $\mathrm{supp}\,\rho=\overline{B_R(0)}$ for some $R>0$ depending on $\varepsilon$. \item $\rho$ is radially symmetric. \item $\rho$ is monotonically decreasing with increasing radius. \item $\rho$ is the unique (up to a translation) global minimizer of the energy $E$ in $L^2(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$. \end{itemize} \end{thm}
\section{Continuity and symmetry of stationary solutions} \label{preliminaries} First, we recall some results, obtained among others in \cite[Section 2 and Section 3]{burgerdifrancescofranek} for $m=2$, which can be straightforwardly extended to all $m>1$. \begin{lem} \label{resultsfromburgerdifrancescofranek} Let $G$ satisfy \ref{(G1)}-\ref{(G4)}; then the following results hold for $m>1$ and $\varepsilon>0$: \begin{enumerate}[label=(\roman*)] \item\label{results(i)} The total mass $\int_{\mathbb{R}^N}\rho(x,t)\,dx$ of a solution of \eqref{eq1b} is preserved. \item\label{results(ii)} The center of mass $\int_{\mathbb{R}^N}x\rho(x,t)\,dx$ of a solution of \eqref{eq1b} is preserved. \item\label{results(iii)} If $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ is a stationary solution of \eqref{eq1b}, then it holds that \begin{equation*} \varepsilon\rho^{m-1}-G\ast\rho=C \end{equation*}
almost everywhere on every connected component of $\mathrm{supp}\,\rho$ where the constant $C$ may be different for each connected component. Moreover, we have $\rho^{m-1}\in C^2(\mathrm{supp}\,\rho)$ and $|\nabla\rho^{m-1}|\leq C^{\prime}$ in $\mathrm{supp}\,\rho$. \item\label{results(iv)} If $\rho$ is a minimizer of the energy functional $E$ given in \eqref{eq2b} in $L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$, then we have \begin{equation*} \rho\nabla(\varepsilon\rho^{m-1}-G\ast\rho)=0 \end{equation*} almost everywhere in $\mathbb{R}^N$. \item\label{results(v)} If $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ is a solution to \begin{equation*} \rho\nabla(\varepsilon\rho^{m-1}-G\ast\rho)=0, \end{equation*} then $\rho$ is a stationary point of the energy $E$ given in \eqref{eq2b}. \item\label{results(vi)} If $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ is a connected stationary solution of \eqref{eq1b}, then we have \begin{equation*} \varepsilon\rho^{m-1}(x)=(G\ast\rho)(x)+2E[\rho]-\int_{\mathbb{R}^N}\varepsilon\Big(\frac{2}{m}-1\Big)\rho^m(y)\,dy \end{equation*} for all $x\in\,\mathrm{supp}\,\rho$. \end{enumerate} \end{lem} In addition, in \cite[Corollary 2.3]{burgerdifrancescofranek} it is shown for $N=1$ that a stationary solution of \eqref{eq2b} is continuous in $\mathbb{R}$. It is easy to prove this result for higher dimensions. \begin{lem} \label{continuityofstationarysolution} If $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ is a stationary solution of \eqref{eq1b}, $m>1$, $\varepsilon>0$ and $G$ satisfies \ref{(G1)}-\ref{(G4)}, then $\rho$ is continuous in $\mathbb{R}^N$. \end{lem} \begin{proof} By Lemma \ref{resultsfromburgerdifrancescofranek}\ref{results(iii)}, a stationary solution $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ of \eqref{eq1b} satisfies after differentiation and multiplication by $\rho$ \begin{equation*} \varepsilon\frac{m-1}{m}\nabla(\rho^m)=\rho\nabla(G\ast\rho). \end{equation*}
Due to \ref{(G2)}, \ref{(G4)} and $\rho\in\mathcal{P}(\mathbb{R}^N)$, we have $|\nabla(G\ast\rho)(x)|\leq C^\prime$ such that we obtain $\|\nabla(\rho^m)\|_{L^p}\leq \varepsilon^{-1}\frac{m}{m-1}C^\prime\|\rho\|_{L^p}$ for some $p\geq1$. Since $\rho\in L^1(\mathbb{R}^N)\cap L^m(\mathbb{R}^N)$, it follows that $\rho^m\in W^{1,1}(\mathbb{R}^N)$ and by Sobolev embedding we conclude for $N>1$ that $\rho^m\in L^p(\mathbb{R}^N)$ for all $1\leq p\leq\frac{N}{N-1}$. Hence, $\rho^m\in W^{1,p}(\mathbb{R}^N)$ and repeating the argument for $1<p<N$ it follows $\rho^m\in W^{1,q}(\mathbb{R}^N)$ for all $1\leq q\leq\frac{Np}{N-p}$. By Sobolev embedding, we conclude that $\rho^m\in C(\mathbb{R}^N)$ if $q>N$. Otherwise, we repeat the argument for $p<q<N$ until $\rho^m\in W^{1,r}(\mathbb{R}^N)$ for some $r>N$. \end{proof} Using a continuous Steiner symmetrization, in \cite[Section 2]{carrillohittmeirvolzoneyao} it is shown under suitable assumptions of the interaction potential that certain stationary solutions of an aggregation equation with a degenerate diffusion are radially symmetric and monotonically decreasing up to a translation. Due to our assumptions of the interaction potential $G$, in particular its radial symmetry, strict monotonicity and boundedness, and due to Lemma \ref{resultsfromburgerdifrancescofranek}\ref{results(iii)}, the symmetry result \cite[Theorem 2.2]{carrillohittmeirvolzoneyao} applies such that we can state the following theorem. \begin{thm} \label{symmetryofstationarysolutions} If $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ is a stationary solution of \eqref{eq1b}, $m>1$, $\varepsilon>0$ and $G$ satisfies \ref{(G1)}-\ref{(G4)}, then $\rho$ is radially symmetric and monotonically decreasing up to a translation. In particular, $\rho$ has a connected support. \end{thm}
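\begin{rmk} (A bookkeeping remark of ours on the bootstrap in the proof of Lemma \ref{continuityofstationarysolution}.) When the maximal Sobolev exponent is chosen at each step, $\frac{1}{p}$ decreases by exactly $\frac{1}{N}$ per iteration, since $\frac{1}{q}=\frac{1}{p}-\frac{1}{N}$ for $q=\frac{Np}{N-p}$. Starting from $p=1$, one thus reaches the borderline exponent $p=N$ after $N-1$ steps; as $W^{1,N}(\mathbb{R}^N)\hookrightarrow L^q(\mathbb{R}^N)$ for every finite $q$, one further iteration yields some $r>N$ with $\rho^m\in W^{1,r}(\mathbb{R}^N)\hookrightarrow C(\mathbb{R}^N)$, so the bootstrap indeed terminates after finitely many steps. \end{rmk}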
\section{Compactness of stationary solutions} \label{compactness} In \cite{canizocarrillopatacchini}, the existence of a global minimizer of the energy functional \begin{equation*} F[\mu]=\frac{1}{2}\int_{\mathbb{R}^N}\int_{\mathbb{R}^N}W(x-y)\,d\mu(y)d\mu(x) \end{equation*}
is shown, with $\mu$ a probability measure and $W$ an appropriate interaction potential. One key idea to obtain this result is to first consider a global minimizer of $F$ in the space of probability measures with support in a given ball and to show that in a fixed neighbourhood of each point of the support of this global minimizer at least a certain amount of mass is concentrated. To derive this statement, one has to assume that the given ball is large enough and that the potential $W$ is unstable, i.e.\ that a probability measure $\mu$ with $F[\mu]<\lim_{|x|\rightarrow\infty}W(x)$ exists.
Under a suitable condition for the energy, we perform a calculation similar to a part of the proof in \cite[Lemma 2.6]{canizocarrillopatacchini} to conclude that stationary solutions of \eqref{eq1b}, i.e.\ solutions $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ to the equation \begin{equation} \rho\nabla(\varepsilon\rho^{m-1}-G\ast\rho)=0 \end{equation} in $\mathbb{R}^N$ are compactly supported.
\begin{lem} \label{condforcompsupp} Let $G$ satisfy \ref{(G1)}-\ref{(G4)}. If $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ is a stationary solution of \eqref{eq1b}, $m>1$, $\varepsilon>0$ and if there exists a constant $K<0$ such that \begin{equation} \label{condforcompsuppeq1} 2E[\rho]-\int_{\mathbb{R}^N}\varepsilon\Big(\frac{2}{m}-1\Big)\rho^m(y)\,dy\leq K, \end{equation} then $\rho$ is compactly supported. \end{lem} \begin{proof}
Let $A<0$ with $K<A$ and $-G(0)<A$. By our assumptions of $G$, we know that there is an $r>0$ with $-G(x)>A$ for all $|x|>r$. Since $\rho$ has connected support due to Theorem \ref{symmetryofstationarysolutions}, by Lemma \ref{resultsfromburgerdifrancescofranek}\ref{results(vi)} it follows that \begin{equation*} \begin{aligned} K & \geq 2E[\rho]-\int_{\mathbb{R}^N}\varepsilon\Big(\frac{2}{m}-1\Big)\rho^m(y)\,dy\\ & =\varepsilon\rho^{m-1}(z)-\int_{\mathbb{R}^N}G(z-y)\rho(y)\,dy\\ & \geq-G(0)\int_{B_r(z)}\rho(y)\,dy+A\int_{\mathbb{R}^N\setminus B_r(z)}\rho(y)\,dy\\ & =(-G(0)-A)\int_{B_r(z)}\rho(y)\,dy+A \end{aligned} \end{equation*} for $z\in\,\mathrm{supp}\,\rho$. Due to $-G(0)-A<0$ and $K-A<0$, we obtain \begin{equation*} \int_{B_r(z)}\rho(y)\,dy\geq\frac{A-K}{A+G(0)}\eqqcolon C>0. \end{equation*} The constant $C$ is independent from $z$ such that this bound holds for every $z\in\,\mathrm{supp}\,\rho$ and we conclude that $\rho$ has a compact support because otherwise we obtain a contradiction. \end{proof} The proof of Lemma \ref{condforcompsupp} can be obtained more directly using Theorem \ref{symmetryofstationarysolutions}, Lemma \ref{resultsfromburgerdifrancescofranek}\ref{results(iv)} and the continuity of a stationary solution of \eqref{eq1b} in $\mathbb{R}^N$ (cf. Lemma \ref{continuityofstationarysolution}). However, we will use in Section \ref{globalminimizers} the above argumentation to show the existence of global minimizers of $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$.
Using the continuity of a stationary solution of \eqref{eq1b} in $\mathbb{R}^N$, we can actually show that $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ being a compact stationary solution of \eqref{eq1b} is equivalent to the existence of a constant $K<0$ such that \begin{equation*} 2E[\rho]-\int_{\mathbb{R}^N}\varepsilon\Big(\frac{2}{m}-1\Big)\rho^m(y)\,dy\leq K \end{equation*} is satisfied. \begin{lem} \label{compsuppsatisfycond} Let $G$ satisfy \ref{(G1)}-\ref{(G4)}. If $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ is a compact stationary solution of \eqref{eq1b}, $m>1$ and $\varepsilon>0$, then there exists a constant $K<0$ such that \begin{equation*} 2E[\rho]-\int_{\mathbb{R}^N}\varepsilon\Big(\frac{2}{m}-1\Big)\rho^m(y)\,dy\leq K \end{equation*} is satisfied. \end{lem} \begin{proof} Due to the symmetry result of Theorem \ref{symmetryofstationarysolutions} and since $\rho$ has compact support, there exists an $R<\infty$ such that we can assume that $\mathrm{supp}\,\rho=\overline{B_R(0)}$ without loss of generality. Then, by Lemma \ref{resultsfromburgerdifrancescofranek}\ref{results(vi)} and by the continuity of $\rho$ in $\mathbb{R}^N$ it follows that \begin{equation*} 2E[\rho]-\int_{\mathbb{R}^N}\varepsilon\Big(\frac{2}{m}-1\Big)\rho^m(y)\,dy=-(G\ast\rho)(R). \end{equation*} Now, choose $K=-(G\ast\rho)(R)$. \end{proof}
\begin{rmk} \label{differentcompcond} Using the definition of the energy $E$, we can rewrite the compactness condition in Lemma \ref{condforcompsupp} for a stationary solution $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ of \eqref{eq1b} as \begin{equation} \label{differentcompcondeq1} \int_{\mathbb{R}^N}\varepsilon\rho^m(x)\,dx-\int_{\mathbb{R}^N}(G\ast\rho)(x)\rho(x)\,dx\leq K \end{equation} with a constant $K<0$. Using Lemma \ref{resultsfromburgerdifrancescofranek}\ref{results(vi)}, we can also rewrite the compactness condition as \begin{equation} \label{differentcompcondeq2} \varepsilon\rho^{m-1}(x)-(G\ast\rho)(x)\leq K \end{equation} for all $x\in\,\mathrm{supp}\,\rho$.
Moreover, due to \ref{(G3)} and \ref{(G4)}, a stationary solution $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ of \eqref{eq1b} with non-compact support has to satisfy equation \eqref{condforcompsuppeq1}, \eqref{differentcompcondeq1} or \eqref{differentcompcondeq2} with equality and $K=0$. \end{rmk}
\begin{cor} \label{propcompandnoncompsol} Let $G$ satisfy \ref{(G1)}-\ref{(G4)}, $m>1$ and $\varepsilon>0$.
A compact stationary solution $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ of \eqref{eq1b} satisfies (after a translation such that the center of mass is in zero) \begin{equation} \label{equationforcompactsolution} \varepsilon\rho^{m-1}(x)=(G\ast\rho)(x)-(G\ast\rho)(R) \end{equation} for all $x\in\,\mathrm{supp}\,\rho$ where $\mathrm{supp}\,\rho=\overline{B_R(0)}$ for some $R<\infty$. Moreover, we have \begin{equation*}
\|\rho\|_{L^{m}}^{m}=\frac{1}{\varepsilon}\Big(\int_{\mathbb{R}^N}(G\ast\rho)(x)\rho(x)\,dx-(G\ast\rho)(R)\Big) \end{equation*} and the energy of a compact stationary solution of \eqref{eq1b} can be written as \begin{equation*} \begin{aligned} E[\rho] & =\int_{\mathbb{R}^N}\varepsilon\Big(\frac{1}{m}-\frac{1}{2}\Big)\rho^m(y)\,dy-\frac{1}{2}(G\ast\rho)(R)\\ & =\Big(\frac{1}{m}-\frac{1}{2}\Big)\int_{\mathbb{R}^N}(G\ast\rho)(x)\rho(x)\,dx-\frac{1}{m}(G\ast\rho)(R). \end{aligned} \end{equation*}
A non-compact stationary solution $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ of \eqref{eq1b} satisfies \begin{equation} \label{equationforconnectednoncompactsolution} \varepsilon\rho^{m-1}(x)=(G\ast\rho)(x) \end{equation} in $\mathbb{R}^N$. Moreover, we have \begin{equation*}
\|\rho\|_{L^{m}}^{m}=\frac{1}{\varepsilon}\int_{\mathbb{R}^N}(G\ast\rho)(x)\rho(x)\,dx \end{equation*} and the energy of a non-compact stationary solution of \eqref{eq1b} can be written as \begin{equation*} \begin{aligned} E[\rho] & =\int_{\mathbb{R}^N}\varepsilon\Big(\frac{1}{m}-\frac{1}{2}\Big)\rho^m(y)\,dy\\ & =\Big(\frac{1}{m}-\frac{1}{2}\Big)\int_{\mathbb{R}^N}(G\ast\rho)(x)\rho(x)\,dx. \end{aligned} \end{equation*} \end{cor} \begin{proof} By Lemma \ref{continuityofstationarysolution} and Theorem \ref{symmetryofstationarysolutions}, a stationary solution of \eqref{eq1b} is continuous as well as radially symmetric and monotonically decreasing. Using Lemma \ref{resultsfromburgerdifrancescofranek}\ref{results(vi)}, we immediately obtain equations \eqref{equationforcompactsolution} and \eqref{equationforconnectednoncompactsolution}. These equations can be used to calculate the $L^m$-norm and the energy $E$ of $\rho$. \end{proof}
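For the reader's convenience, we record the short computation behind these formulas in the compact case (the non-compact case is analogous, with $(G\ast\rho)(R)$ replaced by $0$). Multiplying \eqref{equationforcompactsolution} by $\rho(x)$, integrating over $\mathbb{R}^N$ and using $\int_{\mathbb{R}^N}\rho(x)\,dx=1$ gives \begin{equation*} \varepsilon\|\rho\|_{L^m}^m=\int_{\mathbb{R}^N}(G\ast\rho)(x)\rho(x)\,dx-(G\ast\rho)(R), \end{equation*} and inserting this identity into $E[\rho]=\int_{\mathbb{R}^N}\frac{\varepsilon}{m}\rho^m(x)\,dx-\frac{1}{2}\int_{\mathbb{R}^N}(G\ast\rho)(x)\rho(x)\,dx$ yields the two expressions for $E[\rho]$ stated in Corollary \ref{propcompandnoncompsol}.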
\begin{rmk} \label{mequals2nononcompact} For $m=2$, there cannot exist a non-compact stationary solution $\rho\in L^2(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ of \eqref{eq1b}, which satisfies $\mathrm{supp}\,\rho=\mathbb{R}^N$ due to Theorem \ref{symmetryofstationarysolutions}, since when integrating \eqref{equationforconnectednoncompactsolution} over $\mathbb{R}^N$ we obtain $\varepsilon=1$. Using a Fourier transform, it is shown in the proof of \cite[Theorem 3.5]{burgerdifrancescofranek} that if $\varepsilon=1$ (the critical case for $m=2$), then no $\rho\in L^2(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ satisfying $\varepsilon\rho(x)=(G\ast\rho)(x)$ in $\mathbb{R}^N$ exists.
For $m\neq 2$, we cannot exclude the existence of a non-compact stationary solution $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ of \eqref{eq1b} via the Fourier approach. \end{rmk}
\begin{thm} \label{negenergy} Let $G$ satisfy \ref{(G1)}-\ref{(G4)}. Every stationary solution $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ of \eqref{eq1b} (compact or non-compact) has negative energy if $m\geq 2$ and $\varepsilon>0$. \end{thm} \begin{proof} By Corollary \ref{propcompandnoncompsol}, for $m\geq2$ no compact stationary solution $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ of \eqref{eq1b} with non-negative energy exists. Furthermore, for $m>2$ no non-compact stationary solution $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ of \eqref{eq1b} with non-negative energy exists due to Corollary \ref{propcompandnoncompsol} and the existence of a non-compact stationary solution of \eqref{eq1b} for $m=2$ is excluded by Remark \ref{mequals2nononcompact}. \end{proof}
\begin{rmk} \label{stationarysolutionswithpositiveenergy} By Corollary \ref{propcompandnoncompsol}, we directly see that for $1<m<2$ and $\varepsilon>0$ a non-compact stationary solution $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ of \eqref{eq1b}, if it exists, has positive energy. Moreover, for $1<m<2$ and $\varepsilon>0$ the compactness condition from Lemma \ref{condforcompsupp} does not exclude the existence of compact stationary solutions of \eqref{eq1b} with positive energy in contrast to $m\geq 2$. \end{rmk}
\begin{thm} \label{globmincomp} Let $G$ satisfy \ref{(G1)}-\ref{(G4)}, $m>1$ and $\varepsilon>0$. If a global minimizer of the energy $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ exists, it is compactly supported. \end{thm} \begin{proof} For $1<m\leq 2$, a global minimizer, if it exists (cf.\ Section \ref{globalminimizers}), has non-positive energy, so that, due to Lemma \ref{resultsfromburgerdifrancescofranek}\ref{results(iv)} and \ref{results(vi)}, Lemma \ref{condforcompsupp} applies.
Therefore, let $m>2$ and assume that $\rho$ is a non-compact global minimizer, i.e.\ \begin{equation*} \varepsilon\rho^{m-1}(x)=(G\ast\rho)(x). \end{equation*}
Since $\rho$ is radially symmetric and monotonically decreasing by Riesz symmetric decreasing rearrangement inequality and since we have $\int_{\mathbb{R}^N}\rho(x)\,dx=1$, it holds that $\rho(x)|B_{|x|}(0)|\leq1$ such that we conclude that $\rho(x)\leq C|x|^{-N}$. Moreover, using that $G$ is positive and radially symmetric by conditions \ref{(G1)} and \ref{(G3)}, for $|x|>1$ we estimate that \begin{equation*} \begin{aligned}
(G\ast\rho)(x)&=\int_{\mathbb{R}^N}G(x-y)\rho(y)\,dy\geq\rho(x)\int_{B_{|x|}(0)}G(x-y)\,dy\\
& \geq\rho(x)\int_{B_1(x-\frac{x}{|x|})}G(x-y)\,dy=\rho(x)\int_{B_1(0)}G\Big(\frac{x}{|x|}-y\Big)\,dy=C^\prime\rho(x) \end{aligned} \end{equation*} with $C^\prime$ dependent on $G$ but independent from $x$ because of the radial symmetry of $G$. Therefore, we have \begin{equation*}
\varepsilon\rho^{m-1}(x)-(G\ast\rho)(x)\leq\rho(x)(\varepsilon C^{m-2}|x|^{-N(m-2)}-C^\prime) \end{equation*}
Since $m>2$, the right-hand side is strictly negative for $|x|>1$ sufficiently large, whereas a non-compact stationary solution of \eqref{eq1b} satisfies $\varepsilon\rho^{m-1}(x)-(G\ast\rho)(x)=0$ in $\mathbb{R}^N$; this contradiction completes the proof. \end{proof}
\section{Existence of global minimizers} \label{globalminimizers} In the following, we prove the existence of a global minimizer of the energy \begin{equation*} E[\rho]=\int_{\mathbb{R}^N}\frac{\varepsilon}{m}\rho^m(x)\,dx-\frac{1}{2}\int_{\mathbb{R}^N}\int_{\mathbb{R}^N}G(x-y)\rho(y)\rho(x)\,dydx \end{equation*} in $L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ without using the concentration-compactness principle \cite{lions} if the interaction potential $G$ satisfies \ref{(G1)}-\ref{(G4)}. For $m\geq2$, we obtain the same result as in \cite{bedrossian} and for $1<m<2$ we determine coefficients $\varepsilon>0$ which allow the existence of a global minimizer.
Using the concentration-compactness principle it is proved in \cite[Theorem II.1]{lions} that a global minimizer of $E$ exists if and only if the strict subadditivity condition \begin{equation} \inf_{\rho\in L^m\cap\mathcal{P}}E[\rho]<\inf_{\rho\in L^m\cap\mathcal{P}^M}E[\rho]+\inf_{\rho\in L^m\cap\mathcal{P}^{(1-M)}}E[\rho] \end{equation} is satisfied for all $M\in(0,1)$.
Moreover, in \cite{lions} the interaction potential does not need to be radially symmetric and monotonically decreasing. However, verifying that the strict subadditivity condition is satisfied may not be straightforward. In \cite[Corollary II.1]{lions} this condition is shown to be satisfied if $\inf_{\rho\in L^m\cap\mathcal{P}}E[\rho]<0$ and, in addition, either $1<m\leq2$ or $G(\tau x)\geq\tau^{-k}G(x)$ holds for all $\tau\geq1$ and almost every $x\in\mathbb{R}^N$.
If the interaction potential $G$ is non-negative, radially symmetric and monotonically decreasing, it is proved in \cite{bedrossian} that a global minimizer of the energy $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ exists for any coefficient $\varepsilon>0$ if $m>2$ and for any $0<\varepsilon<1$ if $m=2$. Moreover, it is shown that a global minimizer is radially symmetric and monotonically decreasing if the interaction potential $G$ is radially symmetric and strictly monotonically decreasing.
The strategy of the proof is to use a suitable scaling argument to observe that, under the above assumptions on $\varepsilon$ and $m$, for every $M>0$ a function $\rho\in C_0^\infty(\mathbb{R}^N)\cap L^m(\mathbb{R}^N)\cap\mathcal{P}^{M}(\mathbb{R}^N)$ with $E[\rho]<0$ exists. Together with the Riesz symmetric decreasing rearrangement inequality, this statement is used to show that for $M_1>M_2$ the relation \begin{equation} \label{massminrelation} \inf_{\rho\in L^m\cap\mathcal{P}^{M_1}}E[\rho]<\inf_{\rho\in L^m\cap\mathcal{P}^{M_2}}E[\rho] \end{equation} is satisfied. This conclusion is used to derive the existence of a radially symmetric and monotonically decreasing global minimizer using the Riesz symmetric decreasing rearrangement inequality and following the approach of the concentration-compactness principle of Lions \cite{lions}. More precisely, the inequality \eqref{massminrelation} is needed to rule out that dichotomy occurs.
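For orientation, we record the elementary scaling computation underlying such arguments (added here for the reader's convenience; see \cite{bedrossian} for the complete argument). For the mass-preserving dilation $\rho_\lambda(x)\coloneqq\lambda^N\rho(\lambda x)$ with $\lambda>0$ one has \begin{equation*} E[\rho_\lambda]=\lambda^{N(m-1)}\int_{\mathbb{R}^N}\frac{\varepsilon}{m}\rho^m(x)\,dx-\frac{1}{2}\int_{\mathbb{R}^N}\int_{\mathbb{R}^N}G\Big(\frac{x-y}{\lambda}\Big)\rho(y)\rho(x)\,dydx, \end{equation*} so that the diffusion part and the interaction part of the energy scale differently in $\lambda$.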
We prove the existence of global minimizers of $E$ with a potential satisfying \ref{(G1)}-\ref{(G4)} using some ideas in \cite{canizocarrillopatacchini} instead of the concentration-compactness principle \cite{lions}. In \cite{canizocarrillopatacchini}, the existence of global minimizers is investigated in a slightly different setting. More precisely, an interaction energy functional \begin{equation*} F[\mu]=\frac{1}{2}\int_{\mathbb{R}^N}\int_{\mathbb{R}^N}W(x-y)\,d\mu(y)d\mu(x) \end{equation*} with $\mu$ being a probability measure and $W$ an appropriate interaction potential is analysed. It is shown that, when the problem is considered on a given ball, the diameter of the support of a global minimizer of $F$ is bounded independently of the size of the given ball. This observation is used to conclude that a global minimizer of the problem in the whole space exists. A similar approach can also be found in \cite{auchmutybeals}.
Moreover, this strategy allows us to consider the case $\inf_{\rho\in L^m\cap\mathcal{P}}E[\rho]=0$ for $1<m<2$, which is not covered by the approach in \cite{lions}. In this section we will obtain the following result: \begin{thm} \label{existenceofglobalmin} Let $G$ satisfy \ref{(G1)}-\ref{(G4)}. Let one of the following conditions be satisfied: \begin{itemize}
\item[(i)] $1<m<2$ and $0<\varepsilon\leq\varepsilon_0\coloneqq\sup_{\rho\in L^m\cap\mathcal{P}}\frac{m}{2}\|\rho\|_{L^m}^{-m}\int_{\mathbb{R}^N}(G\ast\rho)(x)\rho(x)\,dx$ \item[(ii)] $m=2$ and $0<\varepsilon<1$ \item[(iii)] $m>2$ and $\varepsilon>0$ \end{itemize} Then, there exists a global minimizer of the energy $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ which is radially symmetric and monotonically decreasing.
If $1<m\leq2$ and the corresponding condition (i) or (ii) for the coefficient $\varepsilon$ is not satisfied, then there exists no global minimizer of the energy $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$. \end{thm} First, let us remark that a global minimizer of $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ cannot have positive energy: under the mass-preserving dilations $\rho_\lambda(x)\coloneqq\lambda^N\rho(\lambda x)$ the energy of any admissible function tends to zero as $\lambda\searrow0$ (note that $G$ is bounded and vanishes at infinity by our assumptions on $G$), so that $\inf_{\rho\in L^m\cap\mathcal{P}}E[\rho]\leq0$. So, if we want to obtain a global minimizer for $1<m<2$, we have to assume at least that $\varepsilon$ is chosen such that a function $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ with $E[\rho]\leq0$ exists.
If we assume that a function $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ with $E[\rho]=0$ exists but none with $E[\rho]<0$, then $\rho$ is already a global minimizer. Therefore, it is enough to consider the case that $E[\rho]<0$ for some $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$. Following an analogous idea given in \cite[Lemma 2.5]{canizocarrillopatacchini}, we define \begin{equation*} \rho_n\coloneqq\frac{1}{\int_{B_n(0)}\rho(x)\,dx}\chi_{B_n(0)}\rho. \end{equation*} By Lebesgue's monotone convergence theorem, we obtain \begin{equation*} E[\rho_n]\rightarrow E[\rho]\quad\mathrm{for}\quad n\rightarrow\infty \end{equation*} such that a function $\hat{\rho}\in L^m(\mathbb{R}^N)\cap\mathcal{P}_S(\mathbb{R}^N)$ with $E[\hat{\rho}]<0$ exists for some large enough but finite constant $S$. In particular, there exists a $\hat{\rho}\in L^m(\mathbb{R}^N)\cap\mathcal{P}_R(\mathbb{R}^N)$ with $E[\hat{\rho}]<0$ for all $R\geq S$. In the following, we always denote by $S$ the finite constant we just obtained. \begin{lem} \label{globminrestricteddomain} Let $m>1$, $\varepsilon>0$ and let $G$ satisfy \ref{(G1)}-\ref{(G4)}. Then, there exists a global minimizer of $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}_R(\mathbb{R}^N)$ for every $R>0$. \end{lem} \begin{proof} Since $G$ is bounded from above, we have $\inf_{\rho\in L^m\cap\mathcal{P}_R}E[\rho]>-\infty$. Let $(\rho_n)_{n\in\mathbb{N}}$ denote a minimizing sequence for $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}_R(\mathbb{R}^N)$. Since $G$ is bounded from above and $(\rho_n)_{n\in\mathbb{N}}$ a minimizing sequence, it holds that $(\rho_n)_{n\in\mathbb{N}}$ is bounded in $L^m(\mathbb{R}^N)$. So, there exists a subsequence $(\rho_{n_k})_{k\in\mathbb{N}}$ such that $\rho_{n_k}$ converges weakly in $L^m(\mathbb{R}^N)$ to some $\rho_R\in L^m(\mathbb{R}^N)$. Further, $L^m(\mathbb{R}^N)\cap\mathcal{P}_R(\mathbb{R}^N)$ is convex and closed in $L^m(\mathbb{R}^N)$ since $L^m(\overline{B_R(0)})$ is closed in $L^m(\mathbb{R}^N)$ and since the embedding of $L^m(\overline{B_R(0)})$ in $L^1(\overline{B_R(0)})$ as well as the mass constraint is continuous in $L^1(\overline{B_R(0)})$. Therefore, $L^m({\mathbb{R}^N})\cap\mathcal{P}_R(\mathbb{R}^N)$ is weakly closed in $L^m(\mathbb{R}^N)$ and we have $\rho_R\in L^m(\mathbb{R}^N)\cap\mathcal{P}_R(\mathbb{R}^N)$. Obviously, $E$ is weakly lower semi-continuous in $L^m(\mathbb{R}^N)$ such that $E[\rho_R]=\inf_{\rho\in L^m\cap\mathcal{P}_R}E[\rho]$. \end{proof} Performing similar calculations as in \cite[Lemma 2.3]{canizocarrillopatacchini}, we get the following result for a global minimizer of the energy in the restricted setting. \begin{lem} Let $m>1$, $\varepsilon>0$ and let $G$ satisfy \ref{(G1)}-\ref{(G4)}. Let $\rho_R$ be a global minimizer of $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}_R(\mathbb{R}^N)$, then we have \begin{equation} \label{equationforglobalminimizer} \varepsilon\rho_R^{m-1}(x)-(G\ast\rho_R)(x)=2E[\rho_R]-\int_{\mathbb{R}^N}\varepsilon\Big(\frac{2}{m}-1\Big)\rho_R^m(y)\,dy \end{equation} almost everywhere in $\mathrm{supp}\,\rho_R$. \end{lem} \begin{proof} Let $\psi\in C_0^\infty(\mathbb{R}^N)$ and define \begin{equation*} f(x)\coloneqq\Big(\psi(x)-\int_{\mathbb{R}^N}\psi(y)\rho_R(y)\,dy\Big)\rho_R(x) \end{equation*} for all $x\in\mathbb{R}^N$ and $\rho_{\delta}\coloneqq\rho_R+\delta f$ with $\delta>0$. Then, it holds that \begin{equation*} \int_{\mathbb{R}^N}\rho_{\delta}(x)\,dx=\int_{\mathbb{R}^N}\rho_R(x)\,dx+\delta\int_{\mathbb{R}^N}f(x)\,dx=1. 
\end{equation*} Moreover, we have \begin{equation*}
\psi(x)-\int_{\mathbb{R}^N}\psi(y)\rho_R(y)\,dy\geq -2\|\psi\|_{L^\infty(\mathbb{R}^N)} \end{equation*}
such that $\rho_{\delta}(x)\geq(1-2\delta\|\psi\|_{L^{\infty}(\mathbb{R}^N)})\rho_{R}(x)$. So, for sufficiently small $\delta>0$ we have $\rho_{\delta}\in L^m(\mathbb{R}^N)\cap\mathcal{P}_R(\mathbb{R}^N)$ and since $\rho_R$ is a global minimizer in $L^m(\mathbb{R}^N)\cap\mathcal{P}_R(\mathbb{R}^N)$, it follows that $E[\rho_{\delta}]\geq E[\rho_R]$. Therefore, we obtain \begin{equation*} \begin{aligned}
& \lim_{\delta\searrow0}\frac{1}{\delta}\big(E[\rho_{\delta}]-E[\rho_R]\big)\\ & \quad =\int_{\mathbb{R}^N}\Big(\varepsilon\rho_R^{m-1}(x)-(G\ast\rho_R)(x)-2E[\rho_R]\Big)\rho_R(x)\psi(x)\,dx\\
& \quad\quad +\int_{\mathbb{R}^N}\Big(\int_{\mathbb{R}^N}\varepsilon\Big(\frac{2}{m}-1\Big)\rho_R^m(y)\,dy\Big)\rho_R(x)\psi(x)\,dx\\ & \quad \geq0. \end{aligned} \end{equation*} Since this inequality holds for all $\psi\in C_0^\infty(\mathbb{R}^N)$, we conclude that \begin{equation*} \varepsilon\rho_R^{m-1}(x)-(G\ast\rho_R)(x)=2E[\rho_R]-\int_{\mathbb{R}^N}\varepsilon\Big(\frac{2}{m}-1\Big)\rho_R^m(y)\,dy \end{equation*} holds almost everywhere in $\mathrm{supp}\,\rho_R$. \end{proof} Taking into consideration the proof of Lemma \ref{condforcompsupp} and the strategy in \cite{canizocarrillopatacchini}, for $R\geq S$ we obtain for each point in the support of a global minimizer $\rho_R$ a lower bound for the mass in a certain neighbourhood. The size of the neighbourhood as well as of the lower bound do not depend on $R$. This is because of $E[\rho_R]\leq E[\rho_S]<0$ for all $R\geq S$, where $\rho_R$ denotes a global minimizer of $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}_R(\mathbb{R}^N)$ and $\rho_S$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}_S(\mathbb{R}^N)$ respectively, since we can now choose \begin{equation*} K=2E[\rho_S] \end{equation*} in the proof of Lemma \ref{condforcompsupp}.
By Riesz symmetric decreasing rearrangement inequality, we know that the global minimizer $\rho_R$ is radially symmetric and monotonically decreasing, i.e.\ it has connected support. As in \cite[Lemma 2.9]{canizocarrillopatacchini}, due to our above considerations about mass concentration, we obtain an upper bound $D<+\infty$ for the diameter of the support of $\rho_R$ for all $R>0$. Using that, as in \cite[Lemma 2.10]{canizocarrillopatacchini} we prove the following statement. \begin{lem} \label{globalminimizerformsmaller2} Let $G$ satisfy \ref{(G1)}-\ref{(G4)}. Let $1<m<2$ and $\varepsilon>0$ be small enough such that a function $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ with $E[\rho]\leq0$ exists. Then, there exists a global minimizer of $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$. \end{lem} \begin{proof} As argued before, we only need to consider the case $E[\rho]<0$. We can conclude analogously as in the proof for a global minimizer in \cite[Lemma 2.10]{canizocarrillopatacchini} and include the proof for the convenience of the reader.
Let $D$ denote the upper bound of the diameter of the support of a global minimizer $\rho_R$ for all $R>0$ and let $\rho_D$ be a global minimizer of $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}_D(\mathbb{R}^N)$. Then, we show that $\rho_D$ actually is a global minimizer of $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$.
First, consider a function $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ with compact support, i.e.\ $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}_R(\mathbb{R}^N)$ for some $R>0$. Then, $E[\rho_R]\leq E[\rho]$ with $\rho_R$ denoting a global minimizer of $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}_R(\mathbb{R}^N)$. Since the diameter of the support of $\rho_R$ is bounded by $D$, we have $\rho_R\in L^m(\mathbb{R}^N)\cap\mathcal{P}_D(\mathbb{R}^N)$. Therefore, it holds that $E[\rho_D]\leq E[\rho]$ for all $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ with compact support.
Now, consider some function $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ without further restrictions. If $n$ is chosen large enough, it holds that $\int_{B_n(0)}\rho(x)\,dx>0$. Define \begin{equation*} \rho_n\coloneqq\frac{1}{\int_{B_n(0)}\rho(x)\,dx}\chi_{B_n(0)}\rho \end{equation*} for $n$ large enough and observe that $E[\rho_n]\rightarrow E[\rho]$ as $n\rightarrow\infty$ by Lebesgue's monotone convergence theorem. Since $\rho_n$ is compactly supported, we have $E[\rho_D]\leq E[\rho_n]$ for all $n$. Therefore, it holds that $E[\rho_D]\leq E[\rho]$. Due to $E[\rho_D]<0$ by assumption, we have shown that $\rho_D$ is a global minimizer of $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$. \end{proof} By our previous approach, we can also prove the existence of global minimizers for $m\geq2$, which is obtained in \cite{bedrossian} via the concentration-compactness principle of Lions \cite{lions}, if we assume the interaction potential $G$ to be bounded.
Notice that for $m=2$ the borderline case that a function $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ with $E[\rho]=0$ exists while no function with negative energy exists cannot occur: such a $\rho$ would be a global minimizer and hence a stationary solution of \eqref{eq1b} with zero energy, which is impossible due to Lemma \ref{compsuppsatisfycond} and since no non-compact stationary solution of \eqref{eq1b} exists for $m=2$ (cf.\ Remark \ref{mequals2nononcompact}). So, for $m=2$ we also obtain as in \cite{bedrossian} the existence of a global minimizer for every $0<\varepsilon<1$. Furthermore, there cannot exist a global minimizer of $E$ in $L^2(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ for $\varepsilon\geq1$ because of the non-existence of a stationary solution of \eqref{eq1b} as shown in \cite[Lemma 3.4 and Theorem 3.5]{burgerdifrancescofranek}.
Keep in mind that for $1<m<2$ it is also impossible that a global minimizer exists for coefficients $\varepsilon>0$ which do not satisfy the condition in Lemma \ref{globalminimizerformsmaller2}.
For $m>2$, we assume by contradiction that there exists no upper bound for the diameter of the support of global minimizers $\rho_R$ of $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}_R(\mathbb{R}^N)$ for all $R>0$. Then, we can use equation \eqref{equationforglobalminimizer} and the argumentation in the proof of Theorem \ref{globmincomp} to show that a global minimizer $\rho_S$ of $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}_S(\mathbb{R}^N)$ satisfies \begin{equation*}
2E[\rho_S]-\int_{\mathbb{R}^N}\varepsilon\Big(\frac{2}{m}-1\Big)\rho_S^m(y)\,dy\leq\rho_S(x)(\varepsilon C^{m-2}|x|^{-N(m-2)}-C^\prime)<0 \end{equation*}
if $S$ is chosen large enough such that there exists a point $x\in\,\mathrm{supp}\,\rho_S$ with $|x|>1$ large enough.
We have to show that there exists a constant $K<0$ such that \begin{equation*} 2E[\rho_R]-\int_{\mathbb{R}^N}\varepsilon\Big(\frac{2}{m}-1\Big)\rho_R^m(y)\,dy\leq K<0 \end{equation*} for all $R\geq S$. We can use this constant $K$ as in the proof of Lemma \ref{condforcompsupp} to obtain a uniform upper bound for the diameter of the support of $\rho_R$.
Choosing a point $x\in\,\mathrm{supp}\,\rho_S$ such that $\varepsilon C^{m-2}|x|^{-N(m-2)}-C^\prime<0$, we are done if it holds that $\rho_R(x)\geq c>0$ for all $R\geq S$ since we can choose $K=c(\varepsilon C^{m-2}|x|^{-N(m-2)}-C^\prime)$. Let us assume the contrary, i.e.\ $\rho_R(x)\rightarrow0$ as $R\rightarrow\infty$ (along a subsequence), and note that $\rho_R$ is radially symmetric and monotonically decreasing, so that, passing to a further subsequence, $\rho_R$ converges pointwise on $\mathbb{R}^N\setminus\{0\}$ to a radially symmetric and monotonically decreasing limit function $\rho_\infty$. If $\int_{\mathbb{R}^N}\rho_\infty(x)\,dx=1$, then we have $E[\rho_\infty]=E[\rho_S]$ and $\rho_\infty\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ which means that there exists a global minimizer $\rho_R$ of $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}_R(\mathbb{R}^N)$ with $\mathrm{supp}\,\rho_R\subset\mathrm{supp}\,\rho_S$ for all $R\geq S$. We exclude $\rho_\infty\equiv0$ since $E[\rho_R]\leq E[\rho_S]<0$. Furthermore, for every $\delta>0$ we find a constant $R^\prime>0$ such that $\inf_{\rho\in L^m\cap\mathcal{P}}E[\rho]>E[\rho_R]-\delta$ for all $R\geq R^\prime$. Therefore, assuming that $\int_{\mathbb{R}^N}\rho_\infty(x)\,dx=M\in(0,1)$ we obtain a contradiction by inequality \eqref{massminrelation} (which holds for $m>2$) since $E[\rho_R]\rightarrow E[\rho_\infty]\geq\inf_{\rho\in L^m\cap\mathcal{P}^M}E[\rho]$. \begin{lem} \label{noexistenceofglobminformsmaller2}
Let $G$ satisfy \ref{(G1)}-\ref{(G4)}. Let $1<m\leq2$ and $\varepsilon>\frac{m}{2}\|G\|_{L^{\frac{1}{m-1}}}$, then there exists no global minimizer of $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$. \end{lem} \begin{proof} By H\"older's inequality and Young's inequality for convolutions, we obtain for $1\leq r\leq2$ \begin{equation*} \begin{aligned}
\|(G\ast\rho)\rho\|_{L^1} & \leq\|G\ast\rho\|_{L^{\frac{r}{r-1}}}\|\rho\|_{L^r}\leq\|G\|_{L^{\frac{r}{2r-2}}}\|\rho\|_{L^r}^2\\
& \leq\|G\|_{L^{\frac{r}{2r-2}}}\|\rho\|_{L^{\frac{3r-2}{r}}}^{\frac{3r-2}{r}}\|\rho\|_{L^1}^{2-\frac{3r-2}{r}}.
\end{aligned} \end{equation*} For $1<m\leq2$, we can choose $r\geq1$ such that $m=\frac{3r-2}{r}$ to conclude that \begin{equation*}
E[\rho]\geq\frac{\varepsilon}{m}\|\rho\|_{L^m}^m-\frac{1}{2}\|G\|_{L^{\frac{1}{m-1}}}\|\rho\|_{L^m}^m\geq\Big(\frac{\varepsilon}{m}-\frac{1}{2}\|G\|_{L^{\frac{1}{m-1}}}\Big)\|\rho\|_{L^m}^m. \end{equation*}
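For completeness, the choice of $r$ can be made explicit (an elementary verification added for the reader's convenience): solving $m=\frac{3r-2}{r}$ gives \begin{equation*} r=\frac{2}{3-m}\in(1,2]\qquad\text{and}\qquad\frac{r}{2r-2}=\frac{1}{m-1}, \end{equation*} so that, together with $\|\rho\|_{L^1}=1$, the preceding estimate indeed yields $\|(G\ast\rho)\rho\|_{L^1}\leq\|G\|_{L^{\frac{1}{m-1}}}\|\rho\|_{L^m}^m$.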
As remarked above, a global minimizer cannot have positive energy, so that the existence of a global minimizer of $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ is excluded for $\varepsilon>\frac{m}{2}\|G\|_{L^{\frac{1}{m-1}}}$. \end{proof} \begin{rmk} \label{noexistenceofcompstatsolformsmaller2}
Analogously to Lemma \ref{noexistenceofglobminformsmaller2}, we can state a result for the non-existence of compact stationary solutions of \eqref{eq1b} in case of $1<m\leq2$. Considering the compactness condition from Lemma \ref{condforcompsupp}, which a compact stationary solution necessarily satisfies by Lemma \ref{compsuppsatisfycond}, we conclude that there cannot exist a compact stationary solution $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ of \eqref{eq1b} for $1<m\leq2$ if $\varepsilon>\|G\|_{L^{\frac{1}{m-1}}}$. \end{rmk}
However, these estimates do not imply that a global minimizer of $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ exists for all coefficients $\varepsilon\leq\frac{m}{2}\|G\|_{L^{\frac{1}{m-1}}}$ or that a compact stationary solution $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ of \eqref{eq1b} exists for all coefficients $\varepsilon\leq\|G\|_{L^{\frac{1}{m-1}}}$.
The threshold for the existence of a global minimizer of $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ can be specified for $1<m\leq2$ by \begin{equation}
\varepsilon_0\coloneqq\sup_{\rho\in L^m\cap\mathcal{P}}\frac{m}{2}\frac{\int_{\mathbb{R}^N}(G\ast\rho)(x)\rho(x)\,dx}{\|\rho\|_{L^m}^m}. \end{equation} \begin{lem} Let $G$ be bounded and satisfy \ref{(G1)}-\ref{(G4)}. Let $1<m<2$, then there exists a compactly supported function $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ such that $\varepsilon_0$ is attained. \end{lem} \begin{proof}
Let us denote by $(\rho_n)_{n\in\mathbb{N}}$ a maximizing sequence in $L^m(\mathbb{R}^N)\cap\mathcal{P}_R(\mathbb{R}^N)$ for $\frac{m}{2}\|\rho\|_{L^m}^{-m}\int_{\mathbb{R}^N}(G\ast\rho)(x)\rho(x)\,dx$. Due to $G$ being bounded and $(\rho_n)_{n\in\mathbb{N}}$ being a maximizing sequence, it follows that $(\rho_n)_{n\in\mathbb{N}}$ is bounded in $L^m(\mathbb{R}^N)$. Therefore, as in the proof of Lemma \ref{globminrestricteddomain}, we can conclude that \begin{equation}
\varepsilon_{0,R}\coloneqq\sup_{\rho\in L^m\cap\mathcal{P}_R}\frac{m}{2}\frac{\int_{\mathbb{R}^N}(G\ast\rho)(x)\rho(x)\,dx}{\|\rho\|_{L^m}^m} \end{equation} is attained for some $\rho_{0,R}\in L^m(\mathbb{R}^N)\cap\mathcal{P}_R(\mathbb{R}^N)$.
Note that $(\rho_{0,R})_{R\geq R^\prime}$ is a maximizing sequence for the problem in $L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$, i.e. $\varepsilon_{0,R}\rightarrow\varepsilon_0$ as $R\rightarrow\infty$ and in particular $\varepsilon_{0,R}\geq\varepsilon_{0,R^\prime}$ for all $R\geq R^\prime$ where $R^\prime$ is chosen large enough. Due to $\|(G\ast\rho)\rho\|_{L^1}\leq\|G\|_{L^{\frac{m}{2m-2}}}\|\rho\|_{L^m}^2$, the $L^m$-norm of this maximizing sequence is bounded from below by a constant greater than zero, i.e. $\|\rho_{0,R}\|_{L^m}\geq\delta>0$ for all $R\geq R^\prime$. Note that, when the coefficient in the energy is chosen as $\varepsilon_{0,R}$, the maximizing property of $\rho_{0,R}$ gives $E[\rho_{0,R}]=0$; together with the lower bound of the $L^m$-norm we therefore estimate that \begin{equation} 2E[\rho_{0,R}]-\int_{\mathbb{R}^N}\varepsilon_{0,R}\Big(\frac{2}{m}-1\Big)\rho_{0,R}^m(y)\,dy\leq-\varepsilon_{0,R^\prime} \Big(\frac{2}{m}-1\Big)\delta^m \end{equation} for all $R\geq R^\prime$. Choosing $K=-\varepsilon_{0,R^\prime}\big(\frac{2}{m}-1\big)\delta^m$ in Lemma \ref{condforcompsupp}, we obtain $\mathrm{supp}\,\rho_{0,R}\subset B_D(0)$ for all $R>0$ where $D$ depends on $R^\prime$ but is independent of $R\geq R^\prime$. Hence, $\rho_{0,R}\in L^m(\mathbb{R}^N)\cap\mathcal{P}_D(\mathbb{R}^N)$ for all $R>0$ and, similarly to the proof of Lemma \ref{globalminimizerformsmaller2}, it follows that $\varepsilon_0=\varepsilon_{0,D}$. \end{proof} To sum up, we have now proved the statement of Theorem \ref{existenceofglobalmin} concerning the existence of a global minimizer of the energy $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ depending on the exponent $m$ of the degenerate diffusion and the coefficient $\varepsilon$.
\section{Uniqueness of stationary solutions} \label{stationarysolutions} In the following, we prove for $m=2$ in arbitrary dimensions the uniqueness of stationary solutions of \eqref{eq1b} up to a translation. In \cite{burgerdifrancescofranek}, it is shown for $N=1$ that a stationary solution of \eqref{eq1b} is unique up to a translation. As in \cite{burgerfetecauhuang} for $N=1$, we also derive for $m\neq2$ in higher dimensions that there are coefficients $\varepsilon$ such that compact stationary solutions of \eqref{eq1b} exist.
First, we sketch the strategy in \cite[Section 4]{burgerdifrancescofranek} which is used to conclude for $N=1$ that up to a translation a unique stationary solution in $L^2(\mathbb{R})\cap\mathcal{P}(\mathbb{R})$ of \eqref{eq1b} exists for $m=2$ and $0<\varepsilon<1$. \begin{itemize} \item Stationary solutions of \eqref{eq1b} have connected support for $N=1$. \item Stationary solutions of \eqref{eq1b} are compactly supported for $N=1$. \item For any stationary solution of \eqref{eq1b} there exists a symmetric stationary solution of \eqref{eq1b} with the same energy if $N=1$.
\item A stationary solution $\rho\in L^2(\mathbb{R})\cap\mathcal{P}(\mathbb{R})$ of \eqref{eq1b} with $|\mathrm{supp}\,\rho|\geq|\mathrm{supp}\,\rho_{\mathrm{min}}|$, where $\rho_{\mathrm{min}}$ is a global minimizer of $E$ in $L^2(\mathbb{R})\cap\mathcal{P}(\mathbb{R})$, is a minimizer. \item For every $L>0$, a unique function $\rho$ with $\mathrm{supp}\,\rho=[-L,L]$ exists which is symmetric, monotonically decreasing on $\{x>0\}$, compactly supported and satisfies \begin{equation*} \varepsilon\rho(x)=\int_{-L}^{L}G(x-y)\rho(y)\,dy-(G\ast\rho)(L) \end{equation*} for some $\varepsilon>0$. This follows by considering the eigenvalue problem \begin{equation*} \varepsilon\rho^{\prime}(x)=\int_0^L\big(G(x-y)-G(x+y)\big)\rho^{\prime}(y)\,dy \end{equation*} via the strong version of the Krein-Rutman theorem (see Theorem \ref{strongversionkreinrutman}). \item The eigenvalue $\varepsilon$ in the above eigenvalue problem is strictly monotonically increasing with the size $L$ of the support of $\rho$. It holds that $\varepsilon\searrow 0$ as $L\searrow 0$ and $\varepsilon\nearrow1$ as $L\nearrow +\infty$. \item Using the above results and that the global minimizer is symmetric and monotonically decreasing, one can show for $\varepsilon<1$ that up to a translation a unique stationary solution $\rho\in L^2(\mathbb{R})\cap\mathcal{P}(\mathbb{R})$ of \eqref{eq1b} exists for $N=1$ which coincides with the global minimizer of $E$ in $L^2(\mathbb{R})\cap\mathcal{P}(\mathbb{R})$. \end{itemize}
Due to Theorem \ref{symmetryofstationarysolutions}, we already know that a stationary solution of \eqref{eq1b} is radially symmetric and monotonically decreasing in higher dimensions. In particular, it has a connected support. As observed in Remark \ref{mequals2nononcompact}, for $m=2$ there cannot exist a stationary solution of \eqref{eq1b} with non-compact support. By Theorem \ref{globmincomp} and Theorem \ref{existenceofglobalmin}, we also know that the global minimizer of $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ is radially symmetric, monotonically decreasing and has compact support.
Following the last three points used for the proof for $N=1$, in higher dimensions we show for $m=2$ and $0<\varepsilon<1$ that there is a unique stationary solution of \eqref{eq1b} up to a translation which coincides with the global minimizer of the energy $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$. Moreover, as in \cite[Section 3.1]{burgerfetecauhuang} for $N=1$, we deduce in higher dimensions in case of $m\neq 2$ for any $R>0$ the existence of a coefficient $\varepsilon>0$ such that there is a radially symmetric stationary solution of \eqref{eq1b} with support $\overline{B_R(0)}$.
For radially symmetric functions $\rho$ and $G$, i.e.\ $\rho(x)=\tilde{\rho}(|x|)$ and $G(x)=g(|x|)$, we can write \begin{equation*} \begin{aligned} (G\ast\rho)(x) & =\int_{\mathbb{R}^N}G(x-y)\rho(y)\,dy=\int_0^{+\infty}\Big(\int_{\partial B_s(0)}G(x-y)\,d\sigma(y)\Big)\tilde{\rho}(s)\,ds\\
& =\int_0^{+\infty}\Big(\int_{\partial B_s(0)}G(|x|e_1-y)\,d\sigma(y)\Big)\tilde{\rho}(s)\,ds. \end{aligned} \end{equation*} Using that $\int_{\partial B_s(0)}\nabla G(x-y)\,d\sigma(y)$ is parallel to $x$ and rotationally invariant, it is shown in \cite[Lemma 3.2]{bertozzicarrillolaurent} that it holds \begin{equation}
\nabla(G\ast\rho)(x)=\int_0^{+\infty}\Big(\int_{\partial B_s(0)}\nabla G(|x|e_1-y)\cdot e_1\,d\sigma(y)\Big)\tilde{\rho}(s)\,ds\,\frac{x}{|x|}. \end{equation} Similarly, we also derive \begin{equation}
\nabla(G\ast\rho)(x)=\int_0^{+\infty}\Big(\int_{\partial B_s(0)}G(|x|e_1-y)\frac{y\cdot e_1}{|y|}\,d\sigma(y)\Big)\tilde{\rho}^{\prime}(s)\,ds\,\frac{x}{|x|}. \end{equation} Using the radial symmetry of $G$, it is useful to observe that we have \begin{equation} \label{rotationjustwithg} \int_{\partial B_s(0)}G(re_1-y)\,d\sigma(y)=\frac{s^{N-1}}{r^{N-1}}\int_{\partial B_r(0)}G(se_1-y)\,d\sigma(y) \end{equation} and \begin{equation} \label{rotationwithgandcos}
\int_{\partial B_s(0)}G(re_1-y)\frac{y\cdot e_1}{|y|}\,d\sigma(y)=\frac{s^{N-1}}{r^{N-1}}\int_{\partial B_r(0)}G(se_1-y)\frac{y\cdot e_1}{|y|}\,d\sigma(y). \end{equation} In the radial case, the energy functional reads \begin{equation*} \begin{aligned} E[\tilde{\rho}]= & \int_0^{+\infty}\frac{\varepsilon}{m}\tilde{\rho}^m(r)\omega_N r^{N-1}\,dr\\ & -\frac{1}{2}\int_0^{+\infty}\int_0^{+\infty}\omega_N r^{N-1}\tilde{\rho}(r)\Big(\int_{\partial B_s(0)}G(re_1-y)\,d\sigma(y)\Big)\tilde{\rho}(s)\,dsdr \end{aligned} \end{equation*} where $\omega_N$ denotes the surface area of the unit sphere in $\mathbb{R}^N$.
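For the reader's convenience, we indicate the short verification of \eqref{rotationjustwithg} and \eqref{rotationwithgandcos}: parametrizing $y=s\omega$ with $\omega\in\partial B_1(0)$ gives \begin{equation*} \int_{\partial B_s(0)}G(re_1-y)\,d\sigma(y)=s^{N-1}\int_{\partial B_1(0)}g\big((r^2+s^2-2rs\,\omega\cdot e_1)^{1/2}\big)\,d\sigma(\omega), \end{equation*} and the inner integral is symmetric in $r$ and $s$, so that exchanging the roles of $r$ and $s$ only changes the prefactor $s^{N-1}$ into $r^{N-1}$; the same argument applies with the additional factor $\frac{y\cdot e_1}{|y|}=\omega\cdot e_1$.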
Let us now state the strong version of the Krein-Rutman theorem as in \cite[Theorem 4.10]{burgerdifrancescofranek}. \begin{thm}[Krein-Rutman theorem, strong version] \label{strongversionkreinrutman} Let $X$ be a Banach space. Let $K\subset X$ be a solid cone, i.e.\ $\lambda K\subset K$ for all $\lambda\geq 0$ and $K$ has a non-empty interior $K_0$. Let $T$ be a compact linear operator which is strongly positive with respect to $K$, i.e.\ $T[u]\in K_0$ if $u\in K\setminus\{0\}$. Then: \begin{itemize} \item[(i)] The spectral radius $r(T)$ is strictly positive and $r(T)$ is a simple eigenvalue with an eigenvector $v\in K_0$. There is no other eigenvalue with a corresponding eigenvector $v\in K$.
\item[(ii)] $|\lambda|<r(T)$ for all other eigenvalues $\lambda\neq r(T)$. \end{itemize} \end{thm} Following the approach for $N=1$ in \cite[Section 4]{burgerdifrancescofranek}, finding in higher dimensions a compact, radially symmetric and monotonically decreasing function $\rho\in C^2(\mathrm{supp}\,\rho)$ vanishing on the boundary and satisfying \begin{equation*} \varepsilon\rho(x)=(G\ast\rho)(x)-(G\ast\rho)(\tilde{x}) \end{equation*} with $\tilde{x}\in\partial(\mathrm{supp}\,\rho)$, means finding $\tilde{\rho}\in C^2(\mathrm{supp}\,\tilde{\rho})$ with $\mathrm{supp}\,\tilde{\rho}=[0,R]$ such that \begin{equation*} \begin{aligned} & \tilde{\rho}(R)=0,\quad -\tilde{\rho}^{\prime}(r)=u(r),\quad u\geq 0, \\
& \varepsilon u=\int_0^R H(r,s)u(s)\,ds\quad\mathrm{with}\quad H(r,s)\coloneqq\int_{\partial B_s(0)}G(re_1-y)\frac{y\cdot e_1}{|y|}\,d\sigma(y). \end{aligned} \end{equation*} This is equivalent since we have \begin{equation*} \begin{aligned}
\varepsilon\rho(x) & =\varepsilon\int_{|x|}^R -\tilde{\rho}^{\prime}(r)\,dr\\
& =-\int_{|x|}^R\int_0^R\Big(\int_{\partial B_s(0)}G(re_1-y)\frac{y\cdot e_1}{|y|}\,d\sigma(y)\Big)\tilde{\rho}^{\prime}(s)\,dsdr\\
& =-\int_{|x|}^R\int_0^R\Big(\int_{\partial B_s(0)}\partial_r G(re_1-y)\,d\sigma(y)\Big)\tilde{\rho}(s)\,dsdr\\ & =-\int_0^R\Big(\int_{\partial B_s(0)}G(Re_1-y)\,d\sigma(y)\Big)\tilde{\rho}(s)\,ds\\
& \quad +\int_0^R\Big(\int_{\partial B_s(0)}G(|x|e_1-y)\,d\sigma(y)\Big)\tilde{\rho}(s)\,ds\\ & =(G\ast\rho)(x)-(G\ast\rho)(\tilde{x}) \end{aligned} \end{equation*}
with $|\tilde{x}|=R$.
In order to simplify notation, let us define \begin{align}
Y_R & \coloneqq\{\tilde{\rho}\in C([0,R])\,\big|\,\tilde{\rho}(R)=0\},\\
\mathcal{H}_R[u](r) & \coloneqq \int_0^R\Big(\int_{\partial B_s(0)}G(re_1-y)\frac{y\cdot e_1}{|y|}\,d\sigma(y)\Big)u(s)\,ds,\\ \mathcal{G}_R[\tilde{\rho}] & \coloneqq \int_0^R\Big(\int_{\partial B_s(0)}G(re_1-y)-G(Re_1-y)\,d\sigma(y)\Big)\tilde{\rho}(s)\,ds. \end{align}
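For orientation, we note the explicit form of the kernel $H$ in dimension $N=2$ (a computation added here for illustration): parametrizing $y=s(\cos\theta,\sin\theta)$, so that $d\sigma(y)=s\,d\theta$ and $\frac{y\cdot e_1}{|y|}=\cos\theta$, we obtain \begin{equation*} H(r,s)=s\int_0^{2\pi}g\big((r^2+s^2-2rs\cos\theta)^{1/2}\big)\cos\theta\,d\theta. \end{equation*}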
To prove the following result about uniqueness of a function with the above properties via the strong version of the Krein-Rutman theorem as in \cite[Proposition 4.11]{burgerdifrancescofranek} for $N=1$, it is enough to show that if $u\geq 0$, then $\mathcal{H}_R[u](r)\geq 0$ holds for all $r\in[0,R]$ and $\mathcal{H}_R[u](0)=0$. Moreover, for $u\in\{f\in C^1([0,R])\,\big|\,f(0)=0\}$ satisfying $u\geq0$ and $u\not\equiv0$ we must have $(\mathcal{H}[u])^{\prime}(0)>0$. \begin{thm} \label{uniquenessviakreinrut} Let $G$ satisfy \ref{(G1)}-\ref{(G4)}. For every $R>0$ there exists a unique, radially symmetric function $\rho\in C^2(\overline{B_R(0)})\cap\mathcal{P}(\mathbb{R}^N)\cap C(\mathbb{R}^N)$ with $\mathrm{supp}\,\rho=\overline{B_R(0)}$ and with radial representative $\tilde{\rho}$ such that $\tilde{\rho}^{\prime}(r)\leq 0$ for $r\geq0$, $\tilde{\rho}^{\prime\prime}(0)<0$ and such that $\rho$ solves \begin{equation*} \varepsilon\rho(x)=(G\ast\rho)(x)-(G\ast\rho)(R) \end{equation*} in $\overline{B_R(0)}$ for some coefficient $\varepsilon=\varepsilon(R)>0$.
Moreover, $\varepsilon(R)$ is the largest eigenvalue of the compact operator $\mathcal{G}_R$ in the Banach space $Y_R$ and any other eigenfunction of $\mathcal{G}_R$ in $Y_R$ with \begin{equation*} \int_0^{+\infty}\tilde{\rho}(r)\omega_N r^{N-1}\,dr=1 \end{equation*}
has the corresponding eigenvalue $\varepsilon^{\prime}$ satisfying $|\varepsilon^{\prime}|<\varepsilon(R)$. \end{thm} \begin{proof} Let us split up $H(r,s)$ into \begin{equation*} \begin{aligned}
H(r,s) & =\int_{\partial B_s(0)\cap\{y_1>0\}}G(re_1-y)\frac{y\cdot e_1}{|y|}\,d\sigma(y)\\
& \quad +\int_{\partial B_s(0)\cap\{y_1<0\}}G(re_1-y)\frac{y\cdot e_1}{|y|}\,d\sigma(y). \end{aligned} \end{equation*}
For any $y=(y_1,\ldots,y_N)\in\partial B_s(0)$ with $y_1>0$, the reflected point $\tilde{y}=(-y_1,y_2,\ldots,y_N)$ also lies on $\partial B_s(0)$ and satisfies $\frac{y\cdot e_1}{|y|}=-\frac{\tilde{y}\cdot e_1}{|\tilde{y}|}$.
Let $r\geq 0$, then it holds that $|r-y_1|\leq|r-\tilde{y}_1|$, i.e.\ we have $|re_1-y|\leq|re_1-\tilde{y}|$ and $g(|re_1-y|)\geq g(|re_1-\tilde{y}|)$ and equality holds if and only if $r=0$ since $g$ is strictly monotonically decreasing.
Therefore, we have $G(re_1-y)\frac{y\cdot e_1}{|y|}\geq-G(re_1-\tilde{y})\frac{\tilde{y}\cdot e_1}{|\tilde{y}|}$, so that $H(r,s)\geq 0$ for all $r\in[0,R]$ and equality holds if and only if $r=0$. Due to $u$ being non-negative, we directly see that $\mathcal{H}_R[u](r)\geq 0$ holds for all $r\in[0,R]$ and $\mathcal{H}_R[u](0)=0$.
Finally, calculating the derivative of $\mathcal{H}_R[u]$ at the point $r=0$ for $u\not\equiv 0$ \begin{equation*}
\frac{d}{dr}\mathcal{H}_R[u](r)\Big|_{r=0}=-\int_0^{R}\int_{\partial B_s(0)}g^{\prime}(|y|)\frac{y_1^2}{|y|^2}\,d\sigma(y)\,u(s)\,ds>0 \end{equation*} yields the desired inequality. \end{proof} Using the properties of the radially symmetric case as in Theorem \ref{uniquenessviakreinrut}, following the proofs for $N=1$ in \cite[Section 3.1]{burgerfetecauhuang} we also obtain in case of $m\neq2$ for all $R>0$ the existence of a compact, radially symmetric stationary solution of \eqref{eq1b} with $\mathrm{supp}\,\rho=\overline{B_R(0)}$ in higher dimensions. Due to the additional non-linearity arising for $m\neq2$, it seems to be difficult to obtain the uniqueness as in Theorem \ref{uniquenessviakreinrut}. \begin{thm} \label{existenceviakreinrutmanforgeneralm} Let $G$ satisfy \ref{(G1)}-\ref{(G4)}. For every $R>0$ there exists a radially symmetric function $\rho\in\mathcal{P}(\mathbb{R}^N)\cap C(\mathbb{R}^N)$ with $\mathrm{supp}\,\rho=\overline{B_R(0)}$ and with radial representative $\tilde{\rho}$ such that $\tilde{\rho}^{\prime}(r)\leq0$ for $r\geq0$, $\tilde{\rho}(R)=0$ and $\rho$ solves \begin{equation*} \varepsilon\rho^{m-1}(x)=(G\ast\rho)(x)-(G\ast\rho)(R) \end{equation*} in $\overline{B_R(0)}$ for some coefficient $\varepsilon>0$. \end{thm} Analogously to the one-dimensional case in \cite[Proposition 4.12]{burgerdifrancescofranek}, we can prove the following result in higher dimensions. \begin{lem} \label{eigenvalueandsupport} In case of $m=2$ the simple eigenvalue $\varepsilon(R)$ from Theorem \ref{uniquenessviakreinrut} is uniquely determined as a function of $R$ and $\varepsilon(R)$ is strictly monotonically increasing with $R$, $\varepsilon(R)\searrow 0$ as $R\searrow 0$ and $\varepsilon(R)\nearrow1$ as $R\nearrow +\infty$. \end{lem} \begin{proof} We adapt the approach in \cite[Proposition 4.12]{burgerdifrancescofranek} for $N=1$ to our setting. Let $u_R$ denote the unique eigenfunction from Theorem \ref{uniquenessviakreinrut} with corresponding eigenvalue $\varepsilon(R)$. We conclude, as was the idea in the proof of \cite[Proposition 4.12]{burgerdifrancescofranek}, by considering \begin{equation} \varepsilon(R)u_R(r)=\mathcal{H}_R[u_R](r), \end{equation} multiplying by $r^{N-1}u_{R+\delta}(r)$ and taking into consideration that $H(r,s)=\frac{s^{N-1}}{r^{N-1}}H(s,r)$ due to \eqref{rotationwithgandcos}. Then, we obtain for every $\delta>0$ \begin{equation*} \begin{aligned} & \varepsilon(R)\int_0^R r^{N-1}u_R(r)u_{R+\delta}(r)\,dr=\int_0^R r^{N-1}\mathcal{H}_R[u_R](r)u_{R+\delta}(r)\,dr\\ & \quad=\int_0^R\int_0^R r^{N-1}H(r,s)u_R(s)\,ds\,u_{R+\delta}(r)\,dr\\ & \quad=\int_0^R s^{N-1}u_R(s)\Big(\int_0^{R+\delta}H(s,r)u_{R+\delta}(r)\,dr\Big)ds\\ & \quad\quad-\int_0^R s^{N-1}u_R(s)\Big(\int_R^{R+\delta}H(s,r)u_{R+\delta}(r)\,dr\Big)ds\\ & \quad=\varepsilon(R+\delta)\int_0^R s^{N-1}u_R(s)u_{R+\delta}(s)\,ds\\ & \quad\quad-\int_0^R s^{N-1}u_R(s)\Big(\int_R^{R+\delta}H(s,r)u_{R+\delta}(r)\,dr\Big)ds. \end{aligned} \end{equation*} Since we know by the proof of Theorem \ref{uniquenessviakreinrut} that $u_R(r)>0$ holds for $r\in(0,R]$, we have shown that $\varepsilon(R+\delta)>\varepsilon(R)$. \end{proof} Now, we prove for $m=2$ in arbitrary dimensions that there is a unique stationary solution of \eqref{eq1b} up to a translation.
\begin{proof}[Proof of Theorem \ref{uniquenesstheoremformequals2}] By Theorem \ref{existenceofglobalmin}, we know that a radially symmetric and monotonically decreasing global minimizer of $E$ in $L^2(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ exists for $\varepsilon<1$. Therefore, the minimizer has connected support and, as shown in Theorem \ref{globmincomp}, compact support. Due to Corollary \ref{propcompandnoncompsol}, Theorem \ref{uniquenessviakreinrut} and Lemma \ref{eigenvalueandsupport}, we know that, up to a translation, a unique stationary solution of \eqref{eq1b} with these properties exists, so that the global minimizer of the energy $E$ in $L^2(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ is unique up to a translation.
Moreover, by Corollary \ref{propcompandnoncompsol} and Remark \ref{mequals2nononcompact}, for $m=2$ there exists no stationary solution $\rho\in L^2(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ of \eqref{eq1b} with non-compact support, and due to Theorem \ref{symmetryofstationarysolutions} every stationary solution of \eqref{eq1b} is radially symmetric and monotonically decreasing. Therefore, we have proved that a stationary solution $\rho\in L^2(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ of \eqref{eq1b} is unique up to a translation. \end{proof}
\section{Discussion of stationary solutions with positive energy} \label{discussion} In this section, we discuss whether a stationary solution of \eqref{eq1b} with positive energy may exist. We assume that the interaction potential $G$ satisfies \ref{(G1)}-\ref{(G4)}.
We know by Theorem \ref{uniquenesstheoremformequals2} that for $m=2$ each coefficient $0<\varepsilon<1$ (equivalently, each radius $R>0$) gives rise to a triple $(\varepsilon,R,\rho)$ in which each component uniquely determines the other ones and such that \begin{equation*} \rho\nabla(\varepsilon\rho^{m-1}-G\ast\rho)=0 \end{equation*} is satisfied. Here, $\rho$ is a radially symmetric and monotonically decreasing function $\rho\in C^2(\overline{B_R(0)})\cap\mathcal{P}(\mathbb{R}^N)\cap C(\mathbb{R}^N)$ with $\mathrm{supp}\,\rho=\overline{B_R(0)}$.
If one would like to derive this result for $m\neq2$, even when being able to prove uniqueness in Theorem \ref{existenceviakreinrutmanforgeneralm}, which is indicated by numerical calculations in \cite[Section 4.2]{burgerfetecauhuang}, one cannot simply follow the approach in the proof of Lemma \ref{eigenvalueandsupport} since for general $m>1$ one obtains \begin{equation*} \begin{aligned} & \varepsilon(R+\delta)(m-1)\int_0^R r^{N-1}\tilde{\rho}_{R+\delta}^{m-2}(r)u_R(r)u_{R+\delta}(r)\,dr\\ & -\varepsilon(R)(m-1)\int_0^R r^{N-1}\tilde{\rho}_R^{m-2}(r)u_R(r)u_{R+\delta}(r)\,dr\\ & \quad=\int_0^R r^{N-1}u_R(r)\Big(\int_R^{R+\delta}H(r,s)u_{R+\delta}(s)\,ds\Big)dr. \end{aligned} \end{equation*}
From this equation, one could still conjecture that the coefficient $\varepsilon$ is strictly monotonically increasing with the size of the support, as is also suggested by numerical results in \cite[Section 4.2]{burgerfetecauhuang}.
However, in case of $m>2$ we can at least show that the size of the support of a compact stationary solution of \eqref{eq1b} is bounded from below by the coefficient $\varepsilon$. Using the reverse H\"older inequality, we obtain \begin{equation} \label{estimateforrhoinnorm}
\varepsilon\|\rho\|_{L^{m-1}}^{m-1}=\varepsilon\|\rho^{m-1}\|_{L^1}\geq\varepsilon\|\rho\|_{L^1}^{m-1}|\mathrm{supp}\,\rho|^{2-m}=\varepsilon|\mathrm{supp}\,\rho|^{2-m}. \end{equation} Since we assume the stationary solution of \eqref{eq1b} to be compactly supported, it holds that $\varepsilon\rho^{m-1}(x)<G\ast\rho(x)$ in $\mathrm{supp}\,\rho$. Integrating over $\mathrm{supp}\,\rho$ and extending the integration domain on the right hand side to the whole space, using inequality \eqref{estimateforrhoinnorm} and that $\int_{\mathbb{R}^N}(G\ast\rho)(x)\,dx=\|G\|_{L^1}=1$ by the normalization of the interaction potential, we obtain that \begin{equation*}
\varepsilon|\mathrm{supp}\,\rho|^{2-m}<1. \end{equation*} So, we derived for $m>2$ a lower bound for the size of the support depending on the coefficient $\varepsilon$ which reads for a radially symmetric and monotonically decreasing stationary solution of \eqref{eq1b} with $\mathrm{supp}\,\rho=\overline{B_R(0)}$ as \begin{equation*}
|B_R(0)|>\varepsilon^{\frac{1}{m-2}}. \end{equation*} Such a relation is also obtained for $N=1$ in \cite[Section 4.2]{burgerfetecauhuang} by assuming the stationary solution of \eqref{eq1b} to be approximately a characteristic function which is suitably scaled to have unit mass.
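For illustration (a numerical example added here, with parameters chosen arbitrarily), for $N=3$, $m=3$ and $\varepsilon=1$ this bound reads $|B_R(0)|=\frac{4}{3}\pi R^3>1$, i.e.\ $R>\big(\frac{3}{4\pi}\big)^{1/3}\approx0.62$.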
If $m>2$, then there exists for every $\varepsilon>0$ a radially symmetric and monotonically decreasing stationary solution $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ of \eqref{eq1b} being compactly supported due to Theorem \ref{existenceofglobalmin}, the compactness of global minimizers of $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ (Theorem \ref{globmincomp}) and Lemma \ref{resultsfromburgerdifrancescofranek}\ref{results(iv)}.
In contrast, if $1<m<2$, we only know that there exists for all coefficients $\varepsilon$ not greater than some $\varepsilon_0>0$ a radially symmetric and monotonically decreasing stationary solution $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ of \eqref{eq1b} being compactly supported due to Theorem \ref{existenceofglobalmin}, Theorem \ref{globmincomp} and Lemma \ref{resultsfromburgerdifrancescofranek}\ref{results(iv)}. The constant $\varepsilon_0$ depends on the interaction potential $G$ and the exponent $m$ of the degenerate diffusion (cf.\ Remark \ref{noexistenceofcompstatsolformsmaller2}) and marks the threshold where we can find a function $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ such that we have $E[\rho]\leq0$.
These observations complement the results in the theoretical part of \cite{burgerfetecauhuang}. It is shown in \cite[Theorem 3.7]{burgerfetecauhuang} for $N=1$ that for $m>2$ there exists for every $L>0$ a coefficient $\varepsilon>0$ such that there is a stationary solution of \eqref{eq1b} with $\mathrm{supp}\,\rho=[-L,L]$ which is symmetric and monotonically decreasing on $\{x>0\}$. We extended this result in Theorem \ref{existenceviakreinrutmanforgeneralm} to arbitrary dimensions. In the numerical part of \cite{burgerfetecauhuang}, it is suggested that there may exist a compactly supported stationary solution of \eqref{eq1b} for all coefficients $\varepsilon>0$ which is proved here as remarked above.
Moreover, for $1<m<2$ it is shown in \cite[Theorem 3.9]{burgerfetecauhuang} for $N=1$ that for every $L>0$ there exists a coefficient $\varepsilon>0$ such that there is a stationary solution of \eqref{eq1b} with $\mathrm{supp}\,\rho=[-L,L]$ which is symmetric and monotonically decreasing on $\{x>0\}$. Again, we extended this result in Theorem \ref{existenceviakreinrutmanforgeneralm} to arbitrary dimensions. In the numerical part of \cite{burgerfetecauhuang}, it is suggested that in this case a compactly supported stationary solution of \eqref{eq1b} exists for any coefficient $\varepsilon$ smaller than a constant $\varepsilon_1>0$ and that these stationary solutions are just local minimizers but could turn into global minimizers for coefficients $\varepsilon$ smaller than some $\varepsilon_0<\varepsilon_1$. As remarked above, this statement is proved here only in terms of stationary solutions of \eqref{eq1b} which are global minimizers of $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$, i.e.\ for $0<\varepsilon\leq\varepsilon_0$.
As also pointed out in Lemma \ref{noexistenceofglobminformsmaller2} and Remark \ref{noexistenceofcompstatsolformsmaller2}, for $1<m<2$ the condition for being a compact stationary solution of \eqref{eq1b} is less restrictive than the condition for being a global minimizer of $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$: the latter forces the energy to be non-positive, which need not hold for compact stationary solutions of \eqref{eq1b} in case of $1<m<2$ (cf.\ Remark \ref{stationarysolutionswithpositiveenergy}).
By Corollary \ref{propcompandnoncompsol}, we know that we can write the energy of a compactly supported stationary solution $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ of \eqref{eq1b} with $\mathrm{supp}\,\rho=\overline{B_R(0)}$ as \begin{equation*} E[\rho]=\int_{\mathbb{R}^N}\varepsilon\Big(\frac{1}{m}-\frac{1}{2}\Big)\rho^m(y)\,dy-\frac{1}{2}(G\ast\rho)(R). \end{equation*} So, depending on the interaction potential $G$ and the size of the support of the stationary solution of \eqref{eq1b} there could exist a compactly supported stationary solution of \eqref{eq1b} with positive energy for $1<m<2$.
Now, assume that there is a triple $(\varepsilon,R,\rho)$ where each component uniquely determines the other ones and which solves $\rho\nabla(\varepsilon\rho^{m-1}-G\ast\rho)=0$ with $\rho$ being a radially symmetric and monotonically decreasing function $\rho\in C^2(B_R(0))\cap\mathcal{P}(\mathbb{R}^N)\cap C(\mathbb{R}^N)$ and $\mathrm{supp}\,\rho=\overline{B_R(0)}$. In addition, assume that $\varepsilon$ is strictly increasing with the size of the support. Then, there has to exist a compact stationary solution $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ of \eqref{eq1b} which is not a global minimizer. To see this, let $\varepsilon=\varepsilon_0$, i.e.\ there exists a $\rho_0\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ such that $E[\rho_0]=0$ and there is no $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ with $E[\rho]<0$. Then, by Theorem \ref{existenceofglobalmin} we know that $\rho_0$ is a global minimizer of $E$ which has compact support due to Theorem \ref{globmincomp}. Considering Theorem \ref{existenceviakreinrutmanforgeneralm}, there is a radially symmetric and monotonically decreasing stationary solution $\rho\in\mathcal{P}(\mathbb{R}^N)\cap C(\mathbb{R}^N)$ of \eqref{eq1b} with $\mathrm{supp}\,\rho\supset\,\mathrm{supp}\,\rho_0$. Since we assumed the coefficient $\varepsilon$ to increase strictly with $R$, we have $E[\rho]>0$ because of $\varepsilon>\varepsilon_0$.
To sum up, if we were able to prove uniqueness in Theorem \ref{existenceviakreinrutmanforgeneralm} for $1<m<2$ and if we were able to prove that the coefficient $\varepsilon$ is strictly increasing with the size of the support of the resulting unique function, then we would have shown that a unique compactly supported, radially symmetric and monotonically decreasing stationary solution of \eqref{eq1b} exists for coefficients $\varepsilon$ smaller than some value $\varepsilon_1$. This stationary solution is a global minimizer of $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ for coefficients $\varepsilon\leq\varepsilon_0$ but loses this property for coefficients with a larger value. In particular, we would have shown that the threshold $\varepsilon_0$ for the existence of a global minimizer of $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ is strictly smaller than the threshold $\varepsilon_1$ for the existence of a compact stationary solution of \eqref{eq1b}.
The value $\varepsilon_1$ is presumably determined by the condition for a stationary solution $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ of \eqref{eq1b} to be compact, i.e.\ by Remark \ref{differentcompcond} and Theorem \ref{symmetryofstationarysolutions} it is presumably the smallest coefficient such that there is no radially symmetric and monotonically decreasing $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ satisfying \begin{equation*} \varepsilon\rho^{m-1}(x)-(G\ast\rho)(x)<0 \end{equation*} in $\mathrm{supp}\,\rho$. In \cite[Section 4.2]{burgerfetecauhuang}, it is also shown formally for $1<m<2$ that the non-linear eigenvalue problem \begin{equation*} \varepsilon\rho^{m-1}(x)-(G\ast\rho)(x)=0 \end{equation*} governs for $N=1$ the limiting profile for a stationary solution of \eqref{eq1b} with support $[-L,L]$ and $L\rightarrow\infty$.
So, for $1<m<2$ the threshold for the existence of a compact stationary solution $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ of \eqref{eq1b} can be estimated by \begin{equation}
\varepsilon_1<\frac{2}{m}\varepsilon_0=\sup_{\rho\in L^m\cap\mathcal{P}}\frac{\int_{\mathbb{R}^N}(G\ast\rho)(x)\rho(x)\,dx}{\|\rho\|_{L^m}^m}. \end{equation} We obtain a strict inequality since the function attaining the supremum is a global minimizer of the energy $E$ in $L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ with coefficients $\varepsilon_0$ and has to be compactly supported.
Considering equation \eqref{equationforconnectednoncompactsolution} in Corollary \ref{propcompandnoncompsol}, we know that every non-compact stationary solution $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ of \eqref{eq1b} satisfies $\varepsilon=\|\rho\|_{L^m}^{-m}\int_{\mathbb{R}^N}(G\ast\rho)(x)\rho(x)\,dx$. Therefore, we can estimate that under our assumptions a stationary solution $\rho\in L^m(\mathbb{R}^N)\cap\mathcal{P}(\mathbb{R}^N)$ of \eqref{eq1b} cannot exist for $1<m<2$ if $\varepsilon\geq\frac{2}{m}\varepsilon_0$.
\end{document} |
\begin{document}
\title[]{On vanishing coefficients of algebraic power series over fields of positive characteristic} \author{Boris Adamczewski}
\address{ CNRS, Universit\'e de Lyon, Universit\'e Lyon 1\\ Institut Camille Jordan \\ 43 boulevard du 11 novembre 1918 \\ 69622 Villeurbanne Cedex, France} \email{[email protected]}
\author{Jason P.~Bell} \thanks{The first author was supported by ANR grants Hamot and SubTile. The second author was supported by NSERC grant 31-611456.}
\address{ Department of Mathematics\\ Simon Fraser University\\ Burnaby, BC, Canada\\
V5A 1S6}
\email{[email protected]}
\begin{abstract} Let $K$ be a field of characteristic $p>0$ and let $f(t_1,\ldots ,t_d)$ be a power series in $d$ variables with coefficients in $K$ that is algebraic over the field of multivariate rational functions $K(t_1,\ldots ,t_d)$. We prove a generalization of both Derksen's recent analogue of the Skolem--Mahler--Lech theorem in positive characteristic and a classical theorem of Christol, by showing that the set of indices $(n_1,\ldots ,n_d)\in \mathbb{N}^d$ for which the coefficient of $t_1^{n_1}\cdots t_d^{n_d}$ in $f(t_1,\ldots ,t_d)$ is zero is a $p$-automatic set. Applying this result to multivariate rational functions leads to interesting effective results concerning some Diophantine equations related to $S$-unit equations and more generally to the Mordell--Lang Theorem over fields of positive characteristic. \end{abstract}
\maketitle
\tableofcontents
\section{Introduction}\label{introduction} The Skolem--Mahler--Lech theorem is a celebrated result which describes the set of solutions in $n$ to the equation $a(n)=0$, where $a(n)$ is a sequence satisfying a linear recurrence over a field of characteristic $0$. We recall that if $K$ is a field and $a$ is a $K$-valued sequence, then $a$ satisfies a linear recurrence over $K$ if there exists a natural number $m$ and values $c_1,\ldots ,c_m\in K$ such that \begin{displaymath} a(n)=\sum_{i=1}^m c_i a(n-i) \end{displaymath}
for all sufficiently large values of $n$. The zero set of the linear recurrence $a$ is defined
by
\begin{displaymath} \mathcal Z(a) := \left\{ n \in \mathbb N \mid a(n)=0 \right\} \, . \end{displaymath} The Skolem--Mahler--Lech theorem can then be stated as follows.
\begin{thm}[Skolem--Mahler--Lech] Let $a$ be a linear recurrence over a field of characteristic $0$. Then the set $\mathcal Z(a)$ is a union of a finite set and a finite number of infinite arithmetic \label{thm: SMLlr} progressions. \end{thm}
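For instance (a toy example added for illustration), the rational-valued sequence defined by $a(n)=1+(-1)^n$ satisfies the linear recurrence $a(n)=a(n-2)$ and its zero set $\mathcal Z(a)$ consists exactly of the odd natural numbers, an infinite arithmetic progression.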
This result was first proved for linear recurrences over the rational numbers by Skolem \cite{Sk}. It was next extended to linear recurrences over the algebraic numbers by Mahler \cite{Mah}. The version above was proven first by Lech \cite{Lech} and later by Mahler \cite{Mah1, Mah2}. More details about the history of this theorem can be found in the book by Everest {\it et al.} \cite{EPSW}.
Though the conclusion of the Skolem--Mahler--Lech theorem obviously holds for linear recurrences defined over finite fields, this is not the case for infinite fields $K$ of positive characteristic. The simplest counter-example was given by Lech \cite{Lech}. Throughout this paper, $p$ will denote a prime number. Let $K=\mathbb{F}_p(t)$ be the field of rational functions in one variable over $\mathbb{F}_p$. Let \begin{displaymath} a(n) :=(1+t)^n-t^n-1 \, . \end{displaymath} We can observe that the sequence $a$ satisfies the recurrence \begin{displaymath} a(n)\ = \ (2+2t)a(n-1)-(1+3t+t^2)a(n-2)+(t+t^2)a(n-3) \end{displaymath}
for $n>3$, while \begin{displaymath} \mathcal Z(a) = \{1,p,p^2,p^3,\ldots\} \, . \end{displaymath}
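For concreteness, this zero set is easy to confirm on an initial range of indices; the following short computational sketch (ours, using SymPy with the illustrative choice $p=5$) does so.
\begin{verbatim}
# Illustrative check (ours): over F_5[t], the sequence a(n) = (1+t)^n - t^n - 1
# vanishes exactly at n = 1 and at the powers of 5 in the range tested.
from sympy import symbols, Poly, GF

t = symbols('t')
p = 5

def a(n):
    # coefficients are reduced mod p, i.e. we work in F_p[t]
    return Poly((1 + t)**n - t**n - 1, t, domain=GF(p))

print([n for n in range(1, 200) if a(n).is_zero])   # [1, 5, 25, 125]
\end{verbatim}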
More recently, Derksen \cite{Der} gave more pathological examples, which show that the correct analogue of the Skolem--Mahler--Lech theorem in positive characteristic is much more subtle. For example, one has
$$
\mathcal Z(a) = \{p^n \mid n \in \mathbb N\} \cup \{p^n + p^m \mid n, m \in \mathbb N\} \, ,
$$ for the linear recurrence $a$ defined over the field $\mathbb{F}_p(x,y,z)$ by
\begin{displaymath}
a(n) := (x + y + z)^n - (x + y)^n - (x + z)^n - (y + z)^n + x^n + y^n + z^n \, .
\end{displaymath} Derksen noted that while pathological examples of zero sets of linear recurrences do exist in characteristic $p$, the base-$p$ expansions of the natural numbers in the zero set are still well behaved. In fact, he proved the remarkable result that the zero set of a linear recurrence can always be described in terms of finite automata \cite{Der}.
\begin{thm}[Derksen]\label{thm:derksen} Let $a$ be a linear recurrence over a field $K$ of characteristic $p$. Then the set $\mathcal{Z}(a)$ is $p$-automatic. \end{thm}
We recall that an infinite sequence $a$ with values in a finite set is said to be $p$-automatic if $a(n)$ is a finite-state function of the base-$p$ representation of $n$. Roughly, this means that there exists a finite automaton taking the
base-$p$ expansion of $n$ as input and producing the term $a(n)$ as output. A set ${\mathcal E}\subset \mathbb N$ is said to be $p$-automatic if there exists a finite automaton that reads as input the base-$p$ expansion of $n$ and accepts this integer (producing as output the symbol $1$) if $n$ belongs to ${\mathcal E}$, otherwise this automaton rejects the integer $n$, producing as output the symbol $0$.
Let us give a formal definition of both notions. Let $k\ge 2$ be a natural number. We let $\Sigma_k$ denote the alphabet $\left\{0,1,\ldots,k-1\right\}$. A $k$-automaton\index{$k$-automaton} is a $6$-tuple \begin{displaymath} {\mathcal A} = \left(Q,\Sigma_k,\delta,q_0,\Delta,\tau\right) , \end{displaymath} where $Q$ is a finite set of states, $\delta:Q\times \Sigma_k\rightarrow Q$ is the transition function, $q_0$ is the initial state, $\Delta$ is the output alphabet and $\tau : Q\rightarrow \Delta$ is the output function. For a state $q$ in $Q$ and for a finite word $w=w_1 w_2 \cdots w_n$ on the alphabet $\Sigma_k$, we define $\delta(q,w)$ recursively by $\delta(q,w)=\delta(\delta(q,w_1w_2\cdots w_{n-1}),w_n)$. Let $n\geq 0$ be an integer and let $w_r w_{r-1}\cdots w_1 w_0$ in $\left(\Sigma_k\right)^{r+1}$ be the base-$k$ expansion of $n$. Thus $n=\sum_{i=0}^r w_i k^{i} :=[w_rw_{r-1}\cdots w_0]_k$. We denote by $w(n)$ the word $w_0 w_1 \cdots w_r$.
\begin{defn}{\em A sequence $(a_n)_{n\geq 0}$ is said to be $k$-automatic if there exists a $k$-automaton ${\mathcal A}$ such that $a_n=\tau(\delta(q_0,w(n)))$ for all $n\geq 0$.} \end{defn}
\begin{defn}{\em A set ${\mathcal E}\subset \mathbb N$\index{automatic set} is said to be recognizable by a finite $k$-automaton, or for short $k$-automatic, if the characteristic sequence of ${\mathcal E}$, defined by $a_n=1$ if $n\in {\mathcal E}$ and $a_n=0$ otherwise, is a $k$-automatic sequence. } \end{defn}
More generally, feeding a finite automaton with $d$-tuples of nonnegative integers leads to the notion of $p$-automatic subsets of $\mathbb N^d$. Some background on automata theory, including examples, formal definitions of multidimensional automatic sequences and sets, and their extension to arbitrary finitely generated abelian groups, are given in Section \ref{Salon}.
\begin{rem}{\em Let us make a few important remarks.
\begin{itemize}
\item[$\bullet$] In the previous definitions, we chose the convention that the base-$k$ expansion of $n$ is scanned from left to right. Our automata thus read the input starting with the most significant digit. We recall that it is well-known that the class of $k$-automatic sets or sequences remains unchanged when choosing to read the input starting from the least significant digit (see for instance Chapter V of \cite{Eilenberg} or Chapter 5 of \cite{AS}).
\item[$\bullet$] One could also ask whether the base $k$ plays an important role here. As proved in a fundamental paper of Cobham \cite{Cob69}, this is actually the case. Periodic sets, that is, sets obtained as a union of a finite set and a finite number of infinite arithmetic progressions, are exactly those that are $k$-automatic for every integer $k\geq 2$. In addition, an infinite aperiodic $k$-automatic set is also $k^n$-automatic for every positive integer $n$, while it cannot be $\ell$-automatic if $k$ and $\ell$ are two multiplicatively independent integers.
\item[$\bullet$] The class of $k$-automatic sets is closed under various natural operations such as intersection, union and complement (see for instance Chapter V of \cite{Eilenberg} or Chapter 5 of \cite{AS}). \end{itemize}} \end{rem}
On the other hand, it is well known that if $K$ is a field and $a$ is a $K$-valued sequence, then $a$ satisfies
a linear recurrence over $K$ if and only if the power series
$$f(t)=\sum_{n=0}^{\infty} a(n)t^n$$ is the power series expansion of a rational function. For instance,
Mahler \cite{Mah, Mah1, Mah2} worked with rational power series rather than linear recurrences
when proving what we now call the Skolem--Mahler--Lech theorem. Let
$$
\mathcal{Z}(f) := \{n \mid a(n) = 0 \} \, .
$$Then Derksen's theorem
can be restated as follows: let $K$ be a field of characteristic $p$ and let $f(t)\in K[[t]]$ be the power series expansion of a rational function; then the set $\mathcal{Z}(f)$ is $p$-automatic.
This formulation of Derksen's theorem is in the same spirit as another famous result involving automata theory and known as Christol's theorem \cite{Christol}.
\begin{thm}[Christol]\label{thm:christol} Let $q$ be a positive integer power of $p$. Then $f(t)=\sum_{n= 0}^{\infty} a(n)t^n\in \mathbb F_q[[t]]$ is algebraic over $\mathbb F_q(t)$ if and only if the sequence $a$ is $p$-automatic. \end{thm}
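As an illustration of the nontrivial direction of Christol's theorem, recall that the generating series $F(z)=\sum_{n\geq 0} t_n z^n$ of the Thue--Morse sequence (where $t_n$ is the parity of the binary digit sum of $n$; see Section \ref{Salon}) is well known to satisfy $(1+z)^3F^2+(1+z)^2F+z=0$ over $\mathbb F_2$, so $F$ is algebraic over $\mathbb F_2(z)$ while $(t_n)_{n\geq 0}$ is $2$-automatic. The following small SymPy sketch (ours) verifies this equation modulo $z^{N}$ for a truncation of $F$.
\begin{verbatim}
# Sanity check (ours) of the classical algebraic equation satisfied by the
# Thue--Morse series over F_2, verified modulo z^N.
from sympy import symbols, Poly, GF

z = symbols('z')
N = 64
tm = lambda n: bin(n).count('1') % 2          # Thue--Morse sequence
F = sum(tm(n) * z**n for n in range(N))       # truncation of the series

E = Poly((1 + z)**3 * F**2 + (1 + z)**2 * F + z, z, domain=GF(2))
print(E.rem(Poly(z**N, z, domain=GF(2))))     # Poly(0, z, modulus=2)
\end{verbatim}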
The main aim of this paper is to produce a simultaneous multivariate generalization of both the theorem of Derksen and the theorem of Christol.
Given a multivariate power series $$f(t_1,\ldots,t_d)= \sum_{(n_1,\ldots,n_d)\in\mathbb N^d}a(n_1,\ldots,n_d) t_1^{n_1}\cdots t_d^{n_d}\in K[[t_1,\ldots,t_d]]\, ,$$ we define the set of vanishing coefficients of $f$ by \begin{displaymath} \mathcal Z(f) = \{ (n_1,\ldots,n_d)\in \mathbb N^d \mid a(n_1,\ldots,n_d)=0\} \,.
\end{displaymath}
Our main result reads as follows.
\begin{thm} Let $K$ be a field of characteristic $p$ and let $f(t_1,\ldots ,t_d) \in K[[t_1,\ldots ,t_d]]$ be a power series that is algebraic over the field of multivariate rational functions $K(t_1,\ldots ,t_d)$. Then the set ${\mathcal Z}(f)$ is $p$-automatic. \label{thm: main} \end{thm}
Let us make a few comments on this result.
\begin{itemize} \item[$\bullet$] In the case that $d=1$ and $f(t)$ is chosen to be the power series expansion of a rational function in Theorem \ref{thm: main}, we immediately obtain Derksen's theorem (Theorem \ref{thm:derksen}). We do not obtain his finer characterization, but, as explained in Section \ref{conclude}, it is not possible to obtain a significantly improved characterization of zero sets even for multivariate rational power series.
\item[$\bullet$] In the case that $d=1$ and $K$ is chosen to be a finite field in Theorem \ref{thm: main},
we cover the more difficult direction of Christol's theorem (Theorem \ref{thm:christol}). Indeed, if $f(t)=\sum_{n=0}^{\infty} a(n) t^n \in K[[t]]$ is an algebraic power series, then for each $x\in K$ the function $f(t)-x/(1-t)$ is algebraic. Theorem \ref{thm: main} thus implies that the set $\{n\in \mathbb{N} \mid a(n)=x\}$ is $p$-automatic for all $x\in K$. This immediately implies that the sequence $a$ is $p$-automatic.
\item[$\bullet$] Theorem \ref{thm: main} can actually take a stronger form. Let ${\mathcal E}\subset \mathbb N^d$. The following conditions are equivalent. \begin{itemize} \item[\textup{(i)}] The set ${\mathcal E}$ is $p$-automatic. \item[\textup{(ii)}] ${\mathcal E}={\mathcal Z(f)}$ for some algebraic power series with coefficients over a field of characteristic $p$. \end{itemize} Indeed, it is known \cite{Salon1987} that given a $p$-automatic set ${\mathcal E}\subset \mathbb N^d$, the formal power series $$f(t_1,\ldots,t_d)=\sum_{(n_1,\ldots,n_d)\in \mathcal E} t_1^{n_1}\cdots t_d^{n_d}$$ is algebraic over $\mathbb F_p(t_1,\ldots,t_d)$. From the latter property and Theorem \ref{thm: main}, we also deduce the following result. Let $K$ be a field of characteristic $p$ and let $$f(t_1,\ldots,t_d)= \sum_{(n_1,\ldots,n_d)\in\mathbb N^d}a(n_1,\ldots,n_d) t_1^{n_1}\cdots t_d^{n_d}\in K[[t_1,\ldots,t_d]]\, $$ be a power series that is algebraic over the field of multivariate rational functions $K(t_1,\ldots ,t_d)$. For $x\in K$, let $$
a^{-1}(x) := \left\{ (n_1,\ldots,n_d)\in \mathbb N^d \mid a(n_1,\ldots,n_d)=x\right\} \, . $$ Then for every $x\in K$ the formal power series $$ f_x(t_1,\ldots,t_d):=\sum_{(n_1,\ldots,n_d)\in a^{-1}(x)} t_1^{n_1}\cdots t_d^{n_d} $$ is also algebraic. In the particular case where $K$ is a finite field, this result was first proved by Furstenberg \cite{Fur} (see also the more recent result of Kedlaya \cite{Ked} for a generalization to Hahn's power series with coefficients in a finite field).
\item[$\bullet$] No such multivariate generalization of the Skolem--Mahler--Lech theorem exists in characteristic $0$. For example, if one takes the rational bivariate power series $$f(x,y)=\sum_{n,m} (n^3 - 2^m) x^n y^m\in \mathbb{Q}[[x,y]]\, ,$$ then ${\mathcal Z}(f)=\{(n,m) \mid m\equiv 0~(\bmod~3), n=2^{m/3}\}$. This shows that there is no natural way to express the set of vanishing coefficients of $f$ in terms of more general arithmetic progressions or in terms of automatic sets. In fact, finding zero sets of coefficients of multivariate rational power series with integer coefficients is often equivalent to very difficult classes of Diophantine problems which cannot be solved at this moment, such as for instance finding an effective procedure to solve all $S$-unit equations (see Section \ref{sunit} for more details). In Section \ref{decidability}, we also give a Diophantine problem related to linear recurrences which is conjectured in \cite{CMP} to be undecidable and which, as shown in the proof of Theorem \ref{thm:cmp}, is equivalent to describing the zero sets of coefficients of a class of simple multivariate rational power series with integer coefficients.
\end{itemize}
Our proof of Theorem \ref{thm: main} involves using methods of Derksen as well as more advanced techniques from automata theory reminiscent of works of Christol \cite{Christol}, Denef and Lipshitz \cite{DL}, Harase \cite{Har88}, Shariff and Woodcock \cite{SW} among others. We first consider the action of a certain infinite semigroup on the ring of power series over a field of characteristic $p$. We use the fact that algebraic power series have a finite orbit under the action of this semigroup to apply Derksen's ``Frobenius splitting'' technique which allows us to show that the set of vanishing coefficients is necessarily $p$-automatic. An especially important aspect of the proof of Theorem \ref{thm:derksen} is that each step can be made effective. We prove that this is also the case with Theorem \ref{thm: main}.
\begin{thm}\label{thm: eff} Let $K$ be a field of positive characteristic and let $f(t_1,\ldots ,t_d) \in K[[t_1,\ldots ,t_d]]$ be a power series that is algebraic over the field of multivariate rational functions $K(t_1,\ldots ,t_d)$. Then the set ${\mathcal Z}(f)$ can be effectively determined. Furthermore, the following properties are decidable.
\begin{itemize} \item[{\rm (i)}] the set ${\mathcal Z}(f)$ is empty.
\item[{\rm (ii)}] the set ${\mathcal Z}(f)$ is finite.
\item[{\rm (iii)}] the set ${\mathcal Z}(f)$ is periodic, that is, formed by the union of a finite set and of a finite number of ($d$-dimensional) arithmetic progressions.
\end{itemize} \noindent In particular, when ${\mathcal Z}(f)$ is finite, one can determine (in a finite amount of time) all its elements. \end{thm}
\begin{rem}{\em When we say that the set ${\mathcal Z}(f)$ can be effectively determined, this means that there is an algorithm that produces a $p$-automaton that generates ${\mathcal Z}(f)$ in a finite amount of time. Furthermore, there exists an algorithm that allows one to determine in a finite amount of time whether or not ${\mathcal Z}(f)$ is empty, finite, or periodic.} \end{rem}
As we will illustrate in Sections \ref{decidability}, \ref{sunit} and \ref{MLT}, applying Theorem \ref{thm: eff} to multivariate rational functions actually leads to interesting effective results concerning some Diophantine equations related to $S$-unit equations and more generally to the Mordell--Lang Theorem over fields of positive characteristic.
The outline of this paper is as follows. Our Diophantine applications are discussed in Sections \ref{decidability}, \ref{sunit} and \ref{MLT}. In Section \ref{Salon}, we recall some basic background on automata theory. We define in particular the notion of automatic sets of $\mathbb N^d$ and more generally of automatic subsets of finitely generated abelian groups. The latter notion does not appear to have been introduced earlier and may be of independent interest. In Section \ref{proof}, we prove Theorem \ref{thm: main}. In Sections \ref{section: ker} and \ref{section: eff}, we make the proof of Theorem \ref{thm: main} effective, proving Theorem \ref{thm: eff}. Finally, we conclude our paper with some comments in Section \ref{conclude}.
\section{Linear recurrences and decidability}\label{decidability}
There are many different proofs and extensions of the Skolem--Mahler--Lech theorem in the literature (see for instance \cite{Bez,Han86,vdP,EPSW}). These proofs all use $p$-adic methods in some way, although the result is valid in any field of characteristic $0$. This seems to be responsible for a well-known deficiency of the Skolem--Mahler--Lech theorem: all known proofs are ineffective. This means that we do not know any algorithm that allows us to
determine the set ${\mathcal Z}(a)$ for a given linear recurrence $a(n)$ defined over a field of characteristic $0$. We refer the reader to \cite{EPSW} and to the recent discussion in \cite{Ta08} for more details. It is actually still unknown whether the emptiness of ${\mathcal Z}(a)$ is decidable. In fact, it seems unclear that one should even expect it to be decidable. In this direction, let us recall the following conjecture from \cite{CMP}. Given linear recurrences $a_1(n),\ldots,a_d(n)$ over a field $K$, we let $$ {\mathcal Z}(a_1,\ldots,a_d) := \left\{(n_1,\ldots,n_d)\in \mathbb N^d \mid a_1(n_1)+\cdots + a_d(n_d)=0 \right\} \, . $$ It was conjectured in \cite{CMP} that, if $K=\mathbb Q$, the property $$ {\mathcal Z}(a_1,\ldots,a_d) \not = \emptyset $$ is undecidable for all sufficiently large positive integers $d$.
As mentioned in the introduction, the situation is drastically different for fields of positive characteristic. Indeed, Derksen \cite{Der} proved that each step of the proof of Theorem \ref{thm:derksen} can be made effective. In particular, there exists an algorithm that allows one to decide whether the set ${\mathcal Z}(a)$ is empty or not in a finite amount of time. We give below a generalization of Derksen's theorem to an arbitrary number of linear recurrences. It illustrates well the relevance of Theorem \ref{thm: eff}.
\begin{thm}\label{thm:cmp} Let $K$ be a field of characteristic $p$, $d$ a positive integer, and let $a_1(n),\ldots,a_d(n)$ be linear recurrences over $K$. Then ${\mathcal Z}(a_1,\ldots,a_d)$ is a $p$-automatic set that can be effectively determined. In particular, the property $$ {\mathcal Z}(a_1,\ldots,a_d) \not = \emptyset $$ is decidable. \end{thm}
Note that, in addition, we can decide whether such a set ${\mathcal Z}(a_1,\ldots,a_d)$ is finite or periodic.
\begin{proof} In view of Theorem \ref{thm: eff}, it suffices to prove that there exists an explicit multivariate rational function $f(t_1,\ldots,t_d) \in K(t_1,\ldots,t_d)$ such that ${\mathcal Z}(f)={\mathcal Z}(a_1,\ldots,a_d)$.
Let $i\in\{1,\ldots,d\}$. Since $a_i$ is a linear recurrence over $K$, we have that $f_i(t):=\sum_{n\geq 0} a_i(n)t^n$ is a rational function. Thus, $$ f(t_1,\ldots,t_d) := \sum_{i=1}^d \left( f_i(t_i)\cdot \prod_{j\not=i} \frac{1}{1-t_j}\right) $$ is a multivariate rational function in $K(t_1,\ldots,t_d)$. Furthermore, this definition implies that $$f(t_1,\ldots,t_d) = \sum_{(n_1,\ldots,n_d)\in \mathbb N^d} (a_1(n_1)+\cdots +a_d(n_d)) t_1^{n_1}\cdots t_d^{n_d} \, .$$ We thus deduce that ${\mathcal Z}(f)={\mathcal Z}(a_1,\ldots,a_d)$. This ends the proof. \end{proof}
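The construction used in this proof is easy to test numerically. The following SymPy sketch (ours) takes two sample linear recurrences, $a_1(n)=2^n$ and $a_2(n)=n$, and checks on truncated series that the coefficient of $t_1^{i}t_2^{j}$ in $f_1(t_1)/(1-t_2)+f_2(t_2)/(1-t_1)$ equals $a_1(i)+a_2(j)$; the coefficient identity is characteristic-free, so we work over $\mathbb Q$ for simplicity.
\begin{verbatim}
# Sketch (ours): the coefficient of t1^i t2^j in f1(t1)/(1-t2) + f2(t2)/(1-t1)
# is a1(i) + a2(j), checked on truncations for a1(n) = 2^n and a2(n) = n.
from sympy import symbols, expand

t1, t2 = symbols('t1 t2')
N = 6
geo = lambda t: sum(t**n for n in range(N))          # truncation of 1/(1-t)
f1 = sum(2**n * t1**n for n in range(N))             # generating series of a1
f2 = sum(n * t2**n for n in range(N))                # generating series of a2
f = expand(f1 * geo(t2) + f2 * geo(t1))              # truncation of f(t1,t2)

for i in range(N):
    for j in range(N):
        assert f.coeff(t1, i).coeff(t2, j) == 2**i + j
print("coefficient of t1^i t2^j equals a1(i) + a2(j)")
\end{verbatim}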
\section{Linear equations over multiplicative groups}\label{sunit}
In this section, we discuss some Diophantine equations that generalize the famous $S$-unit equations (see for instance the survey \cite{EGST}). More precisely, given a field $K$ and a finitely generated subgroup $\Gamma$ of $K^*$, we consider linear equations of the form \begin{equation}\label{eq: sunit} c_1X_1+\cdots +c_d X_d =1 \, , \end{equation} where $c_1,\ldots,c_d$ belong to $K$ and where we look for solutions in $\Gamma^d$.
These equations have a long history. Let $S$ be a finite set of prime numbers and $\Gamma\subseteq \mathbb Q^*$ the multiplicative group generated by the elements of $S$. In 1933, Mahler \cite{Mah33} proved that for all nonzero rational numbers $a$ and $b$ the equation \begin{equation}\label{eq: 2v} aX+bY=1 \end{equation} has only a finite number of solutions in $\Gamma^2$. Lang \cite{Lan60} later generalized this result by proving that for all $a$ and $b$ belonging to $\mathbb C^*$ and all subgroups of finite rank $\Gamma$ of $\mathbb C^*$, Equation (\ref{eq: 2v}) has only a finite number of solutions in $\Gamma^2$. Furthermore, in the case where $\Gamma$ is a subgroup of $\mathbb Q^*$, there exists an effective method based on the theory of linear forms in logarithms to determine all solutions of Equation (\ref{eq: 2v}).
When the number of variables $d$ is larger than $2$, one can no longer expect that Equation (\ref{eq: sunit}) necessarily has only a finite number of solutions. However, the subspace theorem can be used to prove that such an equation has only a finite number of nondegenerate solutions; that is, solutions with the property that no proper subsum vanishes \cite{Ev84,PS91}. Furthermore, it is possible to use some quantitative version of the subspace theorem to bound the number of nondegenerate solutions. In this direction, the following general and very strong result was obtained by Evertse, Schlickewei and W.~M.~Schmidt \cite{ESS02}: given a field $K$ of characteristic $0$ and $\Gamma$ a multiplicative subgroup of rank $r$ of $K^*$, Equation (\ref{eq: sunit}) has at most $\exp ((6d)^{3d}(r+1))$ nondegenerate solutions. However, all general known results concerning more than two variables are ineffective.
The situation in characteristic $p$ is similar to the one encountered with the Skolem--Mahler--Lech theorem. The Frobenius endomorphism may be responsible for the existence of ``pathological solutions''. Indeed, it is easy to check that, for every positive integer $q$ that is a power of $p$, the pair $(t^q,(1-t)^q)$ is a solution of the equation $$ X+Y=1 $$ in $\Gamma^2$, where $\Gamma$ is the multiplicative subgroup of $\mathbb F_p(t)^*$ generated by $t$ and $1-t$. In fact, if we take $K=\mathbb F_p(t)$ and $\Gamma=\langle t,(1-t)\rangle$, we can find more sophisticated examples. As observed in \cite{Masser}, the equation $$ X+Y-Z=1 $$ has, for every pair of positive integers $(n,m)$, the nondegenerate solution $$ X=t^{(p^n-1)p^m},\;\; Y=(1-t)^{p^{n+m}},\;\; Z=t^{(p^n-1)p^m}(1-t)^{p^m}\, . $$ Thus, there is no hope of obtaining in this framework results similar to those mentioned previously. Concerning Equation (\ref{eq: 2v}), Voloch \cite{Vo98} gave interesting results. He obtained, in particular, conditions that ensure the finiteness of the number of solutions (with explicit bounds for the number of solutions). Masser \cite{Masser} obtained a result concerning the structure of the solutions of the general Equation (\ref{eq: sunit}). His aim was actually to prove a conjecture of K. Schmidt concerning mixing properties of algebraic $\mathbb Z^d$-actions (see \cite{Masser,Sch01,SW93} for more details on this problem).
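This family of solutions is straightforward to verify by hand, using that $u\mapsto u^p$ is a ring endomorphism in characteristic $p$; the following short SymPy check (ours, with the illustrative choice $p=3$ and $n=m=1$) confirms it in $\mathbb F_3[t]$.
\begin{verbatim}
# Check (ours) that X + Y - Z = 1 for the family above, with p = 3, n = m = 1.
from sympy import symbols, Poly, GF

t = symbols('t')
p, n, m = 3, 1, 1

X = t**((p**n - 1) * p**m)
Y = (1 - t)**(p**(n + m))
Z = t**((p**n - 1) * p**m) * (1 - t)**(p**m)

print(Poly(X + Y - Z, t, domain=GF(p)))   # Poly(1, t, modulus=3)
\end{verbatim}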
As a consequence of Theorem \ref{thm: eff}, we are able to give a satisfactory effective solution to the general equation (\ref{eq: sunit}) over fields of positive characteristic, proving that the set of solutions is $p$-automatic in a natural sense. We note that the notion of an automatic subset of a finitely generated abelian group is given in Section \ref{Salon} (see Definition \ref{def:autofgg} and Proposition \ref{prop:fggcarac}).
\begin{thm}\label{thm: sunit} Let $K$ be a field of characteristic $p$, let $c_1,\ldots,c_d\in K^*$, and let $\Gamma$ be a finitely generated multiplicative subgroup of $K^*$. Then the set of solutions in $\Gamma^d$ of the equation $$ c_1X_1+\cdots + c_dX_d =1 $$ is a $p$-automatic subset of $\Gamma^d$ that can be effectively determined. \end{thm}
\begin{proof} Let $$S:= \left\{ (x_1,\ldots,x_d)\in \Gamma^{d} \mid c_1x_1+\cdots + c_dx_d =1\right\}\, .$$ Our aim is to prove that $S$ is $p$-automatic and can be effectively determined.
We first fix some notation. Let $g_1,\ldots,g_m$ be a set of generators of $\Gamma$ and let $\Phi:\mathbb{Z}^{m}\rightarrow \Gamma$ be the surjective group homomorphism that sends the $i$th vector of the standard basis of $\mathbb{Z}^m$ to $g_i$. This allows us to define a surjective group homomorphism $\tilde{\Phi}:(\mathbb{Z}^{m})^d\rightarrow \Gamma^d$ by $\tilde{\Phi}({\bf x}_1,\ldots,{\bf x}_d)= (\Phi({\bf x}_1),\ldots, \Phi({\bf x}_d))$. By Proposition \ref{prop:automorph}, it is enough to show that $\tilde{\Phi}^{-1}(S)$ is a $p$-automatic subset of $(\mathbb{Z}^{m})^d\simeq \mathbb Z^{m\times d}$. Let ${\mathcal E} \ := \ \{\pm 1\}^m.$ Given ${\bf n}:=(n_1,\ldots ,n_m)\in \mathbb N^m$ and ${\bf a}:=(a_1, \ldots, a_m)\in \mathcal E$, we let ${\bf a}\cdot {\bf n}:= (a_1n_1,\ldots,a_mn_m)$ denote the ordinary coordinate-wise multiplication. Given $A\subseteq \mathbb N^m$, we also set ${\bf a}\cdot A:= \left\{ {\bf a}\cdot {\bf n} \mid {\bf n} \in A\right\}$. For every ${\bf a}:=({\bf a}_1, \ldots,{\bf a}_d)\in {\mathcal E}^d$, we set $$ S_{\bf a} := \left\{({\bf n}_1,\ldots,{\bf n}_d)\in \mathbb N^{m\times d} \mid c_1\Phi({\bf a}_1 \cdot {\bf n}_1)+ \cdots + c_d \Phi({\bf a}_d \cdot {\bf n}_d) =1\right\}\, . $$ Thus \begin{equation}\label{eq: phi} \tilde{\Phi}^{-1}(S)= \bigcup_{{\bf a}\in {\mathcal E}^d} {\bf a}\cdot S_{\bf a} \, . \end{equation} Note that, by (\ref{eq: phi}) and Proposition \ref{prop:autoZequiv}, $\tilde{\Phi}^{-1}(S)$ is a $p$-automatic subset of $\mathbb Z^{m\times d}$ if and only if $S_{\bf a}$ is a $p$-automatic subset of $\mathbb N^{m\times d}$ for every ${\bf a}\in {\mathcal E}^d$.
We let $t_{i,j}$ be indeterminates for $1\le i\le d$ and $1\le j\le m$. We define ${\bf t}_i=(t_{i,1},\ldots ,t_{i,m})$ for $1\le i\le d$. Given ${\bf n}\in \mathbb{N}^m$ and $i\in \{1,2,\ldots ,d\}$, we define ${\bf t}_i^{\bf n}$ to be the product $t_{i,1}^{n_1}\cdots t_{i,m}^{n_m}$. Given ${\bf a}:=({\bf a}_1,\ldots ,{\bf a}_d)\in {\mathcal E}^d$, we define the function $$f_{\bf a}({\bf t}_1,\ldots ,{\bf t}_d):= \sum_{{\bf n}_1,\ldots ,{\bf n}_d\in \mathbb{N}^{m}} \left( -1+\sum_{i=1}^d c_i \Phi({\bf a}_i\cdot {\bf n}_i)\right){\bf t}_1^{{\bf n}_1}\cdots {\bf t}_d^{{\bf n}_d}.$$ This definition ensures that \begin{equation}\label{eq: sa} S_{\bf a} = {\mathcal Z}(f_{\bf a}) \, . \end{equation} For every $i\in \{1,2,\ldots ,d\}$, we also set ${\bf n}_i=(n_{i,1},\ldots, n_{i,m})$ and ${\bf a}_i:=(a_{i,1},\ldots,a_{i,m})$. Let ${\bf e}_j=(0,0,\ldots ,0,1,0\ldots ,0)\in \mathbb{Z}^m$ denote the element whose $j$th coordinate is $1$ and whose other coordinates are $0$. Then for every $i\in \{1,\ldots ,d\}$, we have \begin{eqnarray*} \sum_{{\bf n}_i\in \mathbb{N}^{m}} c_i\Phi({\bf a}_i\cdot {\bf n}_i){\bf t}_i^{{\bf n}_i} &=& c_i\sum_{n_{i,1}=0}^{\infty}\cdots \sum_{n_{i,m}=0}^{\infty} \prod_{j=1}^m \Phi({\bf e}_j)^{a_{i,j}n_{i,j}} t_{i,j}^{n_{i,j}}\\ &=& c_i\prod_{j=1}^m (1-\Phi({\bf e}_j)^{a_{i,j}} t_{i,j})^{-1} \end{eqnarray*} is a rational function. Hence \begin{eqnarray*} f_{\bf a}({\bf t}_1,\ldots ,{\bf t}_d) &=& \prod_{i=1}^d\prod_{j=1}^m (1-t_{i,j})^{-1}\left( -1 + \sum_{i=1}^d c_i \prod_{j=1}^m \frac{(1-t_{i,j})}{(1-\Phi({\bf e}_j)^{a_{i,j}} t_{i,j})}\right) \end{eqnarray*} is a rational function for each ${\bf a}\in {\mathcal E}^d$. Since we get an explicit expression for the function $f_{\bf a}$ (assuming that we explicitly know a set of generators $g_1,\ldots, g_m$ of $\Gamma$), we infer from Theorem \ref{thm: eff} that the set ${\mathcal Z}(f_{\bf a})$ is a $p$-automatic subset of $\mathbb N^{m\times d}$ which can be effectively determined. By (\ref{eq: phi}) and (\ref{eq: sa}), this ends the proof. \end{proof}
\section{An effective result related to the Mordell--Lang theorem }\label{MLT}
The expression ``Mordell--Lang theorem" or ``Mordell--Lang conjecture" serves as a generic appellation which denotes results describing the structure of intersections of the form $$X\cap \Gamma\, ,$$ where $X$ is a subvariety (Zariski closed subset) of a (affine, abelian, or semi-abelian) variety $A$ and $\Gamma$ is a finitely generated subgroup (or even a subgroup of finite rank) of $A$. The case where the variety $A$ is defined over a field of characteristic $0$ has many interesting Diophantine consequences, including the famous Faltings' theorem \cite{Fal}.
On the other hand, simple examples constructed using the Frobenius endomorphism (as in Section \ref{sunit}) show that such intersections may behave differently when the variety $A$ is defined over a field of positive characteristic. Hrushovski \cite{Hru} proved a relative version of the Mordell--Lang conjecture for semi-abelian varieties defined over a field $K$ of positive characteristic. His approach, which makes use of model theory, has then been pursued by several authors (see for instance \cite{MS} and \cite{Ghioca}).
All general results known up to now in this direction seem to be ineffective. The aim of this section is to prove the two following effective statements. We recall that the notion of an automatic subset of a finitely generated abelian group is given in Section \ref{Salon} (see Definition \ref{def:autofgg} and Proposition \ref{prop:fggcarac}).
\begin{thm}\label{thm: MT} Let $K$ be a field of characteristic $p$ and let $d$ be a positive integer. Let $X$ be a Zariski closed subset of ${\rm GL}_d(K)$ and $\Gamma$ a finitely generated abelian subgroup of ${\rm GL}_d(K)$. Then the set $X\cap \Gamma$ is a $p$-automatic subset of $\Gamma$ that can be effectively determined. \end{thm}
Note more generally that, given positive integers $d_1,\ldots,d_n$, the same result holds for Zariski closed subsets of $\prod_{i=1}^n {\rm GL}_{d_i}(K)$. Indeed, we have a natural embedding $\iota$ of $\prod_{i=1}^n {\rm GL}_{d_i}(K)$ as a Zariski closed subset of ${\rm GL}_{d_1+\cdots +d_n}(K)$, where $\iota$ sends an $n$-tuple of invertible matrices in which the $i$th matrix has size $d_i\times d_i$ to the block diagonal matrix with $n$ blocks whose $i$th block is the $i$th coordinate of our $n$-tuple. In fact, under this identification, $\prod_{i=1}^n {\rm GL}_{d_i}(K)$ is the zero set of the linear polynomials $x_{i,j}$ for which $i$ and $j$ have the property that there does not exist a positive integer $k$, $k\le n$, such that $$d_0+\cdots + d_{k-1} < i,j\le d_1+\cdots + d_k\, ,$$ where we take $d_0$ to be zero. Given a Zariski closed subset $X$ of $\prod_{i=1}^n {\rm GL}_{d_i}(K)$, we thus may regard $X$ as a Zariski closed subset of ${\rm GL}_{d_1+\cdots +d_n}(K)$. We note that the additive group ${\rm G}_a(K)$ embeds in ${\rm GL}_2(K)$ by identifying it with the group of unipotent upper-triangular matrices. Moreover, this is easily seen to be a Zariski closed subset of ${\rm GL}_2(K)$. Applying these remarks with $d_1,\ldots ,d_n\in \{1,2\}$, we deduce the following corollary.
\begin{cor}\label{thm: tori} Let $K$ be a field of characteristic $p$ and let $s$ and $t$ be nonnegative integers. Let $X$ be a subvariety of ${\rm G}_a^s(K)\times {\rm G}_m^t(K)$ and $\Gamma$ a finitely generated subgroup of ${\rm G}_a^s(K)\times {\rm G}_m^t(K)$. Then the set $X\cap \Gamma$ is a $p$-automatic subset of $\Gamma$ that can be effectively determined. \end{cor}
We note that one can actually obtain an ineffective version of Theorem \ref{thm: MT} from Corollary \ref{thm: tori}. In fact, one only needs to consider multiplicative tori. To see this, we observe that if $\Gamma$ is a finitely generated abelian subgroup of ${\rm GL}_d(K)$, then by considering Jordan forms, there is some natural number $n$ such that $g^{p^n}$ is diagonalizable for every $g\in \Gamma$. As commuting diagonalizable operators are simultaneously diagonalizable, we may replace $K$ by a finite extension $K'$ that contains the eigenvalues of $g^{p^n}$ as $g$ ranges over a generating set, and assume that $\Gamma^{p^n}$ is a subgroup of $T\cong {\rm G}_m^d(K')$, the invertible diagonal matrices in ${\rm GL}_d(K')$. As $X\cap T$ is Zariski closed in $T$ and $X\cap \Gamma^{p^n}=(X\cap T)\cap \Gamma^{p^n}$, Corollary \ref{thm: tori} applies and so $\Gamma^{p^n}\cap X$ is $p$-automatic. By applying a suitable translate, it follows that the intersection of $X$ with each coset of $\Gamma/\Gamma^{p^n}$ is $p$-automatic. As there are only finitely many cosets, using basic properties of automaticity, we deduce that $\Gamma\cap X$ is $p$-automatic.
It is however less clear whether an effective version of Theorem \ref{thm: MT} can be obtained from Corollary \ref{thm: tori}. Indeed, to determine the intersection using the method described above in practice, one must be able to explicitly find eigenvectors in order to diagonalize elements of $\Gamma^{p^n}$. A necessary step in doing this is to find roots of characteristic polynomials in the algebraic closure of $K$, which seems difficult to carry out explicitly in general.
It is also natural to ask whether a similar version of Theorem \ref{thm: MT} might hold for abelian varieties. We believe this to be the case, but it is not clear whether the result follows from our approach: if $P$ is a point on an abelian variety $X$ over a field of positive characteristic then the points $n\cdot P$ do not appear, in general, to be sufficiently well-behaved to allow one to associate an algebraic generating function, which is necessary to apply our methods.
\begin{proof}[Proof of Theorem \ref{thm: MT}] We first make a few reductions. We let $\Phi: {\rm GL}_d(K)\to \mathbb{A}^{d^2}(K)$ be the injective morphism whose image, $Y$, consists of all points at which the determinant does not vanish. Note that the affine variety ${\rm GL}_d(K)$ is a Zariski open subset of $\mathbb{A}^{d^2}(K)$ and that the Zariski closed subsets of ${\rm GL}_d(K)$ are precisely those obtained by intersecting Zariski closed subsets of $\mathbb{A}^{d^2}(K)$ with ${\rm GL}_d(K)$. By the Hilbert Basis Theorem, a Zariski closed subset of $\mathbb{A}^{d^2}(K)$ is given by the vanishing set of a finite set of polynomials. Thus there are polynomials $P_1,\ldots,P_r\in K[x_{1,1},\ldots ,x_{d,d}]$
such that for $M\in {\rm GL}_d(K)$, $$M\in X \iff P_1(\Phi(M))=\cdots = P_r(\Phi(M))=0\, .$$
It is then enough to consider the case that $\Phi(X)=Z(P)\cap Y$, where $P$ is a single polynomial in the indeterminates $x_{i,j}$ with $1\le i,j\le d$ and $Z(P)$ denotes the set of zeros of $P$. Indeed, $X\cap\Gamma$ is the intersection of the sets obtained by treating each $P_i$ separately, and the class of $p$-automatic sets is (effectively) closed under finite intersections.
Let $\Gamma$ be a finitely generated abelian subgroup of ${\rm GL}_d(K)$ and let $X$ be a Zariski closed subset of ${\rm GL}_d(K)$ such that $\Phi(X)=Z(P)\cap Y$, where $P\in K[x_{1,1},\ldots ,x_{d,d}]$. Our aim is to prove that $X\cap\Gamma$ is a $p$-automatic subset of $\Gamma$. Let $C_1,\ldots ,C_m\in {\rm GL}_d(K)$ be generators of $\Gamma$ and suppose that $\Psi:\mathbb{Z}^{m}\rightarrow \Gamma$ is the surjective group homomorphism defined by $\Psi(e_i)=C_i$ for $1\le i\le m$, where $e_i$ stands for the vector whose $i$th coordinate is $1$ and all other coordinates are $0$. We let ${\bf n}$ denote an $m$-tuple $(n_1,\ldots ,n_m)\in \mathbb{N}^m$. By Proposition \ref{prop:automorph}, $X\cap \Gamma$ is $p$-automatic if $$ S:= \left\{{\bf n}\in \mathbb{Z}^m \mid P(\Phi\circ \Psi({\bf n}) )=0 \right\} $$ is a $p$-automatic subset of $\mathbb Z^m$. Let ${\mathcal E} \ := \ \{\pm 1\}^m.$ Given ${\bf n}:=(n_1,\ldots ,n_m)\in \mathbb N^m$ and ${\bf a}:=(a_1, \ldots, a_m)\in \mathcal E$, we denote by ${\bf a}\cdot {\bf n}:= (a_1n_1,\ldots,a_mn_m)$ the ordinary coordinate-wise multiplication. Given $A\subseteq \mathbb N^m$, we also set ${\bf a}\cdot A:= \left\{ {\bf a}\cdot {\bf n} \mid {\bf n} \in A\right\}$. For every ${\bf a}\in {\mathcal E}$, we set $$ S_{\bf a} := \left\{{\bf n} \in \mathbb N^m \mid P(\Phi\circ \Psi({\bf a}\cdot {\bf n}) )=0 \right\}\, . $$ Note that by Proposition \ref{prop:autoZequiv}, $S$ is a $p$-automatic subset of $\mathbb Z^m$ if and only if $S_{\bf a}$ is a $p$-automatic subset of $\mathbb N^m$ for every ${\bf a}\in {\mathcal E}$.
To see this, let $t_{j}$ be indeterminates for $1\le j\le m$. Given ${\bf n}\in \mathbb{N}^m$, we define ${\bf t}^{\bf n}$ to be the product $t_{1}^{n_1}\cdots t_{m}^{n_m}$. Let ${\bf a}=(a_1,\ldots ,a_m)\in {\mathcal E}$. We set \begin{equation*} f_{\bf a}({\bf t}) := \sum_{{\bf n}\in \mathbb{N}^{m}} \Psi({\bf a \cdot n}) {\bf t}^{\bf n} \in {\rm GL}_d(K)[[{\bf t}]] \, . \end{equation*} We claim that for $1\le i,j\le d$, the $(i,j)$ entry of $f_{\bf a}({\bf t})$ is a rational function in ${\bf t}$. To see this, first note that since $C_1,\ldots ,C_m$ commute, we have \begin{eqnarray*} f_{\bf a}({\bf t})&=& \sum_{(n_1,\ldots,n_m)\in\mathbb N^m} C_1^{a_1n_1}\cdots C_m^{a_mn_m}\, t_1^{n_1}\cdots t_m^{n_m}\\ &=& \prod_{i=1}^m \, \sum_{n_i\in\mathbb N} C_i^{a_i n_i} t_i^{n_i}. \end{eqnarray*} On the other hand, for every $i\in\{1,\ldots,m\}$, the sum $$ \sum_{n_i\in \mathbb N} C_i^{a_i n_i} t_i^{n_i}$$ is a $d\times d$ matrix whose entries are rational functions that belong to $K(t_i)$. This follows for instance from Proposition 1.1 in \cite{Han86}. Since rational functions are closed under Hadamard product and taking linear combinations, we obtain that $f_{\bf a}({\bf t})$ is a $d\times d$ matrix whose entries are all multivariate rational functions in ${\bf t}$. For all $1\le i,j\le d$, let us denote by $f_{i,j,{\bf a}} ({\bf t})$ the $(i,j)$ entry of $f_{\bf a}({\bf t})$. Note that the power series \[ \tilde{f}_{\bf a}({\bf t}) \ := \ \sum_{{\bf n}\in \mathbb{N}^{m}} P( \Phi\circ \Psi({\bf a \cdot n})) {\bf t}^{\bf n}\] can be obtained by taking Hadamard product and linear combinations of the rational functions $f_{i,j,{\bf a}} ({\bf t})$. We thus deduce that $ \tilde{f}_{\bf a}({\bf t})$ belongs to the field of multivariate rational functions $K({\bf t})$. On the other hand, the definition of $ \tilde{f}_{\bf a}$ implies that $$ S_{\bf a} = {\mathcal Z}( \tilde{f}_{\bf a}) \,. $$ By Theorem \ref{thm: eff}, we have that the set $S_{\bf a}$ is a $p$-automatic set that can be effectively determined. Since this holds true for every ${\bf a}\in {\mathcal E}$, this ends the proof. \end{proof}
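A key ingredient of the proof above is the classical identity $\sum_{n\geq 0} C^n t^n=(I-tC)^{-1}$, which shows that the entries of this matrix power series are rational functions of $t$. The following SymPy sketch (ours, with an arbitrary sample matrix) illustrates it.
\begin{verbatim}
# Illustration (ours): the entries of (I - tC)^{-1} are rational functions of t
# whose power series coefficients are the entries of C^n.
from sympy import Matrix, symbols, eye

t = symbols('t')
C = Matrix([[1, 1], [0, 2]])                 # a sample 2x2 matrix
closed_form = (eye(2) - t * C).inv()         # entrywise rational in t

N = 6
for i in range(2):
    for j in range(2):
        expansion = closed_form[i, j].series(t, 0, N).removeO()
        target = sum((C**n)[i, j] * t**n for n in range(N))
        assert (expansion - target).expand() == 0
print("entries of (I - tC)^{-1} expand to the entries of C^n")
\end{verbatim}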
\section{Background from automata theory} \label{Salon}
We start this section with a few examples of automatic sequences and automatic subsets of the natural numbers, as well as a useful characterization of them (Theorem \ref{AB:theorem:eilenberg}). Then we describe Salon's \cite{Salon1987} extension of the notion of automatic sets to subsets of $\mathbb{N}^d$ and show how to generalize it to subsets of $\mathbb Z^d$. Finally, we introduce a natural notion of automaticity for subsets of arbitrary finitely generated abelian groups. It seems that the latter notion has not been considered before and that it could be of independent interest.
Let $k\ge 2$ be a natural number. We let $\Sigma_k$ denote the alphabet $\left\{0,1,\ldots,k-1\right\}$.
\subsection{Automatic sequences and one-dimensional automatic sets}
For the reader's convenience we choose to recall here the definitions of a $k$-automatic sequence and a $k$-automatic subset of the natural numbers.
A $k$-automaton\index{$k$-automaton} is a $6$-tuple \begin{displaymath} {\mathcal A} = \left(Q,\Sigma_k,\delta,q_0,\Delta,\tau\right) , \end{displaymath} where $Q$ is a finite set of states, $\delta:Q\times \Sigma_k\rightarrow Q$ is the transition function, $q_0$ is the initial state, $\Delta$ is the output alphabet and $\tau : Q\rightarrow \Delta$ is the output function. For a state $q$ in $Q$ and for a finite word $w=w_1 w_2 \cdots w_n$ on the alphabet $\Sigma_k$, we define $\delta(q,w)$ recursively by $\delta(q,w)=\delta(\delta(q,w_1w_2\cdots w_{n-1}),w_n)$. Let $n\geq 0$ be an integer and let $w_r w_{r-1}\cdots w_1 w_0$ in $\left(\Sigma_k\right)^{r+1}$ be the base-$k$ expansion of $n$. Thus $n=\sum_{i=0}^r w_i k^{i} :=[w_rw_{r-1}\cdots w_0]_k$. We denote by $w(n)$ the word $w_0 w_1 \cdots w_r$. A sequence $(a_n)_{n\geq 0}$ is said to be $k$-automatic if there exists a $k$-automaton ${\mathcal A}$ such that $a_n=\tau(\delta(q_0,w(n)))$ for all $n\geq 0$. A set ${\mathcal E}\subset \mathbb N$\index{automatic set} is said to be recognizable by a finite $k$-automaton, or for short $k$-automatic, if the characteristic sequence of ${\mathcal E}$, defined by $a_n=1$ if $n\in {\mathcal E}$ and $a_n=0$ otherwise, is a $k$-automatic sequence.
\begin{exam}{\em The Thue--Morse sequence\index{Thue--Morse sequence} $t:=(t_n)_{n\geq 0}$ is probably the most famous example of an automatic sequence. It is defined as follows: $t_n=0$ if the sum of the binary digits of $n$ is even, and $t_n=1$ otherwise. The Thue--Morse sequence can be generated by the following finite $2$-automaton: $ {\mathcal A}=\left ( \{A, B\}, \{0, 1\}, \delta, A, \{0, 1\}, \tau \right)$, where $ \delta(A, 0) = \delta (B, 1) = A$, $\delta(A, 1) = \delta (B, 0) = B$,
$\tau (A) = 0$ and $\tau (B) = 1$. \begin{figure}
\caption{A $2$-automaton generating Thue--Morse sequence.}
\label{AB:figure:thue}
\end{figure}} \end{exam}
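The automaton of this example is small enough to implement directly. The following Python sketch (ours) encodes the transition and output functions above and recomputes the first terms of the Thue--Morse sequence; for this particular automaton the order in which the binary digits are read is immaterial, since only the parity of the digit sum is tracked.
\begin{verbatim}
# Sketch (ours) of the 2-automaton above: states A, B, transitions delta,
# outputs tau, run on the binary expansion of n.
def thue_morse(n):
    delta = {('A', 0): 'A', ('A', 1): 'B', ('B', 0): 'B', ('B', 1): 'A'}
    tau = {'A': 0, 'B': 1}
    state = 'A'                              # initial state q_0
    for digit in map(int, bin(n)[2:]):       # base-2 digits of n
        state = delta[(state, digit)]
    return tau[state]

print([thue_morse(n) for n in range(16)])
# [0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0]
\end{verbatim}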
\begin{exam} {\em The simplest automatic sets are arithmetic progressions. \begin{figure}
\caption{A $2$-automaton recognizing the arithmetic progression $5\mathbb N+3$.}
\label{AB:figure:ap}
\end{figure}} \end{exam}
\begin{exam}{\em The set $\{1,2,4,8,16,\ldots\}$ formed by the powers of $2$ is also a typical example of a $2$-automatic set. \begin{figure}
\caption{A $2$-automaton recognizing the powers of $2$.}
\label{AB:figure:2n}
\end{figure} } \end{exam}
\begin{exam}{\em In the same spirit, the set formed by taking all integers that can be expressed as the sum of at most two powers of $3$ is $3$-automatic. \begin{figure}
\caption{A $3$-automaton recognizing those integers that are the sum of at most two powers of $3$.}
\label{AB:figure:3n3m}
\end{figure}} \end{exam}
There are also much stranger automatic sets. The fact that the class of $k$-automatic sets is closed under various natural operations such as intersection, union and complement, can actually be used to easily construct rather sophisticated automatic sets. For instance, the set of integers whose binary expansion has an odd number of digits, does not contain three consecutive $1$'s, and contains an even number of occurrences of two consecutive $0$'s is a $2$-automatic set, as illustrated by the sketch below.
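The membership condition for this last set can be tested directly on the binary expansion, each clause corresponding to a small automaton and the set to their intersection; the following Python sketch (ours, counting occurrences of the block $00$ with overlaps) makes this explicit.
\begin{verbatim}
# Sketch (ours): direct membership test for the 2-automatic set described above.
def in_set(n):
    b = bin(n)[2:]
    odd_length = len(b) % 2 == 1
    no_three_ones = '111' not in b
    even_double_zeros = sum(b[i:i + 2] == '00' for i in range(len(b) - 1)) % 2 == 0
    return odd_length and no_three_ones and even_double_zeros

print([n for n in range(40) if in_set(n)])
\end{verbatim}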
An important notion in the study of $k$-automatic sequences is the notion of $k$-kernel.
\begin{defn}{\em The $k$-kernel of a sequence $a=(a_n)_{n\geq 0}$ is defined as the set \begin{displaymath}
\left\{(a_{k^in+j})_{n\geq 0} \mid i\geq 0, \,0\leq j<k^i \right\} \, . \end{displaymath}} \end{defn}
\begin{exam}\label{AB:example:kernel}{\em The $2$-kernel of the Thue--Morse sequence $t$ has only two elements $t$ and the sequence $\overline{t}$ obtained by exchanging the symbols $0$ and $1$ in $t$.} \end{exam}
This notion gives rise to a useful characterization of $k$-automatic sequences which was first proved by Eilenberg in \cite{Eilenberg}.
\begin{thm}[Eilenberg]\label{AB:theorem:eilenberg} A sequence is $k$-automatic if and only if its $k$-kernel is finite. \end{thm}
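Example \ref{AB:example:kernel} can be checked empirically: comparing long prefixes of the subsequences $(t_{2^i n+j})_{n\geq 0}$ of the Thue--Morse sequence, only two distinct ones appear. The following Python sketch (ours, comparing prefixes of length $64$) illustrates this finiteness phenomenon.
\begin{verbatim}
# Empirical illustration (ours) of the finiteness of the 2-kernel of the
# Thue--Morse sequence: only two distinct prefixes occur.
def t(n):
    return bin(n).count('1') % 2              # parity of the binary digit sum

L = 64                                        # prefix length used for comparison
kernel_prefixes = {tuple(t(2**i * n + j) for n in range(L))
                   for i in range(6) for j in range(2**i)}
print(len(kernel_prefixes))                   # 2
\end{verbatim}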
\subsection{Automatic subsets of $\mathbb N^d$ and multidimensional automatic sequences}
Salon \cite{Salon1987} extended the notion of automatic sets to include subsets of $\mathbb{N}^d$, where $d\ge 1$. To do this, we consider an automaton \begin{displaymath} {\mathcal A} = \left(Q,\Sigma_k^d,\delta,q_0,\Delta,\tau\right) \, , \end{displaymath} where $Q$ is a finite set of states, $\delta:Q\times \Sigma_k^d\rightarrow Q$ is the transition function, $q_0$ is the initial state, $\Delta$ is the output alphabet and $\tau : Q\rightarrow \Delta$ is the output function. Just as in the one-dimensional case, for a state $q$ in $Q$ and for a finite word $w=w_1 w_2 \cdots w_n$ on the alphabet $\Sigma_k^d$, we recursively define $\delta(q,w)$ by $\delta(q,w)=\delta(\delta(q,w_1w_2\cdots w_{n-1}),w_n)$. We call such an automaton a $d$-dimensional $k$-automaton.
We identify $\left(\Sigma_k^d\right)^*$ with the subset of $\left( \Sigma_k^*\right)^d$ consisting of all $d$-tuples $(u_1,\ldots ,u_d)$ such that $u_1,\ldots ,u_d$ all have the same length. Each nonnegative integer $n$ can be written uniquely as \begin{displaymath} n \ = \ \sum_{j=0}^{\infty} e_j(n) k^j \, , \end{displaymath} in which $e_j(n)\in \{0,\ldots ,k-1\}$ and $e_j(n)=0$ for all sufficiently large $j$. Given a nonzero $d$-tuple of nonnegative integers $(n_1,\ldots ,n_d)$, we set \begin{displaymath} h:= \max \{ j \geq 0~\mid~ {\rm there ~exists ~some}~i~,\; 1\leq i \leq d,~{\rm such ~that ~}e_j(n_i)\neq 0\}\, . \end{displaymath} Furthermore, if $(n_1,\ldots,n_d)=(0,\ldots,0)$, we set $h:=0$.
We can then produce an element
\begin{displaymath}
w_k(n_1,\ldots ,n_d):=(w_1,\ldots ,w_d)\in \left(\Sigma_k^d\right)^*
\end{displaymath}
corresponding to $(n_1,\ldots ,n_d)$ by defining \begin{displaymath} w_i := e_h(n_i)e_{h-1}(n_i)\cdots e_0(n_i)\, . \end{displaymath}
In other words, we are taking the base-$k$ expansions of $n_1,\ldots ,n_d$ and
then ``padding" the expansions of each $n_i$ at the beginning with $0$'s if necessary
to ensure that each expansion has the same length.
\begin{exam} If $d=3$ and $k=2$, then we have $w_2(3,5,0)=(011, 101,000)$. \end{exam}
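The padding map $w_k$ is easy to implement; the following Python sketch (ours) reproduces the example above.
\begin{verbatim}
# Sketch (ours) of the padding map w_k: base-k digits of each coordinate,
# most significant digit first, padded with 0's to the common length h+1.
def digits(n, k):
    ds = []
    while n:
        ds.append(n % k)
        n //= k
    return ds or [0]                          # least significant digit first

def w(k, *ns):
    digs = [digits(n, k) for n in ns]
    h = max(map(len, digs))                   # common length after padding
    padded = [d + [0] * (h - len(d)) for d in digs]
    return tuple(''.join(map(str, reversed(d))) for d in padded)

print(w(2, 3, 5, 0))                          # ('011', '101', '000')
\end{verbatim}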
\begin{defn}{\em A map $f:\mathbb{N}^d\rightarrow \Delta$ is $k$-automatic if there is a $d$-dimensional $k$-automaton\index{multidimensional automaton} ${\mathcal A} = \left(Q,\Sigma_k^d,\delta,q_0,\Delta,\tau\right)$ such that \begin{displaymath} f(n_1,\ldots ,n_d) = \tau(\delta(q_0,w_k(n_1,\ldots ,n_d)))\, . \end{displaymath} Similarly, a subset $S$ of $\mathbb{N}^d$ is $k$-automatic if its characteristic function, $f:\mathbb{N}^d\to \{0,1\}$, defined by $f(n_1,\ldots ,n_d)=1$ if $(n_1,\ldots ,n_d)\in S$; and $f(n_1,\ldots ,n_d)=0$, otherwise, is $k$-automatic.} \end{defn}
\begin{exam}\label{exam: 2dim}{\em Let $f:\mathbb{N}^2\rightarrow \{0,1\}$ be defined by $f(n,m)=1$ if the sum of the binary digits of $n$ added to the sum of the binary digits of $m$ is even, and $f(n,m)=0$ otherwise. Then $f(m,n)$ is a $2$-automatic map. One can check that $f$ can be generated by the following $2$-dimensional $2$-automaton: $ {\mathcal A}=\left ( \{A, B\}, \{0, 1\}^2, \delta, A, \{0, 1\}, \tau \right)$, where $\delta(A, (0,0)) = \delta (A, (1,1)) = \delta(B,(1,0)) = \delta(B,(0,1)) = A$, $\delta(A, (1,0)) = \delta(A,(0,1)) = \delta (B, (0,0)) = \delta (B, (1,1))= B$,
$\tau (A) = 1$ and $\tau (B) = 0$.} \end{exam}
\begin{figure}
\caption{A $2$-dimensional $2$-automaton generating the map $f$ defined in Example \ref{exam: 2dim}.}
\label{AB:figure:thue2dim}
\end{figure}
Just as $k$-automatic sequences can be characterized by the finiteness of the $k$-kernel, multidimensional $k$-automatic sequences have a similar characterization.
\begin{defn} {\em Let $d$ be a positive integer and let $\Delta$ be a finite set. We define the $k$-\emph{kernel}\index{kernel} of a map $f:\mathbb{N}^d\rightarrow \Delta$ to be the collection of all maps of the form \begin{displaymath} g(n_1,\ldots ,n_d):=f(k^a n_1+b_1,\ldots ,k^a n_d+b_d) \end{displaymath}
where $a\ge 0$ and $0\le b_1,\ldots ,b_d<k^a$. }
\label{defn: kerd} \end{defn}
\begin{exam}{\em The $2$-kernel of the map $f:\mathbb{N}^2\rightarrow \{0,1\}$ defined in Example \ref{exam: 2dim}
consists of the $2$ maps $f_1(m,n):=f(m,n)$, $f_2(m,n)=f(2m+1,2n)$. } \end{exam}
Just as Eilenberg \cite{Eilenberg} showed that being $k$-automatic is equivalent to having a finite $k$-kernel for $k$-automatic sequences, Salon \cite[Theorem 1]{Salon1987} observed that a similar characterization of multidimensional $k$-automatic maps holds.
\begin{thm}[Salon]\label{thm: Salon} Let $d$ be a positive integer and let $\Delta$ be a finite set. A map $f:\mathbb{N}^d\to \Delta$ is $k$-automatic if and only if its $k$-kernel is finite. \end{thm}
\subsection{Automatic subsets of $\mathbb Z^d$}
We show now how to naturally extend Salon's construction to $k$-automatic subsets of $\mathbb Z^d$ by simply adding symbols $+$ and $-$ to our alphabet $\Sigma_k$.
Given a natural number $n$, we let $[n]_k$ denote the base-$k$ expansion of $n$.
We set \begin{equation*} \Sigma'_k \ := \ \{0,1,\ldots ,k-1,-,+ \}\end{equation*}
and we let $\mathcal{L}_k$ denote the language over the alphabet $\Sigma'_k$ consisting of the empty word and all words over $\Sigma'_k$ whose length is at least $2$ such that the initial letter is either $+$ or $-$, the remaining letters are all in $\Sigma_k$, and the last letter is not equal to zero. This is easily seen to be a regular language.
There is a bijection $[\ \cdot \ ]_k:\mathcal{L}_k\rightarrow \mathbb{Z}$ in which the empty word is sent to zero, \begin{equation*} +s_0\cdots s_{n}\ \in \mathcal{L}_k \ \mapsto \ \sum_{j=0}^{n} s_j k^{j} \end{equation*} and \begin{equation*} -s_0\cdots s_{n}\ \in \mathcal{L}_k \ \mapsto \ -\sum_{j=0}^{n} s_j k^{j}\, , \end{equation*} where $s_0,\ldots ,s_n\in \{0,1,\ldots ,k-1\}$.
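This signed encoding of integers is again easy to implement; the following Python sketch (ours) realizes the bijection $[\,\cdot\,]_k$ and its inverse, with the empty word representing $0$.
\begin{verbatim}
# Sketch (ours) of the bijection between the language L_k and Z: a sign
# followed by base-k digits, least significant first, last digit nonzero.
def encode(n, k):
    sign = '-' if n < 0 else '+'
    n = abs(n)
    if n == 0:
        return ''                             # the empty word represents 0
    ds = []
    while n:
        ds.append(str(n % k))
        n //= k
    return sign + ''.join(ds)

def decode(word, k):
    if word == '':
        return 0
    sign = -1 if word[0] == '-' else 1
    return sign * sum(int(d) * k**j for j, d in enumerate(word[1:]))

assert all(decode(encode(n, 3), 3) == n for n in range(-100, 101))
print(encode(11, 2))                          # '+1101'
\end{verbatim}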
\begin{defn} {\em We say that a subset $S$ of $\mathbb{Z}$ is $k$-\emph{automatic} if there is a finite-state automaton that takes words over $\Sigma'_k$ as input, and has the property that a word $W\in \mathcal{L}_k$ is accepted by the automaton if and only if $[W]_k\in S$. } \end{defn}
More generally, we can define automatic subsets of $\mathbb{Z}^d$, mimicking the construction of Salon \cite{Salon1987}. For a natural number $d\ge 1$, we create the alphabet $\Sigma_k'(d)$ to be the alphabet $\left(\Sigma_k' \right)^d$ consisting of all $d$-tuples of elements of $\Sigma_k'$.
With this in mind, we construct a regular language $\mathcal{L}_k(d) \subseteq \left(\Sigma_k'(d)\right)^*$ as follows. Given a nonzero integer $n$, we can write it uniquely as $$n \ = \ \epsilon \sum_{j=0}^{\infty} e_j(n) k^j\, ,$$ in which $\epsilon\in \{\pm 1\}$, $e_j(n)\in \{0,\ldots ,k-1\}$ and there is some natural number $N$, depending on $n$, such that $e_j(n)=0$ whenever $j>N$. We also set $$ 0 \ = \ + \sum_{j=0}^{\infty} e_j(0) k^j\, , $$ where $e_j(0)=0$ for all $j\geq 0$. Given a nonzero $d$-tuple of integers $(n_1,\ldots ,n_d)$, we set $$h:= \max\{ j~\mid~ {\rm there ~exists ~some}~i~{\rm such ~that ~}e_j(n_i)\neq 0\}\, .$$ If $(n_1,\ldots,n_d)=(0,\ldots,0)$, we set $h:=0$.
We can then produce an element $$w_k(n_1,\ldots ,n_d):=(w_1,\ldots ,w_d)\in
\left(\Sigma_k'(d)\right)^*$$ corresponding to $(n_1,\ldots ,n_d)$ by defining $$w_i := \epsilon_i e_h(n_i)e_{h-1}(n_i)\cdots e_0(n_i)\, ,$$ where $\epsilon_i$ is $+$ if $n_i$ is nonnegative and is $-$ if $n_i<0$. In other words, we are taking the base $k$-expansions of $n_1,\ldots ,n_d$ and then ``padding'' the expansions of each $n_i$ at the beginning to ensure that each expansion has the same length.
\begin{exam} If $d=3$ and $k=2$, then we have $w_2(14,-3,0)=(+1110, -0011,+0000)$. \end{exam}
We then take $\mathcal{L}_k(d)$ to be the collection of words of the form $$w_k(n_1,\ldots ,n_d)$$ where $(n_1,\ldots ,n_d)\in \mathbb Z^d$. Then there is an obvious way to extend the map $[\cdot ]_k$ to a bijection $[\, \cdot \, ]_k :\mathcal{L}_k(d)\rightarrow \mathbb{Z}^d$; namely, $$[w_k(n_1,\ldots ,n_d)]_k \ := \ (n_1,\ldots ,n_d)\, .$$ We also denote by $[\, \cdot \, ]_k^{-1}$ the inverse map.
We can now define the notion of a $k$-automatic function from $\mathbb{Z}^d$ to a finite set as follows.
\begin{defn}\label{def:autoZ} {\em Let $\Delta$ be a finite set. A function $f: \mathbb{Z}^d \rightarrow \Delta$ is $k$-\emph{automatic} if there is a finite automaton that takes words over ${\mathcal L}_k(d)$ as input and has the property that reading a word $W\in \mathcal{L}_k(d)$, the automaton outputs $f([W]_k)$.
Similarly, a subset $S$ of $\mathbb{Z}^d$ is $k$-automatic if its characteristic function, $f:\mathbb{Z}^d\to \{0,1\}$, defined by $f(n_1,\ldots ,n_d)=1$ if $(n_1,\ldots ,n_d)\in S$; and $f(n_1,\ldots ,n_d)=0$, otherwise, is $k$-automatic.} \end{defn}
In fact, much as in the classical situation, automaticity of subsets of $\mathbb{Z}^d$ can be characterized using the kernel.
\begin{defn} {\em Let $d\geq 1$ be an integer and $\Delta$ a finite set. Given a map $f:\mathbb{Z}^d\to \Delta$, we define the $k$-kernel of $f$ to be the collection of all maps of the form \begin{displaymath} g(n_1,\ldots ,n_d):=f(k^a n_1+b_1,\ldots ,k^a n_d+b_d) \end{displaymath}
where $a\ge 0$ and $0\le b_1,\ldots ,b_d<k^a$. } \end{defn}
\begin{prop}\label{prop:autoZequiv} Let $d\geq 1$ be an integer and $\Delta$ a finite set. Given a map $f:\mathbb{Z}^d\to \Delta$, the following are equivalent. \begin{enumerate} \item[{\rm (i)}] The map $f$ is $k$-automatic. \item[{\rm (ii)}] The $k$-kernel of $f$ is finite. \item[{\rm (iii)}] For each ${\bf \epsilon}=(\epsilon_1,\ldots ,\epsilon_d) \in \{\pm 1\}^d$, the function $f_{{\bf \epsilon}}:\mathbb{N}^d\to \Delta$ defined by $(n_1,\ldots ,n_d)\mapsto f(\epsilon_1 n_1,\ldots ,\epsilon_d n_d)$ is $k$-automatic in the usual sense. \end{enumerate} \end{prop}
\begin{proof} We note that by definition of automatic maps on $\mathbb{Z}^d$, each of the $f_{{\bf \epsilon}}$ is $k$-automatic in the usual sense and hence (i) implies (iii). Similarly, (iii) implies (i). Next assume that (iii) holds. Let $h(n_1,\ldots ,n_d)=f(k^an_1+b_1,\ldots , k^a n_d +b_d)$ be a map in the kernel of $f$. Then for ${\bf \epsilon}=(\epsilon_1,\ldots ,\epsilon_d) \in \{\pm 1\}^d$, the map $h_{{\bf \epsilon}}:\mathbb{N}^d\to \Delta$ defined by $(n_1,\ldots ,n_d)\mapsto h(\epsilon_1 n_1,\ldots \epsilon_d n_d)$ is of the form $$f(\epsilon_1k^a n_1+b_1,\ldots ,\epsilon_d k^a n_d + b_d),$$ which is in the $k$-kernel of $f_{{\bf \epsilon}}$. Since there are only finitely many
${\bf \epsilon}=(\epsilon_1,\ldots ,\epsilon_d) \in \{\pm 1\}^d$ and only finitely many elements
in the kernel of $f_{{\bf \epsilon}}$, we see that the kernel of $f$ is finite and hence (iii) implies (ii).
Similarly, (ii) implies (iii). \end{proof}
\subsection{Automatic subsets of finitely generated abelian groups}
We introduce here a relevant notion of automaticity for subsets of arbitrary finitely generated abelian groups. In this area, we quote \cite{Aandal} where the authors provide a general framework for the automaticity of maps from some semirings to finite sets. In particular, a similar notion of automaticity for subsets of $\mathbb Z^2$ was considered in that paper.
In this more general framework, it seems more natural to define first $k$-automatic maps in terms of some generalized $k$-kernels and then to prove that such maps can be characterized in terms of finite automata.
In the rest of this section, all finitely generated abelian groups are written additively. We thus first define the $k$-kernel of a map from a finitely generated abelian group to a finite set.
\begin{defn} {\em Let $\Gamma$ be a finitely generated abelian group and $T=\{\gamma_1,\ldots ,\gamma_d\}$ a set of generators of $\Gamma$. Let $\Delta$ be a finite set. Given a map $f:\Gamma\to \Delta$, we define the $k$-kernel of $f$ with respect to the generating set $T$ to be the collection of all maps from $\Gamma$ to $\Delta$ of the form \begin{displaymath} g(x):=f(k^a x+ b_1 \gamma_1+\cdots +b_d \gamma_d) \end{displaymath} such that $a\ge 0$ and $0\le b_1,\ldots ,b_d<k^a$. } \end{defn}
We can now define $k$-automatic maps as follows.
\begin{defn} {\em Let $\Gamma$ be a finitely generated abelian group and $\Delta$ a finite set. A map $f:\Gamma\to \Delta$ is $k$-automatic if its $k$-kernel with respect to every finite generating set of $\Gamma$ is finite.} \end{defn}
As usual, we can use the previous definition to introduce the notion of a $k$-automatic subset of a finitely generated abelian group.
\begin{defn}\label{def:autofgg} {\em Let $\Gamma$ be a finitely generated abelian group. A subset $S$ of $ \Gamma$ is $k$-automatic if the map $\chi_S:\Gamma\to \{0,1\}$, defined by $\chi_S(x)=1$ if and only if $x\in S$, is $k$-automatic. } \end{defn}
We note that our definition of $k$-automaticity appears to be somewhat difficult to verify, as we must check that the $k$-kernel is finite with respect to every finite generating set. As shown below, it actually suffices to check that the $k$-kernel is finite with respect to just one generating set.
\begin{prop} Let $\Gamma$ be a finitely generated abelian group and $\Delta$ a finite set. Let us assume that the map $f:\Gamma\to \Delta$ has a finite $k$-kernel with respect to some generating set of $\Gamma$. Then the map $f$ is $k$-automatic. \label{prop: independentofcoice} \end{prop}
\begin{proof} Suppose that the $k$-kernel of $f$ is finite with respect to the generating set $T:=\{\gamma_1,\ldots ,\gamma_d\}$ of $\Gamma$ and let $f_1,\ldots ,f_m$ denote the distinct maps in the $k$-kernel of $f$.
Given another generating set of $\Gamma$, say $T':=\{\delta_1,\ldots ,\delta_e\}$, we have to show that the $k$-kernel of $f$ with respect to $T'$ is also finite.
There exist integers $c_{i,j}$ with $1\le i\le d$ and $1\le j\le e$ such that $$\delta_j =\sum_{i=1}^d c_{i,j} \gamma_i$$ for $j\in \{1,\ldots ,e\}$.
Set $N:=\sum_{i,j} |c_{i,j}|$. Given an integer $i$, $1\leq i\leq m$, and a $d$-tuple of integers ${\bf j}=(j_1,\ldots ,j_d)$, we define the map $g_{i,{\bf j}}$ from $\Gamma$ to $\Delta$ by $$g_{i,{\bf j}}(x):=f_i( x + j_1\gamma_1+\cdots + j_d \gamma_d)$$ for all $x\in \Gamma$. We claim that the $k$-kernel of $f$ with respect to $T'$ is contained in the finite set $\mathcal S$ defined by $${\mathcal S}:= \left\{ g_{i,{\bf j}} : \Gamma \to\Delta \mid {\bf j}=(j_1,\ldots ,j_d)\in \{-N,-N+1,\ldots ,N\}^d, \; i\in \{1,\ldots ,m\}\right\}\,.$$
To see this, note that if $a\ge 0$ and $0\le b_1,\ldots ,b_e<k^a$, then $$b_1 \delta_1+\cdots + b_e \delta_e=b_1' \gamma_1+\cdots +b_d'\gamma_d\,,$$ where $\displaystyle b_i' = \sum_{j=1}^{e} b_j c_{i,j}$. It follows that $$\vert b'_i\vert \leq N(k^a-1)$$ for every $i$, $1\leq i\leq d$. We can thus write $b_i' = k^a m_i + r_i$ with $\vert m_i\vert \le N$ and $0\le r_i < k^a$. This implies that \begin{eqnarray*} f(k^a x + b_1 \delta_1+\cdots + b_e \delta_e) &=& f(k^a x + b_1' \gamma_1+\cdots +b_d'\gamma_d) \\ &= &f(k^a(x+m_1\gamma_1+\cdots + m_d\gamma_d)\\ &&\;\;\;+r_1\gamma_1+\cdots+r_d\gamma_d)\\ &= &f_{\ell}(x+m_1\gamma_1+\cdots + m_d\gamma_d) \end{eqnarray*} for some $\ell$, $1\leq \ell\leq m$. Thus we see that $$f(k^a x + b_1 \delta_1+\cdots + b_e \delta_e)= g_{\ell, {\bf m}}(x)$$ where ${\bf m}:=(m_1,\ldots,m_d)$, which proves that the $k$-kernel of $f$ with respect to the generating set $T'$ is included in the finite set ${\mathcal S}$, as claimed. \end{proof}
\begin{prop}\label{prop:automorph} Let $\Gamma_1$ and $\Gamma_2$ be two finitely generated abelian groups, and $\Phi:\Gamma_1\to\Gamma_2$ a surjective group homomorphism. If $S$ is a $k$-automatic subset of $\Gamma_2$ then $\Phi^{-1}(S)$ is a $k$-automatic subset of $\Gamma_1$. \end{prop}
\begin{proof}
Let $f$ and $g$ denote respectively the characteristic function of $\Phi^{-1}(S)$ and $S$. Let $\{\gamma_1,\ldots ,\gamma_d\}$ be a set of generators of $\Gamma_1$. Then if $a\geq0$ and $0\le b_1,\ldots ,b_d< k^a$, we infer from the definition of $f$ that $$f(k^a x+b_1\gamma_1+\cdots +b_d\gamma_d)=1\iff \Phi(k^a x+b_1\gamma_1+\cdots + b_d\gamma_d)\in S\, ,$$ which occurs if and only if $$g\left(k^a \Phi(x)+\sum_{i=1}^d b_i \Phi(\gamma_i)\right) =1\, .$$
Note that $$T:= \{\Phi(\gamma_i)~:~1\le i\le d\}$$ is a set of generators of $\Gamma_2$ since $\Phi$ is surjective. Since, by assumption, $g$ is $k$-automatic, the $k$-kernel of $g$ is finite with respect to $T$. Thus the $k$-kernel of $f$ is finite with respect to $T':=\{\gamma_1,\ldots ,\gamma_d\}$. The result now follows from Proposition \ref{prop: independentofcoice}. \end{proof}
We can now prove, as one might expect, that a $k$-automatic subset of a finitely generated abelian group can be described by a finite automaton.
\begin{prop}\label{prop:fggcarac}
Let $\Gamma$ be a finitely generated abelian group, $\{\gamma_1,\ldots,\gamma_d\}$ a set of generators
of $\Gamma$, and $S$ a subset of $\Gamma$. Then $S$ is $k$-automatic if and only if there exists a
finite automaton that takes words over ${\mathcal L}_k(d)$ as input and has the property that for
every $d$-tuple of integers $(n_1,\ldots,n_d)$ the word
$[(n_1,\ldots,n_d)]_k^{-1}\in {\mathcal L}_k(d)$ is accepted by the automaton if and only if
$n_1\gamma_1 + \cdots + n_d\gamma_d$ belongs to $S$.
\end{prop}
\begin{proof}
For every integer $i$, $1\leq i\leq d$, we denote by $e_i:= (0,\ldots ,0, 1, 0,\ldots ,0)$ the element of
$\mathbb Z^d$ whose $i$th coordinate is $1$ and whose other coordinates are $0$.
Let $\Phi$ be the surjective group homomorphism from $\mathbb Z^d$ to $\Gamma$ defined by
$\Phi(e_i)=\gamma_i$ for every integer $i$, $1\leq i\leq d$.
If $S$ is $k$-automatic then, by Proposition \ref{prop:automorph}, $\Phi^{-1}(S)$ is a $k$-automatic subset of
$\mathbb Z^d$. By Definition \ref{def:autoZ}, there is a finite automaton that takes words over ${\mathcal L}_k(d)$ as input and has the property that the word $W\in \mathcal{L}_k(d)$ is accepted by the automaton if and only if $[W]_k$ belongs to $\Phi^{-1}(S)$. Thus for every $d$-tuple
of integers $(n_1,\ldots,n_d)$ the word
$[(n_1,\ldots,n_d)]_k^{-1}\in {\mathcal L}_k(d)$ is accepted by this automaton if and only if
$n_1\gamma_1+\cdots + n_d\gamma_d$ belongs to $S$.
On the other hand, suppose that there exists a finite automaton such that for every $d$-tuple
of integers $(n_1,\ldots,n_d)$ the word
$[(n_1,\ldots,n_d)]_k^{-1}\in {\mathcal L}_k(d)$ is accepted by this automaton if and only if
$n_1\gamma_1+\cdots +n_d\gamma_d$ belongs to $S$. The same automaton can then be
used to recognize $\Phi^{-1}(S)$. Thus $\Phi^{-1}(S)$ is a $k$-automatic subset of $\mathbb Z^d$.
By Proposition \ref{prop:autoZequiv}, the set $\Phi^{-1}(S)$ has a finite $k$-kernel and it follows
that $S$ has a finite $k$-kernel with respect to $\{\gamma_1,\ldots,\gamma_d\}$. By Proposition
\ref{prop: independentofcoice}, $S$
is thus a $k$-automatic subset of $\Gamma$.
\end{proof}
\section{Proof of our main result} \label{proof}
Our aim is to prove Theorem \ref{thm: main}. Throughout this section, we take $d$ to be a natural number. We let ${\bf n}$ and ${\bf j}$ denote respectively the $d$-tuple of natural numbers $(n_1,\ldots ,n_d)$ and $(j_1,\ldots ,j_d)$. We will also let ${\bf t }^{\bf n}$ denote the monomial $t_1^{n_1}\cdots t_d^{n_d}$ in indeterminates $t_1,\ldots ,t_d$. The degree of such a monomial is the nonnegative integer $n_1+\cdots + n_d$. Given a polynomial $P$ in $K[{\bf t}]$, we denote by $\deg P$ the maximum of the degrees of the monomials appearing in $P$ with nonzero coefficient.
\begin{defn} {\em We say that a power series $f({\bf t}) \in K[[{\bf t}]]$ is algebraic if it is algebraic over the field of rational functions $K({\bf t})$, that is, if there exist polynomials $A_0,\ldots, A_m\in {K}[{\bf t}]$, not all zero, such that \begin{displaymath} \sum_{i=0}^{m} A_i({\bf t})f({\bf t})^i \ = \ 0\, . \end{displaymath}} \end{defn}
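For instance, over $K=\mathbb F_2$ and with $d=1$, the power series $f(t)=\sum_{n\ge 0}t^{2^n}$ is algebraic: since the Frobenius is a ring homomorphism, $f(t)^2=\sum_{n\ge 0}t^{2^{n+1}}=f(t)+t$, so that
$$
f(t)^2+f(t)+t=0\,,
$$
a relation of the required form with $A_0(t)=t$, $A_1(t)=1$ and $A_2(t)=1$. We will return to this simple example below for illustration.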
In order to prove Theorem \ref{thm: main} we need to introduce some notation. For each
${\bf j}=(j_1,\ldots ,j_d) \in \{0,1,\ldots ,p-1\}^d$, we define
$e_{\bf j}:\mathbb{N}^d\to \mathbb{N}^d$ by
\begin{equation}\label{AB:equation:ej}
e_{\bf j}(n_1,\ldots ,n_d):=(pn_1+j_1,\ldots ,pn_d+j_d)\, .
\end{equation}
We let $\Sigma$ denote the semigroup generated by the collection of all
$e_{\bf j}$ under composition. In view of Definition \ref{defn: kerd}, this semigroup
is intimately related to the definition of the $p$-kernel of $d$-dimensional maps.
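Concretely, an easy induction shows that the elements of $\Sigma$ are exactly the maps of the form
$$
(n_1,\ldots ,n_d)\;\longmapsto\; (p^{t}n_1+b_1,\ldots ,p^{t}n_d+b_d)\,,
$$
with $t\ge 1$ and $0\le b_1,\ldots ,b_d<p^{t}$: composing $t$ of the maps $e_{\bf j}$ multiplies each coordinate by $p^{t}$ and adds an integer whose base-$p$ digits are given by the indices ${\bf j}$ occurring in the composition.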
As a direct consequence of Theorem \ref{thm: Salon}, we make the following remark which underlines the important role that will be played by the semigroup $\Sigma$ in the proof of Theorem \ref{thm: main}.
\begin{rem}{\em Let $\Delta$ be a finite set. Then a map $a:\mathbb{N}^d\to \Delta$ is $p$-automatic if and only if the set of functions $\{a\circ e~\mid~e\in\Sigma\}$ is a finite set.} \label{rem: kernel} \end{rem}
We recall that a field $K$ of characteristic $p>0$ is perfect if the map $x\mapsto x^p$ is surjective on $K$. Let $p$ be a prime number and let $K$ be a perfect field of characteristic $p$. For every ${\bf j}\in \Sigma_p^d=\{0,1,\ldots ,p-1\}^d$, we define the so-called Cartier operator $E_{\bf j}$ from $K[[{\bf t}]]$ into itself by \begin{equation}\label{AB:equation:EJ} E_{\bf j}(f({\bf t}))\ := \ \sum_{{\bf n}\in \mathbb{N}^d} (a\circ e_{\bf j}({\bf n}))^{1/p}{\bf t}^{\bf n} \end{equation} where $f({\bf t}):= \sum_{{\bf n} \in \mathbb N^d} a({\bf n}) {\bf t}^{\bf n} \in K[[{\bf t}]]$. Then we have the following useful decomposition: \begin{equation}\label{eq: fs} f = \sum_{{\bf j}\in \Sigma_p^d} {\bf t}^{\bf j} E_{\bf j}( f)^p \, . \end{equation}
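To make this definition concrete, consider the case $p=2$ and $d=1$, with $K$ a perfect field of characteristic $2$. Writing $f(t)=\sum_{n\ge 0}a(n)t^n$, the two Cartier operators are
$$
E_0(f)=\sum_{n\ge 0}a(2n)^{1/2}t^n \qquad\mbox{and}\qquad E_1(f)=\sum_{n\ge 0}a(2n+1)^{1/2}t^n\,,
$$
and since the Frobenius $x\mapsto x^2$ is a ring homomorphism we recover
$$
E_0(f)^2+t\,E_1(f)^2=\sum_{n\ge 0}a(2n)t^{2n}+\sum_{n\ge 0}a(2n+1)t^{2n+1}=f(t)\,,
$$
which is Equation (\ref{eq: fs}) in this special case.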
We now recall the following simple classical result, usually known as Ore's lemma.
\begin{lem}\label{lem: ore}
Let $f({\bf t})\in K[[{\bf t}]]$ be a nonzero algebraic power series. Then there exists a positive integer $r$ and polynomials $P_0,\ldots,P_r$ in $K[{\bf t}]$ such that $$ \sum_{i=0}^r P_if^{p^i} = 0 $$ and $P_0\not=0$.
\end{lem}
\begin{proof}
Since $f$ is algebraic, $\left\{ f,f^p,f^{p^2},\ldots\right\}$ is linearly dependent over $K({\bf t})$.
There thus exist a natural number $r$ and polynomials $P_0,\ldots,P_r$ in $K[{\bf t}]$
such that $$ \sum_{i=0}^r P_if^{p^i} = 0 \, . $$ It remains to prove that one can choose $P_0\not = 0$. Let $k$ be the smallest nonnegative integer such that $f$ satisfies a relation of this type with $P_k\not=0$. We shall prove that $k=0$ which will end the proof. We assume that $k>0$ and we argue by contradiction. Since $P_k\not=0$, we infer from Equality (\ref{eq: fs}) that there exists a $d$-tuple ${\bf j}\in \Sigma_p^d$ such that $E_{\bf j}(P_k)\not =0$. Since $\sum_{i=k}^{r} P_if^{p^i}=0$, we have $$ E_{\bf j}\left(\sum_{i=k}^{r} P_if^{p^i}\right)= \sum_{i=k}^{r} E_{\bf j}\left(P_if^{p^i}\right) = \sum_{i=k}^{r} E_{\bf j}\left(P_i\right) f^{p^{i-1}}= 0 \, . $$ We thus obtain a new relation of the same type but for which the coefficient of $f^{p^{k-1}}$ is nonzero. This provides a contradiction with the definition of $k$. \end{proof}
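As a simple illustration of Ore's lemma, take $K=\mathbb F_p$, $d=1$ and $f(t)=\sum_{n\ge 0}t^n=1/(1-t)$. Since $(1-t)^p=1-t^p$, we have $f(t)^p=1/(1-t^p)$ and hence
$$
(1-t)f(t)+(t^p-1)f(t)^p=1-1=0\,,
$$
a relation of the announced form with $r=1$, $P_0=1-t\not=0$ and $P_1=t^p-1$.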
We now let $\Omega$ denote the semigroup generated
by the collection of the Cartier operators $E_{\bf j}$ and the identity operator
under composition. We let $\Omega(f)$ denote the orbit of $f$ under the action of
$\Omega$, that is,
$$
\Omega(f) := \left\{ E(f) \mid E \in \Omega \right\} \, .
$$
As in the work of Harase \cite{Har88} and of Sharif and Woodcock \cite{SW}, the $K$-vector space
spanned by $\Omega(f)$ will play an important role. We will in particular need the following auxiliary result based on Ore's lemma.
\begin{lem} Let $K$ be a perfect field of characteristic $p$, and let $$f({\bf t}):=\sum_{{\bf n}\in \mathbb{N}^d} a({\bf n}){\bf t}^{\bf n} \in K[[{\bf t}]]$$ be a nonzero algebraic function over $K({\bf t})$. Then there exists a natural number $m$ and there exist maps $a_1,\ldots ,a_m:\mathbb{N}^d\to K$ with the following properties.
\begin{itemize}
\item[\textup{(i)}] The formal power series $f_i({\bf t}):=\sum_{{\bf n}\in \mathbb{N}^d} a_i({\bf n}){\bf t}^{\bf n}$, $1\le i\le m$, form a basis of the $K$-vector space spanned by $\Omega(f)$.
\item[\textup{(ii)}] One has $f_1=f\, .$
\item[\textup{(iii)}] Let $g({\bf t}) := \sum_{ {\bf n} \in \mathbb N^d } b( {\bf n} ) {\bf t}^{\bf n}$ be a power series that belongs to $\Omega(f)$. Then $b\circ e_{\bf j} \in K\,a_1^p+\cdots + K\,a_m^p\,$ for every ${\bf j}\in\{0,\ldots, p-1\}^d \,.$ \end{itemize}\label{lem: SW} \end{lem}
\begin{proof} Let $f({\bf t})\in K[[{\bf t}]]$ be a nonzero algebraic power series. By Lemma \ref{lem: ore}, there exist a positive integer $r$ and polynomials $P_0,\ldots,P_r$ in $K[{\bf t}]$ such that $$ \sum_{i=0}^r P_if^{p^i} = 0 $$ and $P_0\not=0$. Set $\tilde{f} := P_0^{-1} f$. Then \begin{equation}\label{eq: g} \tilde{f} = \sum_{i=1}^r Q_i \tilde{f} ^{p^i} \, , \end{equation} where $Q_i= -P_i P_0^{p^i-2}$. Set $M := \max \{ \deg P_0, \deg Q_i \mid 1 \leq i \leq r \}$ and \begin{equation}\label{eq: H} {\mathcal H} := \left\{ h \in K(({\bf t})) \mid h = \sum_{i=0}^r R_i\tilde{f} ^{p^i} \mbox{ such that } R_i \in K[{\bf t}] \mbox{ and } \deg R_i \leq M \right\} \, . \end{equation} We first note that $f$ belongs to $\mathcal H$ since $f= P_0\tilde{f} $ and $\deg P_0\leq M$. We also observe that ${\mathcal H}$ is closed under the action of $\Omega$. Indeed, if $h:=\sum_{i=0}^r R_i\tilde{f} ^{p^i} \in {\mathcal H}$ and ${\bf j}\in \{0,\ldots,p-1\}^d$, then $$\begin{array}{ll} E_{\bf j}(h) & \displaystyle = E_{\bf j}\left(R_0\tilde{f} + \sum_{i=1}^r R_i\tilde{f} ^{p^i}\right ) =E_{\bf j}\left(\sum_{i=1}^r (R_0Q_i+R_i)\tilde{f} ^{p^i}\right )\\ &\displaystyle = \sum_{i=1}^r E_{\bf j}(R_0Q_i+R_i)\tilde{f} ^{p^{i-1}} \, , \end{array}$$ and since $\deg (R_0Q_i+R_i) \leq 2M$, we have $\deg E_{\bf j}(R_0Q_i+R_i) \leq 2M/p\leq M$. It follows that the $K$-vector space spanned by $\Omega(f)$ is contained in ${\mathcal H}$ and thus has finite dimension, say $m$.
We can thus pick maps $a_1,\ldots ,a_m:\mathbb{N}^d\to K$ such that the $m$ power series $f_i({\bf t}):=\sum_{{\bf n}\in \mathbb{N}^d} a_i({\bf n}){\bf t}^{\bf n}$ form a basis of the $K$-vector space spanned by $\Omega(f)$. Furthermore, since by assumption $f$ is a nonzero power series, we can choose $f_1=f$. Let $b:\mathbb{N}^d\to K$ be such that $g({\bf t}):=\sum_{{\bf n}\in \mathbb{N}^d} b({\bf n}){\bf t}^{\bf n}$ belongs to $\Omega(f)$. Observe that the power series $g$ can be decomposed as \begin{equation} \label{AB:equation:eq1} g({\bf t}) = \sum_{{\bf j}\in \{0,\ldots ,p-1\}^d} {\bf t}^{\bf j} E_{\bf j}(g({\bf t}))^p \, . \end{equation}
By assumption, $E_{\bf j}(g({\bf t}))\in K\, f_1({\bf t})+\cdots + K\, f_m({\bf t})$ and hence $E_{\bf j}(g({\bf t}))^p\in K\, f_1({\bf t})^p+\cdots + K\, f_m({\bf t})^p$. Let ${\bf j}\in\{0,1,\ldots, p-1\}^d$. Considering the coefficient of ${\bf t}^{p{\bf n}+{\bf j}}$ in Equation (\ref{AB:equation:eq1}), we see that $b\circ e_{\bf j}({\bf n})$ is equal to the coefficient of ${\bf t}^{p{\bf n}}$ in $E_{\bf j}(g({\bf t}))^p$, which belongs to $K\, a_1({\bf n})^p+\cdots + K\, a_m({\bf n})^p$. This concludes the proof. \end{proof}
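Let us illustrate Lemma \ref{lem: SW} on the power series $f(t)=\sum_{n\ge 0}t^{2^n}\in\mathbb F_2[[t]]$ considered above. A direct computation gives $E_0(f)=f$ and $E_1(f)=1$, while $E_0(1)=1$ and $E_1(1)=0$, so that $\Omega(f)=\{f,1,0\}$. The $K$-vector space spanned by $\Omega(f)$ thus has dimension $m=2$, and one may take $f_1=f$ and $f_2=1$.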
We will also need the following lemma that says we will only have to work with finitely generated extensions of the prime field instead of general fields of characteristic $p$.
\begin{lem} \label{lem: fg} Let $f_1,\ldots, f_m$ be power series as in Lemma \ref{lem: SW}. Then there is a finitely generated field extension $K_0$ of $\mathbb{F}_p$ such that all coefficients of the power series $f_1,\ldots, f_m$ belong to $K_0$. \end{lem}
\begin{proof} Let $\tilde{f} :=\sum_{{\bf n}\in \mathbb{N}^d} \tilde{a} ({\bf n}){\bf t}^{\bf n}$ be defined as in Equation (\ref{eq: g}), that is, \begin{equation}\label{eq: g'} \tilde{f} = \sum_{i=1}^r Q_i \tilde{f} ^{p^i} \, , \end{equation} Let also $\mathcal H$ be the $K$-vector space defined as in Equation (\ref{eq: H}), that is, \begin{equation}\label{eq: H'} {\mathcal H} = \left\{ h \in K(({\bf t})) \mid h = \sum_{i=0}^r R_i\tilde{f} ^{p^i} \mbox{ such that } R_i \in K[{\bf t}] \mbox{ and } \deg R_i \leq M \right\} \, . \end{equation} Since $\mathcal H$ contains the $K$-vector space spanned by $\Omega(f)$, the power series $f_1,\ldots, f_m$ belong to ${\mathcal H}$. There thus exist a finite number of polynomials $R_{i,k}$ such that $$ f_k = \sum_{i=0}^r R_{i,k}\tilde{f} ^{p^i} \, . $$ It thus remains to prove that there exists a finitely generated field extension $K_0$ of $\mathbb{F}_p$ such that all coefficients of $\tilde{f} $ belong to $K_0$. Indeed, by adding to $K_0$ all the coefficients of the polynomials $R_{i,k}$, we would obtain a finitely generated field extension $K_1$ of $\mathbb{F}_p$ such that all coefficients of the power series $f_1,\ldots, f_m$ belong to $K_1$.
Given a $d$-tuple ${\bf n}=(n_1,\ldots ,n_d)$, we set $\Vert {\bf n} \Vert:= \max (n_1,\ldots ,n_d)$. Let $N$ be a positive integer. We let $K_0$ be the finitely generated extension of $\mathbb{F}_p$ generated by the coefficients of $Q_1,\ldots ,Q_r$ and the collection of coefficients of ${\bf t}^{\bf n}$ in $\tilde{f} ({\bf t})$ with $\Vert {\bf n} \Vert \le N$. We claim that the coefficients of $\tilde{f} $ all lie in $K_0$. We prove by induction on $\Vert {\bf n} \Vert$ that all coefficients $\tilde{a} ({\bf n})$ belong to $K_0$. By construction, this holds whenever $\Vert {\bf n} \Vert \le N$.
Suppose that the claim holds whenever $\Vert {\bf n} \Vert< M$ for some $M>N$ and let us assume that $\Vert {\bf n} \Vert= M$. Then if we consider the coefficient of $t_1^{ n_1}\cdots t_d^{n_d}$ in both sides of Equation \ref{eq: g'}, we get that $$ \tilde{a} (n_1,\ldots ,n_d) \in \sum_{i = 1}^r \sum_{(m_1,\ldots ,m_d)\in S} K_0 \tilde{a} (m_1,\ldots ,m_d)^{p^i}, $$ where $S$ is the (possibly empty) set of all $d$-tuples ${\bf m}:=(m_1,\ldots ,m_d)\in \mathbb{N}^d$ such that either $m_i=0$ or $m_i<n_i$ for each $i\in \{1,\ldots, d\}$. Since $M>0$, we get that $\Vert {\bf m}\Vert < M$ and the inductive hypothesis implies that $$ \sum_{i =1}^r \sum_{(m_1,\ldots ,m_d)\in S} K_0 \tilde{a} (m_1,\ldots ,m_d)^{p^i}\subseteq K_0 \, , $$ and so $\tilde{a} (n_1,\ldots ,n_d)\in K_0$. This completes the induction and shows that all coefficients of $\tilde{f} $ lie in $K_0$.
\end{proof}
Before proving Theorem \ref{thm: main}, we first fix a few notions. Given a finitely generated field extension $K_0$ of $\mathbb{F}_p$, we let $K_0^{\langle p \rangle}$ denote the subfield consisting of all elements of the form $x^p$ with $x\in K_0$. Given $\mathbb{F}_p$-vector subspaces $U$ and $V$ of $K_0$ we let $VU$ denote the $\mathbb{F}_p$-subspace of $K_0$ spanned by all products of the form $vu$ with $v\in V,u\in U$. We let $V^{\langle p \rangle}$ denote the $\mathbb{F}_p$-vector subspace consisting of all elements of the form $v^p$ with $v\in V$. We note that since $K_0$ is a finitely generated field extension of $\mathbb{F}_p$, $K_0$ is a finite-dimensional $K_0^{\langle p \rangle}$-vector space. If we fix a basis \begin{displaymath} K_0=\bigoplus_{i=1}^r K_0^{\langle p \rangle}h_i \end{displaymath} then we have \emph{projections} $\pi_1,\ldots ,\pi_r :K_0\to K_0$ defined by \begin{equation} \label{eq: 2}x=\sum_{i=1}^r \pi_i(x)^p h_i\, .\end{equation}
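For example, take $p=2$ and $K_0=\mathbb F_2(X)$. Then $K_0^{\langle 2 \rangle}=\mathbb F_2(X^2)$, the pair $h_1=1$, $h_2=X$ is a basis of $K_0$ over $K_0^{\langle 2 \rangle}$ (so $r=2$), and every $x\in K_0$ decomposes as $x=\pi_1(x)^2+\pi_2(x)^2X$. For instance,
$$
X^3+X^2+1=(X+1)^2\cdot 1+X^2\cdot X\,,
$$
so that $\pi_1(X^3+X^2+1)=X+1$ and $\pi_2(X^3+X^2+1)=X$.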
\begin{rem} \label{{AB:remark:rem2}}{\em For $1\le i\le r$ and $x,y,z\in K_0$ we have \begin{displaymath} \pi_i(x^p y+z) = x\pi_i(y)+\pi_i(z) \, . \end{displaymath}} \end{rem}
The last ingredient we need before proving Theorem \ref{thm: main} is a rather technical but very useful result due to Derksen, which we state here without proof. It corresponds to Proposition 5.2 in \cite{Der}. We will prove an effective version of this result later in Section \ref{section: eff} (step $2$ in the proof of Theorem \ref{thm: eff}).
\begin{prop}[Derksen]\label{AB:proposition:derksen} Let $K_0$ be a finitely generated field extension of $\mathbb{F}_p$ and let $\pi_1,\ldots ,\pi_r :{K}_0\to {K}_0$ be as in Equation (\ref{eq: 2}). Let $U$ be a finite-dimensional $\mathbb{F}_p$-vector subspace of $K_0$. Then there exists a finite-dimensional $\mathbb{F}_p$-vector subspace $V$ of $K_0$ containing $U$ such that $$\pi_i(VU)\subseteq V$$ for all $i$ such that $1\le i\le r$. \end{prop}
We are now ready to prove Theorem \ref{thm: main}.
\begin{proof}[Proof of Theorem \ref{thm: main}] By enlarging $K$ if necessary, we may assume that $K$ is perfect. By Lemma \ref{lem: SW} we can find maps $a_1,\ldots ,a_m:\mathbb{N}^d\to K$ with the following properties. \begin{enumerate}
\item[\textup{(i)}] The power series $f_i({\bf t}):=\sum_{{\bf n}\in \mathbb{N}^d} a_i({\bf n}){\bf t}^{\bf n}$, $1\le i\le m$, form a basis of the $K$-vector space spanned by $\Omega(f)$.
\item[\textup{(ii)}] One has $f_1=f$.
\item[\textup{(iii)}] Let $g({\bf t}) := \sum_{ {\bf n} \in \mathbb N^d } b( {\bf n} ) {\bf t}^{\bf n}$ be a power series that belongs to $\Omega(f)$. Then $b\circ e_{\bf j} \in K\,a_1^p+\cdots + K\,a_m^p\,$ for every ${\bf j}\in\{0,\ldots, p-1\}^d \,.$
\end{enumerate}
In particular, given $1\leq i \leq m$ and ${\bf j} \in \{0,1,\ldots ,p-1\}^d$, there are elements $ \lambda(i,{\bf j},k)$, $1\le k\le m$, such that \begin{equation} \label{{AB:equation:aiej}} a_i\circ e_{{\bf j}} = \sum_{k=1}^m \lambda(i,{\bf j},k) a_k^p\, . \end{equation} Furthermore, by Lemma \ref{lem: fg}, there exists a finitely generated field extension of $\mathbb{F}_p$ such that all coefficients of $f_1,\ldots ,f_m$ are contained in this field extension. It follows that the subfield $K_0$ of $K$ generated by the coefficients of $f_1({\bf t}),\ldots , f_m({\bf t})$ and all the elements $\lambda(i,{\bf j},k)$ is a finitely generated field extension of $\mathbb{F}_p$.
Since $K_0$ is a finite-dimensional $K_0^{\langle p \rangle}$-vector space, we can fix a basis $\{h_1,\ldots ,h_r\}$ of $K_0$, that is, \begin{displaymath} K_0=\bigoplus_{i=1}^r K_0^{\langle p \rangle}h_i \,. \end{displaymath} As already mentioned, we have projections $\pi_1,\ldots ,\pi_r : K_0\to K_0$ defined by \begin{equation} \label{{AB:equation:projdecomp}} x=\sum_{i=1}^r \pi_i(x)^p h_i \,. \end{equation} We let $U$ denote the finite-dimensional $\mathbb{F}_p$-vector subspace of $K_0$ spanned by the elements $\lambda(i,{\bf j},k)$, $1\le i,k\le m$ and ${\bf j} \in \{0,1,\ldots ,p-1\}^d$, and by $1$. By Equation (\ref{{AB:equation:aiej}}), we have \begin{equation} \label{{AB:equation:aiej2}} a_i\circ e_{{\bf j}} \in U a_1^p+\cdots +U a_m^p \,, \end{equation} for $1\le i\le m$ and ${\bf j} \in \{0,1,\ldots ,p-1\}^d$. By Proposition \ref{AB:proposition:derksen} there exists a finite-dimensional $\mathbb{F}_p$-vector subspace $V$ of $K_0$ containing $U$ such that $\pi_i(VU)\subseteq V$ for $1\le i\le r$.
We now set \begin{displaymath} W:=Va_1+\cdots +Va_m\subseteq \{b~\mid b:\mathbb{N}^d\to K_0\} \,. \end{displaymath} We note that since $V$ is a finite-dimensional $\mathbb{F}_p$-vector space, it is a finite set. It follows that $W$ is also a finite set since $\mbox{ Card } W \le (\mbox{ Card } V)^m<\infty$. Note also that if
$\ell \in \{1,\ldots ,r\}$, $i\in \{1,\ldots ,m\}$, and $j\in \{0,1,\ldots ,p-1\}^d$ then by Equation (\ref{{AB:equation:aiej2}}) and Remark \ref{{AB:remark:rem2}} we have \begin{align} \pi_{\ell}(Va_i\circ e_{\bf j})& \subseteq \pi_{\ell}(VUa_1^p+\cdots + VUa_m^p) \nonumber \\ & \subseteq \pi_{\ell}(VU)a_1 + \cdots + \pi_{\ell}(VU)a_m \nonumber \\ &\subseteq Va_1 + \cdots + Va_m \, . \nonumber \end{align} By Remark \ref{{AB:remark:rem2}}, we obtain that \begin{equation}\label{eq: bl} b_{\ell}:=\pi_{\ell}(b\circ e_{\bf j})\in W \end{equation} for all $b\in W$, ${\bf j} \in \{0,1,\ldots ,p-1\}^d$, and $1\le \ell \le r$. Since $\{h_1,\ldots ,h_r\}$ form a basis of $K_0$ as a $K_0^{\langle p \rangle}$-vector space, given $x$ in $K_0$, we have $$ x=0 \iff (\pi_{\ell}(x)=0 \mbox{ for all } 1\le \ell \le r) \, . $$ In particular, \begin{equation}\label{eq: base} b(p{\bf n}+{\bf j})=0 \iff b_1({\bf n})=b_2({\bf n})=\cdots =b_r({\bf n})=0 \, . \end{equation}
Given a map $b:\mathbb{N}^d\to K_0$, we define the map $\chi_b:\mathbb{N}^d\to \{0,1\}$ by \begin{equation}\label{eq: chi}
\chi_b({\bf n}) \ = \
\left\{
\begin{aligned}
0 & \;{ \rm if } \; b({\bf n}) \not = 0 \, \\ 1 &\;{ \rm if } \;~b({\bf n})=0 \, . \end{aligned} \right. \end{equation} Then we set \begin{equation*} X := \{ \chi_{b_1}\cdots \chi_{b_t}~\mid~t\ge 0, b_1,\ldots , b_t\in W\} \, . \end{equation*} We first get from Equation (\ref{eq: base}) that \begin{displaymath} (\chi_{b}\circ e_{\bf j})({\bf n})=\prod_{\ell=1}^r \chi_{b_{\ell}}({\bf n})\, . \end{displaymath} Furthermore, we infer from Equation \ref{eq: bl} that
$b_{\ell}\in W$ for all $b\in W$, ${\bf j}\in \{0,1,\ldots ,p-1\}^d$, and $1\le \ell \le r$. The definition of $X$ then implies that $\chi_{b}\circ e_{\bf j}$ belongs to $ X$. More generally, it follows that \begin{equation}\label{eq: S} \forall \chi \in X, \forall e\in \Sigma, \;\;\chi \circ e\in X \, . \end{equation}
We note that by (\ref{eq: chi}) we have $\chi_b^2=\chi_b$ for all $b\in W$. Since $W$ is a finite set, it follows that the set $X$ is also finite. It thus follows from (\ref{eq: S}) and Remark \ref{rem: kernel} that
all maps $\chi$ in $ X$ are $p$-automatic. In particular, since $a=a_1$ belongs to $W$, we deduce that the map $\chi_a$ is $p$-automatic. It follows that the set $${\mathcal Z}(f) = \left\{ {\bf n}\in \mathbb{N}^d \mid a({\bf n})=0\right\}$$ is a $p$-automatic set, which ends the proof. \end{proof}
\section{Finite automata and effectivity}\label{section: ker}
In this section, we define a classical measure of complexity for $p$-automatic sets and we show how it can be used to prove effective results concerning such sets. We follow the presentation of \cite{Der}.
\begin{defn}{\em Let $S \subset \mathbb N^d$ be a $p$-automatic set and denote by $K$ the $p$-kernel of $S$. We define the $p$-complexity of $S$ by $$ \mbox{ comp}_{\rm p}(S) := \mbox{ Card } K \, . $$ } \end{defn}
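For instance, take $d=1$, $p=2$ and $S=\{2^i \mid i\ge 0\}\subseteq \mathbb N$, and let $a:=\chi_S$. One checks that $a(2n)=a(n)$ for all $n$, that the map $n\mapsto a(2n+1)$ is the map $b$ defined by $b(n)=1$ if and only if $n=0$, and that $b(2n)=b(n)$ and $b(2n+1)=0$ for all $n$. With the usual convention that $a$ itself belongs to its $2$-kernel, the $2$-kernel of $S$ is thus $\{a,b,0\}$ and the $2$-complexity of $S$ is equal to $3$.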
The aim of this section is to state and prove the following result.
\begin{prop}\label{prop: eff} Let $S \subset \mathbb N^d$ be a $p$-automatic set and suppose that there exists an explicit integer $N(S)$ such that $$ \mbox{\rm comp}_{\rm p}(S) \leq N(S) \, . $$ Suppose also that for every positive integer $n$ one can compute (in a finite amount of time) all the elements ${\bf s}\in S$ such that $\Vert {\bf s}\Vert\leq n$. Then the set $S$ can be effectively determined. Furthermore, the following properties are decidable.
\begin{itemize} \item[{\rm (i)}] the set $S$ is empty.
\item[{\rm (ii)}] the set $S$ is finite.
\item[{\rm (iii)}] the set $S$ is periodic, that is, formed by the union of a finite set and of a finite number of ($d$-dimensional) arithmetic progressions.
\end{itemize} \noindent In particular, when $S$ is finite, one can find (in a finite amount of time) all its elements. \end{prop}
\begin{rem}{\em When we say that the set $S$ can be effectively determined, this means that there is an algorithm that produces in a finite amount of time a $p$-automaton that generates $S$. The format of the output is thus a $6$-tuple $\left(Q,\Sigma_p^d,\delta,q_0,\{0,1\},\tau\right)$, where $Q$ is the set of states, $\delta:Q\times \Sigma_p^d\rightarrow Q$ is the transition function, $q_0$ is the initial state, and $\tau : Q\rightarrow \{0,1\}$ is the output function. Furthermore, there exists an algorithm that allows one to determine in a finite amount of time whether $S$ is empty, finite or whether $S$ is formed by the union of a finite set and of a finite number of ($d$-dimensional) arithmetic progressions.} \end{rem}
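As a toy example of this output format, take $d=1$ and $p=2$ and let $S$ be the set of even natural numbers. Reading the binary digits of $n$ starting from the most significant one, the $2$-automaton $\left(\{q_0,q_1\},\Sigma_2,\delta,q_0,\{0,1\},\tau\right)$ with $\delta(q,a)=q_a$ for $a\in\{0,1\}$, $\tau(q_0)=1$ and $\tau(q_1)=0$ generates $S$: once the input has been read, the automaton is in the state $q_0$ exactly when the last digit read is $0$, that is, exactly when $n$ is even.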
We first make the important observation that for every positive integer $N$ there are only a finite number of $p$-automatic subsets of $\mathbb N^d$ whose $p$-complexity is at most $N$.
\begin{lem}\label{lem: enum} Let $N$ be a positive integer. Then there are at most $N 2^N N^{p^dN}$ distinct $p$-automatic subsets of $\mathbb N^d$ whose $p$-complexity is at most $N$. \end{lem}
\begin{proof} In the definition of $p$-automatic sets in Section \ref{Salon}, we used $p$-automata that read the input ($d$-tuples of integers) starting from the most significant digits (the input is scanned from the left to the right). It is well known that using $p$-automata that read the input starting from the least significant digits (the input is scanned from the right to the left) leads to the same notion of $p$-automatic sets. Furthermore, it is known that for every $p$-automatic set $S$, there exists such a $p$-automaton for which the number of states is equal to the cardinality of the $p$-kernel of $S$. Such an automaton has actually the minimal number of states among all automata recognizing $S$ and reading the input from the right to the left (see for instance \cite{AS} or \cite{Der}).
Thus a $p$-automatic set $S\subseteq \mathbb N^d$ with $p$-complexity at most $N$ can be recognized by a $p$-automaton $\mathcal A$ (reading from the right to the left) with at most $N$ states. Let $Q:=\{Q_1,\ldots, Q_N\}$ denote the set of states of $\mathcal A$. To define $\mathcal A$, we must choose the initial state, the transition function from $Q\times \Sigma_p^d$ to $Q$, and the output function from $Q$ to $\{0,1\}$. We have at most $N$ choices for the initial state, at most $N^{p^dN}$ choices for the transition function, and at most $2^N$ choices for the output function. The result immediately follows. \end{proof}
\begin{lem}\label{lem: S1S2}
Let $S_1,S_2\subseteq \mathbb N^d$ be $p$-automatic sets. Then the following hold. \begin{itemize}
\item[$\bullet$] $ \mbox{ \rm comp}_{\rm p}(S_1\cap S_2) \leq \mbox{\rm comp}_{\rm p}(S_1)\mbox{ \rm comp}_{\rm p}(S_2) . $
\item[$\bullet$] $ \mbox{\rm comp}_{\rm p}(S_1\cup S_2) \leq \mbox{\rm comp}_{\rm p}(S_1)\mbox{ \rm comp}_{\rm p}(S_2) . $
\item[$\bullet$] $\mbox{\rm comp}_{\rm p}((S_1\setminus S_2)\cup (S_2\setminus S_1)) \leq \mbox{ \rm comp}_{\rm p}(S_1)\mbox{ \rm comp}_{\rm p}(S_2) . $
\item[$\bullet$] $\mbox{\rm comp}_{\rm p}(S_1\setminus (S_1\cap S_2) )\leq \mbox{ \rm comp}_{\rm p}(S_1)\mbox{ \rm comp}_{\rm p}(S_2) . $ \end{itemize} \end{lem}
\begin{proof} Given a set $S$ let us denote by ${\mathcal I}_S$ its indicator function. The proof follows from the fact that ${\mathcal I}_{S_1\cap S_2} = {\mathcal I}_{S_1}\cdot {\mathcal I}_{S_2} $, ${\mathcal I}_{S_1\setminus S_2} = {\mathcal I}_{S_1} \cdot(1-{\mathcal I}_{S_2}) $, ${\mathcal I}_{S_1\cup S_2} = {\mathcal I}_{S_1} + {\mathcal I}_{S_2 } - {\mathcal I}_{S_1}\cdot {\mathcal I}_{S_2}$, ${\mathcal I}_{(S_1\setminus S_2)\cup (S_2\setminus S_1)}= {\mathcal I}_{S_1}\cdot (1-{\mathcal I}_{S_2}) + {\mathcal I}_{S_2} \cdot(1-{\mathcal I}_{S_1}) $, and ${\mathcal I}_{S_1\setminus (S_1\cap S_2) }= {\mathcal I}_{S_1} \cdot (1-{\mathcal I}_{S_1} \cdot{\mathcal I}_{S_2}) $. \end{proof}
We will also use the following two results that can be easily proved as in \cite{Der}.
\begin{lem}\label{lem: min} Let $S\subseteq \mathbb N^d$ be a nonempty $p$-automatic set. Then $$ \min \left\{ \Vert{\bf s}\Vert \mid {\bf s} \in S\right\} \leq p^{\mbox{\rm comp}_{\rm p}(S) -2}\, . $$ \end{lem}
\begin{lem}\label{lem: max} Let $S\subseteq \mathbb N^d$ be a finite $p$-automatic set. If ${\bf s}\in S$, then $$ \Vert {\bf s}\Vert \leq p^{\mbox{\rm comp}_{\rm p}(S) -2}\, . $$ \end{lem}
We are now ready to prove Proposition \ref{prop: eff}.
\begin{proof}[Proof of Proposition \ref{prop: eff}] Let $S\subseteq \mathbb N^d$ be a $p$-automatic set. Let us assume that one knows an effective bound $N(S)$ for the $p$-complexity of $S$ and that, for every positive integer $n$, one can compute (in a finite amount of time) all the elements ${\bf s}\in S$ such that $\Vert {\bf s}\Vert\leq n$.
We first note that by Lemma \ref{lem: enum} there are only a finite number, say $r$, of $p$-automatic subsets of $\mathbb N^d$ with $p$-complexity at most $N(S)$. Going through the proof of Lemma \ref{lem: enum}, we can explicitly enumerate all these sets to get a collection $S_1,S_2,\ldots, S_r$.
Now for each $S_i$, we can check whether $S=S_i$ as follows. Since both $S$ and $S_i$ have $p$-complexity at most $N(S)$, we infer from Lemma \ref{lem: S1S2} that $$ \mbox{ comp}_{\rm p}((S\setminus S_i)\cup (S_i\setminus S)) \leq \mbox{ comp}_{\rm p}(S)\mbox{ comp}_{\rm p}(S_i) \leq N(S)^2 \, . $$ Thus, by Lemma \ref{lem: min}, the set $(S\setminus S_i)\cup (S_i\setminus S)$ is empty if and only if it has no element up to $p^{N(S)^2 -2}$. This implies that $S=S_i$ if and only if $$ S\cap \left\{ {\bf n}\in \mathbb N^d\mid \Vert{\bf n} \Vert\leq p^{N(S)^2 -2} \right\}= S_i\cap \left\{ {\bf n}\in \mathbb N^d\mid \Vert{\bf n} \Vert \leq p^{N(S)^2 -2} \right\} \, . $$ By assumption, this can be verified in a finite amount of time.
\noindent{\bf (i).} Since the $p$-complexity of $S$ is at most $N(S)$, Lemma \ref{lem: min} implies that $S$ is empty if and only if $$ S \cap \left\{ {\bf n}\in \mathbb N^d\mid \Vert{\bf n} \Vert\leq p^{N(S)^2 -2} \right\} =\emptyset \, . $$ By assumption, this can be verified in a finite amount of time.
\noindent{\bf (ii).} Since the $p$-complexity of $S$ is at most $N(S)$, Lemma \ref{lem: max} implies that $S$ is finite if and only if $$ S = S \cap \left\{ {\bf n}\in \mathbb N^d\mid \Vert{\bf n} \Vert\leq p^{N(S)^2 -2} \right\} \, . $$ Set $S' := \left\{ {\bf n}\in \mathbb N^d\mid \Vert{\bf n} \Vert\leq p^{N(S)^2 -2} \right\}$. Thus $S$ is finite if and only if the set \begin{equation} \label{eq: empty} S\setminus \left(S \cap S'\right) = \emptyset \, . \end{equation} On the other hand, it is easy to see that $S'$ is a $p$-automatic set with complexity at most $(p^{N(S)^2 -2}+1)^d$. By Lemma \ref{lem: S1S2}, we deduce that $$ \mbox{ comp}_{\rm p}\left(S\setminus \left(S \cap S'\right) \right) \leq \mbox{ comp}_{\rm p}(S) \mbox{ comp}_{\rm p}(S') \leq N(S) \left(p^{N(S)^2 -2}+1\right)^d \, . $$ This shows, using (i), that one can check whether Equality (\ref{eq: empty}) is satisfied in a finite amount of time.
\noindent{\bf (iii).} We have already shown that we can explicitly determine a $p$-automaton that recognizes $S$, since the $p$-complexity of $S$ is at most $N(S)$. Then a classical result of Honkala \cite{Hon86} shows that one can check whether such a set is periodic, that is, whether $S$ is the union of a finite set and a finite number of ($d$-dimensional) arithmetic progressions.
Finally, to obtain all the elements of $S$ when $S$ is finite one can proceed as follows. First, one can check that $S$ is finite as in (ii). Once this has been done, one knows that $S$ is finite and thus Lemma \ref{lem: max} implies that $$ S=S \cap \left\{ {\bf n}\in \mathbb N^d\mid \Vert{\bf n} \Vert\leq p^{N(S)^2 -2} \right\} $$ since $S$ has complexity at most $N(S)$. By assumption, all the elements of $S$ can thus be determined in a finite amount of time. This ends the proof. \end{proof}
\section{Proof of Theorem \ref{thm: eff}}\label{section: eff}
The aim of this section is to show how each step of the proof of Theorem \ref{thm: main} can be made effective.
We first recall some notation. Given a polynomial $P(X)\in K[{\bf t}][X]$, we define the height of $P$ as the maximum of the degrees of the coefficients of $P$. The (naive) height of an algebraic power series $$f({\bf t})=\sum_{{\bf n} \in \mathbb N^d} a({\bf n}) {\bf t}^{\bf n}\in K[[{\bf t}]]$$
is then defined as the height of the minimal polynomial of $f$, or equivalently,
as the minimum of the heights of the nonzero polynomials $P(X)\in K[{\bf t}][X]$
that vanish at $f$.
We first prove the following effective version of Ore's lemma.
\begin{lem}\label{lem: oref} Let $s$ and $H$ be two positive integers and let $f({\bf t})\in K[[{\bf t}]]$ be an algebraic power series of degree at most $s$ and height at most $H$. Then there exist polynomials $Q_0,\ldots,Q_s\in K[{\bf t}]$ with degree at most $Hsp^s$ such that $$ \sum_{i=0}^s Q_i({\bf t})f({\bf t})^{p^i} \ = \ 0 $$ and $Q_0\not=0$. \end{lem}
In order to prove Lemma \ref{lem: oref}, we will need the following auxiliary result.
\begin{lem}\label{lem: vi} Let $s$ be a natural number and let $V_0,\ldots, V_s$ be $s+1$ vectors in $K[{\bf t}]^s$ such that each coordinate has degree at most $r$. Then there exist $Q_0({\bf t}),\ldots ,Q_s({\bf t})$ in $K[{\bf t}]$ of degree at most $rs$, not all of which are zero, such that $$ \sum_{i=0}^s Q_iV_i=0 \, . $$ \end{lem}
\begin{proof} Let $e$ denote the size of a maximally linearly independent subset of $V_0,\ldots ,V_s$. By relabelling if necessary, we may assume that $V_0,\ldots ,V_{e-1}$ are linearly independent. Let $A$ denote the $s\times e$ matrix whose $(j+1)$th column is $V_j$. Then by reordering the coordinates of our vectors if necessary, we may assume that the $e\times e$ submatrix $B$ of $A$ obtained by deleting the bottom $s-e$ rows of $A$ is invertible. Let $V_s'$ denote the vector in $K[{\bf t}]^e$ obtained by deleting the bottom $s-e$ coordinates of $V_s$. Then there is a unique vector $X$ that is a solution to the matrix equation $$BX=V_s'\, .$$ Moreover, by Cramer's rule, the $i$th coordinate of $X$, $1\le i\le e$, is the rational function $X_i$ defined by $$X_i({\bf t}):=\det(B_i)/\det(B) \, ,$$ where $B_i$ is the $e\times e$ matrix obtained by replacing the $i$th column of $B$ by $V_s'$. For $0\le i \le e-1$, we set $$Q_i({\bf t}):=-\det(B_{i+1}) \,.$$ We also set $$Q_s({\bf t}):=\det(B) \, .$$ Since the entries of $B_i$ and $B$ are all polynomials of degree at most $r$, we obtain that these polynomials have degree at most $re\le rs$. Furthermore, by construction $$ \sum_{i=0}^{e-1} X_{i+1} V_i = V_s \, . $$ Letting $Q_i({\bf t})=0$ for $e\leq i<s$, we obtain that $$\sum_{i=0}^s Q_iV_i=0$$ and each $Q_i$ has degree at most $rs$, as required. \end{proof}
We are now ready to prove Lemma \ref{lem: oref}.
\begin{proof}[Proof of Lemma \ref{lem: oref}] By assumption, there exist polynomials $P_0({\bf t})$, $\ldots, P_s({\bf t})\in K[{\bf t}]$ of degree at most $H$ such that $$ \sum_{i=0}^sP_i({\bf t}) f({\bf t})^i = 0 $$ and $P_s({\bf t}) \not=0$.
Let $\mathcal V$ denote the $K({\bf t})$-vector space spanned by $1,f,\ldots ,f^{s-1}$. For $1\leq i \leq s$, let $e_i$ denote the standard unit $s\times 1$ vector in $K({\bf t})^s$ whose $j$th coordinate is equal to the Kronecker delta $\delta_{ij}$. Then we have a surjective linear map $T: K({\bf t})^s \to \mathcal V$ in which we send the vector $e_i$ to $f^{i-1}$. Let $V=\sum_{i=1}^s T(e_i)e_i\in K({\bf t})^s$ and let \[M \ := \ \left( \begin{array}{cccccc} 0 & 0 & 0 & \cdots & 0 & -X_0({\bf t})\\ 1 & 0 & 0 & \cdots & 0 & -X_1({\bf t}) \\ 0 & 1 & 0 & \cdots & 0 & -X_2({\bf t}) \\ \vdots & \vdots & \vdots & \ddots & \cdots & \vdots \\ 0 & \cdots & 0& 1 & 0 & -X_{s-2}({\bf t}) \\ 0& 0 & \cdots & 0 & 1 & -X_{s-1}({\bf t}) \end{array}\right) \in M_s(K({\bf t}))\, ,\] where $X_i({\bf t}):=P_i({\bf t})/P_s({\bf t})$ for $i=0,1,\ldots ,s-1$. Then $$ T\left( M^n e_1\right)=f({\bf t})^n \, . $$ Notice that $M^n=P_s({\bf t})^{-n}C_n$ where $C_n$ is a matrix in $M_s(K[{\bf t}])$ whose entries have degree at most $nH$. Then to find a relation of the form $$ \sum_{i=0}^s Q_i({\bf t})f({\bf t})^{p^i} \ = \ 0 \, , $$ it is enough to find a vector $$ [Q_0({\bf t}), \ldots,Q_s({\bf t})]\in K[{\bf t}]^{1\times (s+1)} $$ such that \begin{equation}\label{eq: PQ} P_s({\bf t})^{p^s-1} Q_0({\bf t}) C_1 e_1+P_s({\bf t})^{p^s-p}Q_1({\bf t})C_p e_1 +\cdots + Q_s({\bf t}) C_{p^s} e_1 = 0 \, . \end{equation}
For $0\leq j\leq s$, we set \begin{equation}\label{eq: Vj} V_j := P_s({\bf t})^{p^s-p^j}C_{p^j}e_1 \, . \end{equation}
We note that $V_j$ is a vector in $K[{\bf t}]^s$ such that each coordinate has degree at most $Hp^s$. Then Lemma \ref{lem: vi} ensures the existence of polynomials $Q_0({\bf t}),\ldots ,Q_s({\bf t})$ in $K[{\bf t}]$ of degree at most $sHp^s$, not all of which are $0$, and such that $$ \sum_{j=0}^s Q_j V_j =0 \, . $$ We deduce from Equations (\ref{eq: PQ}) and (\ref{eq: Vj}) that \begin{equation} \sum_{j=0}^s Q_j({\bf t}) f^{p^j}({\bf t}) =0 \, .\label{eq: 1} \end{equation}
It thus remains to show that we can choose our polynomials $Q_0,\ldots, Q_s$ such that $Q_0$ is nonzero. To see this, we let $k$ denote the smallest index such that we have a relation of the form given in Equation (\ref{eq: 1}) with the degrees of $Q_0,\ldots ,Q_s$ all bounded above by $sHp^s$ and such that $Q_k$ is nonzero. If $k$ is equal to zero, we are done.
We now assume that $k>0$ and we argue by contradiction. Since $Q_k\not=0$, we infer from Equality (\ref{eq: fs}) that there exists a $d$-tuple ${\bf j}\in \Sigma_p^d$ such that $E_{\bf j}(Q_k)\not =0$. Since $\sum_{i=k}^{s} Q_if^{p^i}=0$, we have $$ E_{\bf j}\left(\sum_{i=k}^{s} Q_if^{p^i}\right)= \sum_{i=k}^{s} E_{\bf j}\left(Q_if^{p^i}\right) = \sum_{i=k}^{s} E_{\bf j}\left(Q_i\right) f^{p^{i-1}}= 0 \, . $$ Furthermore, one can observe that, for $k\le i\le s$, the polynomial $E_{\bf j}(Q_i)$ has degree at most $sHp^s$. We thus obtain a new relation of the same type but for which the coefficient of $f^{p^{k-1}}$ is nonzero, which contradicts the minimality of $k$. This ends the proof. \end{proof}
We are now ready to prove Theorem \ref{thm: eff}.
\begin{proof}[Proof of Theorem \ref{thm: eff}] We first explain our strategy. We assume that $f({\bf t}) \in K[[{\bf t}]]$ is an algebraic power series and that we know an explicit polynomial $P(X)\in K[{\bf t}][X]$ that vanishes at $f$. Note that from the equation $P(f)=0$, one can obviously derive explicit effective bounds for the degree and the height of $f$. Then we will show how the proof of Theorem \ref{thm: main} allows us to derive an effective upper bound for $\mbox{comp}_{\rm p}({\mathcal Z}(f))$. It will thus follow from the results of Section \ref{section: ker} that one can effectively determine the set ${\mathcal Z}(f)$ by looking only at the first coefficients of $f$ (which can be computed in a finite amount of time by using the equation $P(f)=0$).
Let us assume that the degree of $f$ is bounded by $s$ and that the height of $f$ is bounded by $H$. In order to get an effective upper bound for $\mbox{comp}_{\rm p}({\mathcal Z}(f))$, we have to give effective upper bounds for the cardinality of the sets $U$, $V$, $W$ and $X$ introduced throughout the proof of Theorem \ref{thm: main}.
\noindent{\bf Step 1.} In this first step we show how to obtain an effective upper bound for the dimension $m$ of the $K$-vector space spanned by $\Omega(f)$. We then deduce an effective upper bound for the cardinality of the $\mathbb F_p$-vector space $U$.
This can be deduced from our effective version of Ore's lemma. Indeed, by Lemma \ref{lem: oref}, one can find polynomials $Q_0,\ldots,Q_s\in K[{\bf t}]$ with degree at most $Hsp^s$ such that $$ \sum_{i=0}^s Q_i({\bf t})f({\bf t})^{p^i} \ = \ 0 $$ and $Q_0\not=0$. We set ${\tilde f}:=Q_0^{-1}f$. Then \begin{equation}\label{eq: geff} {\tilde f}= \sum_{i=1}^s R_i {\tilde f}^{p^i} \, , \end{equation} where $R_i= -Q_i Q_0^{p^i-2}$. Then each $R_i$ has degree at most $Hsp^s(p^i-1)$. Set $M := Hsp^s(p^s-1)$ and \begin{equation}\label{eq: Heff} {\mathcal H} := \left\{ h \in K(({\bf t})) \mid h = \sum_{i=0}^s S_i{\tilde f}^{p^i} \mbox{ such that } S_i \in K[{\bf t}] \mbox{ and } \deg S_i \leq M \right\} \, . \end{equation} Furthermore, $\mathcal H$ is a $K$-vector space of dimension at most $$(s+1) {M+d \choose M}\, .$$ Just as in the proof of Lemma \ref{lem: SW}, one can check that $f$ belongs to $\mathcal H$ and that ${\mathcal H}$ is closed under the action of $\Omega$. It follows that the $K$-vector space spanned by $\Omega(f)$ is contained in ${\mathcal H}$. There thus exists an effective constant $N_0 := (s+1){M+d \choose M}$ such that the $K$-vector space spanned by $\Omega(f)$ has dimension \begin{equation}\label{eq: N0} m \leq N_0 \, . \end{equation} We recall that $K_0$ denotes the subfield of $K$ generated by the coefficients of $f_1,\ldots , f_m$ and all the elements $\lambda(i,{\bf j},k)$, $1\le i,k\le m$ and ${\bf j} \in \{0,1,\ldots ,p-1\}^d$, and that $U$ is defined as the finite-dimensional $\mathbb{F}_p$-vector subspace of $K_0$ spanned by the elements $\lambda(i,{\bf j},k)$, $1\le i,k\le m$ and ${\bf j} \in \{0,1,\ldots ,p-1\}^d$, and by $1$. We thus deduce from (\ref{eq: N0}) that there exists an effective upper bound $N_1:= p^{1+p^dN_0^2}$ such that \begin{equation}\label{eq: N1} \mbox{Card}(U) \leq N_1 \, . \end{equation}
\noindent{\bf Step 2.} From Derksen's proposition (Proposition \ref{AB:proposition:derksen}), we know that there exists a finite-dimensional $\mathbb{F}_p$-vector subspace $V$ of $K_0$ containing $U$ such that $\pi_i(VU)\subseteq V$ for $1\leq i \leq r$. In this second step, we show how to obtain an effective upper bound for the cardinality of such a vector space $V$.
In the proof of Lemma \ref{lem: fg}, we have shown that $K_0$ is a finitely generated field extension of $\mathbb{F}_p$ that can be generated by the $\lambda(i,{\bf j},k)$ and the coefficients of a finite number of some explicit polynomials. We write $$K_0=\mathbb{F}_p(X_1,\ldots ,X_r)(a_1,\ldots ,a_s)\, ,$$ where $X_1,\ldots ,X_r$ are algebraically independent over $\mathbb{F}_p$ and $a_1,\ldots , a_s$ form a basis for $K_0$ as an $\mathbb{F}_p(X_1,\ldots ,X_r)$-vector space; moreover, we may assume that for each $i$ and $j$, we have $a_ia_j$ is an $\mathbb{F}_p(X_1,\ldots ,X_r)$-linear combination of $a_1,\ldots ,a_s$ in which the numerators and denominators of the coefficients are polynomials in $\mathbb{F}_p[X_1,\ldots ,X_r]$ whose degrees are uniformly bounded by some explicit constant $N_2$.
Let $T_1,\ldots,T_n$ denote such a set of generators of $K_0$ with the following properties. \begin{enumerate} \item[{\rm (i)}] $T_i=X_i$ for $i\le r$. \item[{\rm (ii)}] $T_{r+j}=a_j$ for $j\le s$. \item[{\rm (iii)}] $T_n=1$. \item[{\rm (iv)}] $\{ T_1,\ldots,T_n\}$ contains all the $\lambda(i,{\bf j},k)$. \end{enumerate} Note that from Step 1 and the proof of Lemma \ref{lem: fg} we can obtain an explicit upper bound for the integer $n$.
An easy induction shows that for $1\le j\le s$, $a_j^p$ is an $\mathbb{F}_p(X_1,\ldots ,X_r)$-linear combination of $a_1,\ldots ,a_s$ in which the coefficients are rational functions whose numerators and denominators have degrees uniformly bounded by \begin{equation}\label{N3} N_3 := N_2\left(2s^{p-2}+\frac{s^{p-2}-s}{s-1}\right)\, . \end{equation} We now regard $K_0$ as an $s$-dimensional $\mathbb{F}_p(X_1,\ldots ,X_r)$-vector space. Then we may regard the $\mathbb{F}_p(X_1,\ldots ,X_r)$-span of $a_1^p,\ldots ,a_s^p$ as a subspace of $$\mathbb{F}_p(X_1,\ldots ,X_r)^s$$ spanned by $s$ vectors whose coordinates are rational functions whose numerators and denominators have degrees uniformly bounded by $N_3$. We can effectively compute the dimension of this space and a basis. We let $t$ denote the dimension of this vector space and by relabelling if necessary, we may assume that $a_1^p,\ldots ,a_t^p$ form a basis. Then there exist $\ell_1,\ldots ,\ell_{s-t}$ such that $\{a_1^p,\ldots ,a_t^p, a_{\ell_1},\ldots ,a_{\ell_{s-t}}\}$ forms a basis for $K_0$ as a $\mathbb{F}_p(X_1,\ldots ,X_r)$-vector space. Moreover, using Cramer's rule, we can express each $a_j$ as a $\mathbb{F}_p(X_1,\ldots ,X_r)$-linear combination of $a_1^p,\ldots ,a_t^p, a_{\ell_1},\ldots ,a_{\ell_{s-t}}$ in which the numerators and denominators have degrees uniformly bounded by \begin{equation}\label{N4} N_4:= 2N_3st \, . \end{equation} To see this, let $\phi:K_0\to {\mathbb F}_p(X_1,\ldots ,X_r)^s$ be the $\mathbb{F}_p(X_1,\ldots ,X_r)$-linear isomorphism which sends $a_i$ to the vector with a $1$ in the $i$th coordinate and zeros in all other coordinates. Let $A$ denote the $s\times s$ matrix whose $i$th row is equal to $\phi(a_i^p)$ for $i\le t$ and is equal to $\phi(a_{\ell_{t-i}})$ for $i>t$. Then the entries of $A$ are rational functions whose numerators and denominators have degrees that are uniformly bounded by $N_3$. Note that expressing $a_j$ as a $\mathbb{F}_p(X_1,\ldots ,X_r)$-linear combination of $a_1^p,\ldots ,a_t^p,a_{\ell_1},\ldots ,a_{\ell_{t-s}}$ is the same as solving the matrix equation $$A{\bf x} \ = \ {\bf e}_j\, ,$$ where ${\bf e}_j$ is the vector whose $j$th coordinate is $1$ and whose other coordinates are $0$. By Cramer's rule, the $i$th coordinate of ${\bf x}$ is a ratio of two $s\times s$ determinants, each of which have entries which are rational functions in $\mathbb{F}_p(X_1,\ldots ,X_r)$ whose numerators and denominators have degrees that are uniformly bounded by $N_3$, and such that the bottom $s-t$ rows consist of constants. Note that the determinant of an $s\times s$ matrix whose entries are rational functions is a rational function; moreover, we can take the denominator to be the product of the denominators of the entries. Since our matrices have a total of $st$ entries which are not constant, we obtain a bound of $N_3st$ for the degrees of the denominators of our determinants. It is easy to check that this bound applies to the degrees of the numerators as well. When we take a ratio of these determinants, this can at most double this bound on the degrees of the numerators and denominators. Thus we can express each $a_j$ as a $\mathbb{F}_p(X_1,\ldots ,X_r)$-linear combination of $a_1^p,\ldots ,a_t^p,a_{\ell_1},\ldots ,a_{\ell_{t-s}}$ in which the degrees of the numerators and denominators are uniformly bounded by $2N_3st$, as claimed.
Notice that $$ S:=\left\{ T_1^{i_1}\cdots T_{n}^{i_n}~\mid~0\le i_1,\ldots ,i_{n}<p\right\} $$ spans $K_0$ as a $K_0^{\langle p\rangle}$-vector space. Observe also that every polynomial $Q\in \mathbb F_p[T_1,\ldots,T_n]$ can be decomposed as \begin{equation}\label{eq: Qf} Q = \sum_{f\in S} Q_f^p f \,, \end{equation} where the $Q_f$ are polynomials in $\mathbb F_p[T_1,\ldots,T_n]$ of degree at most $\lfloor \deg Q/p\rfloor$.
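For instance, for $p=2$ and a single generator $T:=T_1$ (so that $S=\{1,T\}$), the polynomial $Q=T^3+T^2+1\in\mathbb F_2[T]$ decomposes as
$$
Q=(T+1)^2\cdot 1+T^2\cdot T\,,
$$
that is, $Q_1=T+1$ and $Q_T=T$, both of degree at most $\lfloor \deg Q/2\rfloor=1$, in accordance with (\ref{eq: Qf}).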
Let us choose $S_0$ to be the subset of $S$ corresponding to the monomials
from the set formed by the union of $$\left\{X_1^{i_1}\cdots X_r^{i_r} \mid 0\le i_1,\ldots ,i_r<p\right\}$$ and $$\left\{X_1^{i_1}\cdots X_r^{i_r} a_{\ell_j}\mid 0\le i_1,\ldots ,i_r<p, 1\le j\le s-t\right\}\, .$$ Then $S_0$ is a basis for $K_0$ as a $K_0^{\langle p\rangle}$-vector space. Thus for $T_1^{i_1}\cdots T_n^{i_n}\in S$, we have $$ T_1^{i_1}\cdots T_n^{i_n}=\sum_{h\in S_0} \alpha_{h,i_1,\ldots ,i_n}^p h $$ for some coefficients $\alpha_{h,i_1,\ldots ,i_n}\in K_0$. We may pick some nonzero polynomial $H(T_1,\ldots ,T_n)$ such that \begin{equation} \label{eq: xxx} H(T_1,\ldots ,T_n)^pT_1^{i_1}\cdots T_n^{i_n} =\sum_{h\in S_0} A_{h,i_1,\ldots ,i_n}^ph \, , \end{equation} where $$A_{h,i_1,\ldots ,i_n} \in \mathbb{F}_p[T_1,\ldots ,T_n] $$ for all $$(h,i_1,\ldots ,i_n)\in S_0\times \{0,1,\ldots ,p-1\}^n\, . $$ We let \begin{equation} \label{eq: xxy} M':=\max\, \left\{ \deg H, \, \deg A_{h,i_1,\ldots ,i_n} \right\} \end{equation} where the maximum is taken over all $$ (h,i_1,\ldots ,i_n)\in S_0\times \{0,1,\ldots ,p-1\}^n \,. $$ We claim that it is possible to obtain an effective upper bound for $M'$, once the set of generators and the basis are known. To see this, note that we write $T_i=\sum_{j=1}^s \phi_{i,j}(X_1,\ldots ,X_r)a_s$, where each $\phi_{i,j}$ is a rational function in $X_1,\ldots ,X_r$, where we assume that the degrees of the numerators and denominators of the $\phi_{i,j}$ are uniformly bounded by some explicit constant $N_5$.
Note that by construction, a monomial $T_1^{i_1}\cdots T_n^{i_n}$ with $0\le i_1,\ldots ,i_n<p$ is an $\mathbb{F}_p(X_1,\ldots ,X_r)$-linear combination of $a_1,\ldots ,a_s$ in which the coefficients are rational functions whose numerators and denominators have degrees uniformly bounded by \begin{equation}\label{N6} N_6 := (N_2+N_5)(p-1)ns^{2(p-1)n}\, . \end{equation} To see this, we claim more generally that a monomial $T_1^{j_1}\cdots T_n^{j_n}$ can be expressed as a $\mathbb{F}_p(X_1,\ldots ,X_r)$-linear combination of $a_1,\ldots ,a_s$ in which the coefficients are rational functions whose numerators and denominators have degrees uniformly bounded by $$(N_2+N_5)(j_1+\cdots +j_n)s^{2(j_1+\cdots +j_n)} \, .$$ We prove this by induction on $j_1+\cdots +j_n$. When $j_1+\cdots +j_n=1$, the claim is trivially true. So we assume that the claim holds whenever $j_1+\cdots +j_n<k$ and we consider the case that $j_1+\cdots +j_n=k$. Then $j_i\ge 1$ for some $i$. Thus we may write $$T_1^{j_1}\cdots T_n^{j_n}=T_i \cdot T_1^{j_1}\cdots T_i^{j_i-1}\cdots T_n^{j_n}\, .$$ By the inductive hypothesis, $$T_1^{j_1}\cdots T_i^{j_i-1}\cdots T_n^{j_n} = \sum_{\ell=1}^s \psi_{\ell} a_{\ell}\, ,$$ where each $\psi_{\ell}$ is a rational function whose numerator and denominator have degrees bounded by $(N_2+N_5)(k-1)s^{2k-2}$. Thus \begin{eqnarray*} &~& T_i \cdot T_1^{j_1}\cdots T_i^{j_i-1}\cdots T_n^{j_n} \\ &=& \left( \sum_{j=1}^s \phi_{i,j} a_j \right)\left( \sum_{\ell=1}^s \psi_{\ell} a_{\ell} \right) \\ &=& \sum_{1\leq j,\ell\leq s} (\phi_{i,j}\psi_{\ell}) a_ja_{\ell}\, . \end{eqnarray*} Recall that by assumption each $a_ja_{\ell}$ is a $\mathbb{F}_p(X_1,\ldots, X_r)$-linear combination of $a_1,\ldots ,a_s$ in which the degrees of the numerators and denominators are uniformly bounded by $N_2$. Thus the coefficient of each $a_j$ is a linear combination consisting of $s^2$ terms whose numerators and denominators have degrees bounded by $N_5+(N_2+N_5)(k-1)s^{2k-2}+N_2$ and hence can be expressed as a rational function whose numerator and denominator have degrees bounded by $(N_2+N_5)(1+(k-1)s^{2k-2})s^2\le (N_2+N_5)ks^{2k}$. This gives the bound (\ref{N6}), as claimed.
Then we may write $$T_1^{i_1}\cdots T_n^{i_n} = \sum_{j=1}^s \frac{C_j(X_1,\ldots ,X_r)}{D(X_1,\ldots ,X_r)^p} a_j\, ,$$ where $C_1,\ldots , C_s,D$ are polynomials of degree at most $N_6sp$.
Furthermore, we showed in (\ref{N4}) that each $a_j$ can be written as a
$\mathbb{F}_p(X_1,\ldots ,X_r)$-linear combination of
$\{a_1^p,\ldots ,a_t^p, a_{\ell_1},\ldots, a_{\ell_{s-t}}\}$ in which the coefficients have numerators
and denominators uniformly bounded by $N_4$. Thus we may write
$$T_1^{i_1}\cdots T_n^{i_n} = \sum_{j=1}^t \frac{\widehat{C}_j(X_1,\ldots ,X_r)}{\widehat{D}(X_1,\ldots ,X_r)^p }a_j^p +
\sum_{j=1}^{s-t} \frac{\widehat{C}_j(X_1,\ldots ,X_r)}{\widehat{D}(X_1,\ldots ,X_r)^p} a_{\ell_j}\, ,$$
where $\widehat{C}_1,\ldots ,\widehat{C}_{s},\widehat{D}$ have degrees bounded
by $$(N_4+N_6)sp\,. $$
Since $S_0$ forms a basis for $K_0$ as a $K_0^{\langle p\rangle}$-vector space, this ensures that \begin{equation}\label{eq: M} M' \leq (N_4+N_6)sp+p\, . \end{equation}
Now, we set \begin{equation}\label{eq: V0} U_0:=\mathbb{F}_p H^{-1}+\sum_{j=1}^n \mathbb{F}_p T_j\, . \end{equation} Since $\left\{1\right\}\cup \left\{\lambda_{i,{\bf j},k} \mid 1\le i,k\le m, \, {\bf j} \in \{0,1,\ldots ,p-1\}^d \right\} \subseteq \left\{ T_1,\ldots ,T_n \right\}$, we have \begin{equation}\label{eq: VV0} U \subseteq U_0 \, . \end{equation}
Let $k$ be a positive integer. We infer from (\ref{eq: V0}) that $U_0^k$ is contained in the $\mathbb F_p$-vector space spanned by the set $$ {\mathcal K}:=\left\{H^{-j_0}T_1^{j_1}\cdots T_n^{j_n}~\mid~ \sum_{i=0}^nj_i \leq k \right\} \, . $$ Then, every element $L$ of $\mathcal K$ can be written as \begin{equation}\label{eq: K} L = H^{-pi_0}(H^{\ell} T_1^{j_1}\cdots T_n^{j_n}) =:H^{-p(i_0+1)}H^pQ \, \end{equation} where $Q:= H^{\ell}T_1^{j_1}\cdots T_n^{j_n}$, $0\le \ell<p$, $0\leq i_0 \leq \lfloor k/p\rfloor$ and $\sum_{i=1}^nj_i \leq k-pi_0$. Thus $Q$ is a polynomial in $\mathbb F_p[T_1,\ldots,T_n]$ of degree at most $(p\deg H + k-pi_0)$. By (\ref{eq: Qf}), $Q$ can be decomposed as $$ Q = \sum_{f \in S} Q_f^p f\, , $$ where $Q_f$ are polynomials in $\mathbb F_p[T_1,\ldots,T_n]$ of degree at most $\deg H + \lfloor k/p\rfloor -i_0$. We deduce that $$ H^{-(i_0+1)}Q_f \in U_0^{M' + \lfloor k/p\rfloor+1} \, . $$ Thus we have \begin{equation}\label{eq: HQ} H^pQ = \sum_{f \in S} Q_f^p (H^pf)\, . \end{equation} Furthermore, by assumption, for $f\in S$ \begin{equation}\label{eq: Hf} H^p f \in \bigoplus_{h\in S_0} (U_0^{M'})^{\langle p\rangle} h \, . \end{equation} We infer from (\ref{eq: K}), (\ref{eq: HQ}) and (\ref{eq: Hf}) that $$ L \in \bigoplus_{h\in S_0} (U_0^{2M'+\lfloor k/p\rfloor+1})^{\langle p\rangle} h $$ and thus $$U_0^k \subseteq \bigoplus_{h\in S_0} (U_0^{2M'+\lfloor k/p\rfloor+1})^{\langle p\rangle} h\, .$$ Let $k_0:=\left\lfloor 2(M'+1)p/(p-1)\right\rfloor +1$ and set $V:=U_0^{k_0-1}$. This choice of $k_0$ implies that $\pi_i(VU)\subseteq V$. Furthermore, $U\subseteq V$ and the cardinality of $V$ is bounded by $\mbox{ Card }U_0^{k_0-1}\leq (\mbox{ Card }U_0)^{k_0-1}\leq p^{(n+1)(k_0-1)}$. Since one could find an effective upper bound for $n$ and since Inequality (\ref{eq: M}) provides an effective upper bound for $M'$ (and thus for $k_0$), we obtain that there exists an effective constant $N_7$ such that $$ \mbox{ Card }V \leq N_7\,. $$
\noindent{\bf Step 3.} In this last step, we show how to derive from Step 2 effective upper bounds for the cardinality of the sets $W$ and $X$, from which we will finally deduce an effective upper bound for $\mbox{comp}_{\rm p}({\mathcal Z}(f))$.
We have just shown in Step 2 that it is possible to get an effective upper bound $N_7$ for the cardinality of the $\mathbb F_p$-vector space $V$. We now recall that the set $W$ is defined by \begin{displaymath} W:=Va_1+\cdots +Va_m \,. \end{displaymath} We thus have $\mbox{ Card } W \le (\mbox{ Card } V)^m$, and we infer from (\ref{eq: N0}) that there exists an effective constant $N_8 := N_7^{N_0}$ such that \begin{equation}\label{eq: N8} \mbox{ Card } W \le N_8 \, . \end{equation}
We recall that given a map $b:\mathbb{N}^d\to K_0$, the map $\chi_b:\mathbb{N}^d\to \{0,1\}$ is defined by \begin{equation}\label{eq: chieff}
\chi_b({\bf n}) \ = \
\left\{
\begin{aligned}
0 & \;{ \rm if } \; b({\bf n}) \neq 0 \, \\ 1 &\;{ \rm if } \;~b({\bf n})=0 \, . \end{aligned} \right. \end{equation} We recall that the set $X$ is defined by \begin{equation*} X := \{ \chi_{b_1}\cdots \chi_{b_t}~\mid~t\ge 0, b_1,\ldots , b_t\in W\} \, . \end{equation*} Since $\chi_b^2=\chi_b$ for all $b\in W$ and since the product of maps $\chi_b$ is commutative, we get that $$ \mbox{ Card } X \le 2^{ \mbox{ Card } W } \, . $$ Thus we infer from (\ref{eq: N8}) the existence of an effective constant $N_9 := 2^{N_8}$ such that $$ \mbox{ Card } X \leq N_9 \, . $$ On the other hand, the proof of Theorem \ref{thm: main} shows that the $p$-kernel of ${\mathcal Z}(f)$ is contained in $X$, which implies that $$ \mbox{comp}_{\rm p}({\mathcal Z}(f)) \leq N_9\, . $$ This ends the proof. \end{proof}
\section{Concluding remarks} \label{conclude} We end our paper with a few comments. We note that Derksen \cite{Der} also proved a refinement of his Theorem \ref{thm:derksen}. Let us state his result. Let $p$ be a prime number and let $q$ be a power of $p$. Given $c_0,\ldots ,c_d\in
\mathbb{Q}^*$ with $(q-1)c_i\in \mathbb{Z}$
for $i\in \{1,\ldots ,d\}$ and
$c_0+\cdots + c_d\in \mathbb{Z}$, we define $$ \tilde{S}_q(c_0,\ldots ,c_d):=\left\{c_0+c_1 q^{i_1}+\cdots + c_d q^{i_d}~\mid~i_1,\ldots ,i_d\ge 0\right\} $$ and we take $$ S_q(c_0,\ldots ,c_d):=\mathbb{N}\cap \tilde{S}_q(c_0,\ldots ,c_d)\, . $$ If $c_i>0$ for some $i\in \{1,\ldots ,d\}$, we say that $S_q(c_0,\ldots ,c_d)$ is an elementary $p$-nested set of order $d$. We say that a subset of the natural numbers is $p$-nested of order $d$ if it is a finite union of elementary $p$-nested sets of order at most $d$ with at least one set having order exactly $d$. We then say that a subset of the natural numbers is $p$-normal of order $d$ if it is, up to a finite set, the union of a finite number of arithmetic progressions along with a $p$-nested set of order $d$. Derksen \cite[Theorem 1.8]{Der} proved that the zero set of a linear recurrence of order $d$ is a $p$-normal set of order at most equal to $d-2$. Of course, this refines the fact that such a set is $p$-automatic.
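For instance, taking $q=p=2$, $d=1$ and $c_0=c_1=1$, the above conditions are satisfied and $S_2(1,1)=\{2^{i}+1 \mid i\ge 0\}$ is an elementary $2$-nested set of order $1$.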
We already observed in the introduction that Theorem \ref{thm: main} is in some sense best possible since any $p$-automatic subset of $\mathbb N^d$ can be obtained as the set of vanishing coefficients of an algebraic power series in $\mathbb F_p[[t_1,\ldots,t_d]]$. However, one might hope that a refinement, involving a reasonable version of multidimensional $p$-normal set, could hold if we restrict our attention to multivariate rational functions. This is actually not the case. Even for bivariate rational functions over finite fields, the set of vanishing coefficients can be rather pathological. Indeed, Furstenberg \cite{Fur} showed that the diagonal of a multivariate rational power series with coefficients in a field of positive characteristic is an algebraic power series in one variable\footnote{Deligne \cite{De} generalized this result to diagonals of algebraic power series with coefficients in a field of positive characteristic.}. Moreover, the converse holds for any field: any one variable algebraic power series can be obtained as the diagonal of a bivariate rational power series\footnote{This result is essentially proved in \cite{Fur}. Denef and Lipshitz \cite{DL} actually obtained the following stronger result: any algebraic power series in $n$ variables with coefficients in an arbitrary field can be obtained as the diagonal of a rational power series in $2n$ variables.}. In light of Christol's theorem, this implies in particular that any $p$-automatic subset of $\mathbb N$ can be realized as the diagonal of the set of vanishing coefficients of a bivariate rational power series with coefficients in $\mathbb F_p$.
Nevertheless, we may imagine that a similar refinement of Theorem \ref{thm: main} does exist for the special rational functions that appear in the Diophantine applications given in Sections \ref{decidability}, \ref{sunit} and \ref{MLT}. Finally, since these applications only involve multivariate rational functions, it would be interesting to find natural Diophantine problems that would involve some sets of vanishing coefficients of algebraic irrational multivariate power series.
\noindent{\bf \itshape Addendum.}\,\,--- During the last stage of the writing of this paper, the authors learned about a related work (though not written in terms of automata) of Derksen and Masser \cite{DM}. These authors obtain in particular strong effective results for the general $S$-unit equations over fields of positive characteristic and more generally for the Mordell--Lang theorem, in the special case of linear subvarieties of $G_m^n(K)$ for fields $K$ of positive characteristic.
\noindent{\bf \itshape Acknowledgement.}\,\,--- The authors would like to thank Jean-Paul Allouche, David Masser and the anonymous referees for their useful remarks. They are also indebted to Ga\"el R\'emond for his interesting comments concerning the relation between Theorem 4.1 and Corollary 4.1. The first author is also most grateful to Aur\'elie and Vadim for their constant patience and support during the preparation of this paper.
\end{document} |
\begin{document}
\title{On sheaf cohomology and natural expansions \thanks{Ana Luiza Tenorio: Supported by CAPES Grant Number 88882.377949/2019-01} }
\author{
{Ana Luiza Tenório, IME-USP, \small{[email protected]}}
\and
{Hugo Luiz Mariano, IME-USP, {\small [email protected]}}
}
\maketitle
\begin{abstract} In this survey paper, we present \v{C}ech and sheaf cohomologies -- themes that were presented by Koszul at the University of S\~ao Paulo (\cite{koszul1957faisceaux}) during his visit in the late 1950s -- and their expansions to categories of generalized sheaves (i.e., Grothendieck toposes), with examples of applications in other cohomology theories and other areas of mathematics, besides providing motivations and historical notes. We conclude by explaining the difficulties in establishing a cohomology theory for elementary toposes, presenting alternative approaches that consider constructions over quantales, which provide structures similar to sheaves, and indicating research related to logic: constructive (intuitionistic and linear) logic for toposes, sheaves over quantales, and homological algebra.
\end{abstract}
\section{Introduction} \label{intro}
Sheaf Theory explicitly began with the work of J. Leray in 1945 \cite{leray1945forme}. The nomenclature ``sheaf'' over a space $X$, in terms of closed subsets of a topological space $X$, first appeared in 1946, also in one of Leray's works, according to \cite{gray1979fragments}. He was interested in solving partial differential equations and in building up a strong tool to pass local properties to global ones. Currently, the definition of a sheaf over $X$ is given by a ``coherent family'' of structures indexed on the lattice of open subsets of $X$ or as étale maps (= local homeomorphisms) into $X$. Both formulations emerged in the late 1940s and early 1950s in Cartan's seminars and, in modern terms, they are intimately related by an equivalence of categories.
H. Cartan proposed a concept of ``coherent family'' for ideals \cite{cartan1944ideaux} before Leray's study on sheaves. His idea is more related to the development of sheaf theory in Complex Analysis, where certain conditions that hold at a point remain valid in a neighborhood of the point - such as convergence properties of power series. On the other hand, the presentation of sheaves as étale spaces - due to Lazard - is closer to Algebraic Topology: sections of étale maps enter the construction of a local section functor (explained in Section \ref{sec:5}). The global section functor (introduced in Section \ref{sec:6}) gives rise to cohomology groups with coefficients in a sheaf, which compute the obstruction from local input to global input.
We will define sheaves, using open sets, as a special kind of functor. The language of category theory will help us to deal with sheaf cohomology and allows its generalizations. However, the relation between sheaves and étale maps is important to obtain geometric intuition about the object's capacity to pass local problems to global ones through cohomology with coefficients in a sheaf.
In the 1950s, sheaves on topological spaces and their cohomology were studied by the greatest mathematicians of the time. In addition to those previously mentioned, we list J.P. Serre, A. Grothendieck, O. Zariski, and R. Godement; the latter managed to establish a standard nomenclature in his book ``Topologie alg{\'e}brique et th{\'e}orie des faisceaux'' \cite{godement1958topologie}, one of the most important references about sheaf theory so far.
At the same time, some of them were at the institute known today as Instituto de Matemática e Estatística (IME-USP), at Universidade de São Paulo. The influence of the French school on the formation of Brazilian mathematicians began with the arrival of A. Weil (of Weil's conjectures and a founder of the Bourbaki group) in 1945 and reached O. Zariski, J. Dieudonné, J-L. Koszul, A. Grothendieck, among others. We highlight that in 1956 J-L. Koszul lectured a course about sheaves and cohomology at IME-USP, whose class notes were published in 1957 \cite{koszul1957faisceaux}, and that A. Grothendieck's course about topological vector spaces of 1953 was published five years later \cite{grothendieck1958espaces}.
Turning to the matter of the theory's development, J-P. Serre was the one who first applied sheaf theory in Algebraic Geometry in \cite{10.2307/1969915}, and later A. Grothendieck successfully transported sheaf methods to spaces where the corresponding topology is not adequate. His notion of {\em topos} used a collection of morphisms in a small category satisfying certain rules - a Grothendieck topology - to extend the notion of open covers in the definition of sheaves on topological spaces. This construction was essential to prove Weil's conjectures, via étale cohomology (but it provided others, such as crystalline and flat cohomology), and to reformulate Algebraic Geometry. The Séminaire de Géométrie Algébrique du Bois Marie composes the enormous project that produced the proof of Weil's conjectures. The project started with Bourbaki Seminars about foundations of Algebraic Geometry, published in 1962 \cite{grothendieck1962fondements}, and by 1974 it was finished with Deligne's first proof of the third conjecture \cite{deligne1974conjecture}.
What Grothendieck named \textit{topos} in \cite{grothendieck1972topos} is nowadays known as a \textit{Grothendieck topos} (a category of sheaves over a {\em site}, i.e. a pair $(\mathcal{C},J)$, with $\mathcal{C}$ a small category and $J$ a Grothendieck topology); the general notion of topos, or elementary topos, is due to the work of W. Lawvere and M. Tierney in the early 1970s. They realized that a Grothendieck topos has categorical properties that make it close to the category $Set$ of all sets and functions. For example, sheaves admit exponential objects that are analogs of the set $A^B$ of all functions from $B$ to $A$, and there is an object of truth-values (subobject classifier) that, in the category $Set$, is the set $\{true,false\}$. Thus, by only assuming that a category has a subobject classifier and satisfies some conditions (such as being cartesian closed, to guarantee the existence of exponential objects), they reached the definition of an (elementary) topos, such that any Grothendieck topos is a topos but the converse does not hold.
Soon the study of topos theory developed many fronts. For instance, the description of an internal language (the Mitchell--Bénabou language) and its Kripke--Joyal semantics, variations of Cohen's forcing techniques using toposes, and the establishment of higher-order logic in terms of categories.
In this survey, we present sheaf cohomology and some of its possible extensions. Section 2 is devoted to preliminaries: we recall that homological algebra deals, mainly, with abelian categories (we will define this concept, but for now the reader can replace abelian categories by the category $Ab$, of all abelian groups, or $R$-$Mod$, of all modules over a given unitary ring $R$). Since most of the literature deals with specific abelian categories such as modules over rings, we provide preliminaries about cohomology for any abelian category. Besides that, we explain how to extract abelian categories from a not necessarily abelian category $\mathcal{C}$, requiring that $\mathcal{C}$ has finite limits and satisfies some other regularity properties, which is the case for the category of sheaves over a topological space and, more generally, for a Grothendieck topos. In fact, we are particularly interested in categories $\mathcal{C}$ that we could call ``$Set$-like'' categories, i.e., categories that keep the basic properties of, and allow us to perform the constructions that we usually carry out in, the category $Set$ - of all sets and functions - and thus play the same role as $Set$ while possibly being more general than $Set$ itself. The essential point is: since we use sets to define other structures (topological spaces, groups, rings, manifolds), we can use a $Set$-like category $\mathcal{C}$ to construct other categories; in the particular case we will see, categories with abelian group structure.
In Section 3, we introduce the basics of sheaf theory, sheaf cohomology, and \v{C}ech cohomology, following the work of H. Cartan, J-L. Koszul and R. Godement.
In Section 4, we define Grothendieck toposes, exploring elementary toposes to furnish the internal logical tool, and apply it to simplify arguments in Grothendieck topos cohomology.
We do not show new results, but we do point out the main ideas behind proofs that are already known and choose a presentation that makes clear how Grothendieck topos cohomology extends sheaf cohomology. Some proofs are omitted because they require excessively technical machinery (such as spectral sequences) and would thus fall outside our purpose of making this text a gentle introduction to topos cohomology.
In Section 5, we clarify that the current topos cohomology has issues - the definition of flabby sheaves does not work properly and the category of abelian groups over toposes that are not Grothendieck does not have enough injectives - and a strong dependence on classical logic that hinders the ``internalization'' of these notions to the intrinsic intuitionistic (constructive) character of the toposes. We describe some attempts to address these problems, including extensions of topos cohomology over ``sheaf-like'' categories that are internally governed by an even more general logic: the linear logic.
\section{Preliminaries} \label{sec:1}
Briefly, a cohomology theory associates a sequence of algebraic objects to a certain space. The objects can be abelian groups, the space can be any topological space, and we can associate one with the other using chain complexes. However, the reader a bit more familiar with Homological Algebra knows that, instead of abelian groups, we can work with modules over commutative rings, vector bundles over topological spaces, or even abelian sheaves. This happens because these objects can be collected and organized in their respective categories, and these are examples of abelian categories. To summarize, when we work with cohomology we are working with functors into abelian categories.
In this section, we present the basics of abelian categories, state the main results of Homological Algebra in this general setting, and define the notion of abelian group object - which later will provide a technique to extract abelian categories from toposes.
{\em We will assume that the reader is familiar with the basic notions of category theory: category, functor, natural transformation, (co)limits, subobjects, generators and equivalence of categories.}
\subsection{Abelian Categories} \label{sec:2}
Let $s$ and $t$ be objects in a category $\mathcal{C}$. Recall that if for every object $a$ in $\mathcal{C}$ there is a unique morphism $s \rightarrow a$, then $s$ is an \textit{initial object}; if for every object $a$ there is a unique morphism $a \rightarrow t$, then $t$ is a \textit{terminal object}. The uniqueness property satisfied by an initial (respectively, terminal) object ensures that it is unique up to (unique) isomorphism. In case an object is simultaneously initial and terminal, it is called a zero object. After these preliminaries, we will change notation: initial and terminal objects are denoted by $0$ and $1$, respectively.
In categories with a zero object, we also have a notion of null morphism, i.e., a morphism $f: A \rightarrow B$ that factors through any zero object (since zero objects are pairwise isomorphic). The null morphism from $A$ to $B$ is unique and is denoted by $0_{A,B}$ or just by $0$.
Now we can define an important concept to construct cohomology in general abelian categories.
Let $f: A \rightarrow B$ be a morphism in a category $\mathcal{C}$ with a zero object. A morphism $k: K \rightarrow A$ is the \textit{kernel of $f$} if $f \circ k = 0$ and, for every morphism $h : C \to A$ such that $f \circ h = 0$, there is a unique morphism $h' : C \to K$ such that $h = k \circ h'$. Or simply, if $k$ is the equalizer of $f$ and the null morphism $0$. Diagrammatically,
\begin{center}
\begin{tikzcd}
K \arrow[r,"k"] \arrow[rr, controls={+(0.5,0.5) and +(-1,0.8)},"0_{K,B}"] & A \arrow[r, yshift=0.7ex, "f"] \arrow[r, yshift=-0.7ex, swap, "0_{A,B}"] & B \\ C \arrow[u, dashrightarrow, "h'"] \arrow[ur, swap,"h"] \arrow[urr, bend right, swap,"0_{C,B}"] \end{tikzcd}
\end{center}
The cokernel of $f$ is defined dually, i.e., by the interchanging of source and target of all arrows. Throughout the text, we will denote the morphism $k$, kernel of $f$, by $ker(f)$, and the object $K$ associated to it by $Ker(f)$. Analogously, for the cokernel.
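For concreteness, in the category $Ab$ these universal constructions recover the familiar ones: the kernel of a homomorphism $f : A \rightarrow B$ is the inclusion $f^{-1}(0) \hookrightarrow A$ and the cokernel is the projection $B \rightarrow B/Im(f)$. For instance, for multiplication by an integer $n \geq 1$,
$$ f : \mathds{Z} \rightarrow \mathds{Z}, \qquad f(a) = na, $$
we have $Ker(f) = 0$ and $Coker(f) \cong \mathds{Z}/n\mathds{Z}$.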
If the set of morphisms $Hom(A,B)$ of a category $\mathcal{C}$ has the structure of an abelian group, and the composition of morphisms is bilinear, then $\mathcal{C}$ is an $Ab$-category (or \textit{preadditive}). Here $Ab$ is the category of abelian groups and we adopted the nomenclature of $Ab$-category to familiarize the reader with the idea of an enriched category. In this case, $\mathcal{C}$ is enriched over $Ab$, so $Hom(A,B)$ is an object in $Ab$, for every $A, B$ objects in $\mathcal{C}$.
Examples of $Ab$-categories are the categories $Ab$, of all abelian groups and their homomorphisms, and $R$-$Mod$, of all left modules over a ring $R$ and homomorphisms. More intricate examples come from categories useful in homological algebra whose objects are complexes of abelian groups, complexes of modules over a ring, and filtered modules over a ring. Moreover, every triangulated category\footnote{This is an important category in the study of Homological Algebra, but much more sophisticated than the previous examples. We will not explain it in this survey.} is an $Ab$-category.
A \textit{biproduct}\label{biprod-def} of two objects $A$ and $B$ is a quintuple $(P,p_A,p_B,s_A,s_B)$ such that: \begin{center}
$p_A: P \rightarrow A$, $p_B: P \rightarrow B$, $s_A: A \rightarrow P$ and $s_B: B \rightarrow P$ satisfy the equations: $p_A \circ s_A = id_A$, $p_B \circ s_B = id_B$, $p_A \circ s_B = 0$, $p_B \circ s_A = 0$ and $s_A \circ p_A + s_B \circ p_B = id_P$. \end{center}
We observe that in $Ab$-categories the existence of binary biproducts is equivalent to the existence of binary products and, also, to the existence of binary coproducts.
An $Ab$-category that has biproducts and a zero object is called an \textit{additive} category. In this case, the abelian group structure on the sets of morphisms implies that the zero of $Hom(A,B)$ coincides with the null morphism $0_{A,B}$; see a proof of this in \cite[Chap. 1.2]{borceux1994handbook}. This is a useful property, since additive categories handle null morphisms all the time, through kernels and cokernels.
An additive category $\mathcal{C}$ is an \textbf{abelian category} if it satisfies also the following conditions: \begin{enumerate}
\item[AB1] Every morphism has kernel and cokernel.
\item[AB2] Every monomorphism is a kernel and every epimorphism is a cokernel. \end{enumerate}
Except for filtered modules over a ring and, in general, triangulated categories, all the given examples of $Ab$-categories are also abelian categories. Additionally, an important example of an abelian category is the category of abelian sheaves (see Theorem \ref{abE-th}).
Note that $AB1$ allows us to construct kernels of cokernels and vice versa. Furthermore, for any morphism $f$ in an abelian category, we have: \begin{center}
\begin{tikzcd} Ker (f) \arrow[r, "ker(f)", tail] & A \arrow[r, "f"] \arrow[d, "coker(ker(f))"', two heads] & B \arrow[r, "coker (f)", two heads] & Coker (f) \\
& Coker(ker(f)) \arrow[r, "\Bar{f}", dashed] & Ker(coker(f)) \arrow[u, "ker(coker(f))"', tail] & \end{tikzcd} \end{center}
It is not difficult to see that there is a unique $\Bar{f}: Coker(ker(f)) \to Ker(coker(f))$ which makes the above square commutative. An important observation here is that the $AB2$ axiom is equivalent to the condition that $\Bar{f}$ be an isomorphism. Defining $Im(f) = Ker(coker(f))$ and $Coim(f) = Coker(ker(f))$, we can say that asking for $\Bar{f}$ to be an isomorphism is the same as asking for the validity of the Fundamental Homomorphism Theorem, in an abstract form.
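In $Ab$, for instance, $Coim(f) = A/Ker(f)$ and $Im(f) = f(A) \subseteq B$ for a homomorphism $f : A \rightarrow B$, and the comparison morphism is
$$ \Bar{f} : A/Ker(f) \rightarrow f(A), \qquad a + Ker(f) \mapsto f(a), $$
which the classical First Isomorphism Theorem asserts to be an isomorphism; axiom $AB2$ abstracts exactly this statement.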
Given an abelian category $\mathcal{C}$ we can add $ABn$ axioms. In this survey, the important one is $AB5$. \begin{enumerate}
\item[AB3] Given a family $\{A_i\}_{i \in I}$ of objects in $\mathcal{C}$, then the direct sum $\bigoplus\limits_{i \in I} A_i$ exists.
\item[AB4] The $AB3$ axiom holds and the direct sum of a family of monomorphisms is also a monomorphism.
\item[AB5] The $AB3$ axiom holds and if $\{A_i\}_{i \in I}$ is a directed family of subobjects of an object $A$ in $\mathcal{C}$, and $B$ is any subobject of $A$, then $(\sum\limits_{i \in I}A_i)\cap B = \sum\limits_{i \in I}(A_i \cap B)$, where the capital sigma denotes the supremum of subobjects and the intersection denotes their infimum. \end{enumerate}
Axiom $AB5$ will be central because of the following theorem of Grothendieck:
\begin{center}
If an abelian category satisfies $AB5$ and has a generator then it has enough injectives \cite[Theorem 1.10.1]{grothendieck1957} \end{center} \label{grotheorem}
We will use this Theorem to show that the abelian categories defined from Grothendieck toposes are good enough to develop a cohomology theory. Now, let's return to our preliminaries.
Most of the simplest notions and proofs that occur in ``concrete'' abelian categories (such as $Ab$ or $R$-$Mod$) can be reproduced in general abelian categories by the systematic use of universal properties. However, more sophisticated results in Homological Algebra demand specific techniques to handle the fact that we do not know what kind of structure the objects have. For example, when we consider the category $Ab$, we know that the objects are abelian groups. However, if we have to deal with an arbitrary abelian category this information is not (directly) available. To manage this delicate scenario there are at least two ways of proving results regarding general abelian categories.
The simplest technique is to apply the Freyd-Mitchell embedding Theorem - originally stated in \cite[Theorem 7.34]{freyd1964abelian} and reformulated in modern terms in \cite[Theorem 1.6.1]{weibel1994introduction} - which guarantees that we can fully embed {\em small} abelian categories into the category $R$-$Mod$, for some ring $R$. Roughly speaking, this means that all morphisms that exist in $R$-$Mod$, such as kernels and cokernels (quotients), and all diagram chasing that can be done in $R$-$Mod$, still hold for the corresponding small abelian category. We recommend \cite{stacks-project} for more results related to the Freyd-Mitchell embedding Theorem.
Nevertheless, there are non-small abelian categories, so we may use a stronger but more complex technique: construct the notion of \textit{pseudoelement}, as it is named in \cite{borceux1994handbook} (or \textit{generalized element}, as in \cite{zbMATH01216133}). This technique enables us to do ``half of the job with elements''; more precisely, to check by this simulation of ``elements'' whether some candidate arrow, previously built by combined applications of universal properties, indeed satisfies some desired property.
Having mentioned these two techniques, we state that the famous snake lemma holds in any abelian category \cite[Section 1.10]{borceux1994handbook}. We will not even state it here, but we highlight that, as a general environment for Homological Algebra, abelian categories were built so that the Snake Lemma is valid. When Cartan's and Eilenberg's book ``Homological Algebra'' appeared in 1956 \cite{eilenberg1956homological}, the theory was exhibited for categories of modules over rings, but it was also known that it could be replicated for other structures, for instance, abelian sheaves. This motivated A. Grothendieck - and not only him - to define abelian categories and establish Homological Algebra for them in \cite{grothendieck1957}. Nowadays, we have even more general categories where the Snake Lemma holds, for instance, the \textit{homological categories} \cite{borceux2004mal} (observe that the lemma holds for non-abelian groups, even though they do not form an additive category \cite[Chap 1.2]{borceux1994handbook}). However, abelian categories are still the most common scenario for the study of (co)homologies.
\subsection{Homological Algebra} \label{sec:3} The reader familiar with Homological Algebra techniques for a particular abelian category (as presented in \cite{weibel1994introduction}, for example) can skip this subsection. However, for the reader curious to see how to construct cohomology in the abstract setting of an arbitrary abelian category, we exhibit the modifications that have to be made to define the basic concepts. Here we state without proof results that are needed in Sections \ref{sec:6} and \ref{sec:12}.
For any abelian category $\mathcal{C}$ we define a \textit{cochain complex} by taking a sequence $\{C^q\}_{q \in \mathds{Z}}$ of objects in $\mathcal{C}$, endowed with coboundary morphisms $d^q_C: C^q \rightarrow C^{q+1}$ such that $d^{q+1}_C \circ d^q_C = 0$, for all $q \in \mathds{Z}$. A cochain complex is denoted by $C^{\bullet},$ and a morphism of complexes $h^{\bullet}: C^{\bullet} \rightarrow D^{\bullet}$ is a collection of morphisms $h^q: C^q \rightarrow D^q$ such that $h^{q+1} \circ d^q_C = d^{q}_D \circ h^q$, for all $q \in \mathds{Z}.$ Observe that, with coordinatewise composition and identities, this forms a category $Ch(\mathcal{C})$, called the category of cochain complexes of $\mathcal{C}$, and it is an abelian category whenever $\mathcal{C}$ is abelian \cite[Theorem 1.2.3]{weibel1994introduction}.
Since $d^{q} \circ d^{q-1} = 0$, we have that $0 \subseteq Im(d^{q-1}) \subseteq Ker(d^{q}) \subseteq C^q$. So it is possible to define the $q$-th cohomology object of $C^\bullet$ by $$H^q(C^{\bullet}) = Ker(d^{q})/Im(d^{q-1}) = Coker(Im(d^{q-1}) \rightarrow Ker(d^{q})),$$ where the image is $Im(d^{q-1})=Ker(coker(d^{q-1}))$.
As the reader may suspect, a sequence of objects in an abelian category $\dots \xrightarrow{f_{q-1}} A_q \xrightarrow{f_q} A_{q+1} \xrightarrow{f_{q+1}} \dots$ is exact if $Ker(f_q) = Im(f_{q-1})$ for all $q$. So cohomology measures the failure of exactness in the cochain complex.
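As a toy example in $Ab$, consider the cochain complex concentrated in degrees $0$ and $1$,
$$ C^{\bullet} : \quad \cdots \rightarrow 0 \rightarrow \mathds{Z} \xrightarrow{\cdot n} \mathds{Z} \rightarrow 0 \rightarrow \cdots, $$
where $d^0$ is multiplication by an integer $n \geq 1$ and all other coboundaries are zero. Then $H^0(C^{\bullet}) = Ker(d^0) = 0$ and $H^1(C^{\bullet}) \cong \mathds{Z}/n\mathds{Z}$, so the complex is exact if and only if $n = 1$.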
Let $f^{\bullet} : C^{\bullet} \rightarrow D^{\bullet}$ be a complex morphism. Since we are working with arbitrary abelian categories, defining an induced morphism $H^q(f) : H^q(C^\bullet) \to H^q(D^\bullet)$, $q \in \mathds{Z}$, is more complicated than usual, but let us describe the idea:
For each $q \in \mathds{Z}$, a morphism $f^q : C^q \to D^q$ restricts to $$f^q_K : Ker(d_C^{q}) \to Ker(d_D^{q}) \mbox{ and to } f^q_I : Im(d_C^{q-1}) \to Im(d_D^{q-1}).$$ This follows directly from diagram chases, by the universal properties of kernels and cokernels. The coboundary morphism also provides a morphism $\alpha^{q}_C:Im(d_C^{q-1}) \rightarrow Ker(d_C^{q})$ such that $Coker(\alpha^q_C) = H^q(C^{\bullet})$. By the universal property of the cokernel, there is a unique morphism $Coker(\alpha^q_C) \rightarrow Coker(\alpha^q_D)$ as below:
\begin{center}
\begin{tikzcd} Im(d^{q-1}_C) \arrow[d,"f^q_I"] \arrow[r, "\alpha^q_C", tail] & Ker( d^{q}_C) \arrow[d,"f^q_K"] \arrow[r, two heads] & Coker (\alpha^q_C) \arrow[d, dashed] \\ Im(d^{q-1}_D) \arrow[r, "\alpha^q_D", tail] & Ker( d_D^{q}) \arrow[r, two heads] & Coker (\alpha^q_D) \end{tikzcd} \end{center}
The dashed morphism is precisely the induced morphism $H^q(C^{\bullet}) \cong Coker(\alpha^q_C) \rightarrow Coker(\alpha^q_D) \cong H^q(D^{\bullet})$, which we denote $H^q(f) : H^q(C^\bullet) \to H^q(D^\bullet)$. Clearly, the mapping $f \mapsto H^q(f)$ determines a (covariant) functor $H^q : Ch({\cal C}) \to {\cal C}$, for all $q \in \mathds{Z}$.
Given two complex morphisms $f^{\bullet},g^{\bullet}: C^{\bullet} \rightarrow D^{\bullet},$ they are called \textit{homotopic} if, for each $q \in \mathds{Z}$, there is $h^q: C^q \rightarrow D^{q-1}$ (called a \textit{cochain homotopy}) such that $f^q - g^q = d^{q-1}_D\circ h^q + h^{q+1}\circ d^q_C$. Cochain homotopies are important because they relate two different morphisms through their induced maps on the cohomology objects. More precisely: \begin{prop}\label{homotopic} If $f^{\bullet}$ is homotopic to $g^{\bullet}$, then $H^q(f^{\bullet}) = H^q(g^{\bullet})$, for all $q \in \mathds{Z}$. \end{prop}
Now, we introduce exact functors. First, a (covariant) functor $F: \cal{C} \to \mathcal{C}'$ between abelian categories is \textit{additive} if, for all objects $A, B$ in $\mathcal{C}$, the map $Hom_{\mathcal{C}}(A,B) \rightarrow Hom_{\mathcal{C}'}(F(A),F(B))$, $f \mapsto F(f)$, is a homomorphism of abelian groups.
Then, given an exact sequence $0\to A \to B \to C \to 0$ in $\mathcal{C}$, we say $F$ is \begin{enumerate}
\item \textit{exact} if $0\to F(A) \to F(B) \to F(C) \to 0$ is an exact sequence;
\item \textit{left exact} if $0\to F(A) \to F(B) \to F(C)$ is an exact sequence;
\item \textit{right exact} if $ F(A) \to F(B) \to F(C) \to 0$ is an exact sequence. \end{enumerate}
Two important examples of left exact functors are $Hom_{\mathcal{C}}(-,A)$ (contravariant case) and $Hom_{\mathcal{C}}(A,-)$ (covariant case), for $A$ a fixed object of $\mathcal{C}$.
Now, remember that an object $I$ in an abelian category is \textit{injective} if for every morphism $\alpha: A \rightarrow I$ and every monomorphism $m : A \rightarrow B$, there is at least one morphism $\beta: B \rightarrow I$ such that $\alpha = \beta \circ m$ (equivalently, $I$ is injective if and only if the functor $Hom(-,I)$ is exact). A \textit{resolution $A \rightarrow I^{\bullet}$ of an object} $A$ is an exact sequence $0 \rightarrow A \rightarrow I^0 \rightarrow I^1 \rightarrow ... $; this resolution is an \textit{injective resolution} if $I^i$ is injective for each $i \geq 0$. If an abelian category \textit{has enough injectives}, then any of its objects $A$ admits some injective resolution. Dually, we define projective objects and projective resolutions.
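In $Ab$, the injective objects are exactly the divisible abelian groups, so $\mathds{Q}$ and $\mathds{Q}/\mathds{Z}$ are injective while $\mathds{Z}$ is not. A standard injective resolution of $\mathds{Z}$ is therefore
$$ 0 \rightarrow \mathds{Z} \rightarrow \mathds{Q} \rightarrow \mathds{Q}/\mathds{Z} \rightarrow 0, $$
and, since every abelian group embeds into a divisible one, $Ab$ has enough injectives.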
The concept of enough injectives is central in homological algebra because of the following theorem.
\begin{teo}\label{theo:derivedfunctor} Let $\mathcal{C}$ and $\mathcal{C'}$ be abelian categories, with $\mathcal{C}$ having enough injectives, and let $F: \mathcal{C} \rightarrow \mathcal{C'}$ be a (covariant) left exact additive functor. Then: \begin{enumerate}
\item[(i)] There are additive functors $R^qF : \mathcal{C} \rightarrow \mathcal{C'} $ for all $q \geq 0$;
\item[(ii)] There is an isomorphism $F \cong R^0F$;
\item[(iii)] For each exact sequence $E: 0 \rightarrow A_1 \rightarrow A_2 \rightarrow A_3 \rightarrow 0 $ and each $q \geq 0$, there is a morphism $\delta^q_E: R^qFA_3 \rightarrow R^{q+1}FA_1$ that makes the following sequence exact
$$ \dots \rightarrow R^{q}FA_1 \rightarrow R^{q}FA_2 \rightarrow R^{q}FA_3 \xrightarrow{\delta^q_E} R^{q+1}FA_1 \rightarrow \dots$$
\item[(iv)] The morphisms $\delta^q_E$ are natural in $E$. \end{enumerate} \end{teo}
These functors $R^qF : \mathcal{C} \rightarrow \mathcal{C'}$ are unique up to natural isomorphism; $R^qF$ is called the \textit{$q$-th right derived functor of $F$}, and $R^qF(A) \cong H^q(F(I^{\bullet}))$, where $A \rightarrow I^{\bullet}$ is an injective resolution of $A$.
It is worth mentioning that the famous $Ext(-,A)$ functor is the derived functor of $Hom_{\mathcal{C}}(-,A)$. Since $Hom_{\mathcal{C}}(-,A)$ is exact iff $A$ is injective, and $Ext$ measures how far $Hom_{\mathcal{C}}(-,A)$ is from being an exact functor, we can say $Ext$ measures the failure of $A$ to be injective.
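A minimal computation illustrating this: the short exact sequence $0 \rightarrow \mathds{Z} \xrightarrow{\cdot n} \mathds{Z} \rightarrow \mathds{Z}/n\mathds{Z} \rightarrow 0$ in $Ab$ is a projective resolution of $\mathds{Z}/n\mathds{Z}$; applying the contravariant functor $Hom(-,\mathds{Z})$ to it yields the complex $0 \rightarrow \mathds{Z} \xrightarrow{\cdot n} \mathds{Z} \rightarrow 0$, whose cohomology gives
$$ Ext^0(\mathds{Z}/n\mathds{Z},\mathds{Z}) = Hom(\mathds{Z}/n\mathds{Z},\mathds{Z}) = 0 \quad \mbox{ and } \quad Ext^1(\mathds{Z}/n\mathds{Z},\mathds{Z}) \cong \mathds{Z}/n\mathds{Z}. $$
The nontrivial $Ext^1$ detects precisely that $\mathds{Z}$ is not injective in $Ab$.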
We introduce a last definition in this section: let $F: \mathcal{C} \rightarrow \mathcal{C'}$ be as in the above theorem. An object $A$ of $\mathcal{C}$ is \textit{$F$-acyclic} (or acyclic for $F$) if $R^qF(A) = 0$ for all $q > 0$.
\textbf{Remark:} Acyclic objects contribute nothing to the failure of exactness measured by the derived functors, so we could equally compute derived functors using resolutions by $F$-acyclic objects instead of injective ones.
So far, we discussed that if $\mathcal{C}$ is an abelian category, then we can define \textit{cohomology objects} of a cochain complex $C^{\bullet}$ in $\mathcal{C}$, and several constructions and results of Homological Algebra are available. However, what is a \textit{cohomology theory}? That is, for different abelian categories (and even non-abelian categories), what guarantees that we are dealing with a cohomological structure? The answer is: the Eilenberg--Steenrod axioms.
The Eilenberg--Steenrod axioms state that a collection of functors forms a (co)homology theory if it satisfies a certain list of axioms, for fixed coefficients (we will see cohomologies where the coefficients are sheaves, but the reader can think, for instance, of singular cohomology with coefficients in a fixed abelian group). Moreover, we may obtain other types of cohomology theories if we remove one of the axioms; in particular, the removal of the dimension axiom provides a ``generalized (co)homology theory'', which is the case of some $K$-theories. In other words, ``cohomology'' has a broad application. It is interesting to observe that different cohomologies may coincide for suitable choices of spaces and coefficients (see Theorem \ref{Cech-te}, and Sections \ref{sec:8} and \ref{sec:14}).
\subsection{Abelian Group Object} \label{sec:4}
If $\mathcal{C}$ is a category with binary products and terminal object $1,$ we can define the notion of \textbf{group object in $\mathcal{C}$} as an object $G$ in $\mathcal{C}$ equipped with morphisms \begin{center} \begin{tikzcd} e: 1 \arrow[r] & G & i: G \arrow[r] & G & m:G \times G \arrow[r] & G \end{tikzcd} \end{center} in $\mathcal{C}$, such that the following diagrams commute \begin{center} \begin{tikzcd} G \times G \times G \arrow[r, "id_G\times m"] \arrow[d, "m \times id_G"'] & G \times G \arrow[d, "m"] & 1\times G \arrow[r, "e \times id_G"] \arrow[rd, "\cong"'] & G \times G \arrow[d, "m"] & G\times 1 \arrow[l, "id_G \times e"'] \arrow[ld, "\cong"] \\ G \times G \arrow[r, "m"'] & G & & G & \end{tikzcd}
\begin{tikzcd} G \arrow[d, "!"] \arrow[r, "\triangle"] & G \times G \arrow[r, "i\times id_G"] & G \times G \arrow[d, "m"] \\ 1 \arrow[rr, "e"'] & & G \end{tikzcd}
\begin{tikzcd} G \arrow[d, "!"] \arrow[r, "\triangle"] & G \times G \arrow[r, "id_G\times i"] & G \times G \arrow[d, "m"] \\ 1 \arrow[rr, "e"'] & & G \end{tikzcd}
\end{center}
The morphism $\triangle = (id_G,id_G): G \rightarrow G \times G$ is the diagonal morphism. Note that these diagrams are expressing the group axioms. If we want to add an abelian condition and form an abelian group object, then we must include \begin{center} \begin{tikzcd} G \times G \arrow[r, "\tau"] \arrow[rd, "m"'] & G \times G \arrow[d, "m"] \\
& G \end{tikzcd} \end{center} commutative, where $\tau = (\pi_2, \pi_1): G \times G \rightarrow G \times G $ is the twist morphism.
So an \textit{abelian group object} is a quadruple $(G,e,i,m),$ where the diagrams above commute, and the \textit{category} $Ab(\mathcal{C})$ \textit{of abelian group objects} in $\mathcal{C}$ is the category defined over the base category where the objects are abelian group objects in $\cal{C}$ and the morphisms are morphisms in $\mathcal{C}$ that commute with the corresponding morphisms $e, i$, and $m.$ In more detail, if $\mathcal{G} =(G,e,i,m)$ and $\mathcal{G}'=(G',e',i', m')$ are abelian group objects in $\mathcal{C}$, then an arrow $h : G \to G'$ in $\mathcal{C}$ determines an arrow $h : \cal G \to \cal G'$ in $Ab(\mathcal{C})$ iff $h\circ e = e'$, $h \circ i = i' \circ h$ and $h \circ m = m' \circ (h \times h)$.
Since this internal notion of group uses only products and commutative diagrams in the category $\mathcal{C}$, it follows easily that the forgetful functor $E: Ab(\mathcal{C}) \to \mathcal{C}$ creates limits.
Two notable examples of group objects are topological groups, when $\mathcal{C}$ is the category of topological spaces, and Lie groups, when $\mathcal{C}$ is the category of smooth manifolds. The base category $\mathcal{C}$ will be a topos throughout this survey.
\section{Sheaves} \label{sec:5}
Interested in fixed point results applied to the realm of partial differential equations, Jean Leray published in 1945, while a prisoner of war during the Second World War, the paper \cite{leray1945forme} that would originate sheaf theory. He published a more refined paper about sheaf theory and spectral sequences in 1950 \cite{leray1950anneau}, with the original ideas preserved. Meanwhile, Henri Cartan started the Séminaire at the École Normale Supérieure and reformulated sheaf theory. Also in 1950, in the third year of this seminar, sheaves appeared as what is now known as ``étalé spaces''. Results using sheaf methods were increasingly showing up, but the terminology was not established. It was Roger Godement who achieved a standard language for the theory (for example, presheaves are functors, sheaves are a special kind of presheaves; the notion of sheaf in Cartan's seminars came to be called an étalé space) with his book published in 1958 \cite{godement1958topologie}.
Less about the history and more about the philosophy of sheaf theory: since the beginning, there was some notion that allows passing from local data to global data. In the work of Godement, the flabby sheaves were responsible for playing this role, while Grothendieck worked more with injective sheaves. The idea is that the higher cohomology groups obtained from resolutions by this specific kind of sheaves are trivial, so we do not have obstructions from local to global. The power of sheaf theory is to provide machinery to solve global problems by resolving them locally, which is especially interesting for Algebraic Geometry and Complex Analysis.
Let $X$ be a topological space. We denote by $\mathcal{O}(X)$ the category associated to the poset of all open sets of $X$. A \textit{presheaf of sets} is a (covariant) functor $F: \mathcal{O}(X)^{op} \rightarrow Set$, and a morphism of presheaves is a natural transformation. Given an inclusion $U \subseteq V$ and $s \in F(V)$, we use $s_{|^V_U}$ (or just $s_{|_U}$) to denote the image of $s$ under the ``restriction map'' from $F(V)$ to $F(U)$.
If $U \subseteq X$ is open and $U = \bigcup\limits_{i\in I} U_i$ is an open cover, a presheaf $F$ is a \textit{sheaf} (of sets) when the following diagram \begin{center}
\begin{tikzcd} F (U) \arrow[r, "e"] & \prod\limits_{i\in I}F (U_i) \arrow[r, "p", shift left=1 ex] \arrow[r, "q"', shift right=0.5 ex] & {\prod\limits_{(i,j) \in I \times I}F (U_i \cap U_j)} \end{tikzcd}
\end{center}
is an equalizer in the category $Set$, where:
\begin{enumerate}
\item $e(t) = \{t_{|_{U_i}} \enspace | \enspace i \in I\}, \enspace t \in F (U)$
\item $p((t_k)_{ k \in I}) = (t_{i_{|_{U_i \cap U_j}}})_{(i,j)\in I\times I}$ \\ $q((t_k)_{k \in I}) = (t_{j_{|_{U_i \cap U_j}}})_{(i,j)\in I\times I}, \enspace (t_k)_{k \in I} \in \prod\limits_{k\in I}F (U_k)$
\end{enumerate}\label{sheaf}
This definition is useful to understand categorical properties and provides a simple way to visualize its generalization when we substitute $\mathcal{O}(X)$ by an arbitrary category. However, there is an equivalent and more concrete way to describe a sheaf. Instead of presenting an equalizer diagram, we say that the presheaf $F$ satisfies two conditions: \begin{enumerate}
\item \textbf{(Gluing)} If $(s_i)_{i \in I}$, with $s_i \in F(U_i)$, is a \textit{compatible family}, i.e., $s_{i_{|_{U_i \cap U_j}}} = s_{j_{|_{U_i \cap U_j}}}$ for all $i,j \in I$, then there is some $s \in F(U)$ such that $s_{|_{U_i}} = s_i, i \in I$. We say $s$ is the \textit{gluing} of the compatible family.
\item \textbf{(Separability)} If $s, s' \in F(U)$ are such that $s_{|_{U_i}} = s'_{|_{U_i}}$ for all $i \in I$, then $s = s'$. \end{enumerate}
A morphism of sheaves is a morphism of presheaves, that is, a natural transformation between functors, and it is clear that this defines a category, denoted by $Sh(X)$. Note that in the definition of sheaves we could replace $Set$ by any category with all small products, for example, the category of abelian groups $Ab$, and in this case we change the nomenclature to \textit{abelian sheaves}\label{absheaves}. We will return to this in Section \ref{sec:6}.
If $F$ is a presheaf, the \textit{stalk of $F$ at the point $x \in X$} is the direct limit $F_x := \varinjlim\limits_{U \in \mathcal{U}_x} F(U)$, where $\mathcal{U}_x = \{U \in {\cal O}(X): x \in U\}$ is the poset of open neighborhoods of $x$. A presheaf $F$ satisfies the separability condition above if and only if the canonical morphisms $F(U) \to \prod\limits_{x \in U} F_x$, $U \in {\cal{O}}(X)$ are monomorphisms. We will see in the next paragraphs that stalks are important to transform presheaves into sheaves.
Now we can say that sheaves capture global information from the gluing of local properties. For example, there is a functor, ${\cal C}_\mathds{R}$, that takes an open set $U$ in a topological space $X$ and sends it to the set ${\cal C}_\mathds{R}(U)= \{f: U \rightarrow \mathds{R} \,|\, f$ is a continuous function$\}$. Since the restriction of a continuous function to a subset of its domain is still a continuous function, ${\cal C}_{\mathds{R}}$ is a presheaf. Moreover, given an open cover $U = \bigcup\limits_{i\in I} U_i$ and a compatible family $f_i \in {\cal C}_{\mathds{R}}(U_i)$, i.e., $f_i(x) = f_j(x)$ for all $x \in U_i \cap U_j$, there is a unique function $f$ on $U$ such that $f_{|_{U_i}} = f_i$. Besides that, the continuity of the $f_i$'s implies the continuity of the gluing $f$, so $f \in {\cal C}_{\mathds{R}}(U)$, and ${\cal C}_{\mathds{R}}$ is a sheaf. Analogously, the presheaves of differentiable, smooth, or analytic functions are sheaves \cite{tennison_1975}.
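A contrasting non-example helps to isolate the role of the gluing condition: the presheaf of bounded continuous functions, ${\cal B}_{\mathds{R}}(U)= \{f: U \rightarrow \mathds{R} \,|\, f$ is a bounded continuous function$\}$, with the usual restrictions, is separated but is not a sheaf. Indeed, for the open cover $\mathds{R} = \bigcup\limits_{n \in \mathds{N}} (-n,n)$, the restrictions of the identity function form a compatible family of bounded functions whose only possible gluing, the identity of $\mathds{R}$, is unbounded: boundedness is not a local property.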
This example may remind the reader of germs and stalks over points in a topological space with respect to étale bundles (local homeomorphisms), and this is not only a coincidence: for any continuous function $p: E \rightarrow X$ we define $\Gamma_p(U) = \{s: U \rightarrow E \enspace| s$ is continuous and $\enspace p(s(x)) = x, \forall x \in U\}$ and it is possible to prove that $\Gamma_p$ is a sheaf, called the \textit{sheaf of sections of the continuous function $p$}. Moreover, if $F$ is a sheaf over a topological space $X$, taking $E_F := \coprod\limits_{x\in X}F_x$ the disjoint union of the stalks of $F$ at each point $x$ in $X$, and defining an adequate topology on $E_F$, the (obvious) projection function $p_F : E_F \rightarrow X$ determines a local homeomorphism: this leads to a natural isomorphism between $F$ and $\Gamma(p_F)$. So every sheaf over $X$ is (naturally isomorphic to) the sheaf of sections of a local homeomorphism over $X$. Sheaf Theory inherits the nomenclature of constructions involving étale bundles because the two notions are strongly related through the category equivalence between the category of étale bundles over $X$ and the category of sheaves over $X$, for each topological space $X$. The reader can find a detailed account on this subject in \cite[Chap. II]{maclane1992sheaves}.
The spatial-functorial identification process described above is useful to provide the ``best sheaf approximation of a given presheaf'' as follows: any presheaf $F : {\cal O}(X)^{op} \to Set$ can be ``sheafified'' into $a(F) := \Gamma(p_F) : {\cal O}(X)^{op} \to Set$, i.e. $a(F)$ is a sheaf over $X$ and there is a natural transformation $\eta_F : F \to a(F)$ that is initial among the natural transformations $\sigma : F \to S$, where $S$ is a sheaf over $X$; moreover, the stalk of $a(F)$ at a point $x \in X$ is isomorphic to the stalk $F_x$. For instance, given a set $A$, the ``constant presheaf'' with value $A$ is the contravariant functor $F_A(U \hookrightarrow V) = (A \overset{id_A}\leftarrow A)$; its stalk at a point $x \in X$ is isomorphic to $A$ and its sheafification, $a(F_A) : {\cal O}(X)^{op} \to Set$, is isomorphic to the sheaf of continuous functions with values in $A$ (viewed as a discrete topological space): ${\cal C}_{A}(U) = \{ f : U \to A \, | \, f$ is a continuous function$\}$, $U \in {\cal O}(X)$.
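To see why the sheafification step is genuinely needed here, note that for $A$ with at least two elements the constant presheaf $F_A$ is not itself a sheaf: a sheaf must send the empty open set to a one-point set (applying the equalizer condition to the empty cover of $\emptyset$), while $F_A(\emptyset) = A$; moreover, if $U$ is an open set with two nonempty connected components, then ${\cal C}_A(U) \cong A \times A$ (one value on each component), whereas $F_A(U) = A$, so $\eta_{F_A}$ is not an isomorphism. Sheafification thus replaces constant functions by locally constant ones.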
Another relevant example of a sheaf comes from Commutative Algebra and is central for the development of modern Algebraic Geometry: for each commutative unitary ring $R$, there is a canonical sheaf of rings, ${\cal O}_R$, defined over its prime spectrum space\footnote{$Spec(R) = \{ p \subseteq R: p$ is a proper prime ideal of $R\}$, and it is endowed with the so called ``Zariski Topology''.}, $Spec(R)$; this sheaf is determined on a (canonical) basis of the (spectral) topology of $Spec(R)$ just by taking adequate localizations of the ring $R$, and the stalk of this sheaf at a proper prime ideal $p \in Spec(R)$ is isomorphic to the local ring $R_p = R[R\setminus p]^{-1} $. The pair $(Spec(R), {\cal O}_R)$ is called the affine scheme associated to $R$; we will return to this example later, in Section \ref{sec:8}.
\subsection{Sheaf Cohomology} \label{sec:6}
In this section, we present the subject ``Sheaf Cohomology'' in the usual way, omitting proofs that can be easily found in the literature, as in \cite{grothendieck1971revetement,godement1958topologie}, but providing intuition about the associated ideas. Our aim here is to list some results of this theory that will reappear in the next section with the appropriate modifications.
For the reader's convenience, we start by explaining why we can do sheaf cohomology in $Ab(Sh(X))$, i.e., how abelian sheaves are equivalent to abelian group objects of $Sh(X)$.
Note that abelian presheaves $\mathcal{O}(X)^{op} \rightarrow Ab$ form the category of functors $Ab^{\mathcal{O}(X)^{op}}$. Then, for every functor $F$ that is an object in $Ab^{\mathcal{O}(X)^{op}}$, we have that $F(U)$ is an abelian group for every $U \in \mathcal{O}(X)$. So, for each $U \in {\cal{O}}(X)$, there are $m_U: (F \times F)(U) \cong F(U)\times F(U) \rightarrow F(U)$, $i_U:F(U) \rightarrow F(U)$, and $e_U: 1 \rightarrow F(U)$ such that they determine natural transformations and the diagrammatic rules of an abelian group object hold, i.e., $F$ is an abelian group object of $Set^{\mathcal{O}(X)^{op}}$. On the other hand, if $G \in Ab(Set^{\mathcal{O}(X)^{op}})$, then $G \in Set^{\mathcal{O}(X)^{op}}$ and we have $m, i,$ and $e$ as in the definition of an abelian group object. For every $U \in \mathcal{O}(X)$ we consider $m_U, i_U,$ and $e_U$ such that the diagrammatic rules still hold; then $G(U)$ is an abelian group, i.e., $G$ is a functor from $\mathcal{O}(X)^{op}$ to $Ab$. These correspondences describe an equivalence of categories $Ab(Set^{\mathcal{O}(X)^{op}}) \simeq Ab^{\mathcal{O}(X)^{op}}.$
Observe that $Ab(Set) \simeq Ab$ and consider the forgetful functor $E: Ab(Set) \rightarrow Set$ ($E$ ``forgets'' the group operations); note that this functor preserves all limits. Thus an abelian sheaf is a functor $F: \mathcal{O}(X)^{op} \rightarrow Ab$ such that the composition $\mathcal{O}(X)^{op} \rightarrow Ab \rightarrow Set$ is a sheaf of sets. Denote the category of abelian sheaves by $Sh_{Ab}(X)$. Since we have inclusions $Sh(X) \rightarrow Set^{\mathcal{O}(X)^{op}}$ and $Sh_{Ab}(X) \rightarrow Ab^{\mathcal{O}(X)^{op}}$, the equivalence $Ab(Set^{\mathcal{O}(X)^{op}}) \simeq Ab^{\mathcal{O}(X)^{op}}$ induces an equivalence $Ab(Sh(X)) \simeq Sh_{Ab}(X)$, since the subcategories of sheaves, over $Set$ and over $Ab$, are closed under all small limits.
Therefore, to apply cohomological techniques in $Ab(Sh(X))$ is equivalent to apply it in $Sh_{Ab}(X)$. Many classical books of Sheaf Cohomology prove that $Sh_{Ab}(X)$ is an abelian category (see, for instance, \cite[Theorem 2.5]{iversen1986cohomology}). We, alternatively, can show that $Ab(\mathcal{E})$ is an abelian category for any topos $\mathcal{E}$ so, in particular, $Ab(Sh(X))$ is abelian. We will comment more on this in Section \ref{sec:12}.
We will use right derived functors to define the cohomology group of sheaves, thus we need to ensure that $Sh_{Ab}(X)$ has enough injectives: see \cite[Theorem 3.1]{iversen1986cohomology} for a proof of this fact.
For every sheaf $F$ in $Sh_{Ab}(X)$ and every open set $U$ of $X$, we have the abelian group of \textit{sections of $F$ over $U$} defined by $\Gamma(U,F)=F(U)$. Sections over $X$ are called \textit{global sections}, and $\Gamma(X,-):Sh_{Ab}(X) \rightarrow Ab$ is a left exact functor\footnote{It preserves all small limits.} that sends an abelian sheaf to its abelian group of global sections, known as the \textit{global section functor}.
Then the $q$-th cohomology group of $X$ with coefficients in $F$ is, by definition, the $q$-th right derived functor of $\Gamma(X,-)$ applied to $F$. In other words, given an injective resolution $F \rightarrow I^{\bullet}$, we have $H^q(X,F) = R^q\Gamma(X,-)(F) \cong H^q(\Gamma(X,I^{\bullet}))$.
A special type of sheaves are the flabby sheaves. As we will see, they are important because, like injective objects, they allow the construction of acyclic resolutions. By definition, if the restriction map $s_U: \mathcal{F}(X) \rightarrow \mathcal{F}(U)$ is onto for every open $U \subseteq X$, the sheaf $\mathcal{F}$ is flabby. Equivalently, $\mathcal{F}$ is flabby if $\mathcal{F}(V) \rightarrow \mathcal{F}(U)$ is onto for any pair $U \subseteq V$ of open sets in $X.$
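A basic example of a flabby sheaf is, for a nonempty set $A$, the sheaf of \textit{all} (not necessarily continuous) $A$-valued functions, $U \mapsto \{f: U \rightarrow A\}$: any function on $U$ extends to $X$, e.g. by a fixed constant value outside $U$, so every restriction map from $X$ is onto. Sheaves of this ``discontinuous'' kind appear as the building blocks of the Godement resolution mentioned below. By contrast, the sheaf ${\cal C}_{\mathds{R}}$ on $X = \mathds{R}$ is not flabby: the continuous function $x \mapsto 1/x$ on $(0,\infty)$ admits no continuous extension to $\mathds{R}$.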
\begin{prop} Every injective sheaf is flabby. \end{prop} \begin{proof} To establish this result, we will need an auxiliary construction.
Consider a functor $x_*: Set \rightarrow Sh(X)$, such that
\[
(x_*H)(U)= \begin{cases}
H, & x \in U\\
\{*\}, & x \notin U \end{cases} \] where $H$ is a set, $U$ an open set in $X$, and $\{*\}$ a singleton set. This is known as the \textit{skyscraper sheaf}. In the abelian sheaf version, we have $x_*: Ab \rightarrow Sh_{Ab}(X),$ $H \mapsto x_*H,$ with the difference that $H$ is now an abelian group and $x_*H$ is a functor that sends each open set of $X$ to $H$ or to the trivial group.
For each $x \in X$, let $D_x$ be an injective abelian group. We define an injective sheaf $D := \prod\limits_{x \in X} x_{*}D_{x}$. It is not difficult to see that $D(X) \rightarrow D(U)$ is surjective, i.e., $D$ is flabby.
Now suppose $F$ is an injective sheaf. We will show that $F$ is flabby. Since $F$ is injective, for each $x \in X$, the stalk $F_x$ is an injective abelian group. Consider the family of injective abelian groups $D(F)_x := F_x, x \in X$. Then $D(F) := \prod\limits_{x \in X} x_{*}D(F)_{x}$ is an injective and flabby sheaf and, since $F(U) \to \prod\limits_{x \in U} F_{x}$ is a monomorphism, $U \in {\cal O}(X)$, there is a mono $i: F \rightarrow D(F)$. Since $F$ is an injective sheaf, we can select a morphism $f: D(F) \rightarrow F$ such that $f \circ i = id_F$. Since all components of the identity morphism are surjective homomorphisms, the same holds for the components of $f$. Besides that, by naturality of $f$, the following diagram commutes: \begin{center}
\begin{tikzcd} D(X) \arrow[r, "{s_{U,D}}"] \arrow[d, "f(X)"'] & D(U) \arrow[d, "f(U)"] \\ F(X) \arrow[r, "{s_{U,F}}"'] & F(U) \end{tikzcd} \end{center}
We already know $f(U)$ and ${s_{U,D}}$ are surjective, so $f(U)\circ {s_{U,D}}$ is surjective. By commutativity of the diagram, ${s_{U,F}}\circ f(X)$ is surjective, and so also is ${s_{U,F}}$. This holds for every open set $U$ of $X$, hence $F$ is a flabby sheaf. \end{proof}
Now we show that flabby sheaves can be used to build acyclic resolutions.
\begin{prop} If $F$ is a flabby sheaf, then $H^q(X,F) = 0,$ for all $q > 0$. In other words, $F$ is $\Gamma(X,-)$-acyclic. \end{prop}
\begin{proof} Since $Sh_{Ab}(X)$ has enough injectives, we can construct an exact sequence $0 \rightarrow F \xrightarrow{f} G \xrightarrow{g} Q \rightarrow 0$ with $G$ injective. By the proposition above, $G$ is flabby.
Using the left exactness of the global section functor, we immediately obtain the exact sequence $$0 \rightarrow \Gamma(X,F) \xrightarrow{\Gamma_f} \Gamma(X,G) \xrightarrow{\Gamma_g} \Gamma(X,Q) $$
The flabby condition of $F$ implies more: $$0 \rightarrow \Gamma(X,F) \xrightarrow{\Gamma_f} \Gamma(X,G) \xrightarrow{\Gamma_g} \Gamma(X,Q) \rightarrow 0 $$ is exact. This is not straightforward and uses Zorn's Lemma to be proved \cite[Theorem 3.5]{iversen1986cohomology}.
By Theorem \ref{theo:derivedfunctor}, the derived functors induce a long exact sequence. We will analyze the following part of the sequence:
\begin{center}
$\Gamma(X,G) \xrightarrow{\Gamma_{g_0}} \Gamma(X,Q) \xrightarrow{\delta_0} H^1(X,F) \xrightarrow{f_1} H^1(X,G)$ \end{center}
where $g_0 = g$. Note that $H^1(X,G)=0$ because $G$ is injective, so exactness gives that $\delta_0$ is surjective and, by the Isomorphism Theorem, $$H^1(X,F) \cong \frac{\Gamma(X,Q)}{Ker(\delta_0)}.$$ But $\Gamma_{g_0}$ is surjective, so $Ker(\delta_0) = Im(\Gamma_{g_0}) = \Gamma(X,Q).$ Then, $H^1(X,F) = 0$.
To conclude the result, use an induction argument on $q$ and the fact that if the first two objects in a short exact sequence of sheaves are flabby, then the third one is also flabby. \end{proof}
\textbf{Remark:} All proofs we know of this proposition require Zorn's Lemma, so a constructive proof may not be available yet (or maybe there is no constructive proof).
Given the fact that every sheaf admits a flabby resolution, via the Godement resolution, the Proposition above implies that we can compute cohomology groups with coefficients in $\mathcal{F}$ using flabby sheaves instead of injective ones. The reason why this is possible is that, to construct cohomology, we need a procedure that measures the ``failure of right exactness'' of the global section functor, and the proposition above guarantees such a procedure for flabby sheaves \cite{godement1958topologie}.
\subsection{\v{C}ech Cohomology} \label{sec:7}
The nerve construction of an open covering first appeared in \cite{alexandroff1928allgemeinen}, before its debut in Sheaf Theory. Originally, the nerve associated an open covering of a topological space to an abstract simplicial complex, in an algorithmic form. Currently, nerve constructions preserve the algorithmic form but they deal with more general settings than topological spaces and simplicial complexes. We will use the \v{C}ech nerve to develop \v{C}ech Cohomology.
Godement improved in his book the brief discussion about \v{C}ech Cohomology made in Cartan's seminars, and it remains a fundamental reference on the subject today. Additionally, we recommend Koszul's class notes \cite{koszul1957faisceaux} and, for references in English, algebraic geometry books such as \cite{hartshorne1977algebraic}. Here we introduce \v{C}ech Cohomology as a technique to calculate Sheaf Cohomology by taking open covers of a fixed topological space, constructing a cochain complex from them, and finally computing the cohomology groups. We aim to use this section to compare it with \v{C}ech Cohomology for Grothendieck Toposes.
Fix $F$ in $Sh_{Ab}(X)$ and consider an open cover ${\cal U} = (U_i )_{i \in I}$ of $X$, where $I$ is a set of indices. For each $q \in \mathds{N}$, denote $U_{{i_0},...,{i_q}} = U_{i_0} \cap ... \cap U_{i_q}$ for $i_0, ..., i_q \in I$ (this is the \v{C}ech nerve). The \textit{\v{C}ech cochain complex} is $$C^q({\cal U}, F) = \prod\limits_{i_0,...,i_q}F(U_{{i_0},...,{i_q}}), \forall q \geq 0,$$ and its coboundary morphisms $d^q : C^{q}({\cal U},F) \to C^{q+1}({\cal U},F)$ are given by $$(d^q\alpha)_{i_0,...,i_{q+1}} = \sum\limits_{k=0}^{q+1}(-1)^k\alpha(\delta_k)_{\big|_{U_{{i_0},...,{i_{q+1}}}}}$$ where $\delta_k$ is used to indicate that we are removing $i_k$, i.e., $\alpha(\delta_k) = \alpha_{i_0,...,\widehat{i_k},...,i_{q+1}}$.
A straightforward verification shows that $d^{q+1}\circ d^q = 0$ so, indeed, this is a cochain complex and we can define the $q$-th \v{C}ech cohomology group of $F$ with respect to the covering ${\cal U}$ by $\check{\mathrm{H}}^q({\cal U},F) = Ker (d^{q})/Im (d^{q-1})$.
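As a quick illustration, take $X = S^1$, $F$ the constant sheaf with values in $\mathds{Z}$ (locally constant $\mathds{Z}$-valued functions), and ${\cal U} = \{U_0, U_1\}$ a covering by two open arcs whose intersection $U_0 \cap U_1$ has two connected components. Using the standard reduction to alternating cochains (which does not change the cohomology groups), the complex becomes
$$ 0 \rightarrow F(U_0) \times F(U_1) \xrightarrow{d^0} F(U_0 \cap U_1) \rightarrow 0, $$
that is, $0 \rightarrow \mathds{Z}^2 \rightarrow \mathds{Z}^2 \rightarrow 0$ with $d^0(a,b) = (b-a,\, b-a)$. Hence $\check{\mathrm{H}}^0({\cal U},F) \cong \mathds{Z}$, the global constant sections, and $\check{\mathrm{H}}^1({\cal U},F) \cong \mathds{Z}^2/\{(c,c) : c \in \mathds{Z}\} \cong \mathds{Z}$, in accordance with the singular cohomology of the circle.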
The result below gives us a first clue that \v{C}ech cohomology can be useful to calculate cohomology of sheaves. See \cite[Lemma III 4.4]{hartshorne1977algebraic} for a proof.
\begin{prop} \label{Cech-U} Let $F$ be a sheaf in $Sh_{Ab}(X)$, and ${\cal U} = (U_i)_{i \in I}$ a covering of $X$. There is a canonical morphism $k^q_{\cal U} : \check{\mathrm{H}}^q({\cal U},F) \rightarrow \mathrm{H}^q(X,F)$ natural and functorial in $F$ for each $q \in \mathds{N}.$ \end{prop}
Next, we will briefly examine the behavior of the \v{C}ech cohomology groups under the dynamic of refinements of coverings. We will return to this point later, in Section \ref{sec:13}.
Let ${\cal{V}} = (V_j)_{j \in J}$ be another covering of $X$. Suppose that ${\cal{U}}$ is a refinement of ${\cal{V}}$, i.e., for each $i \in I$, there is $j \in J$ such that $U_i \subseteq V_j$. Choose any function $c : I \to J$ such that $U_i \subseteq V_{c(i)}, i \in I$; then there is an induced morphism of cochain complexes $m_c: C^\bullet({\cal{V}}, F) \to C^\bullet({\cal{U}}, F)$ and a corresponding morphism of \v{C}ech cohomology groups w.r.t. the coverings ${\cal U}$ and ${\cal V}$, $\check{m}_c : \check{\mathrm{H}}^\bullet({\cal V},F) \to \check{\mathrm{H}}^\bullet({\cal U},F) $. Moreover, if $d : I \to J$ is another chosen function w.r.t. the refinement of ${\cal V}$ by ${\cal U}$, then the induced morphisms of complexes $m_c, m_d$ are homotopic; thus, by Proposition \ref{homotopic}, there is a unique induced morphism of cohomology groups $\check{m}_{{\cal U}, {\cal V}} : \check{\mathrm{H}}^\bullet({\cal V},F) \to \check{\mathrm{H}}^\bullet({\cal U},F) $.
Note that the class $Ref(X)$ of all coverings of $X$ is partially ordered under the refinement relation; this is a directed ordering relation.
The construction above is functorial in the following sense: \begin{itemize}
\item $\check{m}_{{\cal U}, {\cal U}} = id : \check{\mathrm{H}}^\bullet({\cal U},F) \to \check{\mathrm{H}}^\bullet({\cal U},F)$;
\item If ${\cal W} = (W_k)_{k \in K}$ is a covering of $X$ such that ${\cal V}$ is a refinement of ${\cal W}$, then $\check{m}_{{\cal U}, {\cal W}} = \check{m}_{{\cal U}, {\cal V}} \circ \check{m}_{{\cal V}, {\cal W}} : \check{\mathrm{H}}^\bullet({\cal W},F) \to \check{\mathrm{H}}^\bullet({\cal U},F) $. \end{itemize}
The (absolute) \v{C}ech cohomology group is, by definition, the directed (co)limit\footnote{This (co)limit has to be taken with some set-theoretical care, we will not detail this point here.} $$\check{\mathrm{H}}^\bullet(X,F) := \varinjlim\limits_{{\cal U} \in Ref(X)}\check{\mathrm{H}}^\bullet({\cal U},F) .$$
The main result concerning \v{C}ech cohomology is the following:
\begin{teo} \label{Cech-te}
The canonical morphisms $k^q_{\cal U} : \check{\mathrm{H}}^q({\cal U},F) \rightarrow \mathrm{H}^q(X,F), q \in \mathds{N},$ following the notation of Proposition \ref{Cech-U}, are compatible under refinement. Moreover, the induced morphism on the colimit
$$k^q : \check{\mathrm{H}}^q(X,F) \to \mathrm{H}^q(X,F), q \in \mathds{N}, $$
is an isomorphism if $q \leq 1$ and a monomorphism if $q=2$.
\end{teo}
Far more interestingly, under reasonable geometrical hypotheses on the topological space $X$ (for instance, if $X$ is a Hausdorff paracompact space\footnote{This holds for any CW-complex or any topological manifold.}), the canonical morphisms $k^q$ are isomorphisms for all $q \geq 0.$
\subsection{Applications} \label{sec:8}
Most mathematicians will not be interested in abstract sheaf theory alone, but in its applications for specific sheaves. For example, if $(X,\mathcal{O}_X)$ is a ringed space, i.e., $X$ is a topological space and $\mathcal{O}_X$ is a ring-valued sheaf, we can define a coherent sheaf $F$ on $(X,\mathcal{O}_X)$ that will look like a vector bundle, with the advantage that coherent sheaves form an abelian category. Thus, we can study coherent sheaf cohomology. In this context, we have an analog of Poincaré Duality of Algebraic Topology, the Serre Duality, which relates cohomology groups at level $n-q$ with $Ext$ groups at level $q$, where $n$ is the dimension of the particular scheme we are studying \cite[Theorem III 7.6]{hartshorne1977algebraic}. Coherent sheaf cohomology also provides a characterization of the Euler Characteristic as an alternating sum of the dimensions of the cohomology groups of a scheme with coefficients in a coherent sheaf.
We observe that schemes are essential in modern Algebraic Geometry, and their definition arises from the affine (locally) ringed space $(Spec(R),\mathcal{O}_R)$. We use the Zariski Topology to construct the sheaf $\mathcal{O}_R$ and to furnish the spectrum $Spec(R)$ of a commutative ring with a topological structure. The gluing of ringed spaces of the form $(Spec(R),\mathcal{O}_R)$ results in the notion of scheme. We may use schemes to construct quasi-coherent sheaves, a generalization of the coherent sheaves introduced by Serre in \cite{10.2307/1969915}. Quasi-coherent sheaves constitute an interesting class of coefficients for cohomologies in Algebraic Geometry: \v{C}ech and sheaf cohomology agree on a noetherian separated scheme with the Zariski topology, for any quasi-coherent sheaf as coefficient.
Another application of sheaf-theoretical methods is the relation between \v{C}ech cohomology and De Rham cohomology, which is obtained as follows: Given a topological space $X$ and a set $A$, the constant presheaf with values in $A$ that we mentioned earlier can be transformed into a constant sheaf with values in $A$ by a standard ``sheafification'' process. In particular, the set $A$ can be the underlying set of an abelian group such as $\mathds{R}$, the additive group of real numbers, and the topological space can be a compact manifold $M$ of dimension $m$ and class at least ${\cal C}^{m+1}$. In this case, there is an isomorphism $H^q_{dR}(M) \cong \check{H}^q(M,\mathds{R})$, for all $q \leq m$, where $H^q_{dR}$ denotes the de Rham cohomology groups \cite[Appendix]{petersen2006riemannian}. Similarly, \v{C}ech cohomology and singular cohomology coincide for any topological space $X$ that is homotopy equivalent to a CW-complex, with the constant sheaf of an abelian group $A$ as coefficient.
More recently, sheaf and \v{C}ech cohomologies have been used in quantum mechanics because of the general idea of measuring the obstruction between local and global properties. For example, in \cite{abramsky2015contextuality}, \v{C}ech cohomology groups are defined for specific topological spaces, with a corresponding open cover, and are shown to identify the obstructions that characterize logical forms of \textit{contextuality}.
In the next section, we will generalize the categories of sheaves over a topological space by defining the notion of Grothendieck topos, and we will exhibit specific Grothendieck toposes that appear in other areas of Mathematics.
\section{Toposes} \label{sec:9}
\subsection{Grothendieck Toposes} \label{sec:10}
Cohomology groups often provide good invariants to classify objects: if two Riemann surfaces (with some additional conditions) agree at each level of their cohomology groups, then they are the same from a topological point of view. In the 1950s, this problem was well understood for algebraic curves over the field of complex numbers, but not much was known for algebraic curves over other fields. In 1954, Jean-Pierre Serre introduced sheaf theory into Algebraic Geometry with coherent sheaves \cite{10.2307/1969915}, and one year later, in \cite{serre1956geometrie}, he showed that, with coherent sheaves in hand, there are cases in which the cohomology groups of complex and non-complex algebraic varieties coincide, by using the Zariski topology.
However, in most cases, the Zariski topology does not have ``enough'' open sets. So, motivated by the goal of proving the Weil Conjectures, A. Grothendieck had the idea of no longer trying to find open sets in the usual sense, and defined an analogous version of the inclusion of open sets using more general morphisms in small categories. This gave birth to Grothendieck topologies and to Grothendieck toposes, particularly the \'etale topos of a scheme $X$ - the category of all \'etale sheaves on $X$ - and so to \'Etale Cohomology. A. Grothendieck, M. Artin, and J.-L. Verdier proved three of the four Weil Conjectures, and the remaining one was proved by Deligne in 1974 \cite{deligne1974conjecture}. The main references for the development of this program, aimed at the proof of the Weil Conjectures, are the Bourbaki seminars \cite{grothendieck1962fondements}, ``\'El\'ements de G\'eom\'etrie Alg\'ebrique'' \cite{PMIHES_1960__4__5_0,PMIHES_1961__8__5_0,PMIHES_1961__11__5_0,PMIHES_1963__17__5_0,PMIHES_1964__20__5_0,PMIHES_1965__24__5_0,PMIHES_1966__28__5_0,PMIHES_1967__32__5_0}, and the ``S\'eminaire de G\'eom\'etrie Alg\'ebrique'' (SGA). We highlight SGA4 \cite{grothendieck1972topos}, as the one dedicated to topos theory and \'etale cohomology.
Now, remember that a \textit{locale} $(L,\leq)$ is a complete lattice such that
\begin{center}
$a \wedge (\bigvee\limits_{i \in I} b_i) = \bigvee\limits_{i \in I}(a \wedge b_i)$, $ \forall a, b_i \in L$.
\end{center}
The poset of all open sets of a topological space $X$ is a locale. Locales coincide with complete Heyting algebras\footnote{The class of all Heyting algebras provides the natural algebraic semantics for the intuitionistic propositional logic, that is the ``constructive fragment'' of the classical propositional logic.}.
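For instance (just to make the definition concrete), in the locale ${\cal O}(X)$ of open sets of a topological space $X$ the meet is intersection, arbitrary joins are unions, and the Heyting implication is computed with an interior:
$$U \wedge \bigvee\limits_{i \in I} V_i = U \cap \bigcup\limits_{i \in I} V_i = \bigcup\limits_{i \in I}(U \cap V_i), \qquad (U \Rightarrow V) = \mathrm{int}\big((X \setminus U) \cup V\big).$$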
Note that in the definition of a sheaf over a topological space we did not use the points of the space; only the locale structure of the open sets was necessary. In fact, we can define when a presheaf $F: {\mathcal{L}}^{op} \rightarrow Set$ is a sheaf, where $\mathcal{L}$ is the category associated to a locale $L$ (which exists since $L$ is a poset). This is one simple case where the notion of sheaf is still available in a category different from $\mathcal{O}(X)$. Are there others? Yes: by introducing an abstract idea of open cover we can define sheaves for any small category $\mathcal{C}$.
First, we will be a bit less general. Suppose $\mathcal{C}$ is a small category with finite limits (or just with pullbacks). A Grothendieck pretopology on $\mathcal{C}$ associates to each object $U$ of $\mathcal{C}$ a set $P(U)$ of families of morphisms $\{U_i \rightarrow U\}_{i \in I}$ satisfying some simple rules. They are: \begin{enumerate} \item The singleton family $\{U' \xrightarrow{f} U \}$, formed by an isomorphism $f : U' \overset{\cong}\to U$, is in $P(U)$;
\item If $\{U_i \xrightarrow{f_i} U\}_{i \in I}$ is in $P(U)$ and $\{V_{ij} \xrightarrow{g_{ij}} U_i \}_{j \in J_i}$ is in $P(U_i)$ for all $i \in I$, then $\{V_{ij} \xrightarrow{f_i \circ g_{ij}} U \}_{i \in I, j \in J_i}$ is in $P(U)$; \item If $\{U_i \rightarrow U\}_{i \in I}$ is in $P(U)$, and $V \rightarrow U$ is any morphism in $\mathcal{C}$, then the family of pullbacks $\{V \times_U U_i \rightarrow V \}_{i \in I}$ is in $P(V)$.
\end{enumerate}
The families in $P(U)$ are called \textit{covering families} of $U$.\\
\textbf{Example:} Note that the ``concrete'' notion of cover of topological spaces provides an example of a Grothendieck pretopology: an object in $\mathcal{O}(X)$ is an open set $U$ in $X$ and the morphisms in $\mathcal{O}(X)$ are inclusions of open subsets of $X$; this category has all finite limits (given by finite intersections of open subsets). Thus it is natural to define a Grothendieck pretopology $P$ on $\mathcal{O}(X)$ by $$\{U_i \overset{f_i}\hookrightarrow U\}_{i \in I} \in P(U) \iff U = \bigcup\limits_{i \in I} U_i.$$ This can be carried out analogously for any locale ${\cal L}$.
We say that the presheaf $F: \mathcal{C}^{op} \rightarrow Set$ is a sheaf for the Grothendieck pretopology $P$ if the following diagram is an equalizer in $Set$:
\begin{center}
\begin{tikzcd}
F (U) \arrow[r] & \prod\limits_{i \in I} F (U_i) \arrow[r, shift left=1 ex]
\arrow[r, shift right=0.5 ex] & \prod\limits_{(i,j)\in I\times I} F (U_i \times_U U_j)
\end{tikzcd}
\end{center} \label{grothsheaf}
However, different pretopologies can determine the same class of sheaves. For instance, if $\bigcup_{i \in I} U_i = U$ is an open cover of the open subset $U \subseteq X$ and $V \subseteq U_j$ for some $j \in I$, then $\{V\} \cup \{U_i\}_{i \in I}$ is another cover of $U$, since $ V \cup \bigcup_{i \in I} U_i = U. $ To remove this ambiguity from the above definition we use the notion of {\em covering sieve}.
Let $C$ be an object in a small category $\mathcal{C}$ (the assumption of the existence of pullbacks in ${\cal C}$ can be dropped now). A \textit{sieve} on $C$ is a collection $S$ of morphisms $f$ with codomain $C$ such that $f \circ g \in S$ for every morphism $g$ with $cod(g) = dom(f)$. Given $h: D \rightarrow C$, define $$h^{\ast}(S) = \{ g \mid cod(g) = D, \ h\circ g \in S\}.$$ The sieve $h^{\ast}(S)$ will assume the role of a pullback in the category, as we see below:
A \textit{Grothendieck Topology} on $\mathcal{C}$ associates to each object $C$ of $\mathcal{C}$ a collection $J(C)$ of sieves on $C$ such that:
\begin{enumerate}
\item The maximal sieve on $C$, $\{f \mid cod(f) = C\}$, is in $J(C)$;
\item If $R$ and $S$ are sieves on $C$, $S$ is in $J(C)$ and $h^{\ast}(R)$ is in $J(D)$ for all $h: D \rightarrow C$ in $S$, then $R$ is in $J(C)$;
\item If $S$ is in $J(C)$, then $h^{\ast}(S)$ is in $J(D)$ for all $h: D \rightarrow C$.
\end{enumerate}
The sieves in $J(C)$ are called the \textit{covering sieves} (or $J$-covers). The pair $(\mathcal{C},J)$ formed by a small category $\mathcal{C}$ and a Grothendieck Topology $J$ is called a \textit{site}. Each pretopology $P$ on a category with pullbacks ${\mathcal{C}}$ determines a least Grothendieck topology $J_P$ on $\mathcal{C}$: a covering sieve $S \in J_P(U)$ is a sieve on the object $U$ that contains some family in $P(U)$.
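To make these notions concrete (this is only an illustration, specializing the definitions to the pretopology $P$ on $\mathcal{O}(X)$ of the example above), a sieve $S$ on an open set $U$ is simply a collection of open subsets of $U$ closed under passing to smaller open subsets, and it is a covering sieve for the induced topology precisely when these subsets cover $U$:
$$S \in J_P(U) \iff \bigcup\limits_{V \in S} V = U.$$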
We can also define sheaves for a Grothendieck topology $J$, but more concepts would have to be introduced, and we can be satisfied with what we have, since both definitions - for Grothendieck topologies and for pretopologies - are equivalent \cite{johnstone77topostheory}. In particular, if ${\mathcal{C}}$ is a small category with pullbacks, a presheaf $F : \mathcal{C}^{op} \to Set$ is a sheaf for the pretopology $P$ iff it is a sheaf for the induced topology $J_P$. Morphisms of sheaves are natural transformations, and so we obtain $Sh(\mathcal{C},J),$ the category of sheaves over this site.
Finally, a \textit{Grothendieck Topos} is a category that is equivalent to $Sh(\mathcal{C},J)$, for some site. Note that $Sh(X) = Sh(\mathcal{C},J_P)$ is a Grothendieck topos, where $\mathcal{C} = \mathcal{O}(X)$ and $J_P$ is the Grothendieck topology generated by the pretopology $P$ described in the example above (that pretopology is not a topology).
Grothendieck toposes are also characterized by purely categorical axioms, by Giraud's Theorem \cite[Theorem 0.45]{johnstone77topostheory}. If a category has some specific properties, it is a Grothendieck topos. Conversely, every Grothendieck topos satisfies these same properties.
We provide below a list of properties we will need in Section \ref{sec:12} to sketch the proof that $Ab(\mathcal{E})$ is $AB5$ and has generators:
\begin{lem}\label{giraud}
A Grothendieck topos $\mathcal{E}$ satisfies the following conditions:
\begin{enumerate}
\item all colimits are universal (i.e., preserved by pullback);
\item has all small coproducts;
\item has a set of generators (i.e., there exists a small family $\{G_i\}_{i \in I}$ of objects in $\mathcal{E}$ such that, given distinct morphisms $f,g: X \rightarrow Y$ in $\mathcal{E}$, there are $i \in I$ and $h: G_i \rightarrow X$ such that $f \circ h \neq g \circ h$);
\item filtered colimits commute with finite limits.
\end{enumerate} \end{lem}
In this list, only the last property is not part of Giraud's Theorem, but we will use it; it follows, not immediately, from the fact that the same holds for $Set$.
\subsection{Elementary Toposes} \label{sec:11}
We add here a short section on a generalization of Grothendieck toposes: the categorical concept of ``elementary topos'', introduced by Lawvere and Tierney in the early 1970s. The relatively simple axioms that define an elementary topos allow a description of an internal language and an internal (intuitionistic) logic: this machinery is useful to perform ``high-level arguments'', for instance to provide a simple proof that the category of abelian group objects in an elementary topos is an abelian category (Theorem \ref{abE-th}). This ``high-level'' method was successfully explored in Algebraic Geometry \cite{blechschmidt2018using}, revealing its potential for other applications in Mathematics; first steps towards high-level Homological Algebra were given in \cite{blechschmidt2018flabby}.
An \textit{elementary topos} is a (locally small) category that is cartesian closed, has a subobject classifier, and has all finite limits (or equivalently, has all finite products and equalizers, or has pullbacks and a terminal object).
A category is cartesian closed if it has binary products and it is possible to define an exponential object for every two objects, as follows: given objects $B$ and $C$, there is an \textit{exponential object} $C^B$ endowed with an \textit{evaluation map} $ev: C^B \times B \rightarrow C$ such that for any other object $A$, endowed with an arrow $f: A \times B \rightarrow C$, there is a unique morphism $\Bar{f}: A \rightarrow C^B$ such that $ev \circ (\Bar{f}\times id_B) = f$. An important property that arises from this definition is the isomorphism $\phi_{AC}^B: Hom(A \times B, C) \overset\cong\to Hom(A, C^B)$, which is natural in $A$ and $C$.
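In the topos $Set$, for instance, these data take a familiar form (we record it here only as an illustration): the exponential object is the set of functions, evaluation is function application, and $\Bar{f}$ is the ``currying'' of $f$:
$$C^B = \{g \mid g : B \to C\}, \qquad ev(g,b) = g(b), \qquad \Bar{f}(a) = \big(b \mapsto f(a,b)\big).$$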
A subobject classifier of a locally small category with all finite limits and terminal object $1$ consists of an object $\Omega$ of \textit{truth values} and a \textit{truth morphism}\footnote{In fact, these data are unique up to unique isomorphism.} $t: 1 \rightarrow \Omega$ such that, given any object $E$ and any ``subobject'' \begin{tikzcd} r: U \arrow[r, tail] & E \end{tikzcd}, there is a unique morphism \begin{tikzcd} \chi_r: E \arrow[r] & \Omega \end{tikzcd} that makes the following diagram a pullback:
\begin{center}
\begin{tikzcd}
U \arrow[d, "r"', tail] \arrow[r, "!"] & 1 \arrow[d, "t"] \\
E \arrow[r, "\chi_r"'] & \Omega
\end{tikzcd}
\end{center}
This \begin{tikzcd} \chi_r: E \arrow[r] & \Omega \end{tikzcd} is called the \textit{characteristic morphism of $r$}. It may look too abstract, but when the category is $Set$, we have $\Omega = \{0,1\}$ and, for each subset $U$ of a fixed set $E$, the morphism $\chi_U$ is the well-known characteristic function. In fact, $Set$ is an example of an elementary topos \cite[Example 5.2.1]{borceux1994handbook3}. More generally, every Grothendieck topos is an elementary topos \cite[Example 5.2.9]{borceux1994handbook3}.
Any elementary topos ${\cal E}$ enjoys some categorical properties that hold in the category $Set$, e.g.: a morphism in ${\cal E}$ is an isomorphism iff it is a monomorphism and an epimorphism; every epimorphism in ${\cal E}$ is a coequalizer; any morphism in ${\cal E}$ has a (unique up to unique isomorphism) factorization through its image - it is the composition of a monomorphism with an epimorphism.
An important type of morphism between toposes $f: \mathcal{F} \rightarrow \mathcal{E}$ is called \textit{geometric morphism}. It consists of a pair of functors, $f_* : \mathcal{F} \rightarrow \mathcal{E}$, the \textit{direct image}, and $f^*: \mathcal{E} \rightarrow \mathcal{F}$, \textit{the inverse image}, such that: \begin{enumerate}
\item $f^*$ is left adjoint of $f_*$;
\item $f^*$ preserves finite limits, i.e., it is left exact. \end{enumerate}
The reader does not need to know the definition of an adjoint pair of functors to understand the ideas covered in this survey, and can think of adjointness as an abstraction of free constructions in Algebra; the sheafification process is an instance of adjointness; the ``exponential conversion'', described by the natural isomorphism $\phi^{B}_{AC}$ above, shows that the functor $(-) \times B$ is the left adjoint of the functor $( - )^B$. We recommend \cite{zbMATH01216133} to the reader interested in better understanding the proofs in which we explicitly use the adjoint property of geometric morphisms.
In general, each side of an adjoint pair of functors determines the other side up to isomorphism, so it is clear that $Set$ is the terminal Grothendieck topos with respect to geometric morphisms. This motivates the definition of a point of a topos ${\cal E}$: it is a geometric morphism $f : Set \to {\cal E}$.
Another distinguished geometric morphism is $i = (i_*, i^*) : Sh({\cal C},J) \to Set^{{\cal C}^{op}}$, where the direct image part is just the (full) inclusion $i_* : Sh({\cal C},J) \hookrightarrow Set^{{\cal C}^{op}}$ and the inverse image part is the sheafification functor, $i^* : Set^{{\cal C}^{op}} \to Sh({\cal C},J)$.
Every topos naturally encodes a ``local set theory'' \cite{bell1988topos}. Indeed, each topos has an internal language, known as the \textit{Mitchell-B\'enabou language}, and a canonical \textit{interpretation} - a procedure that gives meaning to the symbols introduced in the language. In the next section, we will use these notions to prove Theorem \ref{abE-th}.
Providing the complete definition of the internal language of a topos and its respective interpretation would take about three pages of this survey, so we have restricted ourselves to presenting a general idea. We hope this approach helps the reader to understand the rigorous definitions given in \cite[Chap 6]{borceux1994handbook3}.
Given a topos $\cal E$, the Mitchell-B\'enabou language $L(\cal{E})$ consists of three parts: \begin{center}
$\bullet$ Sorts (or types) $\enspace\enspace\enspace\enspace\enspace\bullet$ Terms $\enspace\enspace\enspace\enspace\enspace\bullet$ Formulas \end{center} For each object $A$ in ${\cal E}$, there is an associated sort $s_A$ (they are distinct from each other). The terms $\tau$ of $L({\cal E})$ have a value sort $s(\tau)$ and are inductively defined from the basic terms by applying certain natural constructors - the basic terms of sort $s_A$ are the constants of value sort $s_A$, which correspond to morphisms $1 \to A$ in ${\cal E}$, and an enumerable set of variables $\{x^A_i: i \in \mathds{N}\}$ of sort $s_A$; more complex terms, $t:s_B$, are inductively constructed from simpler terms $t_0:s_{A_0}, \cdots, t_{n-1}:s_{A_{n-1}}$ by a formal application of morphisms, $t = f(t_0, \cdots, t_{n-1})$, where $f : A_0 \times \cdots \times A_{n-1} \to B$ is an arrow in ${\cal E}$. Formulas are inductively constructed from the basic (or atomic) formulas by applying (first-order and higher-order) logical constructors - an atomic formula is defined ``to abbreviate relations between terms''. As a simple example of an (atomic) formula we have $\tau =_A \sigma$, where $\tau$ and $\sigma$ are terms with the same value sort $s_A$.
For the canonical interpretation of the language $L({\cal E})$ in the topos ${\cal E}$, the main idea is to establish a \textit{realization} for each term and a \textit{truth table} for each formula, as follows: Let $\tau$ be a term of type $s_A$ with variables $x_1,...,x_n$ of types $s_{X_1},...,s_{X_n}$, respectively. A \textit{realization} of $\tau$ is an arrow in ${\cal E}$, written $[\tau]: X_1 \times ... \times X_n \rightarrow A $.
Now, given a formula $\varphi$ with (free) variables $x_1,...,x_n$ of types $s_{X_1},...,s_{X_n}$, a \textit{truth table} of $\varphi$ is an arrow in ${\cal E}$, $[\varphi]: X_1 \times ... \times X_n \rightarrow \Omega $, where $\Omega$ is the subobject classifier of ${\cal E}$.
Next, we exemplify the abstract ideas presented above: \begin{enumerate}
\item[(i)] Consider $x$ a variable of type $s_A$. Then its realization is established to be the identity morphism $$[x] \overset{\mathclap{\strut\text{def}}}= id_A : A \to A$$
\item[(ii)] Previously, we mentioned that $\tau=_A \sigma$, where $s(\tau) = s(\sigma) = s_A$, is a formula. Continuing this, we establish that its truth table is the morphism $$X_1 \times ... \times X_n \xrightarrow{([\tau], [\sigma])} A \times A \xrightarrow{\delta_A} \Omega$$ where the free variables in $\tau$ and $\sigma$ have types among $s_{X_1},..., s_{X_n}$ and $\delta_A$ is the characteristic morphism of $\triangle_A \overset{\mathclap{\strut\text{def}}}= (id_A,id_A) : A \rightarrow A \times A$, the diagonal morphism. \end{enumerate}
In this setting, a formula $\varphi$ can be valid or not. We say $\varphi$ is \textit{valid} if its truth table is the composite $$X_1 \times ... \times X_n \xrightarrow{!} 1 \xrightarrow{t} \Omega,$$ where $x_1,...,x_n$ are the free variables of $\varphi$, of types $s_{X_1},...,s_{X_n}$. To denote that $\varphi$ is \textit{valid} we write ${\cal E} \models \varphi$.
As a simple example, we will show that $\mathcal{E} \models x =_A x$, where $x$ is a variable of type $s_A$; in other words, the formula $x =_A x$ is valid in $\mathcal{E}$. By the discussion above, we know that the truth table of $x =_A x$ is $A \xrightarrow{([x], [x])} A \times A \xrightarrow{\delta_A} \Omega$. Since $[x] = id_A: A \rightarrow A$, the morphism $([x],[x])$ is precisely the diagonal morphism $\triangle_A$. By definition, $\delta_A$ is the characteristic morphism of $\triangle_A$. Thus, the following diagram is a pullback \begin{center}
\begin{tikzcd} A \arrow[r, "!"] \arrow[d, "{([x],[x])}"'] & 1 \arrow[d, "t"] \\ A\times A \arrow[r, "\delta_A"'] & \Omega \end{tikzcd} \end{center}
In particular, the diagram commutes, so $A \xrightarrow{!} 1 \xrightarrow{t} \Omega$ is the truth table of $x =_A x$. Therefore, $x =_A x$ is valid in $\mathcal{E}$.
We saw that $x=_A x$ is valid in any topos, but the equality sign carries a lot of information - it is a specific characteristic morphism - and, at first, any property regarding $=_A$ should be handled explicitly with characteristic morphisms and the other tools presented by the internal language. However, when we work with toposes it is usual to omit the internal language machinery and {\em pretend} that objects are sets, monomorphisms are injective functions, epimorphisms are surjective functions, isomorphisms are bijective functions, and so on. Basically, we {\em pretend} that a topos is the topos $Set$, which is possible due to the \textit{Soundness Theorem} \cite[Chap 15]{mclarty1992elementary}. This advantage has a cost: we can only replicate a construction in $Set$ in an arbitrary topos if we restrain ourselves to the ``constructive aspects'' present in intuitionistic logic, because, in general, the law of excluded middle (i.e., $ \varphi \lor \neg \varphi$) does not hold in all toposes. For the same reason, we avoid using the axiom of choice, a ``non-constructive'' set-theoretical axiom. We will apply this procedure in the next section (see Theorem \ref{abE-th}).
\subsection{Grothendieck Topos Cohomology} \label{sec:12} Now, we replicate the former cohomology constructions (Section \ref{sec:6}). We hope it is clear that the (Grothendieck) Topos Cohomology exhibited here is an extension of Sheaf Cohomology; however, new techniques are necessary to prove the topos versions of the results introduced in the previous section\footnote{On the other hand, if a Grothendieck topos has ``enough points'', then its cohomology coincides with some spatial sheaf cohomology, see \cite{moerdijk1996cohomology}.}. We begin with a useful but simple concept:
A parallel pair of morphisms $f,g: A \rightarrow B$ is \textit{reflexive} if there exists a common section $s: B \rightarrow A$ of $f$ and $g$, that is, $f \circ s = id_B = g\circ s$. In particular, a reflexive coequalizer is a coequalizer of a reflexive pair.
\begin{lem} \label{abE-le}
\begin{enumerate}
\item For any elementary topos $\cal{E}$, the forgetful functor \\ $E: Ab(\mathcal{E}) \rightarrow \mathcal{E}$ creates limits and reflexive coequalizers \cite[Section 6]{johnstone77topostheory};
\item For abelian categories, the $AB5$ condition is equivalent to the category having all small colimits and all filtered colimits being universal \cite{grothendieck1957}. \end{enumerate} \end{lem}
We use the above lemma to sketch the proofs of the main results in this subsection.
\begin{teo} \label{abE-th} The category $Ab(\mathcal{E})$ is abelian for any elementary topos $\mathcal{E}.$ \end{teo} \begin{proof} Showing that $Ab(\mathcal{E})$ is an $Ab$-category follows by straightforward calculations. To see that it is an additive category we will use the internal language of $\mathcal{E}$. Thus we need to prove that the following objects exist in $Ab(\mathcal{E})$: terminal and initial objects, binary products, and binary coproducts; moreover, finitary products and finitary coproducts must coincide in $Ab(\mathcal{E})$.
We already know that terminal objects and binary products exist in $Ab(\mathcal{E})$ because $\mathcal{E}$ has finite limits and, by Lemma \ref{abE-le}.1, the forgetful functor creates finite limits, so $Ab(\mathcal{E})$ has finite limits.
We will use the internal logic of the topos to continue the proof. Suppose $\mathcal{E} = Set$; then $Ab(\mathcal{E}) \simeq Ab.$ It is known that $Ab$ is an additive category and that the demonstration of this fact only uses constructive arguments. By the discussion in \ref{sec:11}, $Ab(\mathcal{E})$ is an additive category for any topos $\cal{E}$, and not only for $\mathcal{E} = Set$. Usually, this argumentation is enough, but let's elaborate a bit more.
Consider an object $X$ of a topos $\cal{E}$ equipped with morphisms $m_X: X \times X \rightarrow X,$ $i_X: X \rightarrow X$ and $e_X: 1 \rightarrow X$ satisfying the following formulas of abelian groups \begin{center} $\forall x : X, y: X, z: X \,(m_X(m_X(x,y),z) = m_X(x,m_X(y,z)))$\\ $\forall x: X, 0 : 1 \, (m_X((e_X \times id_X)(0,x)) = x = m_X((id_X \times e_X)(x,0)))$ \\ $\forall x: X \, (m_X((i_X \times id_X)(\triangle(x))) = e_X(!(x)))$\\ $\forall x : X, y: X \, (m_X(x,y) = m_X(y,x))$ \end{center} These formulas may be described by diagrams, so $X$ is equivalent to an object in $Ab(\mathcal{E})$ (see Section \ref{sec:4} to recall the notation). In particular, the terminal object in $Set$ - a singleton - corresponds to the terminal object in $Ab(Set) \simeq Ab$ - the trivial group, denoted by $1$ - thus, for each object $A$ of $Ab(Set)$, there is a unique morphism $f: A \to 1$. In other words: $$\forall A : Ab \,\, (f: A \to 1) \wedge (g: A \to 1) \implies f=g$$ That is, the terminal object can be described by a formula. In the same way, the fact that there is a unique morphism $1 \to A$, for every object $A$ in $Ab$, can also be described by a formula. Therefore, we have a constructive proof that $1$ is a zero object for $Ab(Set)$. The \textit{Soundness Theorem} mentioned in the previous section guarantees that we can replace $Set$ by any topos $\cal{E}$, so $1$ is a zero object in $Ab(\mathcal{E})$. Similarly, if we take the product of two objects in $Set$, we may obtain a product in $Ab(Set)$. Since in $Ab(Set)$ the product has an (internal) abelian group structure and all finitary products are finitary coproducts in $Ab$ (direct sum and direct product coincide for finite indices), we conclude that the binary product is a coproduct in $Ab(Set)$ (in fact, they are biproducts, an equational notion described in Section 2.1) and, again, this proof is constructive, thus it is still valid in $Ab(\mathcal{E})$. Summing up, $Ab({\cal E})$ is an additive category for any elementary topos $\cal{E}$; see \cite[Chap 16.6]{mclarty1992elementary} for the description of the coproduct diagram using formulas.
It is not difficult to see that any morphism $f: A \rightarrow B$ in $Ab({\cal E})$ has a kernel; since the forgetful functor creates limits (Lemma \ref{abE-le}.1), $ker(f)$ is the equalizer $equal(0,f)$.
Now, let $f$ be an epimorphism in $Ab({\cal E})$; then it is also an epi in ${\cal E}$. But any epi in ${\cal E}$ is a coequalizer, so $f = coeq(g,h)$ for some morphisms $g, h$ in ${\cal E}$. Since $f \in Ab({\cal E})$, $f$ can be rewritten as $f=coeq(g',h')$ for some $g',h' \in Ab({\cal E})$. Thus $$f = coeq(g',h') = coeq(0, h'-g') = coker(h'-g')$$
To conclude $Ab(\mathcal{E})$ is an abelian category we have to construct a cokernel of an arbitrary morphism $f: A \rightarrow B$ in $Ab({\cal E})$ and show that any monomorphism in $Ab(\mathcal{E})$ is a kernel in $Ab(\mathcal{E})$.
We take a coequalizer in $\mathcal{E}$ of the pair $m \circ (f \times id_B)$ and $p_2$, where $m: B \times B \rightarrow B$ is the morphism $m$ introduced at the definition of group object, $p_2: A\times B \rightarrow B$ is the projection in the second coordinate, and $f: A \rightarrow B$ is a morphism in $Ab(\mathcal{E})$.
Let $q = coeq(m \circ (f \times id_B),p_2)$. First, note that \begin{tikzcd} A \times B \arrow[r, "m\circ(f\times id_B)", shift left=1 ex] \arrow[r, "p_2"', shift right=0.5 ex] & B\end{tikzcd} is a reflexive pair with section $s = (0,id_B): B \rightarrow A \times B$. Considering parts of the diagram of coequalizer, and of the cartesian product of morphisms, we have: \begin{center} \begin{tikzcd} B \arrow[r, "s"] & A\times B \arrow[d, "p_1"'] \arrow[r, "f\times id_B"] \arrow[rr, "p_2", bend left=49] & B\times B \arrow[r, "m"] \arrow[d, "p_1"] & B \arrow[r, "q"] & C \\
& A \arrow[r, "f"'] & B \arrow[ru, "id_B"'] & & \end{tikzcd} \end{center}
With a lot of diagram calculations and the coequalizer universal property, it is possible to show that $q$, a coequalizer in $Ab(\mathcal{E})$, is the cokernel of $f,$ for any $f$ in $Ab(\mathcal{E}).$
Finally, let $f$ be a monomorphism in $Ab(\mathcal{E})$. Denote $coker(f) = q$; then $q\circ f =0$. Since $q \circ ker(q) =0$, by the universal property of $ker(q)$, there exists a unique $t \in Ab({\cal E})$ such that $f = ker(q) \circ t$, and this $t$ is a mono, because $f$ is a mono. Until now, all the information was obtained from very general categorical arguments. However, ${\cal E}$ is an elementary topos and we can simulate in ${\cal E}$ the proof, made in $Set$ with elements, that establishes that $t$ is ``surjective'' (i.e. an epimorphism) in ${\cal E}$; thus, as we already mentioned before, it follows that $t$ is an isomorphism in the topos ${\cal E}$. Since $t \in Ab({\cal E})$, $t$ is an isomorphism in $Ab({\cal E})$. Summing up, we have shown that any mono in $Ab({\cal E})$ is a kernel; indeed, it is the kernel of its own cokernel.
\end{proof}
\textbf{Remark:} It is possible to prove the above Theorem without the internal language machinery, but the paper \cite{10.2307/43681686} shows that it requires 10 pages of diagram calculations to complete the verification. We know that a proper introduction to the internal language of toposes requires more than 10 pages, yet it is more convenient and efficient in the long run.
By the Grothendieck Theorem \ref{grotheorem}, if an abelian category satisfies $AB5$ and has a generator then it has enough injectives. Thus, to show that $Ab(\mathcal{E})$ has enough injectives, we only need to prove these two conditions.
\begin{teo} If $\mathcal{E}$ is a Grothendieck topos, then $Ab(\mathcal{E})$ is an $AB5$ category and has a generator. \end{teo} \begin{proof} Let's see that $Ab(\mathcal{E})$ satisfies $AB5.$ By Lemma \ref{abE-le}.2, we need to prove that $Ab(\mathcal{E})$ has all small colimits, with all filtered colimits being universal. The first part - $Ab(\mathcal{E})$ has all small colimits - can be proven in several ways, none of them simple: one possibility is presented in the proof of \cite[Theorem 8.11.iii]{johnstone77topostheory}; another argument follows from the construction of the associated sheaf functor (or sheafification functor), which is left adjoint to the inclusion functor $i: Sh(C,J) \to Set^{C^{op}}$ and preserves colimits. We choose not to present the associated sheaf functor here, but we indicate \cite[Chap III.5]{maclane1992sheaves} for a complete explanation.
In a Grothendieck topos, filtered colimits and finite limits commute. Since ${E}$ creates finite limits, it creates filtered colimits and pullbacks. Besides that, all colimits are universal in a topos, that is, they are preserved by pullbacks. Thus filtered colimits are universal in $Ab(\mathcal{E}).$ See Lemma \ref{giraud} to recall the properties of Grothendieck toposes.
Now we prove that $Ab(\mathcal{E})$ has a set of generators.
By Giraud's Theorem, $\mathcal{E}$ has a set of generators $\{G_i\}_{i \in I}$. Let $f,g: X \rightarrow Y$ be morphisms in $Ab(\mathcal{E})$, so that $E(f)$ and $E(g)$ are morphisms in $\mathcal{E}$. If $f \neq g$, since $\{G_i\}_{i \in I}$ is a set of generators of $\mathcal{E}$, there is $h_i: G_i \rightarrow E(X),$ for some $i \in I$, such that $$E(f) \circ h_i \neq E(g) \circ h_i$$ Consider the coproduct universal morphism $h:\coprod_{i \in I}G_i \rightarrow E(X)$ and the canonical morphism $\alpha_i: G_i \rightarrow \coprod_{i \in I}G_i$. We have $h_i = h \circ \alpha_i$, so $$E(g) \circ h \circ \alpha_i = E(g) \circ h_i \neq E(f) \circ h_i = E(f) \circ h \circ \alpha_i $$
Then $E(g) \circ h \neq E(f) \circ h$.
We now use the fact that the forgetful functor has a left adjoint functor, $Z: \mathcal{E} \rightarrow Ab(\mathcal{E})$; this is a generalization of the ``free abelian group'' construction from the topos $Set$ to any Grothendieck topos\footnote{ Because these toposes have the internal ``set of all natural numbers''.}. Applying the adjunction to $h$, we obtain $Z(h):Z(\coprod_{i \in I}G_i) \to X,$ the associated morphism in $Ab({\cal E})$.
The adjointness of $Z$ and $E$ guarantees that $f \circ Z(h) \neq g \circ Z(h)$; thus $Z(\coprod_{i \in I}G_i)$ is a generator of $Ab(\mathcal{E}).$
\end{proof}
Therefore, by the Grothendieck Theorem: \begin{teo} If $\mathcal{E}$ is a Grothendieck topos, then the abelian category $Ab(\mathcal{E})$ has enough injectives. \end{teo}
By Theorem \ref{theo:derivedfunctor}, we need a left exact additive functor, which we will again call the global section functor. First, note that there is a unique (up to isomorphism) geometric morphism $\Gamma : {\cal{E}} \to Set$; it is enough to argue that its inverse image $\Gamma^*: Set \to \cal{E}$ is unique (up to natural isomorphism): by the definition of geometric morphism, $\Gamma^*$ sends terminal objects to terminal objects (it preserves finite limits) and, being a left adjoint, it preserves colimits. Besides that, every set is a disjoint union of singletons (terminal objects in $Set$), so $\Gamma^*$ must be given by $S \mapsto \coprod_{s \in S} 1,$ where $1$ is the terminal object in $\cal{E}$.
Next, we take the direct image functor of $\Gamma$, which must be $\Gamma_* = Hom_{\mathcal{E}}(1,-): \mathcal{E} \to Set$. Finally, we define the global section functor as $$\Gamma_{Ab} := Hom_{\mathcal{E}}(1,-): Ab(\mathcal{E}) \rightarrow Ab(Set).$$ Since $\Gamma_{Ab}$ is induced by the direct image of $\Gamma$, then, by Lemma \ref{abE-le}.(1), $\Gamma_{Ab}$ preserves limits, and thus it is left exact.
It is not difficult to prove that the direct image of any geometric morphism preserves injectives. We will show this here to introduce a usual and simple manipulation of direct and inverse images using adjoint properties:
Let $f: \mathcal{F} \rightarrow \mathcal{E}$ be a geometric morphism, and $I$ an injective object in $Ab(\mathcal{F})$. Consider the following diagram in $Ab({\cal E})$ \begin{center} \begin{tikzcd} X \arrow[r, "m", tail] \arrow[d, "h"] & Y \\ f_*(I) & \end{tikzcd} \end{center} The adjoint property of geometric morphisms allows us to transpose this diagram and obtain the following diagram in $Ab({\cal F})$ \begin{center} \begin{tikzcd} f^*(X) \arrow[r, "f^*(m)", tail] \arrow[d, "\tilde{h}"] & f^*(Y) \\ I & \end{tikzcd} \end{center} Note that $f^*(m)$ is a monomorphism in ${\cal F}$, and thus in $Ab({\cal F})$, since $f^*$ preserves finite limits.
Next, we use the injectivity of $I$ to complete the diagram with a morphism $ g : f^*(Y) \to I$ that makes it commutative in $Ab({\cal F})$. Then we transpose, by the adjoint property, one last time, and find a commutative diagram in $Ab({\cal E})$ that guarantees that $f_*(I)$ is an injective object in $Ab(\mathcal{E})$ \begin{center} \begin{tikzcd} X \arrow[r] \arrow[d] & Y \arrow[ld] \\ f_*(I) & \end{tikzcd} \end{center}
We define the $q$-th cohomology group of $\mathcal{E}$ with coefficients in an object $F$ of $Ab(\mathcal{E})$ as the $q$-th right derived functor of $\Gamma_{Ab}$ applied to $F$. In other words, $$H^q(\mathcal{E}, F) = R^q(\Gamma_{Ab})(F)$$
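As a consistency check (in line with the claim that this construction extends Sheaf Cohomology), when $\mathcal{E} = Sh(X)$ for a topological space $X$, the functor $Hom_{Sh(X)}(1,-)$ is naturally isomorphic to the global section functor $\Gamma(X,-)$ of Section \ref{sec:6}, since morphisms from the terminal sheaf to $F$ correspond to global sections of $F$; hence
$$H^q(Sh(X), F) = R^q(\Gamma_{Ab})(F) \cong H^q(X,F), \qquad q \in \mathds{N}.$$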
We can also define cohomology for objects different from the terminal one: let $B$ be an object of $\mathcal{E}$; since $Hom_{\mathcal{E}}(B,-)$ is a left exact functor, we can consider its right derived functors, denoted by $H^q(\mathcal{E},B;F)$. The problem is how to describe $H^q(\mathcal{E},B;F)$ in terms of $H^q(\mathcal{E}, F)$. The idea is that the functor $B^* : \mathcal{E} \rightarrow \mathcal{E} \downarrow B$, which sends an object $A$ in $\mathcal{E}$ to $p_2: A \times B \rightarrow B$ in $\mathcal{E} \downarrow B$, induces an exact functor $B^*_{Ab} : Ab(\mathcal{E}) \rightarrow Ab(\mathcal{E} \downarrow B)$ that preserves injectives, and it is possible to establish an isomorphism $H^q(\mathcal{E}, B;F) \cong H^q(\mathcal{E}\downarrow B,B^*_{Ab}(F))$. See \cite[page 262]{johnstone77topostheory}.
Grothendieck toposes admit a notion of flabby object: We say that $F$ in $Ab(\mathcal{E})$ is \textit{flabby} if $H^q(\mathcal{E}, B; F) = 0$, for all $q>0$ and all $B$ object in $\mathcal{E}$.
\begin{prop} Every injective object in $Ab(\mathcal{E})$ is a flabby object in $Ab(\mathcal{E})$. \end{prop} \begin{proof}
More generally, any injective object $F$ in an abelian category admits the injective resolution $0 \rightarrow F \xrightarrow{id_F} F \rightarrow 0 \rightarrow 0 \rightarrow ...$ of $F$. Applying the left exact functor $\Gamma$ and taking its right derived functors: $$0 = R^q\Gamma(F) \cong H^q(\Gamma(F^{\bullet})), \quad q > 0.$$ Translating to our scenario, $F$ is an injective object in $Ab(\mathcal{E})$ with the above injective resolution. For each object $B$ in $\mathcal{E}$, the exact functor $B^*_{Ab} : Ab(\mathcal{E}) \rightarrow Ab(\mathcal{E} \downarrow B)$ mentioned previously preserves injectives, so $B^*_{Ab}(F)$ is injective and $H^q(\mathcal{E},B;F) \cong H^q(\mathcal{E}\downarrow B,B^*_{Ab}(F)) = 0$, for all $q > 0$. Therefore, $F$ is flabby. \end{proof}
The following lemma is useful to prove the analogous version of Proposition 3.2. We exhibit the proof provided in \cite{johnstone77topostheory} because it uses manipulations with geometric morphisms that show up every time we work with Grothendieck toposes. \begin{lem} Let $f: \mathcal{F} \rightarrow \mathcal{E}$ be a geometric morphism, with $\mathcal{E} = Sh(\mathcal{C},J)$, $F$ an object in $Ab(\mathcal{F})$, and $l: \mathcal{C} \rightarrow Sh(\mathcal{C},J)$ the canonical functor ($U \mapsto i^*(Hom(-,U))$). Then $R^qf_*(F)$ is the $J$-sheaf associated to the presheaf $U \mapsto H^q(\mathcal{F},f^*l(U);F)$. \end{lem} \begin{proof} We split the proof of this lemma into two parts. First, we consider $J$ to be the minimal topology, and after that $J$ will be an arbitrary Grothendieck Topology.
The Grothendieck topology $J$ being minimal means that $J(C) = \{ \text{the maximal sieve on }C \}$, for every object $C$ in $\mathcal{C}$. The minimal topology implies that $\mathcal{E} = Set^{\mathcal{C}^{op}}.$ Since $f$ is a geometric morphism, $f^*$ preserves finite limits and is left adjoint to $f_*$. So $f_*$ preserves small limits, $f_*(-)(U)$ is a left exact functor, and we can take its right derived functors. Besides that, by the Yoneda lemma, the adjoint property of geometric morphisms, and the definition of the cohomology groups: \begin{align*} R^0f_*(-)(U) \cong f_*(-)(U) & \cong {} Hom_{\mathcal{E}}(Hom(-,U),f_*(-))\\
& \cong {} Hom_{\mathcal{F}}(f^*(Hom(-,U)),-) \\
& \cong {} H^0(\mathcal{F},f^*(Hom(-,U));-): Ab(\mathcal{F}) \rightarrow Ab({Set}) \end{align*} So the lemma holds for $J$ minimal.
Suppose $J$ is an arbitrary Grothendieck Topology on ${\cal C}$, let $i = (i_*, i^*) : \mathcal{E} \rightarrow Set^{\mathcal{C}^{op}}$ be the inclusion geometric morphism, and define $g = i \circ f: \mathcal{F} \rightarrow Set^{\mathcal{C}^{op}}$. The adjoint properties guarantee that $i^*g_* = (i^*i_*)f_* \cong f_*$. Since $i^*$ is an exact functor, $i^*R^qg_* \cong R^q(i^*g_*) \cong R^q(f_*)$. By the facts that $l: \mathcal{C} \rightarrow Sh(\mathcal{C},J)$ is the canonical functor and $i^*$ is the associated sheaf functor \cite[Chap III.5]{maclane1992sheaves}, we have $$g^*(Hom(-,U)) = f^*i^*(Hom(-,U)) = f^*l(U)$$ Finally, we apply this to the calculations for $J$ minimal and conclude the desired result. \end{proof}
\begin{prop} If $F$ is a flabby sheaf, then $R^qf_*(F) = 0,$ for all $q > 0$. In other words, $F$ is $f_*$-acyclic. \end{prop} \begin{proof} We have $R^qf_*(F)$ is the $J$-sheaf associated to $U \mapsto H^q(\mathcal{F},f^*l(U);F)$, by the above Lemma. Since $F$ is flabby, $H^q(\mathcal{F},f^*l(U);F) = 0$ for all $q > 0$. Thus, $R^qf_*(F) = 0,$ for all $q > 0$. \end{proof}
We also have a (Godement) resolution in this context \cite[page 265]{johnstone77topostheory} and, since the notion of flabby sheaf implies acyclicity, we could use it to define cohomology groups using flabby sheaves instead of injective ones - see the discussion at the end of Section \ref{sec:6}. This approach is particularly interesting for cohomology in a topos because injective resolutions depend on the axiom of choice to work properly, while general toposes rely - internally - on intuitionistic logic. However, we observe that this definition of flabby does not coincide with the flabby definition for sheaves over topological spaces when $Sh(\mathcal{C},J) = Sh(X)$. Therefore, how do we constructively generalize the flabby definition in $Sh(X)$ to $Sh(\mathcal{C},J)$? We do not know a definite answer to that, but we will explain more about it in the last section.
\subsection{\v{C}ech Cohomology revisited} \label{sec:13}
As expected, \v{C}ech Cohomology in the Grothendieck topos case is more complicated. We will proceed more carefully now, and use some lemmas without proofs so as not to get lost in technicalities.
We fix $\mathcal{E} = Sh(\mathcal{C},J)$, consider ${\cal P} = Set^{\mathcal{C}^{op}}$, its corresponding presheaf category, and $i: \mathcal{E} \rightarrow {\cal P}$ the canonical inclusion.
Suppose that $\mathcal{C}$ has pullbacks. For sheaves over topological spaces, when constructing \v{C}ech Cohomology, we considered $U_{{i_0},...,{i_q}}$ as the intersection of a finite subfamily of open sets that cover an open set $U$. Now we need to find an analogue of this. Let $\mathcal{U}=(U_i \overset{f_i}\rightarrow U)_{i \in I} $ be a family of morphisms in $\mathcal{C}$, and define\footnote{In other words, we select a specific pullback for each subfamily.} $U_{{i_0},...,{i_q}} := U_{i_0} \times_U ... \times_U U_{i_q}$ (this is the \v{C}ech nerve). Applying the morphisms $U_{{i_0},...,{i_q}} \xrightarrow{\delta_k} U_{{i_0},...,\widehat{i_k},...{i_q}}$ that ``forget $i_k$'', we have a diagram in ${\cal P}$ as follows:
\begin{center} \begin{tikzcd} \dots \arrow[r, shift left=1 ex] \arrow[r, shift left=0.25 ex] \arrow[r, shift right=0.5 ex] & \coprod\limits_{i_0, i_1, i_2}h_{U_{i_0,i_1,i_2}} \arrow[r, shift left=1 ex] \arrow[r, shift right=0.5 ex] & \coprod\limits_{i_0, i_1}h_{U_{i_0,i_1}} \arrow[r] & h_U \end{tikzcd} \end{center} where $h_U \cong Hom(-,U)$ is a representable functor. Recall that the forgetful functor $E: Ab({\cal P}) \rightarrow {\cal P}$ has a left adjoint $Z: {\cal P} \rightarrow Ab({\cal P})$, called the free functor. Since left adjoint functors preserve colimits, we have a canonical isomorphism $Z(\coprod\limits_{j \in J}h_{V_j}) \cong \coprod\limits_{j \in J}Z(h_{V_j}) $.
Apply the free functor in the diagram above to obtain a diagram in $Ab({\cal P})$: \begin{center} \begin{tikzcd} \arrow[r, shift left=1 ex] \arrow[r, shift left=0.25 ex] \arrow[r, shift right=0.5 ex] & \coprod\limits_{i_0, i_1, i_2}Z(h_{U_{i_0,i_1,i_2}}) \arrow[r, shift left=1 ex] \arrow[r, shift right=0.5 ex] & \coprod\limits_{i_0, i_1}Z(h_{U_{i_0,i_1}}) \arrow[r] & Z(h_U) \end{tikzcd} \end{center}
Defining a boundary morphism $(d_q\alpha) = \sum\limits_{k=0}^{q+1}(-1)^k\alpha(\delta_k)_{\mid_{U_{{i_0},...,{i_{q+1}}}}}$, and using the above diagram, we construct a chain complex, denoted by $N_{\bullet}({\cal U})$, where $$N_q({\cal U}) := \coprod\limits_{i_0, i_1, ..., i_q}Z(h_{U_{i_0,i_1,...,i_q}})$$ Since, by \cite[Lemma 8.22]{johnstone77topostheory}, the sequence $\dots \rightarrow N_2({\cal U}) \rightarrow N_1({\cal U}) \rightarrow N_0({\cal U})$ is exact in ${Ab}({\cal P})$, we can use this chain complex to define the \textit{\v{C}ech cochain complex}.
Given a presheaf $F$ in $Ab({\cal P})$, the \textit{\v{C}ech cochain complex} is $$C^q(\mathcal{{U}},F) = Hom_{{Ab}({\cal P})}(N_q(\mathcal{U}),F),$$ with coboundary morphisms $d^q = - \circ d_q$. Since $(-\circ d_{q+1})\circ (-\circ d_{q}) = -\circ(d_q \circ d_{q+1}) = - \circ 0 = 0$, we define the $q$-th \v{C}ech cohomology group of $\mathcal{U}$ with coefficients in $F$ by $H^q(\mathcal{U},F) = Ker( d^{q})/Im(d^{q-1})$.
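As a sanity check (our own computation, combining the Yoneda lemma with the adjunction between $Z$ and $E$), we have $C^q(\mathcal{U},F) \cong \prod_{i_0,...,i_q} F(U_{i_0,...,i_q})$, so in low degrees this complex looks exactly like the topological one; in particular, in degree zero
$$H^0(\mathcal{U},F) = Ker(d^0) \cong \Big\{(s_i)_{i \in I} \in \prod\limits_{i \in I} F(U_i) \;\Big|\; s_{i_0}\mid_{U_{i_0,i_1}} = s_{i_1}\mid_{U_{i_0,i_1}}, \ \forall i_0,i_1 \in I\Big\},$$
which is precisely the equalizer appearing in the definition of a sheaf for a Grothendieck pretopology; thus, if $F$ is a sheaf for a topology in which $\mathcal{U}$ is a covering family, then $H^0(\mathcal{U},F) \cong F(U)$.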
Now, we want to define \v{C}ech Cohomology of an object in the category instead of its coverings.
Considering $\mathcal{V} = (V_j \overset{g_j}\rightarrow U \enspace | \enspace j \in J)$ another family of morphisms in $\mathcal{E}$
that refines the family $\mathcal{U} = (U_i \overset{f_i}\rightarrow U \enspace | \enspace i \in I)$, we select a refinement map $r: \mathcal{V} \rightarrow \mathcal{U}$, that is, a pair formed by a function $r : J \rightarrow I$ and a family of factorizations $$\Bigg\{\begin{tikzcd}[cramped, sep=small] V_j \arrow[r, "r_j"] \arrow["g_j"', rd] & U_{r(j)} \arrow[d, "f_{r(j)}"] \\
& U \end{tikzcd} : j \in J\Bigg\}$$
In the following, we will abuse the notation and use $r_j$ to denote the value in the index set ($r_j = r(j) \in I$) and also the arrow in ${\cal C}$ ($r_j : V_j \to U_{r(j)}$). This will not cause confusion.
If $R$ is the sieve of $U$ generated by the family $\mathcal{U}$ (i.e., for any morphism $\alpha$ in $R$, $\alpha = f_i \circ h_i$, for some $i \in I$ and some $h_i$), then the inclusion map $\mathcal{U} \rightarrow R$ determines a refinement map.
\begin{prop} Given refinement maps $r, s: \mathcal{V} \rightarrow \mathcal{U}$, the induced chain maps $r_{\bullet}$ and $s_{\bullet}$ are chain homotopic. \end{prop} \begin{proof} We have to find a sequence of morphisms $N_q(\mathcal{V}) \rightarrow N_{q+1}(\mathcal{U})$ that makes $r_{\bullet}$ and $s_{\bullet}$ chain homotopic.
Consider $ \sigma = (j_0,...,j_q)$ where $j_0,...,j_q \in J$. For each $l \in \{0,1,...,q\}$ we define a morphism over $U$ as follows:
$$t^l_{\sigma} = (r_{j_0},...,r_{j_l},s_{j_l},...,s_{j_q}): V_{\sigma} \rightarrow U_{(r_{j_0},...,r_{j_l},s_{j_l},...,s_{j_q})} $$
This morphism induces a ``group homomorphism'': $$Z({t^l_\sigma}) : Z(h_{V_{\sigma}}) \rightarrow Z(h_{U_{(r_{j_0},...,r_{j_l},s_{j_l},...,s_{j_q})}}).$$
We can perform two kinds of actions:
(i) Consider the alternating sum of homomorphisms $t_\sigma := \sum_{l =0}^q (-1)^{l+1} \iota_{\sigma}^l \circ Z({t^l_\sigma})$, for this fixed $\sigma$, where $$\iota_\sigma^l : Z(h_{U_{(r_{j_0},...,r_{j_l},s_{j_l},...,s_{j_q})}}) \to \coprod\limits_{i_0, i_1, ..., i_q, i_{q+1}}Z(h_{U_{i_0,i_1,...,i_q,i_{q+1}}})$$ is the canonical homomorphism.
(ii) ``Put together'' the homomorphisms $Z(t^l_\sigma)$, for a fixed $l$: $$t^l_q : \coprod\limits_{j_0, j_1, ..., j_q}Z(h_{V_{j_0,j_1,...,j_q}}) \to \coprod\limits_{i_0, i_1, ..., i_q, i_{q+1}}Z(h_{U_{i_0,i_1,...,i_q,i_{q+1}}})$$ that we denote simply by $t^l_q: N_q(\mathcal{V}) \rightarrow N_{q+1}(\mathcal{U})$
These two actions can be applied in any order we choose, without changing the resulting homomorphism, that we will denote by
$$t^{(q)}: N_q(\mathcal{V}) \rightarrow N_{q+1}(\mathcal{U})$$
Since $r$ and $s$ are refinement maps we can extract indices $i_0,...,i_q \in I$ from $j_0, ..., j_q \in J$, thus we obtain an homomorphism
$$(r_q - s_q) : \coprod\limits_{j_0, j_1, ..., j_q}Z(h_{V_{j_0,j_1,...,j_q}}) \to \coprod\limits_{i_0, i_1, ..., i_q}Z(h_{U_{i_0,i_1,...,i_q}}) .$$
The (non commutative) diagram we must have in mind is:
\begin{center} \begin{tikzcd} \dots \arrow[r, "d^{\mathcal{V}}_3"] & {\coprod\limits_{j_0,j_1,j_2}Z(h_{V_{j_0,j_1,j_2}}}) \arrow[r, "d^{\mathcal{V}}_2"] \arrow[d, "r_2 - s_2"'] & {\coprod\limits_{j_0,j_1}Z(h_{V_{j_0,j_1}})} \arrow[d, "r_1 - s_1"] \arrow[ld, "t^{(1)}"'] \arrow[r, "d^{\mathcal{V}}_1"] & \dots \\ \dots \arrow[r, "d^{\mathcal{U}}_3"] & {\coprod\limits_{i_0,i_1,i_2}Z(h_{U_{i_0,i_1,i_2}}}) \arrow[r, "d^{\mathcal{U}}_2"] & {\coprod\limits_{i_0,i_1}Z(h_{U_{i_0,i_1}})} \arrow[r, "d^{\mathcal{U}}_1"] & \dots \end{tikzcd} \end{center}
We will exhibit the homotopy chain construction for the case $q = 2$:
Let $\sigma = (j_0,j_1), \enspace t^0_{\sigma} = (r_{j_0},s_{j_0},s_{j_1}), \enspace t^1_{\sigma} = (r_{j_0},r_{j_1},s_{j_1})$, and denote $$t_\sigma = \sum\limits_{l=0}^1(-1)^{l+1}\iota^l_\sigma \circ Z(t^l_\sigma) = - \iota^0_\sigma \circ Z(t^0_\sigma) + \iota^1_\sigma \circ Z(t^1_\sigma) := - (r_{j_0},s_{j_0},s_{j_1}) + (r_{j_0},r_{j_1},s_{j_1}).$$
Let $\tau = (j_0,j_1,j_2)$ and $\alpha_{\tau}: Z(h_{V_{\tau}}) \rightarrow \coprod\limits_{j_0, j_1, j_2}Z(h_{V_{j_0,j_1,j_2}})$. For an object $C$ in $\mathcal{C}$, we take $\theta^C_\tau \in Z(h_{V_{\tau}}(C))$ and obtain $$(r_2 - s_2) \circ \alpha_{\tau}(\theta_{\tau}) = \theta_{r_{j_0},r_{j_1},r_{j_2}} - \theta_{s_{j_0},s_{j_1},s_{j_2}}.$$ Apart from that, $d^{\mathcal{V}}_2(\alpha_{\tau}(\theta_{\tau})) = \theta_{j_0,j_1} - \theta_{j_0,j_2} + \theta_{j_1,j_2}$. Applying $t^{(1)}$ to the last equation: \begin{align*}
t^{(1)} \circ d^{\mathcal{V}}_2(\alpha_{\tau}(\theta_{\tau})) &= t^{(1)}(\theta_{j_0,j_1} - \theta_{j_0,j_2} + \theta_{j_1,j_2}) =\\
&+(- \theta_{r_{j_0},s_{j_0},s_{j_1}} + \theta_{r_{j_0},r_{j_1},s_{j_1}}) \\
&- (- \theta_{r_{j_0},s_{j_0},s_{j_2}} + \theta_{r_{j_0},r_{j_2},s_{j_2}}) \\
&+ (- \theta_{r_{j_1},s_{j_1},s_{j_2}} + \theta_{r_{j_1},r_{j_2},s_{j_2}} ) \end{align*}
On the other hand:
\begin{align*}
d^{\mathcal{U}}_3\circ t^{(2)}(\alpha_{\tau}(\theta_{\tau})) &= d^{\mathcal{U}}_3(-\theta_{r_{j_0},s_{j_0},s_{j_1},s_{j_2}}+\theta_{r_{j_0},r_{j_1},s_{j_1},s_{j_2}} -\theta_{r_{j_0},r_{j_1},r_{j_2},s_{j_2}}) \\
&=-(\theta_{s_{j_0},s_{j_1},s_{j_2}} - \theta_{r_{j_0},s_{j_1},s_{j_2}}+\theta_{r_{j_0},s_{j_0},s_{j_2}}-\theta_{r_{j_0},s_{j_0},s_{j_1}})\\
&+(\theta_{r_{j_1},s_{j_1},s_{j_2}}-\theta_{r_{j_0},s_{j_1},s_{j_2}}+\theta_{r_{j_0},r_{j_1},s_{j_2}}-\theta_{r_{j_0},r_{j_1},s_{j_1}})\\
&-(\theta_{r_{j_1},r_{j_2},s_{j_2}}-\theta_{r_{j_0},r_{j_2},s_{j_2}}+\theta_{r_{j_0},r_{j_1},s_{j_2}}-\theta_{r_{j_0},r_{j_1},r_{j_2}}) \end{align*}
Therefore, $$d^{\mathcal{U}}_3\circ t^{(2)}(\alpha_{\tau}(\theta_{\tau})) + t^{(1)} \circ d^{\mathcal{V}}_2(\alpha_{\tau}(\theta_{\tau})) = \theta_{r_{j_0},r_{j_1},r_{j_2}} - \theta_{s_{j_0},s_{j_1},s_{j_2}}
=(r_2 - s_2) \circ \alpha_{\tau}(\theta_{\tau}).$$
So the chain homotopy is proved for $q=2$. For the general case, we consider $\tau = (j_0,...,j_q)$ and then, by similar calculations, we obtain:
$$(r_q-s_q)(\alpha_{\tau}(\theta_{\tau})) = d^{\mathcal{U}}_{q+1}\circ t^{(q)}(\alpha_{\tau}(\theta_{\tau})) + t^{(q-1)} \circ d^{\mathcal{V}}_q(\alpha_{\tau}(\theta_{\tau}))$$ \end{proof}
The following result provides an isomorphism between the cohomology groups of a family of morphisms $\mathcal{U}$ and the cohomology groups of the sieve $R$ generated by $\mathcal{U}$.
\begin{prop}
Let $\mathcal{U} = (U_i \rightarrow U \enspace | \enspace i \in I)$ be a family of morphisms and $R$ the sieve generated by $\mathcal{U}$. Then the inclusion $i: \mathcal{U} \rightarrow R$ induces an isomorphism $\mathrm{H}^q(\mathcal{U},F) \cong \mathrm{H}^q(R,F)$, for any presheaf $F$ in $Ab(\mathcal{P})$. \end{prop} \begin{proof} Since $R$ is generated by $\mathcal{U},$ there is a refinement map $h : R \rightarrow {\cal U}$. On the other hand, we also have that the inclusion $i: \mathcal{U} \rightarrow R$ is a refinement map. By the previous proposition, refinement maps are unique up to chain homotopy; thus $h \circ i$ and $i \circ h$ are cochain homotopic to the corresponding identity refinements. So, by Proposition \ref{homotopic}, $i$ induces an invertible map on cohomology groups. In other words,
$\mathrm{H}^q(\mathcal{U},F) \cong \mathrm{H}^q(R,F)$, canonically.
\end{proof}
If $\mathcal{C}$ has pullbacks, we can define the \v{C}ech cohomology groups of an object $U$ of $\mathcal{C}$ with coefficients in an abelian presheaf $F$ on $\mathcal{C}$ as the filtered colimit below $$ \check{\mathrm{H}}^q(U,F) := \varinjlim\limits_{R \in J(U)} H^q(R,F) $$
Previously, we defined \v{C}ech Cohomology for a family of morphisms with the same codomain instead of considering sieves, but both cases are related: we can switch covering sieves with the families that generate them, by the above Proposition. We introduce this definition to obtain an analogous version of Theorem \ref{Cech-te} for Grothendieck toposes.
\begin{teo} Let $U$ be an object in $\mathcal{C}$ and $F$ a sheaf in $Ab(\mathcal{E})$. There is a homomorphism $k^q: \check{\mathrm{H}}^q(U,F) \rightarrow \mathrm{H}^q(\mathcal{E},l(U);F)$, $q \in \mathds{N}$, where $l: \mathcal{C} \rightarrow Sh(\mathcal{C},J)$ is the canonical functor. Moreover, $k^q$ is an isomorphism if $q = 0$ or $1$, and it is a monomorphism if $q=2$. \end{teo}
To have an isomorphism in other cases we need to impose conditions on subsets of the set of objects in $\mathcal{C}$ as follows:
\begin{prop} Let $\mathcal{E} = Sh(\mathcal{C},J)$ and $F$ a sheaf in $Ab(\mathcal{E})$. If there is a subset $K$ of the set of objects in $\mathcal{C}$ such that: \begin{enumerate}
\item[(i)] $\check{\mathrm{H}}^q(V,F) = 0, \forall q > 0$, for each $V \in K;$
\item[(ii)] For each object $U$ in $\mathcal{C}$, there is a $J$-cover $\{V_j \overset{g_j}\rightarrow U \enspace | \enspace j \in J\}$ with $V_j \in K, \forall j \in J$; \item[(iii)] Every pullback of the form $V \times_U W$ is in $K$, whenever $V$ and $W$ are in $K$. \end{enumerate} Then the homomorphism $ k^q : \check{\mathrm{H}}^q(U;F) \rightarrow \mathrm{H}^q(\mathcal{E},l(U);F) $ is an isomorphism for any object $U$ in $\mathcal{C}$ and for all $q\in\mathds{N}$ \end{prop}
The proofs for both results above use spectral sequences and can be found at \cite[Chap 8]{johnstone77topostheory}.
In particular, this last result can be applied to show the coincidence between sheaf and \v{C}ech cohomology in two cases, each one mentioned in \ref{sec:7} and \ref{sec:8}. Respectively, they are: \begin{enumerate}
\item[(i)] $({\cal C},J)$ as the site canonically associated to a paracompact Hausdorff space $X$ and the coefficient $F$ as any sheaf of abelian groups in $Sh({\cal C}, J)$;
\item[(ii)] $({\cal C},J)$ as the site canonically associated to a scheme $(X, {\cal O}_X)$ and the coefficient sheaf $F$ as any quasi-coherent ${\cal O}_X$-module. \end{enumerate}
\subsection{Applications} \label{sec:14}
We already mentioned that Grothendieck topos cohomology was constructed to prove the Weil conjectures. However, for this purpose, \'Etale Cohomology is enough: there is no need to work with an arbitrary site $(\mathcal{C},J).$ It suffices to take $\mathcal{C}$ to be the slice category of schemes over a scheme $X$ whose objects are \'etale morphisms $Spec(R) \xrightarrow{f} X$ and whose morphisms $f \xrightarrow{\varphi} g$ are, by abuse of notation, the morphisms of schemes $Spec(R) \xrightarrow{\varphi} Spec(R') $ such that $g \circ \varphi = f$.
\'Etale cohomology has good properties: e.g., it can be related to singular cohomology, it has a K\"unneth formula, and a Poincar\'e Duality with an adequate formulation. Furthermore, it has applications in number theory, $K$-theory, and the representation theory of finite groups, besides its original use in algebraic geometry over fields different from $\mathds{C}$ and $\mathds{R}$.
For other sites, we obtain other cohomologies such as crystalline, Deligne, and flat cohomologies. They also are instances of the Grothendieck topos cohomology we presented.
There are other kinds of applications of Grothendieck topos cohomology. If $\mathcal{C}$ is a small category, and $F$ is an abelian presheaf in $Ab(Set^{C^{op}})$, we can define a cochain complex $C^q(\mathcal{C},F) = \prod\limits_{c_0\leftarrow...\leftarrow c_q } F(c_q)$ with an appropriate coboundary $d^{q}: C^{q}(\mathcal{C},F) \rightarrow C^{q+1}(\mathcal{C},F)$, to obtain $H^{q}(\mathcal{C},F) = Ker(d^q)/Im(d^{q-1})$ as the cohomology groups of the category $\mathcal{C}$ with coefficients in $F$. Then, we have an isomorphism $H^{\bullet}(\mathcal{C},F) \cong H^{\bullet}(Set^{\mathcal{C}^{op}},F)$. For a proof of this and an explicit description of the coboundary maps, consult \cite[Chap.II.6]{moerdijk1995classifying}.
A simple example of this isomorphism manifests itself when the presheaf category is $Set^G$, where $G$ is a group seen as a category with a single object. In such a case, the objects in $Ab(Set^G)$ are right modules over the group ring $\mathds{Z}G$. Thus, the cohomology groups of $G$ obtained from group cohomology are isomorphic to the sheaf cohomology groups of $Set^{\mathcal{C}^{op}}$. This is better known in the form $H^{\bullet}(BG,M) \cong H^{\bullet}(G,M)$, where $BG$ is the classifying space of $G$ and $M$ is a $G$-module. Consult \cite[Chap. II]{adem2013cohomology} for the usual approach. Furthermore, given a topological group $G$, there is a natural and useful variation of the group cohomology mentioned above, defined on the abelian category ${\cal A}_G$ of all {\em discrete $G$-modules with a continuous $G$-action} - in particular, this is the case studied in profinite cohomology, which encompasses Galois Cohomology. If ${\cal E}_G$ is the category of all sets (i.e., discrete spaces) endowed with a continuous $G$-action and $G$-equivariant maps, then: ${\cal E}_G$ is a Grothendieck topos, the expected equivalence $Ab({\cal E}_G) \simeq {\cal A}_G$ holds, and the cohomology of the topos ${\cal E}_G$ coincides with the {\em continuous} cohomology of $G$ described above.
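For concreteness, we recall the standard homological description of the group cohomology appearing here (this is a well-known fact, stated only as an illustration of the coefficients involved):
$$H^q(G,M) \cong Ext^q_{\mathds{Z}G}(\mathds{Z},M), \qquad H^0(G,M) \cong M^G = \{m \in M \mid m \cdot g = m, \ \forall g \in G\}.$$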
Hence, Grothendieck topos cohomology is related not only to cohomologies attached to specific sites, but also to cohomology theories that are not presented in sheaf-theoretic terms. We will provide further applications in the next section.
\section{Remarks and New Frontiers} \label{sec:15}
Toposes are excellent environments for internalizing mathematical objects, and formulas of a language (a type theory) can be interpreted as arrows into the subobject classifier. For example, to each formula $\phi(x)$ with a free variable $x$ of type $X$ is associated the subobject of $X$ that classically corresponds to ``$\{x \in X \mid \phi (x) \}$''. In this way, we can interpret a higher-order type theory in a topos via the so-called Kripke-Joyal semantics. Results on elementary toposes include that they are finitely co-complete, that they represent the idea of ``parts of an object'', and that their internal logic is intuitionistic; in particular, the parts of an object define an internal Heyting algebra. So, a topos is an environment for higher-order intuitionistic mathematics --- evidently not all toposes
are equivalent, so there is a diversity of environments.
Daily mathematics makes use of set theories to represent higher-order aspects of mathematical theories: this can be understood as the use of the higher-order internal logic of the topos $Set$. Since the 1970s, higher-order intuitionistic internal logic has found mathematical applications in toposes:\\ (i) an internal approach to the Serre-Swan duality was described in \cite{mulvey1974intuitionistic} through a simple theorem (essentially) of Linear Algebra, Kaplansky's Theorem\footnote{Every module over a local ring that is projective and finitely generated is a free module.}; \\ (ii) in model constructions, via Grothendieck toposes, of synthetic differential geometry (\cite{moerdijk1991models}, \cite{mclarty1992elementary}): for instance, \cite{moerdijk1991models} contains an internal version of de Rham's Theorem (a deep connection between de Rham cohomology and singular homology);\\ (iii) to represent results of quantum mechanics as results of classical mechanics internal to a topos \cite{flori2013first}; \\ (iv) in algebraic geometry: although the origin of Grothendieck's notion of topos came from specific needs of algebraic geometry, more systematic explorations of the internal language of toposes for this area are very recent. For example, \cite{blechschmidt2018using} contains a dictionary between the external and the internal point of view (objects in a topos are, internally, just sets; monomorphisms are injections; sheaves of rings are rings) and works with the big and small Zariski toposes associated to a scheme to exhibit simpler definitions and proofs using the internal language provided by these toposes, while \cite{blechschmidt2018flabby} explores a proposal of a constructive version of the main homological tools (flabby and injective objects).
Attempts to develop constructive approaches to homological algebra, without the aid of the axiom of choice (as in the usual injective resolution construction), are different from ``cohomology in toposes'', although they can be related. In the latter case, we usually are interested in a Grothendieck topos and in constructing cohomology groups with coefficients in $Ab(Sh(\mathcal{C},J))$, for some site $(\mathcal{C},J)$. That is exactly what we exposed in the previous section, using P. Johnstone's book ``Topos Theory'' \cite{johnstone77topostheory} as the main reference. However, in analogy with the extension of sheaf cohomology to Grothendieck topos cohomology, how could we extend Grothendieck topos cohomology to (elementary) topos cohomology? The first problem is that for an elementary topos $\mathcal{E}$ we cannot guarantee that $Ab(\mathcal{E})$ has enough injectives, so it is not clear how to define the right derived functors; there is a way to construct them using noetherian abelian categories \cite{WILDESHAUS2000207}, but we do not know of any systematic study identifying when $Ab(\mathcal{E})$ is noetherian abelian. Still on the topic of ``topos cohomology'', we could try to switch $Ab(\mathcal{E})$ for $\mathcal{E}$. In this direction, we have the work of I. Blechschmidt, which is closely related to developing a constructive version of homological algebra: he reintroduces the concepts of injective object and flabby sheaf as objects in an elementary topos, and replaces injective resolutions with flabby resolutions to avoid the use of the axiom of choice. However, in the final chapter of his article, he calls attention to the open problem of how to embed an arbitrary sheaf of modules into a flabby sheaf in intuitionistic logic. We understand that this way of proceeding (defining objects inside a topos) was successfully adopted before, in another context, by A. Grothendieck, when he defined the fundamental group of a topos and originated ``Grothendieck's Galois Theory'' \cite{grothendieck1971revetement}. The theory was later extended by A. Joyal and M. Tierney in \cite{joyal1984extension}. The results of this latter article are constantly used in current work, which indicates that studies in the same direction for homological algebra could lead to important discoveries.
Pertinent to this discussion, we can cite Blass's work showing that cohomology can detect the failure of the axiom of choice \cite{blass1983cohomology}. He demonstrates that the axiom of choice is equivalent to $H^1(X,G) = 0$ for every discrete set $X$ and every group $G$. Also, the triviality of $H^1(X,G)$ for all $G$ is equivalent to the projectivity of $X$. This strengthens the relation between logic and geometry that we have been pointing out through toposes. Note that Blass's results suggest a justification for the fact that $Ab(\mathcal{E})$ does not have enough projectives in general, because of the intuitionistic logic of toposes.
We believe the subject of ``topos cohomology'' is far from maturity. One of the main references on the subject, SGA4, only addresses the case $Ab(Sh(\mathcal{C},J))$. P. Johnstone, one of the most prominent topos theorists of our days, has not yet published the third volume of ``Sketches of an Elephant: A Topos Theory Compendium'', which would contain the subject of homotopy and cohomology in toposes (besides chapters about toposes as mathematical universes); the first two volumes were released in 2002 \cite{johnstone2002sketches,johnstone2002sketches2}.
Regarding constructive methods for homological algebra, there also are investigations not involving toposes. For example, in \cite{ubsi_1179}, S. Posur provides constructive methods in the context of abelian categories using generalized morphisms (we highlight that this is not the same definition given by S. MacLane in \cite{zbMATH01216133}). He proves the Snake Lemma, establishes notions of generalized cochain complexes and generalized homology groups, and presents them in a concrete way, i.e., he displays the connecting morphism explicitly rather than only asserting its existence by universal properties. More than that, he applies the theoretical definitions to create an algorithm capable of computing spectral sequences for a certain abelian category, and uses it to calculate cohomology groups of (specific) equivariant sheaves.
In recent work (\cite{shulman2018linear}), M. Shulman argues that ``Linear Logic'' can clarify some constructive methods better than intuitionistic logic. We highlight Shulman's claim that it provides constructivist definitions (and proofs) of concepts elaborated in classical logic. Hence, a ``linear approach'' could also be useful for the problems we mentioned concerning constructive cohomology. Furthermore, generalized metric spaces (also called quasi-pseudo-metric spaces, or Lawvere metric spaces \cite{lawvere1973metric}) can be redefined using linear logic \cite{shulman2018linear}.
Linear Logic is a weakening of intuitionistic logic: it is a ``sub-structural'' logic, i.e., the usual structural rules of derivation do not apply in general, with only restricted versions of the contraction and weakening rules available. In linear logic, the intuitionistic conjunction splits into two binary operators: $\wedge$, the binary infimum of the lattice, which does not necessarily distribute over arbitrary suprema; and a multiplicative conjunction (denoted $\otimes$ in the quantale setting below), which does distribute over arbitrary suprema but need not be idempotent or commutative.
The study of linear logic was initially developed by Jean-Yves Girard \cite{girard1987linear} in the context of the polymorphic $\lambda$-calculus, but its nature reconciles, through such splittings, somewhat irreconcilable elements, and its many interpretations have profound meaning. Purely intuitionistic contexts cannot prove the law of the excluded middle, while in classical logic it is nothing less than an axiom. Linear logic has two candidates for the disjunction $\lor$: one for which it is impossible to prove the excluded middle, and another for which the proof is trivial.
The presence of ``duplicated'' operators is natural, as these represent useful fragments of the usual logical operations. The interesting outcome is related to the famous Curry-Howard correspondence: in the same way that intuitionistic logic is related to type theory and the simply typed $\lambda$-calculus (implication can be interpreted as a function type, conjunction as a product, and disjunction as a coproduct), giving rise to ``proof relevance'', linear logic introduces, via non-idempotency or non-commutativity, the relationship of linear implication to processes that are ``resource relevant''.
Categorical semantics for various forms of linear logics have long been explored (e.g. \cite{seely1987linear}, \cite{hyland1993full}). Roughly speaking, we can say that closed monoidal categories have (some form of) internal linear logic.
Something very different occurs when we focus on possible set-theoretical or higher-order aspects (\cite{lambekscott1986catlog}, \cite{bell1988topos}) that are internal to a special type of category governed by some form of linear logic.
A natural, and relatively simple, way to expand the notion of (categories of) sheaves with internal logic that is no longer intuitionistic is through appropriate adaptations of the sheaf notion defined over a complete Heyting algebra $(H, \leq, \wedge) $ to other algebras that are also complete lattices.
These set-theoretical aspects of the sheaves on ``good'' complete lattices can also be approached in an alternative, but often ``equivalent'', way, through the notion of an expansion $V^{(A)}$ of the universe of all sets, $V$, by an algebra $A$ which is a complete lattice: in the (traditional) case where $A$ is a Boolean algebra or a Heyting algebra, this is presented in \cite{bell2005boolean}.
The complete lattices that have a natural relationship with linear logics are the quantales (see \cite{yetter1990quantal}). A quantale $(Q, \leq, \otimes, \top)$ is a structure where $(Q, \leq)$ is a complete lattice with top element $\top$, $(Q, \otimes)$ is a semigroup, and the distributive laws $a \otimes \bigvee_{i \in I} b_i = \bigvee_{i \in I} a \otimes b_i$ and $(\bigvee_{i \in I} b_i) \otimes a = \bigvee_{i \in I} b_i \otimes a$ hold.
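As a brief illustration (not needed for what follows, but convenient for orientation): a complete Heyting algebra $(H,\leq,\wedge)$ is exactly a commutative quantale with $\otimes=\wedge$, the distributive law specializing to the frame law $a\wedge\bigvee_{i\in I} b_i=\bigvee_{i\in I}(a\wedge b_i)$; and the extended half-line $([0,\infty],\geq,+,0)$, used below, is a commutative quantale in which suprema with respect to $\geq$ are numerical infima, so the distributive law reads $a+\inf_{i\in I} b_i=\inf_{i\in I}(a+b_i)$. Its unit $0$ is also the top element for $\geq$, so this quantale is moreover semicartesian.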
There are some early explorations of the strategy of considering ``generalized sheaves'', with applications in Mathematics. In \cite{con97}, a notion of a category of ``sheaves'' over a quantale $ (Q, \leq, \otimes, \top) $ which is right-sided ($ a \otimes \top = a$, $a \in Q $) and idempotent ($ a \otimes a = a$, $a \in Q $) is established, and the above-mentioned Kaplansky Theorem is explored, now reformulated in the internal linear logic of this category of sheaves. In the work \cite{MeMa}, categories of sheaves are considered over quantales $ (Q, \leq, \otimes, \top) $ satisfying a different balance: they are commutative and semicartesian (or two-sided\footnote{Since it is already commutative, this is the same as requiring it to be right-sided.}). It is important to emphasize that the two-sided, commutative, and {\em idempotent} quantales coincide with the complete Heyting algebras.
In \cite{MeMa}, given a commutative semicartesian quantale $(Q,\leq,\odot, 1)$, one can construct what is called a $Q$-set, in the same spirit as the construction of sheaves over complete Heyting algebras. These $Q$-sets do not form a category of sheaves, but they preserve a significant part of the sheaf structure, which motivated the authors to call the result a ``sheaf-like category''. Besides that, pseudo-metric spaces are examples of $Q$-sets (when $Q = ([0,\infty],\geq, +, 0)$), and also of categories enriched over $Q$. This approach seems to expand the development of the model theory of Continuous Logic, useful in Functional Analysis.
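To make the enrichment explicit (a standard observation going back to \cite{lawvere1973metric}, sketched here under the conventions above): a category enriched over $([0,\infty],\geq,+,0)$ is a set $X$ together with a distance $d(x,y)\in[0,\infty]$ for all $x,y\in X$ such that the unit and composition constraints become the numerical inequalities $0\geq d(x,x)$ and $d(x,y)+d(y,z)\geq d(x,z)$, that is, $d(x,x)=0$ and the triangle inequality; symmetry and the separation axiom are not required, which is why such structures are called generalized (or Lawvere) metric spaces.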
In \cite{leinster2017magnitude}, T. Leinster and M. Shulman use semicartesian monoidal categories $V$ to define the magnitude homology of $V$-categories (categories enriched over $V$). In particular, if $V$ is the extended non-negative real line $[0, \infty]$, which admits a natural structure of a commutative semicartesian quantale $([0,\infty],\geq, +, 0)$, then the corresponding $V$-categories are generalized metric spaces. Magnitude homology describes a general notion of ``size''. Depending on the case, it coincides with the cardinality of a set, the Euler characteristic of a topological space, or that of an associated algebra. In the metric space context, the magnitude machinery provides interesting geometric invariants such as area \cite{willerton2014magnitude}, volume \cite{barcelo2018magnitudes}, and Minkowski dimension \cite{meckes2015magnitude}.
This conjuncture motivates the authors of this survey to wonder about cohomological aspects internal to other categories governed by other logics. In particular: (i) whether developments in continuous model theory (for instance with applications to the theory of Banach algebras) could have internal cohomological aspects better represented in the linear logic style; (ii) whether, when exploring metric spaces as categories enriched over a quantale, it is natural to consider possible connections between magnitude (co)homology and an adapted sheaf-like cohomology given by some appropriate version of sheaves over quantales.\\
\textbf{Acknowledgements:} The comments of Peter Arndt and Walter de Siqueira Pedra, members of the judging committee for the master's dissertation (supported by CNPq) of the first author, written under the supervision of the second author, guided us toward paths of study we would not have perceived alone, improving this text with more applications and related lines of research. We are thankful for it.
\end{document} |
\begin{document}
\title{Complex Energies and Beginnings of Time Suggest a Theory of
Scattering and Decay} \author{A.~Bohm\thanks{[email protected]}\\
Department of Physics\\
The University of Texas at Austin\\
Austin, TX 78712 \and
P.~Kielanowski\thanks{[email protected]}\\
Departamento de F\'{\i}sica, CINVESTAV, Mexico\\
\and
S.~Wickramasekara\thanks{[email protected]}\\
Department of Physics and Astronomy, Rice University\\
Houston, TX 77005}
\maketitle \begin{abstract}
Many useful concepts for a quantum theory of scattering and decay
(like Lippmann-Schwinger kets, purely outgoing boundary conditions,
exponentially decaying Gamow vectors, causality) are not well
defined in the mathematical frame set by the conventional (Hilbert
space) axioms of quantum mechanics. Using the Lippmann-Schwinger
equations as the takeoff point and aiming for a theory that unites
resonances and decay, we conjecture a new axiom for quantum
mechanics that distinguishes mathematically between prepared states
and detected observables. Suggested by the two signs $\pm i\epsilon$
of the Lippmann-Schwinger equations, this axiom replaces the one
Hilbert space of conventional quantum mechanics by two Hardy spaces.
The new Hardy space theory automatically provides Gamow kets with
exponential time evolution derived from the complex poles of the
$S$-matrix. It solves the causality problem since it results in a
semigroup evolution. But this semigroup brings into quantum physics
a new concept of the semigroup time $t=0$, a beginning of time. Its
interpretation and observations are discussed in the last section. \end{abstract}
\section{Introduction}\label{sec1} Quantum theory falls into, roughly, two categories~\cite{bohm1}: \begin{enumerate} \item[{I.}] The description of spectra and structures of micro-physical
systems \item [{II.}]Scattering and decay phenomena \end{enumerate}
The distinction between the two categories is primarily one between two ways one looks at the physical objects, rather than a separation of physics into two different areas. The first is used for stable states and also for slowly decaying states when the finiteness of their lifetime is ignored. The second is used for rapidly decaying states and resonance phenomena. The notions of slow and fast are not defined by a time scale in nature but by the capabilities of the experimental apparatuses that we choose or are forced to use in a particular experiment. For instance, the singly excited states of atoms and molecules are mostly treated like stable states whereas the doubly excited states (Auger states) are mostly treated as resonances or decaying states. However, when one does the calculations of the energies of the Auger states (e.g., of He) one ignores that they decay~\cite{bohm1}.
The same holds in nuclear physics and in high energy physics. When one is interested only in the spectra and the structure of relativistic particles, one ignores their lifetimes even though the different states of the same multiplet can have lifetimes that are orders of magnitude apart. (E.g., one can measure the lifetime of $\Omega^-$ but one cannot measure the lifetime of $\Delta$~\cite{frauenfelder}. The existence and properties of $\Delta$ are determined from lineshape measurements and lifetime was chosen as the inverse of the lineshape width on the basis of some theoretical ideas/approximations for which a theory did not exist \cite{levy}.)
For category I (spectra and structure), one uses a theory of stationary states and time symmetric (reversible) evolutions. The energy values are discrete and the time evolution is unitary and the superpositions are effectively finite. Such systems are well described by conventional quantum mechanics in the Hilbert space ${\cal H}$. Infinite superpositions are handled by perturbative methods (of discrete spectra).
The second category (scattering and decay) deals with continuous energy spectra and predominantly asymmetric time evolutions. If one wants to use energy eigenstates, the continuous energy values already require more than what the conventional axioms of quantum mechanics are able to accommodate. This has been overcome by introducing the Dirac kets $\left|E\rangle\right.$, which -if they are mathematically defined at all- are defined as functionals on the Schwartz space. With this definition, energy wave functions $\psi(E)=\<E|\psi\rangle$ do not constitute the entire Hilbert space of (Lebesgue) square-integrable functions, but only the subspace of infinitely differentiable, rapidly decreasing functions, i.e., Schwartz space functions.
The introduction of Dirac kets augments the conventional axiomatic framework of quantum mechanics based on the Hilbert space and leads to the Gelfand triplet $\Phi\subset{\cal H}\subset\Phi^\times$, where $\Phi$ is the Schwartz space and
$\left|E\rangle\right.\in~\Phi^\times$~\cite{bohm1}. However, the Gelfand triplet based on the Schwartz space is not sufficient to obtain a theory that includes scattering and decay. The reason is that the dynamical (Schr\"odinger or Heisenberg) equations, when defined as differential equations in the Schwartz space of wavefunctions, integrate to a continuous group of evolution operators, much like the unitary group solution of these equations in the Hilbert space.
In contrast, resonances and decaying states have been intuitively associated to an asymmetric ``irreversible'' time evolution~\cite{merzbacher}. Thus, they require a time asymmetric theory, and in the absence of such a mathematical theory, their description can only be approximate and must contain contradictions. If one is guided by the Hilbert space mathematics, one always runs into problems with a quantum theory of resonances and decay; in particular, Gamow vectors with exponential decay do not exist in the Hilbert space. Therefore, in the heuristic treatment of scattering theory, one just ignored the mathematical subtleties of the Hilbert space. In particular, one worked with mathematically undefined kets
$\left|E^\pm\rangle\right.$, used an infinitesimal imaginary energy part $\pm i\epsilon$ to obtain, respectively, the incoming and outgoing solutions of the Lippmann-Schwinger equations~\cite{lippmann}, and distinguished between ``states at time $t'<t_0=$ time defined by preparation'' and ``states characteristic of the experiment'', observed at $t''>t_0$~\cite{feynman}. One restricted by fiat the time $t$ in $e^{iHt}$ to $t\geq0$~\cite{gell-mann}, and for decaying states, one postulated purely outgoing boundary conditions~\cite{peierls}, undisturbed by the fact that it was in conflict with the unitary group evolution $-\infty<t<\infty$, a direct consequence of the conventional Hilbert space axioms of quantum mechanics (by the Stone-von Neumann theorem \cite{stone, neumann}). These heuristic methods were successful for physical applications, but when one compared them with the mathematical consequences of the Hilbert space axiom, one had contradictions. Examples of these are: the exponential catastrophe, in which Gamow vectors and unitary time evolution are mutually contradictory (see \cite{bohm2} and references therein); deviations from the exponential decay law~\cite{bohm3}; and problems with (Einstein) causality~\cite{fermi}.
It is thus clear that one has to go beyond the mathematical theory which has worked for Category I problems. But many of the empirical notions, like Gamow states and Lippmann-Schwinger kets, have been very successful for the descriptions of scattering and decay, and their successful features need to be preserved when they are incorporated into the new rigorous theory. However, other mathematical consequences of the conventional axioms need to be eliminated. This means we require a new hypothesis which preserves the successful features and alters the conflicting fallouts from the conventional theory. New mathematical entities will have to be defined, which we will call again by their old names, like Lippmann-Schwinger kets or Gamow kets, but they will now have new features and be constituents of a consistent theory of resonance scattering and decay. The new mathematical hypothesis will be conjectured taking the useful features of these heuristic notions as the starting point.
\section{Conventional Quantum Theory Conflicts\\
with the Lippmann-Schwinger Equations}\label{sec2}
By conventional quantum theory, we hereon mean not only the usual axioms~\cite{neumann} in terms of the Hilbert space mathematics, but also the Dirac formalism mathematically justified by, as stated above in the Introduction, a Gelfand triplet of the Schwartz space. The axiomatic framework of conventional quantum mechanics consists of the following: \begin{itemize} \item[{(A1)}] One distinguishes (physically) between observables
represented by self-adjoint operators (e.g., $A$, $\Lambda$
(positive operators), or vectors $\psi$ if
$\Lambda=|\psi\rangle\langle\psi|$) and states represented by trace
class operators (e.g., $W$ or vectors $\phi$ if $W=|\phi\rangle\langle\phi|$).\\
The quantities compared with experimental data are the Born
probabilities ${\cal P}_{W(t)}(\Lambda)={\rm Tr}(W(t)\Lambda)={\rm
Tr}(W\Lambda(t))$, or, in the special case $W=|\phi\rangle\langle\phi|$ and
$\Lambda=|\psi\rangle\langle\psi|$, ${\cal P}_{\phi(t)}(\psi)=
\left|\langle\psi|\phi(t)\rangle\right|^2=\left|\langle\psi(t)|\phi\rangle\right|^2$.
That is,
\begin{equation}
{\cal P}_{\phi(t)}(\psi)=\left|\langle\psi|\phi(t)\rangle\right|^2=
\left|\langle\psi(t)|\phi\rangle\right|^2\simeq\frac{N_1(t)}{N}\nonumber
\end{equation}
The experimental quantities $\frac{N_1(t)}{N}$ are the ratios of
large integers (detector counts which necessarily change in time in
discrete steps). On the other hand, every mathematical theory is an
idealization and thus quantum theory also idealizes to continuous
time translations, in consequence of which the calculated Born
probabilities ${\cal P}_{\phi(t)}(\psi)$ change continuously in time
in a particular way. The equality between the two quantities ${\cal
P}_{\phi(t)}(\psi)$ and $\frac{N_1(t)}{N}$ is approximate --and
the sign $\simeq$ expresses this aspect of the statistical character
of quantum mechanical predictions-- and the meaning of the
continuity for $\phi(t)$ or ${\cal P}_{\phi(t)}(\psi)$ as a function
of $t$ is a mathematical choice. \end{itemize} In conventional quantum mechanics one makes this choice by identifying \begin{itemize} \item[{(A2)}] The set of states $\{\phi\}$= The set of observables
$\{\psi\}={\cal H}=$ Hilbert space \end{itemize} In Dirac's formalism one assumes in addition that \begin{itemize} \item[{(A3)}] for every observable, e.g., $H$, one has a complete set
of eigenkets $|E\rangle$ such that
\begin{itemize}
\item [{(3a)}]$H|E\rangle=E|E\rangle$
\noindent and
\item[{(3b)}] Every vector, state $\phi$ or observable $\psi$, is a
continuous superposition of the eigenkets extending over all
``physical values'' $0\leq E<\infty$:
\begin{equation}
\phi=\sum_{j,j_3,\eta}\int
dE|E,j,j_3,\eta\rangle\<E,j,j_3,\eta|\phi\rangle\nonumber
\end{equation}
(here $j,j_3$ and $\eta$ are some additional quantum numbers
representing the degeneracy of the eigenkets with energy $E$.)
\end{itemize} \end{itemize}
Nearly everyone discussing the foundations of quantum mechanics \cite{foundations} distinguishes between states and observables as asserted by (A1) above. The Hilbert space axiom (A2) is already in conflict with this hypothesis (A1) because the content of (A1) is a basic distinction between a state and an observable. Also, the hypothesis (A3), the Dirac formalism, is not possible within the framework of the Hilbert space axiom (A2) since neither (3a) nor (3b) is well defined as a vector identity in the Hilbert space when $E$ is a continuous parameter.
One can overcome this difficulty and make (A3) mathematically tenable by restricting the vectors $\{\phi\}$ and $\{\psi\}$ to a subspace $\Phi$ of the Hilbert space and constructing a Gelfand triplet,
$\{\phi\}=\{\psi\}=\Phi\subset{\cal H}\subset\Phi^\times$. With this choice of $\Phi$, the eigenkets $|E\rangle$ can be defined as the elements of the dual space $\Phi^\times$ and (3b) can be proved as the nuclear spectral theorem. As stated above, if the Schwartz space is chosen for $\Phi$ so that energy wavefunctions
$\phi(E)=\<E|\phi\rangle=\overline{\langle\phi|E\rangle}$ are smooth and rapidly decreasing at infinity, then the dual space $\Phi^\times$, which consists of continuous anti-linear functionals on $\Phi$, is realized by the space of tempered distributions. Therefore, in this representation, the eigenkets $|E\rangle$ find realization as tempered distributions.
In scattering theory, one has in-states $\{\phi^+\}$ and out-observables $\{\psi^-\}$ (which are usually called out-states). An in-state $\phi^+$ is prepared at $t\rightarrow-\infty$ in the asymptotic region as the interaction-free in-states $\phi^{\rm in}$ such that \begin{equation}
\phi^{\rm in}\rightarrow\phi^+\nonumber\\ \end{equation} Similarly, for $t\rightarrow\infty$, the out-observable $\psi^-$ becomes the interaction free out-observable $\psi^{\rm out}$ which describes a measurable property in the asymptotic region: \begin{equation}
\psi^-\rightarrow\psi^{\rm out}\nonumber \end{equation}
The superscripts $\pm$ of state vectors $\phi^+$ and $\psi^-$ have their origins in the labels of the eigenkets $|E^\pm\rangle$ of the full Hamiltonian $H=H_0+V$, \begin{equation}
H|E^\pm\rangle=E|E^\pm\rangle\label{2.1} \end{equation} The Dirac basis vector expansion of (3b) above holds for every
$\phi^+$ and every $\psi^-$ in terms of the eigenkets $|E^+\rangle$ and
$|E^-\rangle$, respectively: \begin{subequations}
\label{2.2}
\begin{equation}
\tag{\ref{2.2}+}
\{\phi^+\}\ni\phi^+=\sum_{jj_3\eta}\int_0^\infty
dE|Ejj_3\eta^+\rangle\langle^+Ejj_3\eta|\phi^+\rangle\label{2.2+}
\end{equation}
\begin{equation}
\tag{\ref{2.2}$-$}
\{\psi^-\}\ni\psi^-=\sum_{jj_3\eta}\int_0^\infty
dE|Ejj_3\eta^-\rangle\langle^-Ejj_3\eta|\psi^-\rangle\label{2.2-}
\end{equation} \end{subequations}
The eigenkets $|E^\pm\rangle$ of the full Hamiltonian in \eqref{2.1} are also assumed to be the plane-wave solutions to the Lippmann-Schwinger equations \begin{subequations}
\label{2.3}
\begin{equation}
|E^\pm\rangle=|E\rangle+\lim_{\epsilon\rightarrow0}\frac{1}{E-H_0\pm
i\epsilon}V|E^\pm\rangle=\Omega^\pm|E\rangle
\tag{\ref{2.3}$\pm$} \end{equation} \end{subequations}
where $|E\rangle$ fulfill the eigenvalue equation $H_0|E\rangle=E|E\rangle$ for the ``free Hamiltonian'' $H_0$ of \eqref{2.1}.
As seen from \eqref{2.1}, the eigenkets $|E^+\rangle$ and $|E^-\rangle$ both correspond to the same eigenvalue $E$, but (\ref{2.3}$\pm$) shows that they fulfill different boundary conditions expressed by $+i0$ and $-i0$.
In scattering theory, the set of functions that are admitted to serve as energy wave functions in (\ref{2.2}$\pm$), \begin{subequations}
\label{2.4}
\begin{equation}
\tag{\ref{2.4}+}
\phi^+(E)=\<Ejj_3\eta^+|\phi^+\rangle=\<E|\phi^{\rm in}\rangle\label{2.4+}
\end{equation}
and
\begin{equation}
\tag{\ref{2.4}$-$}
\psi^-(E)=\<Ejj_3\eta^-|\psi^-\rangle=\<E|\psi^{\rm out}\rangle\label{2.4-}
\end{equation} \end{subequations}
are usually assumed to be the same set of smooth functions as the functions $\<Ejj_3\eta|\phi\rangle$ that appear in the basis vector expansion hypothesis (A3b). That is, \begin{subequations}
\label{2.6}
\begin{equation}
\{\phi^+(E)\}=\{\psi^-(E)\}=\{\phi(E)\}=\text{Schwartz function
space}\label{2.6a}
\end{equation}
For the vectors, this means
\begin{equation}
\{\phi^+\}=\{\psi^-\}=\Phi\subset{\cal H}\subset\Phi^\times\label{2.6b}
\end{equation} \end{subequations} (where $\Phi$ is dense in $\cal H$). The assumption $\{\phi^+\}=\{\psi^-\}=\Phi$ (or, the version ${{\cal H}}^{\rm
in}={{\cal H}}^{\rm out}={\cal H}$) is known in scattering theory and quantum field theory as the assumption of asymptotic completeness.
The time evolution of the state $\phi^+(t)$ is given by the Schr\"odinger equation \begin{subequations}
\label{2.7}
\begin{equation}
\tag{\ref{2.7}+}
i\hbar\frac{d\phi^+(t)}{dt}=H\phi^+(t)\label{2.7+}
\end{equation} \end{subequations} The solution to this equation under the Hilbert space boundary condition of assumption (A2) above is \begin{subequations}
\label{2.8}
\begin{equation}
\tag{\ref{2.8}+}
\phi^+(t)=e^{-iHt}\phi^+,\ \text{with}\ -\infty<t<\infty\label{2.8+}
\end{equation} \end{subequations} The time evolution of the observable
$\Lambda(t)=\left|\psi^-(t)\rangle\langle\psi^-(t)\right|$ is given by the Heisenberg equation of dynamical motion
\begin{equation}
\tag{\ref{2.7}$-$}
\frac{d\Lambda(t)}{dt}=\frac{-i}{\hbar}[\Lambda(t),H],\quad\text{or
by}\quad i\hbar\frac{d\psi^-(t)}{dt}=-H\psi^-(t)\label{2.7-} \end{equation}
The solution of this equation under the Hilbert space boundary condition of assumption (A2) is
\begin{equation}
\label{2.8-}
\tag{\ref{2.8}$-$}
\Lambda(t)=e^{iHt}\Lambda e^{-iHt},\quad\text{or}\quad
\psi^-(t)=e^{iHt}\psi^-\quad
\text{with}\; -\infty<t<\infty \end{equation}
If $\{\phi^+\}$ and $\{\psi^-\}$ are assumed to be a Hilbert space and if the Hamiltonian $H$ is a self-adjoint operator, then, by the well-known Stone-von Neumann theorem \cite{stone}, \eqref{2.8+} and \eqref{2.8-} are necessarily the unique solutions to the dynamical equations in the Schr\"odinger and Heisenberg pictures, \eqref{2.7+} and \eqref{2.7-}. Moreover, this theorem asserts that the operators $e^{-iHt}$ and $e^{iHt}$ are unitary for each $-\infty<t<\infty$ and that the mappings $t\rightarrow e^{-iHt}\phi^+$ and $t\rightarrow e^{iHt}\psi^-$ are continuous. It is noteworthy that Stone's theorem requires the (norm complete) Hilbert space $\{\phi^+\}=\{\psi^-\}={\cal H}$, in contrast to, say, \eqref{2.6} above. However, it is possible to show that the solutions (\ref{2.8}$\pm$) hold for all $-\infty<t<\infty$ also for the Schwartz space completion of \eqref{2.6}, although there are subtle mathematical differences between the two cases (A2) and \eqref{2.6}~\cite{wick}.
If the solutions (\ref{2.8}$\pm$) hold for the vectors $\phi^+$ and
$\psi^-$, then it follows, by duality, that the eigenkets $|E^+\rangle$
and $|E^-\rangle$ behave much like $\psi^-$ and $\phi^+$, respectively. That is, \begin{subequations}
\label{2.9}
\begin{equation}
\langle\phi(t)|E^+\rangle=\<e^{-iHt}\phi^+|E^+\rangle=\langle\phi^+|e^{iH^\times t}|E^+\rangle
=e^{iEt}\langle\phi^+|E^+\rangle\label{2.9a}
\end{equation}
Or, as an eigenvalue equation between functionals,
\begin{equation}
e^{iH^\times t}|E^+\rangle=e^{iEt}|E^+\rangle,\quad -\infty<t<\infty\label{2.9b}
\end{equation} \end{subequations} Likewise, \begin{subequations}
\label{2.10}
\begin{equation}
\langle\psi^-(t)|E^-\rangle=\<e^{iHt}\psi^-|E^-\rangle=\langle\psi^-|e^{-iH^\times t}|E^-\rangle
=e^{-iEt}\langle\psi^-|E^-\rangle\label{2.10a}
\end{equation}
Or, as an eigenvalue equation between functionals,
\begin{equation}
e^{-iH^\times t}|E^-\rangle=e^{-iEt}|E^-\rangle,\quad -\infty<t<\infty\label{2.10b}
\end{equation} \end{subequations} In \eqref{2.10a} and \eqref{2.10b}, $H^\times$ is the uniquely defined extension of $\bar{H}=H^\dagger$ to the space $\Phi^\times$. It is clear that \eqref{2.9b} and \eqref{2.10b} depend on the time evolution of $\phi^+$ and $\psi^-$, given by \eqref{2.8+} and \eqref{2.8-}. The latter equations depend on the assumption that $\phi^+$ and $\psi^-$
are elements of the Schwartz space $\Phi$ of \eqref{2.6}. Therefore, if \eqref{2.9b} and \eqref{2.10b} hold, then $|E^\pm\rangle$ must be Schwartz space kets, i.e., functionals on the Schwartz space, meaning that $\phi^+(E)=\langle^+E|\phi^+\rangle$ and $\psi^-(E)=\langle^-E|\psi^-\rangle$ are infinitely differentiable, rapidly decreasing functions on the {\em
real} (and positive) energy axis.
This requirement on $|E^\pm\rangle$, however, is in contradiction with the requirement that $|E^\pm\rangle$ be solutions of the Lippmann-Schwinger equations (\ref{2.3}$\pm$) which contain the complex energies $E\pm i\epsilon$. As already mentioned, there is a physical distinction between the vectors $\phi^+$ and $\psi^-$ as being related to experimentally accessible $\phi^{\rm in}$ and $\psi^{\rm out}$ for $t\rightarrow-\infty$ and $t\rightarrow\infty$, respectively. As we shall see in the next section, these {\em asymmetric} boundary conditions in time are what give rise to the limits $\epsilon\rightarrow0^+$ and $\epsilon\rightarrow0^-$ in
(\ref{2.3}$\pm$) that define the $\pm$ signs in the kets $|E^\pm\rangle$.
\section{What the Lippmann-Schwinger Equations Suggest}\label{sec3}
It is the term $\pm i\epsilon$ in (\ref{2.3}$\pm$) which tells us that the Lippmann-Schwinger kets
$|E^\pm\rangle=\lim_{\epsilon\rightarrow0}|E\pm i\epsilon\rangle$ cannot be ordinary Dirac kets (Schwartz space functionals). The infinitesimals $\pm i\epsilon$ indicate that the energy wave functions
$\langle\phi^+|E^+\rangle$ and $\langle\psi^-|E^-\rangle$ must not only be Schwartz space functions of the real variable $E$, as asserted by the axiom
\eqref{2.6}, but they must also be limits of functions defined on some region of the upper and lower complex plane of $E$. It is simplest to assume that $\langle\phi^+|E^+\rangle$ and $\langle\psi^-|E^-\rangle$ are boundary values of {\em analytic} functions defined on such a region in the (open) upper complex half-plane $\mathbb{C}_+$ and lower complex half-plane $\mathbb{C}_-$, respectively. As the complex semi-plane in energy, one takes the second (or higher) Riemann surface of the analytic $S$-matrix. Thus, we have the following basic hypothesis which replaces \eqref{2.6}: \begin{subequations}
\label{3.1}
\begin{equation}
\tag{\ref{3.1}+}
\text{Functions}\ \phi^+(E)=\langle^+E|\phi^+\rangle=
\overline{\langle\phi^+|E^+\rangle}\ \text{have analytic
extensions into}\ {\mathbb{C}_-}\label{3.1+}
\end{equation}
and
\begin{equation}
\tag{\ref{3.1}$-$}
\text{Functions}\ \psi^-(E)=\langle^-E|\psi^-\rangle=
\overline{\langle\psi^-|E^-\rangle}\ \text{have analytic
extensions into}\ {\mathbb{C}_+}\label{3.1-}
\end{equation} \end{subequations}
To make (\ref{2.3}$\pm$) possible, the analytic extensions of \eqref{3.1+} and \eqref{3.1-} must exist at least on a small strip below and above on the real energy axis (i.e., the physical scattering energies). We shall generalize this to the hypothesis that the analytic extensions of the energy wave functions should exist on the entire upper and lower energy half-planes.
The requirement (\ref{3.1}$\pm$) is not inconsistent with the Schwartz space hypothesis of \eqref{2.6}. Rather, (\ref{3.1}$\pm$) strengthens \eqref{2.6}. However, the stronger condition (\ref{3.1}$\pm$) is not consistent with the solutions (\ref{2.8}$\pm$) of the dynamical equations (\ref{2.7}$\pm$), obtained as consequences of the weaker condition \eqref{2.6}. Likewise, the time evolutions equations \eqref{2.9b} and \eqref{2.10b}, which one universally assumes for (all) energy eigenkets, also do not hold under the hypothesis (\ref{3.1}$\pm$).
As stated above, the requirements of \eqref{3.1} are supplementary to the usual hypothesis of quantum mechanics. Thus, the wave functions $\phi^+(E)$ and $\psi^-(E)$ are still assumed to be, for instance, smooth, rapidly decreasing and square integrable. The simultaneous requirements of analyticity and square integrability introduce certain (unexpected) restrictions into the theory. For instance, it can be shown~\cite{bohm4,gadella} that these requirements can be met for the time translated functions \eqref{2.9a} and \eqref{2.10a} only if $t\geq0$.\footnote{Actually, this feature of time evolution can be
seen from a simple heuristic argument that goes as follows. If the
time translated function $\langle\phi^+(t)|E^+\rangle$, just like the
function $\langle\phi^+|E^+\rangle$ is the square integrable boundary value
function of an analytic function defined in the upper half-plane,
then for $E=E+i\epsilon$, we have
$\langle\phi^+(t)|E+i\epsilon\rangle=e^{i(E+i\epsilon)t}\langle\phi^+|E+i\epsilon\rangle$.
Since $\epsilon$ is positive,
$e^{i(E+i\epsilon)t}\langle\phi^+|E+i\epsilon\rangle$ is bounded for
arbitrary values of $\epsilon$ only if $t$ is positive. A similar
argument holds for the time translation of the observable wave
functions $\langle\psi^-(t)|E^-\rangle$ of \eqref{2.10a}. The rigorous proof
is given in text following \eqref{3.7.5}.} Since the time translation equations \eqref{2.9b} and \eqref{2.10b} are derived from
\eqref{2.9a} and \eqref{2.10a}, the conclusion $t\geq0$ also holds for the kets $e^{\pm iEt}|E^\pm\rangle$.
Thus, the first conclusion that we draw from the Lippmann-Schwinger equations (\ref{2.3}$\pm$) is that the time evolution of the vectors $\phi^+$ and $\psi^-$ in (\ref{2.2}$\pm$) should not be given by the unitary group solution of the the dynamical equations (\ref{2.7}$\pm$), but by the {\em semigroup} solution: \begin{subequations}
\label{3.2}
\begin{equation}
\tag{\ref{3.2}+}
\phi^+(t)=e^{-iHt}\phi^+\quad\text{for}\ 0\leq t<\infty\
\text{only}.\label{3.2+}
\end{equation}
\begin{equation}
\tag{\ref{3.2}$-$}
\psi^-(t)=e^{iHt}\psi^-\quad\text{for}\ 0\leq t<\infty\
\text{only}.\label{3.2-}
\end{equation} \end{subequations} From this we see that as a consequence of the $\pm i\epsilon$ in the Lippmann-Schwinger equations (\ref{2.3}$\pm$), the $\{\phi^+\}$ and $\{\psi^-\}$ given by the Dirac basis vector expansion (\ref{2.2}$\pm$) are in general different mathematical quantities with different (``conjugate'') semigroups (\ref{3.2}$\pm$) of time evolution. The unitary group evolution (\ref{2.8}$\pm$) which follows from \eqref{2.6} is in conflict with the Lippmann-Schwinger equations. Time evolutions which are not in conflict with the Lippmann-Schwinger equations (\ref{2.3}$\pm$) are (\ref{3.2}$\pm$).
Thus, on the basis of (\ref{3.1}$\pm$), we identify two different vector spaces $\{\phi^+\}\not=\{\psi^-\}$, one for the states and the other for the observables. The operators $e^{-iHt}$ and $H$ in \eqref{3.2+} are operators defined in the vector space $\{\phi^+\}$. Likewise, operators $e^{iHt}$ and $H$ in \eqref{3.2-} are operators defined in the vector space $\{\psi^-\}$\footnote{To be precise in
notation, one should distinguish between $H=H_-$, the restriction of
the Hilbert space operator $\bar{H}$ to $\Phi_-=\{\phi^+\}$ and
$H=H_+$, the restriction of the Hilbert space operator $\bar{H}$ to
$\Phi_+=\{\psi^-\}$. For the sake of notational simplicity we will
avoid this distinction whenever it does not lead to
misunderstanding.}. Now, from \eqref{3.1+} we know that the wave functions $\phi^+(E)=\langle^+E|\phi^+\rangle$ corresponding to the vectors
$\phi^+$ are analytic in ${\mathbb{C}}_-$. Therefore, we call the vector space $\{\phi^+\}\equiv\Phi_-$. Similarly, from \eqref{3.1-}, the wave functions $\psi^-(E)=\langle\psi^-|E^-\rangle$ are analytic in ${\mathbb{C}}_+$, and for this reason we call the vector space $\{\psi^-\}\equiv\Phi_+$. The two vector spaces $\Phi_\pm$ are then two different subspaces of the Hilbert space ${\cal H}$ (and also of the Schwartz space $\Phi$): \begin{subequations}
\label{3.3}
\begin{equation}
\tag{\ref{3.3}+}
\phi^+\in\Phi_-\subset{\cal H}\label{3.3+}
\end{equation}
\begin{equation}
\tag{\ref{3.3}$-$}
\psi^-\in\Phi_+\subset{\cal H}\label{3.3-}
\end{equation} \end{subequations}
What remains now is to put additional conditions on the analytic functions (\ref{3.1}$\pm$) such that the spaces $\Phi_\pm$ become nuclear spaces. Then, the triplet of spaces \begin{subequations}
\label{3.4}
\begin{equation}
\tag{\ref{3.4}+}
\Phi_-\subset{\cal H}\subset\Phi_-^\times\label{3.4+}
\end{equation}
\begin{equation}
\tag{\ref{3.4}$-$}
\Phi_+\subset{\cal H}\subset\Phi_+^\times\label{3.4-}
\end{equation} \end{subequations} become Gelfand triplets, also known as Rigged Hilbert Spaces. The ordinary Dirac kets require one RHS \eqref{2.6b}. However, if the kets are also to fulfill the Lippmann-Schwinger equations (\ref{2.3}$\pm$), one needs the pair of RHS's, (\ref{3.4}$\pm$). The $\Phi^\times_\pm$
in (\ref{3.4}$\pm$) are the dual spaces, consisting of continuous anti-linear functionals on $\Phi_\pm$. The new kets $|E^\pm\rangle$ have then a well defined meaning as elements of the dual spaces $\Phi_\pm^\times$, and the nuclear property of (\ref{3.4}$\pm$) allows Dirac's basis vector expansion (\ref{2.2}$\pm$) to be established as the nuclear spectral theorem of Gelfand {\em et al.} and Maurin~\cite{gelfand}. The pair of Gelfand triplets \eqref{3.4} has been constructed by Gadella~\cite{gadella} by choosing for the spaces of wave functions \eqref{3.1} particular subspaces of Hardy functions~\cite{duren} \footnote{ This choice is the following:
\begin{subequations}
\label{3.5}
\begin{equation}
\tag{\ref{3.5}+}
\phi^+(E)\in\left.{\cal H}_-^2\cap{\cal S}\right|_{\mathbb{R}_+}\label{3.5+}
\end{equation}
\begin{equation}
\tag{\ref{3.5}$-$}
\psi^-(E)\in\left.{\cal H}_+^2\cap{\cal S}\right|_{\mathbb{R}_+}\label{3.5-}
\end{equation}
\end{subequations}
Here, ${{\cal H}}^2_\pm$ denote Hardy class functions. ${\cal S}$ stands for
the Schwartz space, and the symbol $\left.\right|_{{\mathbb{R}_+}}$
represents the restriction of the domains of functions in
${\cal H}^2_\pm\cap{\cal S}$ to the positive real line, ${\mathbb{R}}_+$,
assumed to be the range of scattering energy values. Loosely
speaking, Hardy class functions $f^\pm\in{\cal H}_\pm^2$ are functions
defined on the real line fulfilling the following two
properties~\cite{bohm4,gadella,duren}:
\begin{enumerate}
\item $f^\pm(x)$ are point-wise limits of analytic functions
$F^\pm(z)$ on ${{\mathbb{C}}}_\pm$, i.e.,
$f^\pm(x)=\lim_{y\rightarrow0}F^\pm(x\pm iy)$
\item The $f^\pm$ are square integrable,
$\int_{-\infty}^\infty\left|f^\pm(x)\right|^2dx<\infty$
\end{enumerate}
The intersections ${\cal H}^2_\pm\cap{\cal S}$ ensure that the functions
$\phi^+(E)$ and $\psi^-(E)$, in addition to having the desired
analyticity properties for complex energies, are, for real energy
values, infinitely differentiable and rapidly decreasing at
infinity. Equally importantly, when defined as in \eqref{3.5}, the
nuclearity of the Schwartz space ${\cal S}$ can be used to define a
topology for $\Phi_\pm$ so that these spaces are nuclear. The
one-to-one association of smooth Hardy functions for the energy wave
functions in (\ref{3.5}$\pm$) is more restrictive than the
analyticity of the wave functions in the small strip above or below
the real axis, the weakest condition demanded by the
Lippmann-Schwinger equations \eqref{2.3}. It is a mathematical
idealization, like the idealization to Lebesgue square integrable
functions in Hilbert space quantum mechanics. The Hardy space
idealization, a refinement of the Hilbert space idealization, is
better suited for quantum physics because it provides a mathematical
distinction between states $\phi^+\in\Phi_-$ and observables
$\psi^-\in\Phi_+$. It also provides a mathematical basis for the
Lippmann-Schwinger integral equations, which incorporate the
in-coming and out-going boundary conditions.}
Associated with an operator defined in the Hilbert space ${\cal H}$, there exist two triplets of operators corresponding to the two triplets of spaces in \eqref{3.4}. For instance, for the Hamiltonian $H$,
\label{3.6}
\begin{equation}
\tag{\ref{3.6}+}
H_-\subset\bar{H}=H^\dagger\subset H_-^\times\label{3.6+}
\end{equation}
\begin{equation}
\tag{\ref{3.6}$-$}
H_+\subset\bar{H}=H^\dagger\subset H_+^\times\label{3.6-}
\end{equation} \end{subequations} where $H_\mp$ are the uniquely defined restrictions
$\left.\bar{H}\right|_{{\Phi_\mp}}$ of the self-adjoint Hamiltonian $\bar{H}$ to the dense subspace $\Phi_\mp$ of ${\cal H}$. The operators $H_\mp^\times$ are the conjugate operators of $H_\mp$, which are uniquely defined extensions of $H^\dagger$ to $\Phi_\mp^\times$. When their meaning is clear from the context, we usually omit the subscripts $\mp$ and superscript $\times$ in these various operators and denote all of them simply by $H$.
Defining the Lippmann-Schwinger kets now as functionals on $\Phi_\mp$, the $|Ejj_3\eta^\pm\rangle\in\Phi^\times_\mp$ have analytic extensions into the whole complex semi-plane ${\mathbb{C}}_\pm$ of the second sheet of the $S$-matrix.
This property has turned out to be very important for the unified theory of resonances and decay.
In sum, we have conjectured the new hypothesis which distinguishes mathematically between states and observables:\\
\begin{subequations}
\label{3.7} Set of prepared states defined by preparation apparatus
(accelerator), e.g., in-states
\begin{equation}
\tag{\ref{3.7}+}
\{\phi^+\}=\Phi_-\subset{\cal H}\subset\Phi_-^\times\label{3.7+}
\end{equation}
Set of registered observables defined by registration apparatus
(detector), e.g., out-states
\begin{equation}
\tag{\ref{3.7}$-$}
\{\psi^-\}=\Phi_+\subset{\cal H}\subset\Phi_+^\times\label{3.7-}
\end{equation} \end{subequations} We take \eqref{3.7} as a fundamental axiom which replaces the Hilbert space axiom (A2) of Section \ref{sec2}.
The spaces $\Phi_\pm$ are two different dense subspaces of the same Hilbert space ${\cal H}$. As stated above, the spaces $\Phi_\pm$ can be understood as the abstract vector spaces whose realizations in terms of energy wave functions have the smooth Hardy space property (\ref{3.5}$\pm$). In other words, the space $\Phi_-$ is given by the set of vectors $\{\phi^+\}$ whose Dirac vector expansion is given by
\eqref{2.2+}, where the ``coordinates $\langle^+Ejj_3\eta|\phi^+\rangle$ with the continuous label'' $E$ (the analogue of the label $i=1,2,3$ in the basis vector expansion $\vec{x}=\sum_{i=1}^3\vec{e}_ix^i$) are the smooth Hardy functions $\phi^+(E)=\langle^+Ejj_3\eta|\phi^+\rangle$ with the property \eqref{3.5+}. Similarly, the space $\Phi_+$ is the set of vectors $\{\psi^-\}$ whose ``coordinates'' with respect to the continuous basis $|Ejj_3\eta^-\rangle$ are the smooth Hardy functions \eqref{2.4-} with the property \eqref{3.5-}. An immediate mathematical consequence of the Hardy space axiom (\ref{3.5}$\pm$) is that the solutions of the dynamical equations (\ref{2.7}$\pm$) have the important (semigroup) property \eqref{3.2}: \begin{subequations}
\label{3.7.5}
\begin{equation}
\tag{\ref{3.7.5}+}
\text{For $\phi^+(t)$ fulfilling Schr\"odinger's Eq.,}\
\phi^+(t)=e^{-iH_-t}\phi^+\ \text{for}\ t\geq0\label{3.7.5+}
\end{equation}
\begin{equation}
\tag{\ref{3.7.5}$-$}
\text{For $\psi^-(t)$ fulfilling Heisenberg's Eq.,}\
\psi^-(t)=e^{iH_+t}\psi^-\ \text{for}\ t\geq0\label{3.7.5-}
\end{equation} \end{subequations}
This semigroup time evolution (\ref{3.7.5}$\pm$) is a consequence of a theorem of Paley and Wiener \cite{paley} (See also the appendix of \cite{bohm5}) for Hardy class functions. The theorem states that if $G_-(E)$ is a Hardy class function, then its Fourier transform \begin{subequations}
\label{paley1}
\begin{equation}
\tag{\ref{paley1}a}
{\check{G}}_-(\tau)=\frac{1}{{\sqrt{2\pi}}}\int_{-\infty}^\infty dE
e^{iE\tau}G_-(E)
\label{paley1a}
\end{equation}
must fulfill the condition
\begin{equation}
\tag{\ref{paley1}b}
{\check{G}}_-(\tau)=0\quad \text{for}\ -\infty<\tau<0
\label{paley1b}
\end{equation} \end{subequations} It further follows from the theorem that for any positive value of
$\tau$, say $|\tau_0|$, there exists a Hardy function $G^{\tau_0}_-(E)\in{\cal{H}}_-^2\cap{\cal{S}}$ such that \begin{equation} {\check{G}}^{\tau_0}_-(\tau)\not=0\quad \text{for}\
0<\tau<|\tau_0|\label{paley1.1} \end{equation}
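As a simple illustration of the theorem (a side remark, not needed for the argument): the function $G_-(E)=\frac{1}{E-z_0}$ with $\Im z_0>0$ is the boundary value of a function analytic in ${\mathbb{C}}_-$ and square integrable on the real line, hence of Hardy class (though not in ${\cal{S}}$, since it is not rapidly decreasing). Closing the contour in ${\mathbb{C}}_-$ for $\tau<0$ and in ${\mathbb{C}}_+$ for $\tau>0$ gives
\begin{equation}
{\check{G}}_-(\tau)=\frac{1}{\sqrt{2\pi}}\int_{-\infty}^\infty dE\,\frac{e^{iE\tau}}{E-z_0}
=\begin{cases} 0 & \text{for}\ \tau<0\\[0.3em] \sqrt{2\pi}\, i\, e^{iz_0\tau} & \text{for}\ \tau>0 \end{cases}\nonumber
\end{equation}
in agreement with \eqref{paley1b}.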
Now, consider the Hardy space function $\langle^+E|\phi^+\rangle$ and the Hardy space function $\langle^+E|\phi^+(t)\rangle$ of the time translated state $\phi^+(t)$. Since $\phi^+(t)$ fulfills the Schr\"odinger equation
\eqref{2.7+}, $\phi^+(t)=e^{-iHt}\phi^+$ and its expansion coefficients $e^{-iEt}\langle^+E|\phi^+\rangle$ in the basis vector expansion \begin{eqnarray}
\phi^+(t)=\int dE|E^+\rangle\langle^+E|\phi^+(t)\rangle&=&\int
dE|E^+\rangle\langle^+E|e^{-iHt}\phi^+\rangle\nonumber\\
&=&\int dE|E^+\rangle\left(e^{-iEt}\langle^+E|\phi^+\rangle\right)\nonumber \end{eqnarray}
as well as the expansion coefficient $\langle^+E|\phi^+\rangle$ in \eqref{2.2+} must, according to \eqref{3.7+}, be Hardy functions of the lower half-plane ${\mathbb{C}}_-$ if both $\phi^+$ and $\phi^+(t)$ are to represent prepared states. That is,
\label{paley2}
\begin{equation}
\tag{\ref{paley2}a}
G_-(E)\equiv\langle^+E|\phi^+\rangle\in{\cal{H}}_-^2\cap{\cal{S}}\label{paley2a}
\end{equation}
as well as
\begin{equation}
\tag{\ref{paley2}b}
G^t_-(E)\equiv
e^{-iEt}\langle^+E|\phi^+\rangle\in{\cal{H}}_-^2\cap{\cal{S}} \label{paley2b}
\end{equation} \end{subequations} It is an elementary property that the Fourier transform ${\check{G}}^t_-$ of the function \eqref{paley2b} is related to the Fourier transform ${\check{G}}_-$ of the function \eqref{paley2a}: \begin{equation} {\check{G}}^t_-(\tau)\equiv\frac{1}{\sqrt{2\pi}}\int
dE e^{iE\tau}G_-^t(E)= \frac{1}{\sqrt{2\pi}}\int dE
e^{iE(\tau-t)}G_-(E)=\check{G}_-(\tau-t) \label{paley2.5} \end{equation} Now, if we want both $G_-(E)$ and $G^t_-(E)$ to be Hardy space functions as in \eqref{paley2a} and \eqref{paley2b}, then it follows from the Paley-Wiener theorem \eqref{paley1} that \begin{subequations}
\label{paley3}
\begin{equation}
\tag{\ref{paley3}a}
\check{G}_-(\tau)=0\quad \text{for}\ -\infty<\tau<0
\label{paley3a}
\end{equation}
and
\begin{equation}
\tag{\ref{paley3}b}
{\check{G}}^t_-(\tau)=0\quad \text{for}\ -\infty<\tau<0
\label{paley3b}
\end{equation}
But, because of \eqref{paley2.5}, we also have
\begin{equation}
\tag{\ref{paley3}c}
{\check{G}}_-(\tau-t)=0\quad\text{for}\
-\infty<\tau-t<0\label{paley3c}
\end{equation} \end{subequations} From \eqref{paley3a} and \eqref{paley3c}, we have the simultaneous conditions $-\infty<\tau<0$ and $-\infty<\tau<t$. These two requirements on $\tau$ are clearly satisfied for positive values of
$t$. If $t$ is negative, say $t=-|t|$, then the property ${\check{G}}^t(\tau)=\check{G}_-(\tau-t)=0$ is ensured only for
$-\infty<\tau<-|t|$, not for $-\infty<\tau<0$ as required by \eqref{paley3b}. In fact, from \eqref{paley1.1}, we see that there is at least one function in the space ${\cal{H}}_-^2\cap{\cal{S}}$ for which the condition \eqref{paley3b} is not fulfilled for
$-|t|<\tau<0$. Therefore, $t\geq0$ must hold, and the time evolution for the states $\phi^+(t)$ can only be defined for the semigroup \eqref{3.7.5+}. A similar argument using the Hardy functions ${\cal{H}}_+^2\cap{\cal{S}}$ leads to the conclusion \eqref{3.7.5-}.
The conjugate operators\footnote{Note that the operators acting on the
spaces $\Phi_\mp$ are labeled by the $\mp$ signs, e.g., $U_\mp,\
H_\mp$. The $\mp$ signs labeling the spaces follow from the
mathematicians' convention for the lower and upper Hardy class. The
signs that label the vectors, on the other hand, follow from most
physicists' notation of scattering theory and are opposite to those
that label the spaces: $\phi^+\in\Phi_-,\ \psi^-\in\Phi_+,\
|E^\pm\rangle\in\Phi_\mp^\times$.} of $U_\pm(t)$, defined by the identities $\<U_-\phi^+|F^+\rangle=\langle\phi^+|U_-^\times F^+\rangle$ for every $\phi^+\in\Phi_-,\ F^+\in\Phi_-^\times$ and
$\<U_+\psi^-|F^-\rangle=\langle\psi^-|U_+^\times F^-\rangle$ for every $\psi^-\in\Phi_+,\ F^-\in\Phi_+^\times$, give the time evolutions in the dual spaces $\Phi_\pm^\times$: \begin{subequations}
\label{3.8}
\begin{equation}
\tag{\ref{3.8}+}
U_-^\times(t)|F^+\rangle=e^{iH_-^\times t}|F^+\rangle,\ t\geq0,\
F^+\in\Phi_-^\times\label{3.8+}
\end{equation}
\begin{equation}
\tag{\ref{3.8}$-$}
U_+^\times(t)|F^-\rangle=e^{-iH_+^\times t}|F^-\rangle,\ t\geq0,\
F^-\in\Phi_+^\times\label{3.8-}
\end{equation} \end{subequations}
For the special case $F=|Ejj_3\eta^-\rangle$ where
$H_+^\times|Ejj_3\eta^-\rangle=E|Ejj_3\eta^-\rangle$, \begin{equation}
U^\times_+(t)|Ejj_3\eta^-\rangle=e^{-iH_+^\times t}|Ejj_3\eta^-\rangle
=e^{-iEt}|Ejj_3\eta^-\rangle\ \text{for $t\geq0$ only.}\label{3.9} \end{equation}
The set of operators $\{U_-(t)=e^{-iH_-t}:\ 0\leq t<\infty\}$ does not form a group because there is no inverse operator $\left(U_-(t)\right)^{-1}$ for every element of this set as required by the group axioms. In contrast, for the set of unitary operators $\{U(t)=e^{-iHt}:\ -\infty<t<\infty\}$ in the Hilbert space ${\cal H}$ there is an inverse operator $\left(U(t)\right)^{-1}=U(-t)$ for every $U(t)$ so that the set constitutes a group. Aside from the absence of inverse operators, the set of operators $\{U_-(t)=e^{-iH_-t}:\ 0\leq t<\infty\}$ fulfills all other defining axioms of a group, and is called a semigroup. Therefore, there are two different representations of the time translation semigroup $0\leq t<\infty$ given by the operators $U_\mp(t)=e^{\mp iH_\mp t}$ of (\ref{3.2}$\pm$) in the two spaces $\Phi_\mp$. Likewise, the conjugate operators defined above in (\ref{3.8}$\pm$) also furnish two representations of the time translation semigroup $0\leq t<\infty$ in the dual spaces $\Phi_\pm^\times$. In both of these cases, we have the condition $t\geq0$ (because of the difference in sign on the right hand side of the dynamical equations (\ref{2.7}$\pm$)).
The semigroup time evolution is an important consequence of the axiom (\ref{3.7}$\pm$). This axiom makes it possible for the Hamiltonians $H_\pm^\times$ to have eigenkets with complex eigenvalues. The semigroup character of time evolution makes the probability densities for complex energy eigenstates finite. If one would force the unitary time evolution (\ref{2.8}$\pm$) on these eigenstates with complex energy, one would obtain infinite probabilities, which is the well-known ``exponential catastrophe'' for the original Gamow wave functions~\cite{bohm2}.
Under the new axiom (\ref{3.7}$\pm$), the Gamow state vector is derived from the $S$-matrix pole at complex energy value
$z_R=E_R-i\Gamma/2$ as an eigenket (functional) $|z_Rjj_3\eta^-\rangle\in \Phi_+^\times$ with generalized eigenvalue $z_R$~\cite{bohm3.5, bohm4,
bohm5}. In the construction of these Gamow kets, the eigenvalue
$z_R$ is the complex position of the $S$-matrix pole. Under the new axiom (\ref{3.7}$\pm$), eigenkets of essentially self-adjoint Hamiltonians with complex energy are now well defined as functionals on the spaces $\Phi_\pm$: the Lippmann-Schwinger kets $|E\mp i\epsilon, jj_3\eta\rangle$ can be analytically extended into the complex semi-plane ${\mathbb{C}}_\mp$ (this means the bra
$\langle^+\overline{E+i\epsilon},jj_3\eta|$ and the ket $|E-i\epsilon, jj_3\eta\rangle$ as well as the integrand in the scalar product $(\psi^-,\phi^+)$ can be analytically extended into the lower semiplane ${\mathbb{C}}_-$ of the second sheet of the $S$-matrix
$S_j(E)$ except at singularities). The Gamow vectors are the evaluation of the analytically extended kets $|zjj_3\eta^-\rangle$ in the lower half plane at the position $z_R=E_R-i\frac{\Gamma}{2}$ of the first order $S$-matrix pole. (Gamow-Jordan vectors belong to the higher order poles \cite{gamow-jordan}.) Then, from \eqref{3.9}, the time evolution of the Gamow vectors is given by \begin{eqnarray}
e^{-iH_+^\times
t}|z_Rjj_3\eta^-\rangle&=&e^{-iz_Rt}|z_Rjj_3\eta^-\rangle\nonumber\\
&=&e^{-iE_Rt}e^{-\frac{\Gamma}{2}t}|z_Rjj_3\eta^-\rangle\ \text{for
$t\geq0$ only.}\label{3.10} \end{eqnarray} This means there is an association between the resonance pole of the $j$-th partial scattering amplitude $a_j(E)$ and the Gamow vectors: \begin{equation}
\left.
\begin{matrix}
\text{Resonance pole at $z_R=E_R-i\frac{\Gamma}{2}$}\\
\text{described by}\ a_j(E)=\frac{r}{E-z_R}
\end{matrix}
\right\}\quad
\Longleftrightarrow\quad
\left\{
\begin{matrix}
\text{Space of states of Gamow}\\
\text{vectors spanned by}\ |z_Rjj_3\eta^-\rangle
\end{matrix}\right.
\label{3.10.1} \end{equation} The resonance is defined by a pole of the $S$-matrix element of angular momentum $j$ at the complex energy $z_R=E_R-i\frac{\Gamma}{2}$ and is measured as a Lorentzian (Breit-Wigner) bump with maximum at $E_R$ and full width at half-maximum~$\Gamma$: \begin{equation}
\left|a_j(E)\right|^2=
\frac{|r|^2}{(E-E_R)^2+\left(\frac{\Gamma}{2}\right)^2}
\label{3.10.2} \end{equation} To this resonance corresponds a ket which is defined by the Cauchy integral around the $S$-matrix pole $z_R$ \begin{equation}
|z_Rjj_3\eta^-\rangle=\frac{1}{2\pi i}\oint dz\frac{|zjj_3\eta^-\rangle}{z-z_R}
=\frac{i}{2\pi}\int_{-\infty_{II}}^\infty
dE\frac{|Ejj_3\eta^-\rangle}{E-z_R}\label{3.10.3} \end{equation} The second equality of \eqref{3.10.3} is the Titchmarsh theorem for Hardy functions (written here for functionals). This equality and the association \eqref{3.10.1} between Breit-Wigner resonance amplitude and Gamow state therefore require the new axiom (\ref{3.7}$\pm$).
Equation \eqref{3.10.3} expresses the new ket $|z_Rjj_3\eta^-\rangle$ by a Dirac basis vector expansion as in (A3), except that the continuous summation extends over all real energy values $-\infty_{II}<E<\infty$, where $-\infty_{II}$ means that for the ``unphysical'' values $E<0$, the energy $E$ is on the second Riemann sheet. We call the ket \eqref{3.10.3} with the energy wave function given by the Breit-Wigner amplitude \eqref{3.10.1} a Gamow vector because one can prove (again, using axiom (\ref{3.7}$\pm$)) that it fulfills \eqref{3.10}. This Gamow vector \eqref{3.10.3} provides a state vector description of the Breit-Wigner resonance \eqref{3.10.2}. The semigroup time evolution \eqref{3.10} of this state vector shows that this state is exponentially decaying with a lifetime $\tau=\frac{1}{\Gamma}$, where $\Gamma=-2\Im(z_R)$.
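The step behind the statement that \eqref{3.10.3} fulfills \eqref{3.10} can be sketched as follows (a heuristic summary; the Hardy class properties guaranteed by (\ref{3.7}$\pm$) are what justify closing the contour). Acting with $e^{-iH_+^\times t}$ under the integral in \eqref{3.10.3} produces the factor $e^{-iEt}$, which for $t\geq0$ decays in the lower half plane ${\mathbb{C}}_-$; the contour can then be closed there and only the pole at $z=z_R$ contributes:
\[
e^{-iH_+^\times t}\,|z_Rjj_3\eta^-\rangle
=\frac{i}{2\pi}\int_{-\infty_{II}}^{\infty}dE\,\frac{e^{-iEt}\,|Ejj_3\eta^-\rangle}{E-z_R}
=e^{-iz_Rt}\,|z_Rjj_3\eta^-\rangle,\qquad t\geq0,
\]
which is exactly \eqref{3.10}. For $t<0$ the factor $e^{-iEt}$ grows in ${\mathbb{C}}_-$ and the argument fails, which is another way of seeing the semigroup restriction.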
Unstable particles that are characterized by their lifetime are called decaying states, and they are conceptually and experimentally different from resonances, which are characterized by the resonance energy and width. From \eqref{3.10} and the fact that $z_R$ is the $S$-matrix pole, we see that the Gamow vector provides a unified description of decaying states and resonances, which can now be collectively called quasistable states. Gamow vectors thus elevate the heuristic lifetime-width relation $\tau=\frac{1}{\Gamma}$ to an exact and universal identity between two quantities that are observationally and mathematically different.
The time evolution equations \eqref{3.2}, \eqref{3.8}, \eqref{3.9} and \eqref{3.10} imply a particular finite value $t=0$ at which time begins. What is the physical meaning of this initial moment of time? To answer the question, notice that under the axiom \eqref{3.7}, the Born probabilities ${\cal P}_{\phi^+}(\psi^-(t))$ are defined, due to \eqref{3.7.5-}, only for $t\geq0$: \begin{equation}
{\cal P}_{\phi^+}(\psi^-(t))=\left|\langle\psi^-(t)|\phi^+\rangle\right|^2=
\left|\langle\psi^-|\phi^+(t)\rangle\right|^2\ \text{for $t\geq0$
only.}\label{3.11} \end{equation} For a resonance or decaying state represented by a Gamow vector
$|z_R^-\rangle$, we have, using \eqref{3.10}, \begin{equation}
{\cal P}_{|z_R\rangle}(\psi^-(t))=\left|\langle\psi^-(t)|z_R^-\rangle\right|^2=
\left|\langle\psi^-|z_R^-(t)\rangle\right|^2=e^{-\Gamma
t}\left|\langle\psi^-|z^-_R\rangle\right|^2\ \text{for $t\geq0$ only.}\label{3.12} \end{equation} Equations \eqref{3.11} and \eqref{3.12} tell us that a time independent observable $\psi^-$ can be measured in a time dependent state $\phi^+(t)$ only after a particular instant $t=0$ (or, equivalently, the time dependent observable $\psi^-(t)$ can be measured in a time independent state $\phi^+$ only {\em after} the same instant $t=0$). In the case of the quasistable state of
\eqref{3.12}, the time $t=0$ is interpreted as the time at which the state $|z_R^-(t)\rangle$ has been prepared, i.e., the quasistable particle is produced or formed. The observable $|\psi^-(t)\rangle\langle\psi^-(t)|$ representing the decay products can be detected only after this time, $t\geq0$. From this point of view, the semigroup condition $t\geq0$ expresses a simple causality condition: The observable $\psi^-$ can be measured only at times $t$ larger than the time $t=0$ at which the state is prepared.
Such a particular moment {\em cannot} be singled out if we instead use the unitary group evolution of the Hilbert space, for which the probabilities \eqref{3.11} are necessarily defined for all $-\infty<t<\infty$. It is well known that there are serious problems with accommodating causality into the conventional formalism of quantum mechanics~\cite{fermi}. Therefore, the causal time evolution that follows from the new Hardy space axiom is welcome. But it also poses a new question: what is the meaning of the semigroup time $t=0$ and how can we observe it? This will be discussed in the following section.
\section{Observing the Semigroup Time of Causal Evolution}\label{sec4}
The causal quantum mechanical semigroup (\ref{3.2}$\pm$) introduces a new concept, the semigroup time $t=t_0$. In the mathematical description, we call this $t_0=0$, but physically $t_0$ could be any finite time $(\not=-\infty)$. This concept of a beginning of time is foreign to the conventional mathematical theory of quantum physics based on the Hilbert space axiom (A2) (or its slightly strengthened version \eqref{2.6}), in consequence of which follow the time evolution equations (\ref{2.8}$\pm$) with $-\infty<t<+\infty$. Nevertheless, a beginning of time $t_0$ has been mentioned before by Gell-Mann and Hartle in their quantum theory of the universe \cite{gell-mann}, where $t_0$ was chosen as the big bang time and where the restriction of the unitary group evolution \eqref{2.8-} to $(t-t_{\rm big bang})\geq0$ was introduced by fiat, in contradiction to the prediction~(\ref{2.8}$\pm$) of the Hilbert space axiom~(A2). In our theory presented in this paper, the time asymmetry (\ref{3.2}$\pm$) is a consequence of our Hardy space axiom (\ref{3.7}$\pm$) which was demanded by the heuristic ($\mp i\epsilon$) in scattering theory (and also in the propagator of field theory).
We now want to answer the questions: what is the meaning of this beginning of time $t_0$ for quantum systems in experiments in the laboratory, and why have we not been more aware of its existence before?
In the usual experiments with quantum systems one works with a large ensemble. For example, the preparation time of an excited state of an atom or ion corresponds to the many different laboratory clock times at which each individual atom or ion of the ensemble is created. The situation is different if one can work with single quantum systems. By now, there are several experiments that use single, laser-cooled ions \cite{dehmelt,sauter}. The original experiments used $Ba^+$ in a Paul-Straubel trap, Fig.~\ref{fig:1}. This is one of the simplest cases that nature provides with the most suitable arrangements for resonance energy levels and lifetimes, as depicted in Fig.~\ref{fig:2}.
\begin{figure}
\caption{Schematics of the experimental setup used
in~\cite{dehmelt,sauter}}
\label{fig:1}
\end{figure}
\begin{figure}
\caption{Simplified energy-level scheme of $Ba^{+}$.}
\label{fig:2}
\end{figure} In these experiments a single laser-cooled $Ba^+$ ion in a trap undergoes two laser-driven transitions. First, driven by the 493-nm dye laser (Fig.~\ref{fig:1}), the ion goes from the ground state $6S_{1/2}$ into the excited state $6P_{1/2}$ from where it almost instantaneously (8 ns) decays into state $5D_{3/2}$. Second, from state $5D_{3/2}$ the ion is driven back to state $6P_{1/2}$ by the 650-nm dye laser (Fig.~\ref{fig:1}), from where it decays into the ground state, emitting 493-nm fluorescence radiation. This fluorescence radiation is monitored by the photomultiplier tube (PMT) in Fig.~\ref{fig:1}. Initially, the intensity of the fluorescence radiation shown in Fig.~\ref{fig:3} is essentially constant at about 16,000 counts/sec. Then, at the time ``lamp on'', a 455-nm filtered Barium lamp (Fig.~\ref{fig:1}) is turned on. After this ``lamp-on''
time, the fluorescence radiation changes rapidly at random times from the initial value of 16,000 counts/sec to the background value of no fluorescence. The explanation is the following: The Barium lamp occasionally excites the $Ba^+$ into the state $6P_{3/2}$ from where it makes a fast transition into the state $5D_{5/2}$. This is a metastable state described by the Gamow vector $|z_R\ 5{D_{5/2}}^-\rangle \equiv\psi^G$. Since there is only one $Ba^+$ atom, it can either go through the transition levels $6S_{1/2}\leftrightarrow6P_{1/2}\leftrightarrow5D_{3/2}$ or be ``shelved'' in the metastable state $5D_{5/2}$. While it is shelved, there cannot be fluorescent radiation $6P_{1/2}\rightarrow6S_{1/2}$, which results in a dark period.
\begin{figure}\label{fig:3}
\end{figure} The experiment \cite{dehmelt} reported 203 dark periods, of which three are shown in Fig.~\ref{fig:3}. The state vector $\psi^G$
represents the ensemble of these 203 single quantum systems. (The superscript $^-$ in $\psi^G=|z_R\ 5{D_{5/2}}^-\rangle$ indicates that this is an eigenstate of the total Hamiltonian $H=H_0+H_I$, including the interaction $H_I$ and thus not an eigenstate of the orbital angular momentum with $(L=2)=D$.) The state $\psi^G$ evolves in time according to \eqref{3.10} and decays exponentially in time according to
\eqref{3.12}. Fig.~\ref{fig:3} shows that each of the single systems making up the ensemble described by the state vector $|z_R\ 5{D_{5/2}}^-\rangle=~\psi^G$ is individually produced by the resonance production process \begin{equation}
\gamma(455\text{-nm})+6S_{1/2}\rightarrow6P_{3/2}
\rightarrow\gamma(615\text{-nm})+5D_{5/2}
\label{4.1} \end{equation} at particular laboratory times $t_0^1,\ t_0^2,\ t_0^3,\cdots,\ t_0^{203}$. (Of these, $t_0^1,\ t_0^2$ and $t_0^3$ are shown in Fig.~\ref{fig:3} as the onset time of the first three dark periods.) These excited ions in $5D_{5/2}$ then decay according to \begin{equation}
5D_{5/2}\rightarrow6S_{1/2}+\gamma(1.76\text{-$\mu$m})\label{4.2} \end{equation} at times $t_1^1,\ t_1^2,\ t_1^3,\ \cdots,\ t_1^{203}$, the instants at which the fluorescence returns to its pre-``lamp-on'' levels. The duration of the dark period $\Delta t^{i}=t_1^i-t_0^i,\ i=1,2,3,\cdots,203$, is the time for which the $i$-th individual quantum system $5D_{5/2}$ ``lives''. That is, at every onset time $t_0^i$ of the $i^{\rm th}$ dark period, the accuracy of which is determined by the short production time of \eqref{4.1}, an individual $5D_{5/2}$ is ``created''. It ``lives'' for the duration $\Delta t^i=t_1^i-t_0^i$ and decays at $t_1^i$, the end of the $i^{\rm th}$ dark period.
This is a rather remarkable observation because it means that the excited $Ba^+$ in the quasistable $5D_{5/2}$-level lives for a precise time $\Delta t^i$. However, these times $\Delta t^i$ are {\em not
reproducible} quantities, as seen from the different durations of the dark fluorescence periods.
The reproducible quantity is the ensemble average of the time intervals $\Delta t^i$, the lifetime of the state $5D_{5/2}$: \begin{equation}
\tau^{\rm exp}=\sum_i\Delta t^i\frac{N_D(t:\ \Delta t^i>t)}{N_D}. \label{4.3} \end{equation} Here, $N_D(t:\ \Delta t^i>t)$ is the number of dark periods of duration $\Delta t^i>t$ and $N_D$ is the total number of dark periods (203 for this experiment). In the Gamow vector description of the quasistable state $5D_{5/2}$, a theoretical prediction of the quantity $\tau^{\rm exp}$ can be made in terms of the resonance width, as shown below. The individual times $\Delta t^i$ are {\em not predictable} quantities in quantum mechanics.
Let us now turn to the description of the state $5D_{5/2}$ by the Gamow state $\psi^G$ and the problem of the physical meaning of the beginning semigroup time $t_0$. As discussed above, the ensemble state $5D_{5/2}$ consists of a large number of individual quantum physical systems, each created at a different laboratory time $t_0^i$. These times depend on the preparation conditions such as the intensity of the barium lamp (in the present experiment, it is chosen such that a transition to $P_{3/2}$ takes place once every 10 s). However, as seen from \eqref{4.3}, the reproducible experimental quantities depend only on the time intervals $\Delta t^i$, and not on the individual creation times $t_0^i$ or the decay times $t_1^i$. The time interval $\Delta t^i=t_1^i-t_0^i$ is clearly invariant under a translation by $t^i$, i.e., $\Delta t^i=t_1^i-t_0^i=(t_1^i-t^i)-(t_0^i-t^i)$. Now, a time $t^i$ can be chosen for each laboratory creation time $t_0^i$ such that \begin{equation}
t_0^i-t^i=t_0\label{4.4} \end{equation} where the time $t_0$ is independent of the index $i$. The particular choice $t_0=0$ (i.e., $t^i=t_0^i$) corresponds to the beginning semigroup evolution time of the Gamow state $\psi^G$.
What \eqref{4.4} shows, above all, is that the individual micro-physical systems that make up an ensemble described by a quantum mechanical state can be prepared at different times (and, for that matter, different points in space). The time $t_0=0$ of \eqref{4.4} provides a reference time for the entire ensemble of the creation times $\left\{t_0^i\right\}$, \begin{equation}
\left.\begin{matrix}
\text{Ensemble of experimental}\\
\text{ preparation times}
\{t_0^i\}
\end{matrix}\right\}
=
\left\{\begin{matrix}
\text{Theoretical semigroup time}\\
t_0=0\ \text{of the prepared state}
\end{matrix}\right.
\label{4.5} \end{equation}
Thus, the individual systems of the ensemble can be treated as if they were created at the same laboratory time and the duration that each micro system ``lives'' can simply be characterized by the time at which it decays. This feature makes it possible to describe the entire ensemble by a single Gamow state vector $\psi^{G}$ and the time evolution of the entire ensemble by a single time variable $t\geq t_0$. Such a state vector description, in turn, makes it possible to use the standard probability interpretation also for an ensemble that consists of a large number of micro systems created at vastly different laboratory times. For instance, by using \eqref{3.10} for the Gamow vector $\psi^G(t)=e^{-iHt}|z_Rjj_3\eta^-\rangle=e^{-iHt}|z_R 5{D_{5/2}}^-\rangle$, the lifetime of the excited state $5D_{5/2}$ can be computed in analogy to \eqref{3.12} as: \begin{equation}
\tau^{\rm theor}=\int_{t_0=0}^\infty dte^{-\Gamma
t}=\frac{1}{\Gamma}\label{4.6} \end{equation} The experimental quantity of \eqref{4.3} is to be compared with this theoretical quantity.
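This comparison can be illustrated with a small simulation (a sketch added here for illustration, not experimental data; the width $\Gamma$ and the random seed are arbitrary choices): exponentially distributed dark-period durations $\Delta t^i$ are drawn and their ensemble average is compared with $1/\Gamma$.
\begin{verbatim}
# A minimal simulation sketch: exponentially distributed dark-period
# durations Delta t^i are averaged and compared with the lifetime 1/Gamma.
import numpy as np

rng = np.random.default_rng(0)
Gamma = 0.1     # arbitrary width (in 1/s), for illustration only
N_D = 203       # number of dark periods, as reported in the experiment

dt = rng.exponential(scale=1.0 / Gamma, size=N_D)  # durations Delta t^i
tau_exp = dt.mean()                                # ensemble average
tau_theor = 1.0 / Gamma                            # eq. (4.6)

print(tau_exp, tau_theor)  # agree up to statistical fluctuations
\end{verbatim}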
New in these remarkable experiments of \cite{dehmelt, sauter} is that the different creation times $t_0^i$ and durations $\Delta t^i$ for the single quantum systems are precisely and individually measured as the onset and duration of the dark periods of Fig.~\ref{fig:3}. These onset times are an experimental demonstration of the semigroup time $t_0=0$ of time asymmetric quantum theory.
\section{Summary}\label{sec5}
Many of the heuristic notions used in the description of scattering and decay phenomena, like the incoming and outgoing Lippmann-Schwinger kets $|E^\pm\rangle=|E\pm i\epsilon\rangle$ with infinitesimal $\epsilon$, purely outgoing boundary conditions, time asymmetry and causality are not well defined in the mathematical frame set by the conventional (Hilbert space) quantum mechanics. Combining these notions with the Hilbert space axiom leads to contradictions, like the exponential catastrophe in which Gamow vectors and unitary time evolution conflicted \cite{bohm2}, the deviations from the exponential decay where the exponential time dependence for the experimental counting rates conflicted with the mathematical properties of Hilbert space vectors \cite{bohm3}, and the problems with (Einstein) causality where stability of matter (semi-boundedness of the Hilbert space Hamiltonian) leads to instant propagation of probabilities \cite{fermi}. The $\pm i\epsilon$ of the Lippmann-Schwinger kets (or, of the propagator in relativistic quantum field theory) overcomes many of these problems.
But the Lippmann-Schwinger kets are mathematically undefined kets; they are not vectors of the Hilbert space and they cannot be defined as Schwartz space functionals because of the $\pm i\epsilon$. Therefore one cannot derive their time evolution (or, in the relativistic case, their evolution under Poincar\'e transformations). Nevertheless, one {\em assumes} it to be a unitary time evolution (as one also had {\em assumed} for the ordinary Dirac kets) with time extending over $-\infty<t<\infty$. This, however, is in conflict with the infinitesimal imaginary part $\pm i\epsilon$ since it would lead to non-continuous and unbounded (non-unitary) operators for time evolution (or, in the relativistic case, non-unitary representations of the Poincar\'e group). Complex extensions of energy (or, in the relativistic case, the invariant square mass $s=p_\mu p^\mu$) away from the real axis require that the energy wave functions be boundary values of analytic functions in the complex semi-planes, not just (Lebesgue) square-integrable or smooth functions of real energy.
Using the Lippmann-Schwinger equation as the takeoff point and attempting to accommodate as many of the heuristic notions of scattering and decay as possible, we conjectured in this paper the new hypothesis (\ref{3.7}$\pm$). It replaces the Hilbert space boundary conditions (A2) for the solutions of the Schr\"odinger or Heisenberg equation by the Hardy space boundary conditions (\ref{3.7}$\pm$). Many of the heuristic notions, such as Gamow's wave functions, that had been introduced phenomenologically into the description of scattering and decay phenomena appear also in this new quantum theory, but now they have a rigorous mathematical foundation. Furthermore, the new theory leads to important novel conclusions, salient among which is a basic, quantum mechanical time asymmetry, expressed by the semigroup evolution of (\ref{3.7.5}$\pm$). This overcomes the causality problem and leads to exponential decay for certain kets with complex energy, the Gamow kets.
Gamow kets have been derived from the resonance poles of the $S$-matrix using the new axiom (\ref{3.7}$\pm$). Their energy wave function is a Lorentzian (Breit-Wigner) energy distribution characterized by its central value $E_R$ and width $\Gamma$, and the lifetime of its exponential decay is exactly $\tau=\frac{\hbar}{\Gamma}$. The new axiom (\ref{3.7}$\pm$) thus provides a unified theory of resonance scattering and exponential decay.
But the semigroup also introduces a beginning of time for quantum systems, which is represented by the mathematical semigroup time $t=0$. Though such a time has been mentioned before as the big bang time for universes \cite{gell-mann} and its idea is already contained in the classic paper \cite{feynman}, one has not been much aware of it in the usual experiments with quantum systems in the laboratory. In the final section \ref{sec4}, we therefore discussed an experiment with single laser-cooled $Ba^+$ ions in a trap \cite{dehmelt} where the beginnings of time for single micro-systems have been observed.
\end{document} |
\begin{document}
\title{The Error in Multivariate Linear Extrapolation with Applications to Derivative-Free Optimization}
\begin{abstract}
We study in this paper the function approximation error of multivariate linear extrapolation. The sharp error bound of linear interpolation already exists in the literature. However, linear extrapolation is used far more often in applications such as derivative-free optimization, while its error is not well-studied. We introduce in this paper a method to numerically compute the sharp bound on the error, and then present several analytical bounds along with the conditions under which they are sharp. We analyze in depth the approximation error achievable by quadratic functions and the error bound for the bivariate case. All results are under the assumptions that the function being interpolated has Lipschitz continuous gradient and is interpolated on an affinely independent sample set. \end{abstract}
\section{Introduction} \label{sec:intro} Polynomial interpolation is one of the most basic techniques for approximating functions and plays an essential role in applications such as finite element methods and derivative-free optimization. This led to a large amount of literature concerning its approximation error. This paper contributes to this area of study by analyzing the function approximation error of linear interpolation and extrapolation. Specifically, given a function $f: \R^n \rightarrow \R$ and an affinely independent sample set $\Theta:= \{\mathbf{x}_1,\mathbf{x}_2,\dots,\mathbf{x}_{n+1}\} \subset \R^n$, one can find a unique affine function $\hat{f}: \R^n \rightarrow \R$ such that $\hat{f}(\mathbf{x}_i) = f(\mathbf{x}_i)$ for all $i \in \{1,\dots,n+1\}$.
We investigate in this paper the (sharp) upper bound on the approximation error $|\hat{f}(\mathbf{x}) - f(\mathbf{x})|$ when the sample set $\Theta$ and the point where the error is measured $\mathbf{x}$ are given, and $f$ is assumed to belong to $C_\nu^{1,1}(\R^n)$. The class $C_\nu^{1,1}(\R^n)$ represents the differentiable functions defined on $\R^n$ with their first derivative $Df$ being $\nu$-Lipschitz continuous, i.e., \begin{equation} \label{eq:Lipschitz}
\|Df(\mathbf{u}) - Df(\mathbf{v})\| \le \nu \|\mathbf{u} - \mathbf{v}\| \quad \text{for all } \mathbf{u},\mathbf{v} \in \R^n, \end{equation} where $\nu>0$ is the Lipschitz constant, and the norms are Euclidean.
The sharp bound on $|\hat{f}(\mathbf{x}) - f(\mathbf{x})|$ is already discovered and proved in \cite{waldron1998error} for linear interpolation, but only for the case when the word ``interpolation'' is used in its narrow sense, i.e., when $\mathbf{x} \in \conv(\Theta)$, the convex hull of $\Theta$. In this paper, we make no assumption on the location of $\mathbf{x}$ relative to $\Theta$, and the word ``interpolation'' is typically used to refer to this general case.
The function approximation error of univariate ($n=1$) interpolation using polynomials of any degree is already well-studied, and the results can be found in classical literature such as \cite{davis1975book}. If a $(d+1)$-times differentiable function $f$ defined on $\R$ is interpolated by a polynomial of degree $d$ on $d+1$ unique points $\{x_1, x_2, \dots, x_{d+1}\} \subset \R$, then the resulting polynomial has the approximation error \begin{equation} \label{eq:Cauchy remainder}
\frac{(x-x_1)(x-x_2)\cdots(x-x_{d+1})}{(d+1)!} D^{d+1} f(\xi) \quad \text{for all } x \in \R \end{equation} for some $\xi$ with $\min(x,x_1,\dots,x_{d+1}) < \xi < \max(x,x_1,\dots,x_{d+1})$.
Unfortunately this result cannot be extended to the multivariate ($n>1$) case directly, even if the polynomial is linear ($d=1$).
The function approximation error of multivariate polynomial interpolation has been studied by researchers from multiple research fields. Motivated by their application in finite element methods, formulae for the errors in both Lagrange and Hermite interpolation with polynomials of any degree were derived in \cite{ciarlet1972general}. As a part of an effort to develop derivative-free optimization algorithms, a bound on the error of quadratic interpolation was provided in \cite{powell2001lagrange}. The sharp error bound for linear interpolation was found by researchers of approximation theory for the case when $\mathbf{x} \in \conv(\Theta)$ using the unique Euclidean sphere that contains $\Theta$ in \cite{waldron1998error}. Following \cite{waldron1998error}, a number of sharp error bounds were derived in \cite{stampfle2000optimal} for linear interpolation under several different smoothness or continuity assumptions in addition to \eqref{eq:Lipschitz}.
While the sharp error bound for the $\mathbf{x} \in \conv(\Theta)$ case is already established, in applications like model-based derivative-free optimization (DFO), where linear interpolation is employed to approximate the black-box objective function \cite{powell1994direct, DFO_book}, the approximation model $\hat{f}$ is used more often than not to estimate the function value at a point outside $\conv(\Theta)$. As illustrated in Figure~\ref{fig:DFO-TR}, these optimization algorithms attempt to minimize the objective function by alternately constructing a linear interpolation model and minimizing the model inside a trust region, where the trust region is typically a ball around the point with the lowest known function value. The minimizer of the model inside the trust region would then have its function value evaluated and become part of the sample set for constructing the linear interpolation model in the next iteration. In practice, this minimizer is rarely located inside $\conv(\Theta)$.
There is also another class of DFO methods known as the simplex methods. One example is the famous Nelder-Mead method \cite{nelder1965simplex}. As illustrated in Figure~\ref{fig:NelderMead}, the main routine of these algorithms involves taking a set of $n+1$ affinely independent points $\Theta$ (the vertices of a simplex) and reflecting the one with the largest function value through the hyperplane defined by the rest. While linear interpolation is not used in these algorithms, the range of the function value at this reflection point ($\mathbf{x}_4$ in Figure~\ref{fig:NelderMead}, which is always outside $\conv(\Theta)$) can be determined by the sum of the value estimated by the interpolation model and the error of the estimation.
\begin{figure}
\caption{An illustration of two DFO algorithms when minimizing a bivariate function, where $f(\mathbf{x}_1) >$ $f(\mathbf{x}_2) >$ $f(\mathbf{x}_3)$. The vertices of the triangles represent $\Theta$. This figure only illustrates the algorithms' behavior when the trial point $\mathbf{x}_4$ satisfies $f(\mathbf{x}_4) < f(\mathbf{x}_3)$.}
\label{fig:DFO-TR}
\label{fig:NelderMead}
\label{fig:DFO}
\end{figure}
To further the design and analysis of these DFO algorithms, we use both numerical and analytical approaches to investigate the sharp upper bound on the function approximation error of linear interpolation.
The results of this investigation provide a theoretical basis for the analysis of numerical methods that use linear interpolation, including the DFO methods mentioned above. Furthermore, they can also be directly applied to improve certain DFO algorithms. For example, the model-based algorithms, which are usually designed to optimize functions that are computationally expensive to evaluate, typically request a function evaluation for one of two purposes: to check a point predicted by the model to have an improvement in function value (as shown in Figure~\ref{fig:DFO-TR}) or to explore a point that can contribute to the construction of a more accurate approximation model. Being able to estimate the magnitude of the approximation error at a given point in the former case allows the algorithm to compare it to the predicted improvement in function value and make an informed decision on whether the point is worth evaluating. By prioritizing spending the function evaluation to improve the model rather than check the point when the error is relatively large, the algorithm's overall efficiency can be improved.
The applications of this paper's results in DFO will be further discussed later, but please keep in mind that our analysis is for linear interpolation in general and can be applied wherever this approximation technique is used. Our main contributions are as follows. \begin{enumerate}
\item We formulate the problem of finding the sharp error bound as a nonlinear programming problem and show that it can be solved numerically to obtain the desired bound.
\item An analytical bound on the function approximation error is derived and proved to be sharp for interpolation and, under certain conditions, for extrapolation.
\item The largest function approximation error that is achievable by quadratic functions in $C_\nu^{1,1}(\R^n)$ is derived, and the condition under which it is an upper bound on the error achievable by all functions in $C_\nu^{1,1}(\R^n)$ is determined.
\item For bivariate ($n=2$) linear extrapolation, we analyze the case when neither of the two previous results equals the sharp bound on the function approximation error and provide the formula for the actual sharp bound. We also show that piecewise quadratic functions can achieve the approximation error indicated by the sharp bound. \end{enumerate}
The paper is organized as follows.
Our notation and the preliminary knowledge are introduced in Section~\ref{sec:preliminaries}. The nonlinear programming problem is presented in Section~\ref{sec:numerical}. In Section~\ref{sec:phase1}, we generalize an existing analytical bound and then improve it. In Section~\ref{sec:phase2}, we study the error in approximating quadratic functions. In Section~\ref{sec:phase3}, we show how to calculate the sharp bound on the function approximation error of bivariate linear interpolation. We conclude the paper in Section~\ref{sec:discussion} by discussing our findings and some open questions.
\section{Notation and Preliminaries} \label{sec:preliminaries} Since the research in this paper involves approximation theory and optimization, to appeal to audiences from both research fields, we provide a detailed introduction to our notation and the preliminary knowledge.
Throughout the paper, vectors are denoted by boldface letters and matrices by capital letters.
We denote by $\|\cdot\|$ the Euclidean norm. The dot product between vectors or matrices of the same size, $\mathbf{u}\cdot\mathbf{v}$ or $U \cdot V$, is the summation of the entry-wise product, which are customarily denoted by $\mathbf{u}^T\mathbf{v}$ and Tr$(U^T V)$ in optimization literature.
Let $\mathbf{e}_i$ be the vector that is all 0 but has 1 as its $i$th entry. Let $Y \in \R^{(n+1)\times n}$ be the matrix such that its $i$th row $\displaystyle Y^T \mathbf{e}_i = \mathbf{x}_i-\mathbf{x}$ for all $i = 1,2,\dots,n+1$. We define $\phi:\R^n \rightarrow \R^{n+1}$ as the {\it basis function} such that $\phi(\mathbf{u}) = \begin{bmatrix} 1 &\mathbf{u}^T \end{bmatrix}^T$ for all $\mathbf{u}\in\R^n$, and $\Phi$ as the $(n+1)$-by-$(n+1)$ matrix $\begin{bmatrix} \mathbf{1} &Y \end{bmatrix}$, where $\mathbf{1}$ is the all-one vector. Notice the affine independence of $\Theta$ implies the nonsingularity of $\Phi$.
Let $\ell_1, \dots, \ell_{n+1}$ be the {\it Lagrange polynomials}, i.e. the unique set of polynomials such that $\ell_i(\mathbf{x}_j) = 1$ if $i=j$, and $\ell_i(\mathbf{x}_j) = 0$ if $i\neq j$.
The values of these polynomials at $\mathbf{x}$ coincide with the set of barycentric coordinates of $\mathbf{x}$ with respect to $\Theta$ and have the following properties: \begin{align}
\sum_{i=1}^{n+1} \ell_i(\mathbf{x}) f(\mathbf{x}_i) &= \hat{f}(\mathbf{x}), \label{eq:Lagrange m} \\
\sum_{i=1}^{n+1} \ell_i(\mathbf{x}) &= 1, \label{eq:Lagrange 0} \\
\text{and } \sum_{i=0}^{n+1} \ell_i(\mathbf{x}) \mathbf{x}_i &= \mathbf{0}. \label{eq:Lagrange Y} \end{align} The concepts of basis functions and Lagrange polynomials are fundamental to approximation theory. The book \cite{DFO_book} offers a comprehensive introduction to them in the context of derivative-free optimization.
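As a concrete illustration (a small sketch we add here; the sample set, the point, and the test function are arbitrary), the values $\ell_i(\mathbf{x})$ can be computed by solving a single linear system, after which \eqref{eq:Lagrange m} gives the value of the interpolant at $\mathbf{x}$.
\begin{verbatim}
# A minimal sketch: Lagrange values ell_i(x) (barycentric coordinates of x
# with respect to Theta) and the linear interpolant's value at x.
import numpy as np

Theta = np.array([[-0.3, 1.0], [-1.1, -0.5], [1.0, 0.0]])  # x_1,...,x_{n+1}
x = np.array([0.4, 0.3])
f = lambda u: np.sin(u[0]) + u[1] ** 2                     # arbitrary test function

# ell solves: sum_i ell_i = 1 and sum_i ell_i x_i = x
A = np.vstack([np.ones(len(Theta)), Theta.T])
ell = np.linalg.solve(A, np.concatenate([[1.0], x]))

f_hat_x = ell @ np.array([f(xi) for xi in Theta])          # interpolant at x
print(ell, f_hat_x)
\end{verbatim}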
For the ease of exposition, we abbreviate $\ell_i(\mathbf{x})$ to $\ell_i$ and define $\mathbf{x}_0 = \mathbf{x}$ and $\ell_0 = -1$. Another reason for the artificially defined $\mathbf{x}_0$ and $\ell_0$ will be made clear in Section~\ref{sec:numerical}. Without loss of generality, we assume the set $\Theta = \{\mathbf{x}_1,\mathbf{x}_2,\dots,\mathbf{x}_{n+1}\}$ is ordered in a way such that $\ell_1 \ge \ell_2 \ge \cdots \ge \ell_{n+1}$. We define the following two sets of indices: \begin{subequations} \begin{align}
\cI_+ &= \{i\in \{0,1,\dots,n+1\}:~ \ell_i>0\} = \{1,2,\dots, |\cI_+|\}, \\
\cI_- &= \{i\in \{0,1,\dots,n+1\}:~ \ell_i<0\} = \{0, n+3-|\cI_-|, \dots, n+1\}. \end{align} \end{subequations} Notice \eqref{eq:Lagrange 0} implies $\cI_+ \neq \emptyset$, and $\ell_0=-1$ implies $\cI_- \neq \emptyset$.
It is possible for $n+3-|\cI_-| > n+1$, in which case $\cI_- = \{0\}$.
We define the following matrix $G\in\R^{n\times n}$: \begin{equation} \label{eq:G}
G = \sum_{i=0}^{n+1} \ell_i \mathbf{x}_i \mathbf{x}_i^T, \end{equation} which will be used frequently in our analysis. The notation $\mathbf{x}_i \mathbf{x}_i^T$ is the outer product of $\mathbf{x}_i$ and is sometimes denoted by $\mathbf{x}_i^2$ or $\mathbf{x}_i \otimes \mathbf{x}_i$ otherwise. The matrix $G$ has the property that for any $\mathbf{u},\mathbf{v} \in \R^n$, \begin{equation} \label{eq:G recenter} \begin{aligned}
\sum_{i=0}^{n+1} \ell_i [\mathbf{x}_i-\mathbf{u}] [\mathbf{x}_i-\mathbf{v}]^T
&= \sum_{i=0}^{n+1} \ell_i \left[\mathbf{x}_i\mathbf{x}_i^T - \mathbf{u}\mathbf{x}_i^T - \mathbf{x}_i\mathbf{v}^T + \mathbf{u}\mathbf{v}^T \right] \\
&\leftstackrel{\eqref{eq:Lagrange Y}}{=} \sum_{i=0}^{n+1} \ell_i \left[\mathbf{x}_i\mathbf{x}_i^T + \mathbf{u}\mathbf{v}^T\right]
\stackrel{\eqref{eq:Lagrange 0}}{=} \sum_{i=0}^{n+1} \ell_i \mathbf{x}_i\mathbf{x}_i^T = G. \end{aligned} \end{equation}
The class of functions $C_\nu^{1,1}(\R^n)$ is ubiquitous in the research of nonlinear optimization. It is well-known (see, e.g., section 1.2.2 of the textbook \cite{Nesterov_book}) that the inclusion $f \in C_\nu^{1,1}(\R^n)$ implies \begin{equation} \label{eq:Lipschitz quadratic}
|f(\mathbf{v}) - f(\mathbf{u}) - Df(\mathbf{u}) \cdot (\mathbf{v} - \mathbf{u})| \le \frac{\nu}{2} \|\mathbf{v} - \mathbf{u}\|^2 \text{ for all } \mathbf{u},\mathbf{v} \in \R^n, \end{equation} and that if $f$ is twice differentiable on $\R^n$, \eqref{eq:Lipschitz} and \eqref{eq:Lipschitz quadratic} are equivalent to \begin{equation} \label{eq:Lipschitz Hessian}
-\nu I \preceq D^2 f(\mathbf{u}) \preceq \nu I \text{ for all } \mathbf{u} \in \R^n, \end{equation}
where the condition \eqref{eq:Lipschitz Hessian} is often written as $\|~|D^2 f|~\|_{L_\infty(\R^n)} \le \nu$ in approximation theory literature. What is less well-known about the class $C_\nu^{1,1}(\R^n)$ is that $f \in C_\nu^{1,1}(\R^n)$ also implies \begin{equation} \label{eq:Lipschitz stronger} \begin{aligned}
f(\mathbf{v}) \le &f(\mathbf{u}) + \frac{1}{2} (Df(\mathbf{u}) + Df(\mathbf{v})) \cdot (\mathbf{v} - \mathbf{u}) \\
&+ \frac{\nu}{4} \|\mathbf{v}-\mathbf{u}\|^2 - \frac{1}{4\nu} \|Df(\mathbf{v}) - Df(\mathbf{u})\|^2 \text{ for all } \mathbf{u},\mathbf{v} \in \R^n. \end{aligned} \end{equation} For differentiable functions, \eqref{eq:Lipschitz}, \eqref{eq:Lipschitz quadratic}, and \eqref{eq:Lipschitz stronger} are equivalent.
\section{Error Estimation Problem} \label{sec:numerical} In this section, we formulate the problem of finding the sharp error bound as a numerically solvable nonlinear optimization problem. We first make the important observation that the problem of finding the sharp upper bound on the error is the same as asking for the largest error that a function from $C_\nu^{1,1}(\R^n)$ can achieve. Thus, it can be formulated as the following problem of maximizing the approximation error over the functions in $C_\nu^{1,1}(\R^n)$: \begin{equation} \label{prob:D} \tag{EEP} \everymath{\displaystyle} \begin{array}{ll}
\max_f &|\hat{f}(\mathbf{x}) - f(\mathbf{x})| \\
\text{s.t. } &f \in C_\nu^{1,1}(\R^n), \end{array} \end{equation} where $\hat{f}$ is the affine function that interpolates $f$ on a given set of $n+1$ affinely independent points $\Theta = \{\mathbf{x}_1,\dots,\mathbf{x}_{n+1}\}$.
We call this problem the \textit{error estimation problem} (EEP), a name inspired by the \textit{performance estimation problem} (PEP).
First proposed in \cite{drori2014performance}, a PEP is a nonlinear programming formulation of the problem of finding an optimization algorithm's worst-case performance over a set of possible objective functions. It involves maximizing a performance measure of the given algorithm (the larger the measure, the worse the performance) over the objective functions and, similar to \eqref{prob:D}, is an infinite-dimensional problem. However, with some algorithms and functions, particularly first-order nonlinear optimization methods and convex functions, the PEP is shown to have finite-dimensional equivalents that can be solved numerically \cite{taylor2017smooth,taylor2017exact}, thus providing a computer-aided analysis tool for estimating an algorithm's worst-case performance. Using these theories developed for PEP, we can process the functional constraint $f \in C_\nu^{1,1}(\R^n)$ and turn \eqref{prob:D} into a finite-dimensional problem. Particularly, we use the following theorem from \cite{taylor2017exact}, which states $f \in C_\nu^{1,1}(\R^n)$ can be replaced by \eqref{eq:Lipschitz stronger} for every pair of points in $\Theta \cup \{\mathbf{x}\}$.
\begin{proposition}[Theorem 3.10 \cite{taylor2017exact}]\label{prop:functional} Let $\nu > 0$ and $\cI$ be an index set, and consider a set of triples $\{(\mathbf{x}_i,\mathbf{g}_i,y_i)\}_{i\in\cI}$ where $\mathbf{x}_i\in\R^n$, $\mathbf{g}_i\in\R^n$, and $y_i\in\R$ for all $i\in\cI$. There exists a function $f\in C_\nu^{1,1}(\R^n)$ such that both $\mathbf{g}_i = Df(\mathbf{x}_i)$ and $y_i = f(\mathbf{x}_i)$ hold for all $i\in\cI$ if and only if the following inequality holds for all $i,j\in\cI$: \begin{equation} \label{eq:Lipschitz stronger ij} \begin{aligned}
y_j \le y_i + \frac{1}{2} (\mathbf{g}_i + \mathbf{g}_j) \cdot (\mathbf{x}_j - \mathbf{x}_i) + \frac{\nu}{4} \|\mathbf{x}_j-\mathbf{x}_i\|^2 - \frac{1}{4\nu} \|\mathbf{g}_j - \mathbf{g}_i\|^2. \end{aligned} \end{equation} \end{proposition}
The above proposition allows us to replace the functional variable $f$ with the function values $\{y_i\}$ and gradients $\{\mathbf{g}_i\}$ at $\Theta$ and $\mathbf{x}$. Before applying this proposition, we first substitute the approximated function value $\hat{f}(\mathbf{x})$ in \eqref{prob:D} with $\sum_{i=1}^{n+1} \ell_i f(\mathbf{x}_i)$ using \eqref{eq:Lagrange m} and drop the absolute sign in the objective function. The absolute sign can be dropped thanks to the symmetry of \eqref{eq:Lipschitz}, that is, $-f \in C_\nu^{1,1}(\R^n)$ for any $f \in C_\nu^{1,1}(\R^n)$, and the approximation error on the two functions $f$ and $-f$ are negatives of each other. Finally, by applying Proposition~\ref{prop:functional}, we arrive at \eqref{prob:f-D}, a finite-dimensional equivalent to \eqref{prob:D}: \begin{equation} \label{prob:f-D} \tag{f-EEP} \everymath{\displaystyle} \begin{array}{ll}
\max_{y_i,\mathbf{g}_i} &\sum_{i=0}^{n+1}\ell_i y_i \\
\text{s.t. } &y_j \le y_i + \frac{1}{2} (\mathbf{g}_i + \mathbf{g}_j) \cdot (\mathbf{x}_j - \mathbf{x}_i) + \frac{\nu}{4} \|\mathbf{x}_j-\mathbf{x}_i\|^2 \\
&\qquad - \frac{1}{4\nu} \|\mathbf{g}_j - \mathbf{g}_i\|^2 \quad \forall i,j\in\{0,\dots,n+1\}. \end{array} \end{equation}
The optimization problem \eqref{prob:f-D} is a convex quadratically constrained quadratic program (QCQP). This type of problem can be solved by standard nonlinear optimization solvers. However, \eqref{prob:f-D} contains $n+1$ redundant degrees of freedom, which means it has infinitely many optimal solutions, and the solvers can sometimes have difficulty solving it. It is best to eliminate these degrees of freedom first. The elimination can be done in many ways. For example, one can fix $\{y_i\}_{i=1}^{n+1}$ in \eqref{prob:f-D} to their observed values. Indeed, these function values are needed for constructing the affine approximation $\hat{f}$, so it is natural to assume they are known. However, we note that the optimal value of \eqref{prob:D} and \eqref{prob:f-D} is affected by the locations of the sample points $\Theta$ in the input space but is invariant to the observed function values at these points. Thus, for the purpose of solving \eqref{prob:f-D}, it is also justified to simply set $y_i = 0$ for all $i=1,\dots,n+1$. Alternatively, one can also fix $(\mathbf{g}_i,y_i)$ to $(\mathbf{0}, 0)$ for any $i \in \{0,1,\dots,n+1\}$. We formally prove in the following proposition the $n+1$ degrees of freedom can be removed in these two ways.
\begin{proposition}
The following statements are true.
\begin{enumerate}
\item If any function $f$ is optimal to \eqref{prob:D}, then the function $f'(\mathbf{u}) = f(\mathbf{u}) + c + \mathbf{g}\cdot\mathbf{u}$ is also optimal for any $c\in\R$ and $\mathbf{g}\in\R^n$.
\item The optimal value of \eqref{prob:f-D} does not change if $\{y_i\}_{i=1}^{n+1}$ are fixed to any arbitrary values.
\item The optimal value of \eqref{prob:f-D} does not change if $\mathbf{g}_k$ and $y_k$ are fixed to any arbitrary values for some $k \in \{0,1,\dots,n+1\}$.
\end{enumerate} \end{proposition}
\begin{proof}
By the definition \eqref{eq:Lipschitz}, it is easy to see $f' \in C_\nu^{1,1}(\R^n)$ whenever $f\in C_\nu^{1,1}(\R^n)$. The two objective values can also be shown to be the same using \eqref{eq:Lagrange m}, \eqref{eq:Lagrange 0}, and \eqref{eq:Lagrange Y}:
\[ \sum_{i=0}^{n+1} \ell_i f'(\mathbf{x}_i) = \sum_{i=0}^{n+1} \ell_i [f(\mathbf{x}_i) + c + \mathbf{g}\cdot\mathbf{x}_i] = \sum_{i=0}^{n+1} \ell_i f(\mathbf{x}_i). \]
The first statement is thus true.
To prove the second statement, we first assume \eqref{prob:f-D} has an optimal solution $\{y_i^\star, \mathbf{g}_i^\star\}_{i=0}^{n+1}$.
Now suppose the problem has an additional set of constraints that fixes the function values of the points in $\Theta$ to some arbitrary values $\{y_i\}_{i=1}^{n+1}$.
Then, this new problem has the exact same optimal value as the original \eqref{prob:f-D}, and an optimal solution satisfies $\mathbf{g}_i = \mathbf{g}_i^\star + \mathbf{g}$ for all $i=0,1,\dots,n+1$ and $y_0 = y_0^\star + c + \mathbf{g}\cdot\mathbf{x}_0$, where $(\mathbf{g},c)$ is the unique solution to the linear system $c + \mathbf{g}\cdot\mathbf{x}_i = y_i - y_i^\star, i=1,\dots,n+1$.
Indeed, the constraints of this new problem are satisfied as
\[ \begin{aligned}
&- y_j + y_i + \frac{1}{2} (\mathbf{g}_i + \mathbf{g}_j) \cdot (\mathbf{x}_j - \mathbf{x}_i) + \frac{\nu}{4} \|\mathbf{x}_j-\mathbf{x}_i\|^2 - \frac{1}{4\nu} \|\mathbf{g}_j - \mathbf{g}_i\|^2 \\
&= - y_j + y_i + \frac{1}{2} (\mathbf{g}_i^\star + \mathbf{g}_j^\star + 2\mathbf{g}) \cdot (\mathbf{x}_j - \mathbf{x}_i) + \frac{\nu}{4} \|\mathbf{x}_j-\mathbf{x}_i\|^2 - \frac{1}{4\nu} \|\mathbf{g}_j^\star - \mathbf{g}_i^\star\|^2 \\
&= - y_j^\star + y_i^\star + \frac{1}{2} (\mathbf{g}_i^\star + \mathbf{g}_j^\star) \cdot (\mathbf{x}_j - \mathbf{x}_i) + \frac{\nu}{4} \|\mathbf{x}_j-\mathbf{x}_i\|^2 - \frac{1}{4\nu} \|\mathbf{g}_j^\star - \mathbf{g}_i^\star\|^2
\ge 0
\end{aligned} \]
for all $i,j = 0,1,\dots,n+1$,
where the second equality is true because $\mathbf{g}\cdot(\mathbf{x}_j-\mathbf{x}_i) = (y_j - y_j^\star - c) - (y_i - y_i^\star - c)$, and the objective function
\[ \sum_{i=0}^{n+1} \ell_i y_i
= y_0^\star + c + \mathbf{g}\cdot\mathbf{x}_0 + \sum_{i=1}^{n+1} \ell_i y_i
\stackrel{\eqref{eq:Lagrange 0}\eqref{eq:Lagrange Y}}{=} y_0^\star + \sum_{i=0}^{n+1} \ell_i [y_i + c + \mathbf{g}\cdot\mathbf{x}_i]
= \sum_{i=0}^{n+1} \ell_i y_i^\star.
\]
Similarly, \eqref{prob:f-D} with $(\mathbf{g}_k,y_k)$ fixed for some $k\in\{0,1,\dots,n+1\}$ also has the same optimal value as \eqref{prob:f-D}, and its optimal solution satisfies $\mathbf{g}_i = \mathbf{g}_i^\star - \mathbf{g}_k^\star + \mathbf{g}_k$ and $y_i = y_i^\star - y_k^\star + y_k + (\mathbf{g}_k-\mathbf{g}_k^\star)\cdot(\mathbf{x}_i-\mathbf{x}_k)$ for all $i = 0,1,\dots,n+1$.
The constraints are satisfied as
\[ \begin{aligned}
&- y_j + y_i + \frac{1}{2} (\mathbf{g}_i + \mathbf{g}_j) \cdot (\mathbf{x}_j - \mathbf{x}_i) + \frac{\nu}{4} \|\mathbf{x}_j-\mathbf{x}_i\|^2 - \frac{1}{4\nu} \|\mathbf{g}_j - \mathbf{g}_i\|^2 \\
&= - [y_j^\star - y_k^\star + y_k + (\mathbf{g}_k-\mathbf{g}_k^\star)\cdot(\mathbf{x}_j-\mathbf{x}_k)] + [y_i^\star - y_k^\star + y_k + (\mathbf{g}_k-\mathbf{g}_k^\star)\cdot(\mathbf{x}_i-\mathbf{x}_k)] \\
&\quad + \frac{1}{2} (\mathbf{g}_i^\star + \mathbf{g}_j^\star + 2\mathbf{g}_k - 2\mathbf{g}_k^\star) \cdot (\mathbf{x}_j - \mathbf{x}_i) + \frac{\nu}{4} \|\mathbf{x}_j-\mathbf{x}_i\|^2 - \frac{1}{4\nu} \|\mathbf{g}_j^\star - \mathbf{g}_i^\star\|^2 \\
&= - y_j^\star + y_i^\star + \frac{1}{2} (\mathbf{g}_i^\star + \mathbf{g}_j^\star) \cdot (\mathbf{x}_j - \mathbf{x}_i) + \frac{\nu}{4} \|\mathbf{x}_j-\mathbf{x}_i\|^2 - \frac{1}{4\nu} \|\mathbf{g}_j^\star - \mathbf{g}_i^\star\|^2
\ge 0
\end{aligned} \]
for all $i,j = 0,1,\dots,n+1$,
and the objective function
\[ \sum_{i=0}^{n+1} \ell_i y_i
= \sum_{i=0}^{n+1} \ell_i [y_i^\star - y_k^\star + y_k + (\mathbf{g}_k-\mathbf{g}_k^\star)\cdot(\mathbf{x}_i-\mathbf{x}_k)]
\stackrel{\eqref{eq:Lagrange 0}\eqref{eq:Lagrange Y}}{=} \sum_{i=0}^{n+1} \ell_i y_i^\star.
\] \end{proof}
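As an illustration of the preceding discussion, the following sketch (ours; the data are arbitrary, and we assume the CVXPY modelling package with its default conic solver) sets up \eqref{prob:f-D} with $y_1=\cdots=y_{n+1}$ fixed to zero and solves it for one point $\mathbf{x}$.
\begin{verbatim}
# A minimal sketch of solving (f-EEP) with CVXPY (assumed available).
# The sample set, the point x, and nu are arbitrary illustrative choices.
import numpy as np
import cvxpy as cp

nu = 1.0
Theta = np.array([[-0.3, 1.0], [-1.1, -0.5], [1.0, 0.0]])  # x_1,...,x_{n+1}
x = np.array([2.0, 1.5])                                   # the point x = x_0
pts = np.vstack([x, Theta])                                # x_0, x_1,...,x_{n+1}
m, n = pts.shape

# Lagrange values ell_1,...,ell_{n+1} at x, with ell_0 = -1 prepended.
A = np.vstack([np.ones(n + 1), Theta.T])
ell = np.concatenate([[-1.0], np.linalg.solve(A, np.concatenate([[1.0], x]))])

y = cp.Variable(m)
g = cp.Variable((m, n))
constraints = [y[1:] == 0]          # remove the redundant degrees of freedom
for i in range(m):
    for j in range(m):
        if i != j:
            d = pts[j] - pts[i]
            constraints.append(
                y[j] <= y[i] + 0.5 * (g[i] + g[j]) @ d + nu / 4 * d @ d
                - 1 / (4 * nu) * cp.sum_squares(g[j] - g[i]))
prob = cp.Problem(cp.Maximize(ell @ y), constraints)
prob.solve()
print(prob.value)                   # sharp bound on |f_hat(x) - f(x)|
\end{verbatim}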
Apart from its application in model-based derivative-free optimization as introduced in Section~\ref{sec:intro}, \eqref{prob:f-D} also offers us insight into the approximation error and guidance in seeking the analytical form of the sharp bound. Particularly, it can be used to visualize the sharp error bound for bivariate linear interpolation. We do this by first selecting a fixed set of three affinely independent sample points $\Theta \subset \R^2$ and a $100\times100$ grid. Then, \eqref{prob:f-D} is solved repeatedly while $\mathbf{x}$ is set to each point on the grid. The result of one instance of this numerical experiment is shown in Figure~\ref{fig:numerical}. It can be observed that this bound is a piecewise smooth function of $\mathbf{x}$, and the boundaries between the smooth pieces align with the edges of the triangle defined by $\Theta$. It will be shown in Section~\ref{sec:phase2} that this piecewise smooth function, at least in the case shown in Figure~\ref{fig:numerical} where $\conv(\Theta)$ is an acute triangle, can be represented by a single formula. \begin{figure}
\caption{The sharp error bound on $|\hat{f}(\mathbf{x}) - f(\mathbf{x})|$ for each $\mathbf{x}$ on the $100\times100$ grid that covers the area $[-2.5,2.5]\times[-1.5,2.5]$ evenly. The sample set and the Lipschitz constant are chosen as $\Theta = \{(-0.3,1), (-1.1,-0.5), (1,0)\}$ and $\nu = 1$.}
\label{fig:numerical}
\end{figure}
In \eqref{prob:f-D}, the point $\mathbf{x}$ and its derivative and function value are represented by $(\mathbf{x}_0, \mathbf{g}_0, y_0)$, whereas $(\mathbf{x}_i, \mathbf{g}_i,y_i)$ are used for the points $\mathbf{x}_i\in\Theta$ with $i=1,\dots,n+1$. If we ignore what these points represent in linear interpolation and look at the optimization problem \eqref{prob:f-D} as it is, we can see that, in \eqref{prob:f-D}, the point $\mathbf{x}$ is not special compared to the points in $\Theta$, with the only difference being that the coefficient of $y_0$ in the objective is fixed to $\ell_0 = -1$. Therefore, to symbolize the point's ordinary status and simplify the expressions, we index $\mathbf{x}$ as the zeroth point and sometimes use $\mathbf{x}_0$ in place of the customary $\mathbf{x}$. This observation also leads us to the following proposition, which shows how the sharp error bound changes when $\mathbf{x}$ is swapped with a point in $\Theta$ and will be used to greatly simplify the analysis in Section~\ref{sec:phase3}. \begin{proposition} \label{thm:swap}
Assume there is an affinely independent sample set $\Theta=\{\mathbf{x}_1,\dots,\mathbf{x}_{n+1}\}$ and a point $\mathbf{x}\in\R^n$ such that $\Theta\setminus\{\mathbf{x}_k\}\cup\{\mathbf{x}\}$ is also affinely independent for a given $k\in\{1,\dots,n+1\}$.
Let $\ell_k$ be the Lagrange polynomial (with respect to $\Theta$ not $\Theta\setminus\{\mathbf{x}_k\}\cup\{\mathbf{x}\}$) corresponding to $\mathbf{x}_k$.
Let $\hat{f}$ and $\hat{f}'$ be the affine functions that interpolate some $f:\R^n \rightarrow \R$ on $\Theta$ and $\Theta\setminus\{\mathbf{x}_k\}\cup\{\mathbf{x}\}$, respectively.
The following two statements hold.
\begin{enumerate}
\item The function approximation error of $\hat{f}'$ at $\mathbf{x}_k$ is the error of $\hat{f}$ at $\mathbf{x}$ divided by $-\ell_k(\mathbf{x})$, i.e., $\hat{f}'(\mathbf{x}_k) - f(\mathbf{x}_k) = (\hat{f}(\mathbf{x}) - f(\mathbf{x})) / (-\ell_k(\mathbf{x}))$.
\item If $f\in C_\nu^{1,1}(\R^n)$ and $|\hat{f}(\mathbf{x}) - f(\mathbf{x})|$ is the largest error achievable by any function in $C_\nu^{1,1}(\R^n)$, then $f$ also achieves the largest $|\hat{f}'(\mathbf{x}_k) - f(\mathbf{x}_k)|$.
\end{enumerate} \end{proposition}
\begin{proof} If we divide $\hat{f}(\mathbf{x}) - f(\mathbf{x}) = \sum_{i=0}^{n+1} \ell_i(\mathbf{x}) y_i$ by $-\ell_k(\mathbf{x})$, the coefficient before $y_i$ becomes $\alpha_i = -\ell_i(\mathbf{x})/\ell_k(\mathbf{x})$ for all $i = 0,1,\dots,n+1$. Since $\alpha_k = -1$, $\sum_{i=0}^{n+1} \alpha_i = 0$, and $\sum_{i=0}^{n+1} \alpha_i \mathbf{x}_i = 0$, the coefficients $\{\alpha_i\}_{i=0,i\neq k}^{n+1}$ are the values of the Lagrange polynomials with respect to $\Theta\setminus\{\mathbf{x}_k\}\cup\{\mathbf{x}\}$ at $\mathbf{x}_k$. Thus, the quotient is exactly $\hat{f}'(\mathbf{x}_k) - f(\mathbf{x}_k)$.
The premise of the second statement assumes $f$ is an optimal solution to \eqref{prob:D}.
The same $f$ must also be an optimal solution to the problem of finding the largest $|\hat{f}'(\mathbf{x}_k) - f(\mathbf{x}_k)|$, since this optimization problem is simply \eqref{prob:D} with its objective function divided by the constant $-\ell_k(\mathbf{x})$, and, as discussed before, the absolute sign can be ignored due to symmetry. \end{proof}
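The first statement is easy to check numerically. The sketch below (ours; the test function, the sample set, and the point are arbitrary) interpolates on $\Theta$ and on $\Theta\setminus\{\mathbf{x}_1\}\cup\{\mathbf{x}\}$ and compares the two errors.
\begin{verbatim}
# A numerical check of statement 1: swapping x with x_k divides the
# approximation error by -ell_k(x). All data below are arbitrary choices.
import numpy as np

def lagrange_values(S, x):
    """Values at x of the Lagrange polynomials of the sample set S."""
    A = np.vstack([np.ones(len(S)), S.T])
    return np.linalg.solve(A, np.concatenate([[1.0], x]))

f = lambda u: np.sin(u[0]) + np.cos(2 * u[1])
Theta = np.array([[-0.3, 1.0], [-1.1, -0.5], [1.0, 0.0]])
x, k = np.array([1.5, 1.2]), 0                    # swap x with x_1 (index k = 0)

ell = lagrange_values(Theta, x)
err = ell @ np.array([f(xi) for xi in Theta]) - f(x)            # f_hat(x) - f(x)

Theta2 = Theta.copy()
Theta2[k] = x                                                   # Theta \ {x_k} u {x}
ell2 = lagrange_values(Theta2, Theta[k])
err2 = ell2 @ np.array([f(xi) for xi in Theta2]) - f(Theta[k])  # f_hat'(x_k) - f(x_k)

print(err2, err / (-ell[k]))                      # agree up to rounding
\end{verbatim}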
\section{An Improved Upper Bound} \label{sec:phase1}
We now begin our attempt at finding the analytical form of the bound. The theoretical results in \cite{ciarlet1972general} and \cite{powell2001lagrange} are obtained by comparing $f$ against its Taylor expansion at $\mathbf{x}$. We generalize their approach in Theorem~\ref{thm:phase1} by using the Taylor expansion of $f$ at an arbitrary $\mathbf{u} \in \R^n$. \begin{theorem}\label{thm:phase1} Assume $f \in C^{1,1}_\nu(\R^n)$. Let $\hat{f}$ be the linear function that interpolates $f$ at any set of $n+1$ affinely independent vectors $\Theta = \{\mathbf{x}_1,\dots,\mathbf{x}_{n+1}\}\subset \R^n$. The function approximation error of $\hat{f}$ at any $\mathbf{x}\in\R^n$ is bounded as \begin{equation} \label{eq:phase1 u}
|\hat{f}(\mathbf{x}) - f(\mathbf{x})| \le \frac{\nu}{2} \left(\|\mathbf{x}-\mathbf{u}\|^2 + \sum_{i=1}^{n+1} |\ell_i(\mathbf{x})| \|\mathbf{x}_i-\mathbf{u}\|^2\right), \end{equation} where $\mathbf{u}$ can be any vector in $\R^n$. \end{theorem}
\begin{proof} By \eqref{eq:Lipschitz quadratic}, we have for any $\mathbf{u} \in \R^n$ \begin{subequations} \label{phase1 set of inequalities} \begin{align}
\ell_i [f(\mathbf{x}_i) - f(\mathbf{u}) - Df(\mathbf{u})\cdot (\mathbf{x}_i-\mathbf{u})] &\le \ell_i \frac{\nu}{2} \|\mathbf{x}_i-\mathbf{u}\|^2 \text{ for all } i\in \cI_+, \\
-\ell_i [-f(\mathbf{x}_i) + f(\mathbf{u}) + Df(\mathbf{u})\cdot(\mathbf{x}_i-\mathbf{u})] &\le -\ell_i \frac{\nu}{2} \|\mathbf{x}_i-\mathbf{u}\|^2 \text{ for all } i\in \cI_-. \end{align} \end{subequations} Now add all inequalities above together. The sum of the left-hand sides is \[ \begin{aligned}
&\sum_{i=0}^{n+1} \ell_i [f(\mathbf{x}_i) - f(\mathbf{u})] + Df(\mathbf{u}) \cdot \sum_{i=0}^{n+1} \ell_i [\mathbf{u}-\mathbf{x}_i] \\
&\stackrel{\eqref{eq:Lagrange 0}}{=} \sum_{i=0}^{n+1} \ell_i f(\mathbf{x}_i) + Df(\mathbf{u}) \cdot \sum_{i=1}^{n+1} \ell_i \mathbf{x}_i
\stackrel{\eqref{eq:Lagrange m}\eqref{eq:Lagrange Y}}{=} \hat{f}(\mathbf{x}) - f(\mathbf{x}), \end{aligned} \]
while the sum of the right-hand sides is $\nu/2 \sum_{i=0}^{n+1} |\ell_i| \|\mathbf{x}_i-\mathbf{u}\|^2$. Thus the sum of the inequalities in \eqref{phase1 set of inequalities} is \eqref{eq:phase1 u} when $\hat{f}(\mathbf{x})-f(\mathbf{x}) \ge 0$. If the inequalities in \eqref{phase1 set of inequalities} have their left-hand sides multiplied by $-1$, they would still hold according to \eqref{eq:Lipschitz quadratic}, and their summation would be \eqref{eq:phase1 u} for the $\hat{f}(\mathbf{x})-f(\mathbf{x}) < 0$ case. \end{proof}
The existing bound from \cite{ciarlet1972general} is similar to \eqref{eq:phase1 u} but has $\mathbf{u}$ fixed to $\mathbf{x}$. In comparison, the new bound provides more convenience in analyzing DFO algorithms that use trust region methods, since the free point $\mathbf{u}$ can be set to the center of the trust region. Another advantage of the new bound is that it can be minimized with respect to $\mathbf{u}$, especially considering the right-hand side of \eqref{eq:phase1 u} is a convex function of $\mathbf{u}$ defined on $\R^n$. This results in the improved bound \eqref{eq:phase1}. \begin{corollary} \label{cor:phase1} Under the setting of Theorem~\ref{thm:phase1}, the function approximation error of $\hat{f}$ at any $\mathbf{x}\in\R^n$ is bounded as \begin{equation} \label{eq:phase1}
|\hat{f}(\mathbf{x}) - f(\mathbf{x})| \le \frac{\nu}{2} \left(\|\mathbf{x}-\mathbf{w}\|^2 + \sum_{i=1}^{n+1} |\ell_i| \|\mathbf{x}_i-\mathbf{w}\|^2\right), \end{equation} where
\[ \mathbf{w} = \frac{\mathbf{x} + \sum_{i=1}^{n+1} |\ell_i| \mathbf{x}_i}{1 + \sum_{i=1}^{n+1} |\ell_i|}. \] \end{corollary}
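The bound \eqref{eq:phase1} is straightforward to evaluate; the sketch below (ours, with arbitrary data) computes its right-hand side for given $\Theta$, $\mathbf{x}$, and $\nu$.
\begin{verbatim}
# A minimal sketch: evaluate the right-hand side of the improved bound
# with the minimizing point w. Theta, x and nu below are arbitrary.
import numpy as np

def phase1_bound(Theta, x, nu):
    A = np.vstack([np.ones(len(Theta)), Theta.T])
    ell = np.linalg.solve(A, np.concatenate([[1.0], x]))   # ell_1,...,ell_{n+1}
    w = (x + np.abs(ell) @ Theta) / (1 + np.abs(ell).sum())
    return nu / 2 * (np.sum((x - w) ** 2)
                     + np.abs(ell) @ np.sum((Theta - w) ** 2, axis=1))

Theta = np.array([[-0.3, 1.0], [-1.1, -0.5], [1.0, 0.0]])
print(phase1_bound(Theta, x=np.array([0.0, 0.0]), nu=1.0))
\end{verbatim}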
To check the sharpness of the bound \eqref{eq:phase1}, we compare it against the optimal value of \eqref{prob:f-D} numerically. The comparison shows that \eqref{eq:phase1} is sharp if and only if $\mathbf{x}$ is located in $\conv(\Theta)$ or in one of the cones \begin{equation} \label{eq:cone}
\left\{\mathbf{x}_i + \sum_{j=1}^{n+1} \alpha_j(\mathbf{x}_i - \mathbf{x}_j):~ \alpha_j \ge 0 \text{ for all } j = 1,2,\dots,n+1 \right\} \end{equation} for some $i\in\{1,\dots,n+1\}$. We illustrate the geometric meaning of this observation in Figure~\ref{fig:phase1}, which shows the three sets of areas in which $\mathbf{x}$ can be located relative to the sample set $\Theta$ from Figure~\ref{fig:numerical}. Figure~\ref{fig:phase1 hull} shows the convex hull of $\Theta$, and Figure~\ref{fig:phase1 negative} shows the cones. In all the remaining areas, as shown in Figure~\ref{fig:phase1 not covered}, the bound \eqref{eq:phase1} is observed to be smaller than the solution of \eqref{prob:f-D}. Additionally, we want to mention that these areas can also be classified using the signs of the values of the Lagrange functions at $\mathbf{x}$. The point $\mathbf{x} \in \conv(\Theta)$ if and only if $\ell_i \ge 0$ for all $i=1,\dots,n+1$; and $\mathbf{x}$ is in the cone \eqref{eq:cone} if and only if $\ell_i$ is the only positive one among $\{\ell_i\}_{i=1}^{n+1}$. \begin{figure}\label{fig:phase1 hull}
\label{fig:phase1 negative}
\label{fig:phase1 not covered}
\label{fig:phase1}
\end{figure}
When $f \in C^{1,1}_\nu(\R^n)$, the proof of Theorem 3.1 in \cite{waldron1998error} essentially shows that \begin{equation} \label{eq:Waldron}
|\hat{f}(\mathbf{x}) - f(\mathbf{x})| \le \frac{\nu}{2} \left(\sum_{i=1}^{n+1} \ell_i \|\mathbf{x}_i\|^2 - \|\mathbf{x}\|^2\right), \end{equation}
holds for all $\mathbf{x} \in \conv(\Theta)$ and is a sharp upper bound, as linear interpolation makes an error equal to this upper bound when approximating the quadratic function $f(\mathbf{u}) = \nu \|\mathbf{u}\|^2/2$. We show in Theorem~\ref{thm:phase1 convex hull} that \eqref{eq:phase1} is indeed the same as \eqref{eq:Waldron} in this case.
\begin{theorem} \label{thm:phase1 convex hull} When $\mathbf{x} \in \conv(\Theta)$, the bound \eqref{eq:phase1} has $\mathbf{w}=\mathbf{x}$ and is identical to \eqref{eq:Waldron}. \end{theorem} \begin{proof} This theorem is a direct result of the properties of the Lagrange functions \eqref{eq:Lagrange 0} and \eqref{eq:Lagrange Y}. When $\mathbf{x} \in \conv(\Theta)$, we have $|\ell_i| = \ell_i$ for all $i=1,\dots,n+1$, so \eqref{eq:Lagrange 0} and \eqref{eq:Lagrange Y} give $\sum_{i=1}^{n+1}|\ell_i| = 1$ and $\sum_{i=1}^{n+1}|\ell_i|\mathbf{x}_i = \mathbf{x}$, and hence $\mathbf{w} = (\mathbf{x}+\mathbf{x})/2 = \mathbf{x}$. Substituting $\mathbf{w}=\mathbf{x}$ into \eqref{eq:phase1} and expanding $\|\mathbf{x}_i-\mathbf{x}\|^2$ with the same two identities yields \eqref{eq:Waldron}.
\end{proof}
In Theorem~\ref{thm:phase1 negative}, we verify mathematically that the improved bound \eqref{eq:phase1} is sharp for linear extrapolation when $\mathbf{x}$ is in one of the cones indicated by \eqref{eq:cone} and depicted in Figure~\ref{fig:phase1 negative}. \begin{theorem} \label{thm:phase1 negative} Assume the sample points are ordered such that $\ell_1\ge\ell_2\ge\cdots\ge\ell_{n+1}$ and that $\ell_1$ is the only positive one; then the bound \eqref{eq:phase1} is sharp with $\mathbf{w}=\mathbf{x}_1$. \end{theorem} \begin{proof} Since $\ell_i \le 0$ for all $i=0,2,3,\dots,n+1$, \[ \mathbf{w} = \frac{2\ell_1 \mathbf{x}_1 - \sum_{i=0}^{n+1} \ell_i \mathbf{x}_i}{2\ell_1 -\sum_{i=0}^{n+1} \ell_i}
\stackrel{\eqref{eq:Lagrange 0}\eqref{eq:Lagrange Y}}{=} \frac{2\ell_1 \mathbf{x}_1}{2\ell_1}
= \mathbf{x}_1. \] The bound \eqref{eq:phase1} equals $\nu/2$ times \[ \begin{aligned}
\sum_{i=0}^{n+1} |\ell_i| \|\mathbf{x}_i-\mathbf{w}\|^2
&= - \sum_{i=0}^{n+1} \ell_i \|\mathbf{x}_i-\mathbf{x}_1\|^2
= \text{Tr}\left( - \sum_{i=0}^{n+1} \ell_i [\mathbf{x}_i-\mathbf{x}_1][\mathbf{x}_i-\mathbf{x}_1]^T \right) \\
&\leftstackrel{\eqref{eq:G recenter}}{=} \text{Tr}\left( - \sum_{i=0}^{n+1} \ell_i \mathbf{x}_i\mathbf{x}_i^T \right)
=-\sum_{i=0}^{n+1} \ell_i \|\mathbf{x}_i\|^2. \end{aligned} \]
Consider the function $f(\mathbf{u}) = -\frac{\nu}{2} \|\mathbf{u}\|^2 \stackrel{\eqref{eq:Lipschitz Hessian}}{\in} C^{1,1}_\nu(\R^n)$. We have \[\hat{f}(\mathbf{x}) - f(\mathbf{x}) \stackrel{\eqref{eq:Lagrange m}}{=} \sum_{i=0}^{n+1} \ell_i f(\mathbf{x}_i)
= -\sum_{i=0}^{n+1} \ell_i \frac{\nu}{2}\|\mathbf{x}_i\|^2, \] which matches \eqref{eq:phase1}. \end{proof}
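The sharpness statement can also be checked numerically. The Python sketch below (our own illustration; the simplex and the evaluation point are arbitrary choices, as long as $\ell_1$ is the only positive Lagrange value) compares the interpolation error of $f(\mathbf{u})=-\frac{\nu}{2}\|\mathbf{u}\|^2$ with the bound \eqref{eq:phase1}; the two printed values coincide up to rounding.
\begin{verbatim}
import numpy as np

nu = 2.0
X = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])   # rows: x_1, x_2, x_3
x = np.array([-0.5, -0.5])                            # lies in the cone at x_1

A = np.vstack([np.ones(3), X.T])
ell = np.linalg.solve(A, np.concatenate([[1.0], x])) # Lagrange values at x
assert ell[0] > 0 and np.all(ell[1:] < 0)

f = lambda u: -0.5 * nu * np.sum(u ** 2)
error = ell @ np.array([f(p) for p in X]) - f(x)      # \hat f(x) - f(x)

w = (x + np.abs(ell) @ X) / (1.0 + np.abs(ell).sum())
bound = 0.5 * nu * (np.sum((x - w) ** 2)
                    + np.abs(ell) @ np.sum((X - w) ** 2, axis=1))
print(error, bound)
\end{verbatim}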
\section{The Worst Quadratic Function} \label{sec:phase2} We have derived an improved error bound in the previous section and showed when it is sharp. In this section, we try to find a mathematical formula for the piecewise smooth function in the remaining areas indicated in Figure~\ref{fig:phase1 not covered}. Instead of attempting to improve another existing upper bound, we take the opposite approach and try to find the function that achieves the maximum error. Since quadratic functions are easier to analyze, as they share a general closed-form formula, and since, under the settings of both Theorem~\ref{thm:phase1 convex hull} and Theorem~\ref{thm:phase1 negative}, the optimal set of \eqref{prob:D} contains at least one quadratic function, we investigate whether \eqref{prob:D} has an analytical solution when $f$ is restricted to be quadratic.
Let $f$ be a quadratic function of the form $f(\mathbf{u}) = c + \mathbf{g} \cdot \mathbf{u} + \frac{1}{2} H\mathbf{u} \cdot \mathbf{u}$ with $c\in\R, \mathbf{g}\in\R^n$, and symmetric $H \in \R^{n \times n}$. Because of \eqref{eq:Lipschitz Hessian} and \[ \begin{aligned} \hat{f}(\mathbf{x}) - f(\mathbf{x}) ~ &\leftstackrel{\eqref{eq:Lagrange m}}{=} \sum_{i=0}^{n+1} \ell_i f(\mathbf{x}_i) = \sum_{i=0}^{n+1} \ell_i \left[c + \mathbf{g} \cdot \mathbf{x}_i + \frac{1}{2} H\mathbf{x}_i \cdot \mathbf{x}_i \right] \\ &\leftstackrel{\eqref{eq:Lagrange Y}}{=} \sum_{i=0}^{n+1} \ell_i \left[c + \frac{1}{2} H\mathbf{x}_i \cdot \mathbf{x}_i \right] \stackrel{\eqref{eq:Lagrange 0}}{=} \sum_{i=0}^{n+1} \ell_i \left[\frac{1}{2} H\mathbf{x}_i \cdot \mathbf{x}_i \right] \\ &= \frac{1}{2} H \cdot \sum_{i=0}^{n+1} \ell_i \mathbf{x}_i \mathbf{x}_i^T \stackrel{\eqref{eq:G}}{=} \frac{1}{2} G \cdot H, \end{aligned} \] the problem of maximizing linear interpolation's approximation error over quadratic functions in $C_\nu^{1,1}(\R^n)$ can be formulated as \begin{equation} \label{prob:quadratic} \everymath{\displaystyle} \begin{array}{ll}
\max_H &G \cdot H / 2 \\
\text{s.t.} &-\nu I \preceq H \preceq \nu I. \end{array} \end{equation} The absolute value in the objective function is again dropped due to symmetry.
It turns out the problem \eqref{prob:quadratic} can be solved analytically. Since $G$ is real and symmetric, it has an eigendecomposition $G = P \Lambda P^T$, where $\Lambda \in \R^{n \times n}$ is the diagonal matrix of the eigenvalues $\lambda_1,\dots,\lambda_n$, and $P \in \R^{n \times n}$ is the orthogonal matrix whose columns are the corresponding eigenvectors. The objective function satisfies $G\cdot H/2 = (P\Lambda P^T)\cdot H/2 = \Lambda \cdot (P^T H P)/2$. Since $P$ is orthogonal, the constraint in \eqref{prob:quadratic} is equivalent to $-\nu I \preceq P^T H P \preceq \nu I$, which implies that all diagonal elements of $P^T H P$ are bounded between $-\nu$ and $\nu$. Since $\Lambda$ is diagonal, only the diagonal elements of $P^T H P$ affect the objective function value. Therefore, a solution to \eqref{prob:quadratic}, denoted by $H^\star$, has the property $P^T H^\star P = \nu \text{sign}(\Lambda)$. This optimal solution is \begin{equation} \label{eq:Hstar}
H^\star = \nu P \text{sign}(\Lambda) P^T. \end{equation}
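The construction of $H^\star$ translates directly into a short computation. The Python sketch below (our own illustration; the function name and the random test matrix are arbitrary) computes $H^\star$ from the eigendecomposition of $G$ and evaluates the resulting error $G\cdot H^\star/2$.
\begin{verbatim}
import numpy as np

def worst_quadratic_bound(G, nu):
    # Solve (prob:quadratic) via (eq:Hstar): H* = nu * P sign(Lambda) P^T.
    # Returns (H_star, bound) with bound = G . H* / 2 = nu/2 * sum |lambda_i|.
    lam, P = np.linalg.eigh(G)                  # eigendecomposition of G
    H_star = nu * P @ np.diag(np.sign(lam)) @ P.T
    bound = 0.5 * np.sum(G * H_star)            # Frobenius inner product / 2
    return H_star, bound

# sanity check on a random symmetric matrix
rng = np.random.default_rng(0)
A = rng.standard_normal((3, 3)); G = (A + A.T) / 2
H_star, bound = worst_quadratic_bound(G, nu=1.5)
print(np.isclose(bound, 0.75 * np.abs(np.linalg.eigvalsh(G)).sum()))
\end{verbatim}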
Solution \eqref{eq:Hstar} yields the maximum approximation error over quadratic functions, namely \begin{equation} \label{eq:phase2}
G \cdot H^\star/2 = \frac{\nu}{2} \sum_{i=1}^n |\lambda_i|. \end{equation} We again compare this new bound to the optimal value of \eqref{prob:f-D} numerically. Our results show these two are exactly the same in all three cases in Figure~\ref{fig:phase1}, and \eqref{eq:phase2} is a formula for the piecewise smooth function in Figure~\ref{fig:numerical}. However, this does not mean \eqref{eq:phase2} is a formula for the optimal value of \eqref{prob:f-D} in general. For example, for bivariate linear interpolation, it is observed that when the triangle $\conv(\Theta)$ is obtuse and $\mathbf{x}$ lies in one of the four shaded areas indicated in Figure~\ref{fig:phase2}, the optimal value of \eqref{prob:f-D} is larger than \eqref{eq:phase2}. These shaded areas are open subsets of $\R^2$ and do not include their boundaries. From left to right, they can be described as \begin{itemize}
\item $\ell_1 [\mathbf{x}_2-\mathbf{x}_1]\cdot[\mathbf{x}_3-\mathbf{x}_1] - \ell_2 [\mathbf{x}_3-\mathbf{x}_2]\cdot[\mathbf{x}_1-\mathbf{x}_2] > 0$ and $\ell_2>0$;
\item $\ell_1 [\mathbf{x}_2-\mathbf{x}_1]\cdot[\mathbf{x}_3-\mathbf{x}_1] - \ell_2 [\mathbf{x}_3-\mathbf{x}_2]\cdot[\mathbf{x}_1-\mathbf{x}_2] < 0$, $\ell_3>0$, and $\ell_2<0$;
\item $\ell_1 [\mathbf{x}_2-\mathbf{x}_1]\cdot[\mathbf{x}_3-\mathbf{x}_1] - \ell_3[\mathbf{x}_2-\mathbf{x}_3]\cdot[\mathbf{x}_1-\mathbf{x}_3] < 0$, $\ell_2>0$, and $\ell_3<0$;
\item $\ell_1 [\mathbf{x}_2-\mathbf{x}_1]\cdot[\mathbf{x}_3-\mathbf{x}_1] - \ell_3 [\mathbf{x}_2-\mathbf{x}_3]\cdot[\mathbf{x}_1-\mathbf{x}_3] > 0$ and $\ell_3>0$. \end{itemize} In the remaining parts of this section, we will investigate analytically when \eqref{eq:phase2} is the sharp error bound.
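The four conditions above translate directly into a membership test. The following Python helper (our own transcription of the inequalities, not code used in the paper) checks whether a point with Lagrange values $(\ell_1,\ell_2,\ell_3)$ lies in one of the shaded areas.
\begin{verbatim}
import numpy as np

def in_shaded_area(x1, x2, x3, ell):
    # ell = (ell_1, ell_2, ell_3): Lagrange values of x for the triangle.
    # The four tests transcribe the open-area inequalities listed above.
    l1, l2, l3 = ell
    a  = l1 * np.dot(x2 - x1, x3 - x1)
    b2 = l2 * np.dot(x3 - x2, x1 - x2)
    b3 = l3 * np.dot(x2 - x3, x1 - x3)
    return ((a - b2 > 0 and l2 > 0) or
            (a - b2 < 0 and l3 > 0 and l2 < 0) or
            (a - b3 < 0 and l2 > 0 and l3 < 0) or
            (a - b3 > 0 and l3 > 0))

print(in_shaded_area(np.array([0.0, 0.0]), np.array([1.0, 0.2]),
                     np.array([-1.0, 0.2]), (0.9, 0.2, -0.1)))
\end{verbatim}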
\begin{figure}
\caption{The areas such that, if $\mathbf{x}$ lies in one of them, \eqref{eq:phase2} is not an upper bound on the function approximation error for bivariate linear interpolation.
The dashed line on the left is perpendicular to the line going through $\mathbf{x}_1$ and $\mathbf{x}_2$; and the one on the right is perpendicular to the line going through $\mathbf{x}_3$ and $\mathbf{x}_1$. }
\label{fig:phase2}
\end{figure}
\subsection{Certification of Upper Bound} The maximum error \eqref{eq:phase2} provides a lower bound on the optimal value of \eqref{prob:D}, while \eqref{eq:phase1} provides an upper bound. By evaluating both \eqref{eq:phase1} and \eqref{eq:phase2}, one can obtain a reasonable estimate of the sharp error bound without having to solve the QCQP \eqref{prob:f-D}. However, the formula \eqref{eq:phase2} would be much more useful if there were an efficient way to check whether $\mathbf{x}$ is in one of those areas where \eqref{eq:phase2} is not an upper bound on the approximation error.
The existence of these areas appears to be influenced by the presence of obtuse angles at the vertices of the simplex $\conv(\Theta)$. Unlike triangles, which can have at most one obtuse angle, simplices in higher dimensions can have obtuse angles in many ways. They can have $(\mathbf{x}_j-\mathbf{x}_i)\cdot(\mathbf{x}_k-\mathbf{x}_i) < 0$ at multiple vertices $\mathbf{x}_i$ and, at the same time, for multiple pairs $(j,k)$ at each such $\mathbf{x}_i$. While there can only be up to four disconnected subsets of $\R^2$ where \eqref{eq:phase2} is not an upper bound on the approximation error, our numerical experiments show this number can reach at least twenty for trivariate ($n=3$) linear interpolation. Considering that a precise description of the four shaded areas in Figure~\ref{fig:phase2} already requires four unintuitive inequalities or some wordy explanation, any description of these areas would almost certainly be extremely complicated, especially in higher dimensions.
Regardless, we have found an efficient way to check whether $\mathbf{x}$ is in one of these areas without having to describe any of them. The theoretical proof that validates our approach is extremely technical and will be presented later in section~\ref{sec:phase2 proofs}. Our approach relies on a set of parameters $\{\mu_{ij}\}_{(i,j)\in\cI_+\times\cI_-}$ that can be computed as follows. Recall that $\Theta$ is assumed to be ordered so that $\ell_1 \ge \ell_2 \ge \cdots \ge \ell_{n+1}$, and let $\diag(\ell) \in \R^{(n+1)\times(n+1)}$ be the diagonal matrix containing $\ell_1, \dots, \ell_{n+1}$. We now partition $\diag(\ell), G$, and $H^\star$ with respect to $\cI_+$ and $\cI_-$.
Let $\diag(\ell_+)\in\R^{|\cI_+|\times|\cI_+|}$ be the diagonal matrix containing $\{\ell_i\}_{i\in\cI_+}$, and $\diag(\ell_-) \in \R^{(|\cI_-|-1)\times(|\cI_-|-1)}$ be the diagonal matrix containing $\{\ell_i\}_{i\in\cI_-\setminus\{0\}}$.
Let $Y_+ \in \R^{|\cI_+| \times n}$ and $Y_- \in \R^{(|\cI_-|-1) \times n}$ be the first $|\cI_+|$ and the last $|\cI_-|-1$ rows of $Y$, respectively.
The matrix $G$ has $|\cI_+|-1$ positive eigenvalues and $|\cI_-|-1$ negative eigenvalues, as will be proved later.
Let $\Lambda_+ \in \R^{(|\cI_+|-1) \times (|\cI_+|-1)}$ and $\Lambda_- \in \R^{(|\cI_-|-1) \times (|\cI_-|-1)}$ respectively be the diagonal matrices that contain the positive and negative eigenvalues of $G$, and $P_+ \in \R^{n \times (|\cI_+|-1)}$ and $P_- \in \R^{n \times (|\cI_-|-1)}$ their corresponding eigenvector matrices. Then we have \begin{equation} \label{eq:G+-} \begin{aligned} G~ &\leftstackrel{\eqref{eq:G recenter}}{=} Y^T \diag(\ell) Y = Y_+^T \diag(\ell_+) Y_+ + Y_-^T \diag(\ell_-) Y_- \\ &= P \Lambda P^T = P_+ \Lambda_+ P_+^T + P_- \Lambda_- P_-^T \end{aligned} \end{equation} and \begin{equation} \label{eq:Hstar+-}
H^\star = \nu P \text{sign}(\Lambda) P^T
= \nu (P_+ P_+^T - P_- P_-^T). \end{equation}
We now present the definition of $\{\mu_{ij}\}_{(i,j)\in\cI_+\times\cI_-}$ and the main theorem of this section.
\begin{theorem} \label{thm:phase2}
Consider the matrix $M \stackrel{\rm def}{=} \diag(\ell_+) Y_+ P_- (Y_- P_-)^{-1}$.
Let $\mu_{ij} = \mathbf{e}_i^T M \mathbf{e}_{j - n-2+|\cI_-|}$ for all $i\in\cI_+$ and $j\in\cI_-\setminus\{0\}$, and $\mu_{i0} = \ell_i - \sum_{j \in \cI_-\setminus\{0\}} \mu_{ij}$ for all $i\in\cI_+$.
Assume $f \in C^{1,1}_\nu(\R^n)$.
If $\mu_{ij} \ge 0$ for all $(i,j) \in \cI_+\times\cI_-$, then \eqref{eq:phase2} is a sharp upper bound on the function approximation error $|\hat{f}(\mathbf{x}) - f(\mathbf{x})|$ for linear interpolation.
\end{theorem}
\begin{remark}
We note that $\{j-n-2+|\cI_-|\}_{j\in\cI_-\setminus\{0\}} = \{1,2,\dots,|\cI_-|-1\}$.
The matrix $M$ is of size $|\cI_+| \times (|\cI_-|-1)$.
Each of its rows corresponds to a sample point with a positive Lagrange function value at $\mathbf{x}$, while each of its columns corresponds to a sample point with a negative Lagrange function value at $\mathbf{x}$. \end{remark}
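As an illustration of how this certificate can be computed, the Python sketch below (our own implementation choices: it recomputes $G$ via \eqref{eq:G recenter} and selects rows and columns with boolean masks rather than explicit index shifts) assembles $M$, the parameters $\{\mu_{ij}\}$ and $\{\mu_{i0}\}$, and checks their signs.
\begin{verbatim}
import numpy as np

def mu_parameters(X, x):
    # X: (n+1, n) sample points, x: evaluation point.  Illustrative sketch.
    m, _ = X.shape
    A = np.vstack([np.ones(m), X.T])
    ell = np.linalg.solve(A, np.concatenate([[1.0], x]))  # Lagrange values
    order = np.argsort(-ell)                   # ell_1 >= ... >= ell_{n+1}
    ell, X = ell[order], X[order]
    Y = X - x                                  # rows: x_i - x
    G = Y.T @ (ell[:, None] * Y)               # (eq:G recenter)
    lam, P = np.linalg.eigh(G)
    P_minus = P[:, lam < 0]                    # negative-eigenvalue part
    pos = ell > 0
    Yp, Ym = Y[pos], Y[~pos]
    M = (ell[pos, None] * Yp) @ P_minus @ np.linalg.inv(Ym @ P_minus)
    mu0 = ell[pos] - M.sum(axis=1)
    return M, mu0

X = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
M, mu0 = mu_parameters(X, np.array([0.8, 0.8]))
print(np.all(M >= 0) and np.all(mu0 >= 0))   # certificate of Theorem thm:phase2
\end{verbatim}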
\subsection{Technical Proofs} \label{sec:phase2 proofs} In the remainder of this section, we provide the complete proof of Theorem~\ref{thm:phase2}. We start with the numbers of positive and negative eigenvalues of the matrix $G$.
\begin{lemma} \label{lem:Sylvester}
The numbers of positive and negative eigenvalues in $G$ are $|\cI_+| - 1$ and $|\cI_-|-1$, respectively. \end{lemma} \begin{proof} Let $\diag(\ell)$ be the diagonal matrix containing $\ell_1, \dots, \ell_{n+1}$. Consider the matrix $\bar{G} = \sum_{i=1}^{n+1} \ell_i \phi(\mathbf{x}_i-\mathbf{x}) \phi(\mathbf{x}_i-\mathbf{x})^T = \Phi^T \diag(\ell) \Phi$. The first element of the first column is $\sum_{i=1}^{n+1} \ell_i \stackrel{\eqref{eq:Lagrange 0}}{=} 1$, while the rest of the column is $\sum_{i=1}^{n+1} \ell_i [\mathbf{x}_i-\mathbf{x}] \stackrel{\eqref{eq:Lagrange Y}}{=} \mathbf{x} - \sum_{i=1}^{n+1} \ell_i \mathbf{x} \stackrel{\eqref{eq:Lagrange 0}}{=} \mathbf{0}$. The bottom-right $n\times n$ submatrix of $\bar{G}$ is \[ \sum_{i=1}^{n+1} \ell_i [\mathbf{x}_i-\mathbf{x}][\mathbf{x}_i-\mathbf{x}]^T
= \sum_{i=0}^{n+1} \ell_i [\mathbf{x}_i-\mathbf{x}][\mathbf{x}_i-\mathbf{x}]^T
\stackrel{\eqref{eq:G recenter}}{=} G. \] Thus, $\bar{G}$ and its eigendecomposition should be \[ \bar{G} = \begin{bmatrix} 1 &\mathbf{0}^T\\ \mathbf{0} &G \end{bmatrix} = \begin{bmatrix} 1 &\mathbf{0}^T\\ \mathbf{0} &P \end{bmatrix} \begin{bmatrix} 1 &\mathbf{0}^T\\ \mathbf{0} &\Lambda \end{bmatrix} \begin{bmatrix} 1 &\mathbf{0}^T\\ \mathbf{0} &P^T \end{bmatrix}. \] Then we have \[ \bar\Lambda \stackrel{\rm def}{=} \begin{bmatrix} 1 &\mathbf{0}^T\\ \mathbf{0} &\Lambda \end{bmatrix} = \begin{bmatrix} 1 &\mathbf{0}^T\\ \mathbf{0} &P^T \end{bmatrix} \Phi^T \diag(\ell) \Phi \begin{bmatrix} 1 &\mathbf{0}^T\\ \mathbf{0} &P \end{bmatrix}, \] which shows $\bar\Lambda$ is congruent to $\diag(\ell)$.
Then by Sylvester's law of inertia \cite{sylvester1852xix} (or Theorem 4.5.8 of \cite{horn2012matrix}), the numbers of positive and negative eigenvalues in $\bar\Lambda$ are $|\cI_+|$ and $|\cI_-|-1$, respectively. Since $\bar{G}$ has the same eigenvalues as $G$ except for one additional eigenvalue equal to $1$, the lemma is proven. \end{proof}
The next lemma shows that $\{\mu_{ij}\}_{(i,j)\in\cI_+\times\cI_-}$ is well-defined by proving the invertibility of $Y_- P_-$. \begin{lemma} \label{lem:invertible}
The matrix $Y_- P_-$ is invertible. \end{lemma} \begin{proof} For the purpose of contradiction, assume $Y_- P_-$ is singular.
That means there is a non-zero vector $\mathbf{u} \in \R^{|\cI_-|-1}$ such that $Y_- P_- \mathbf{u} = \mathbf{0}$. Let $\mathbf{v} = P_- \mathbf{u}$. We have $Y_- \mathbf{v} = \mathbf{0}$, $P_+^T \mathbf{v} = P_+^T P_- \mathbf{u} = \mathbf{0}$ and $P_-^T \mathbf{v} = P_-^T P_- \mathbf{u} = \mathbf{u}$. Then we have the contradiction \[ \begin{aligned} \mathbf{v}^T G \mathbf{v} &= (Y_+\mathbf{v})^T \diag(\ell_+) Y_+\mathbf{v} + (Y_-\mathbf{v})^T \diag(\ell_-) Y_-\mathbf{v} = (Y_+\mathbf{v})^T \diag(\ell_+) Y_+\mathbf{v} \ge 0 \\ \mathbf{v}^T G \mathbf{v} &= (P_+^T \mathbf{v})^T \Lambda_+ P_+^T \mathbf{v} + (P_-^T \mathbf{v})^T \Lambda_- P_-^T \mathbf{v} = (P_-^T \mathbf{v})^T \Lambda_- P_-^T \mathbf{v} = \mathbf{u}^T \Lambda_- \mathbf{u} < 0, \end{aligned} \] where the final strict inequality holds because $\mathbf{u}\neq\mathbf{0}$ and $\Lambda_- \prec 0$. \end{proof}
We develop in the following lemma the essential properties of $\{\mu_{ij}\}$. \begin{lemma} \label{lem:mu} The following properties hold: \begin{align}
\sum_{j \in \cI_-} \mu_{ij} &= \ell_i &&\text{for all } i \in \cI_+, \label{eq:mu0 +}\\
\sum_{i \in \cI_+} \mu_{ij} &= - \ell_j &&\text{for all } j \in \cI_-, \label{eq:mu0 -}\\
(\nu I-H^\star) \sum_{j \in \cI_-} \mu_{ij} \mathbf{x}_j &= (\nu I-H^\star) \ell_i \mathbf{x}_i &&\text{for all } i \in \cI_+, \label{eq:mu1 +}\\
(\nu I+H^\star) \sum_{i \in \cI_+} \mu_{ij}\mathbf{x}_i &= -(\nu I+H^\star) \ell_j \mathbf{x}_j &&\text{for all } j \in \cI_-. \label{eq:mu1 -} \end{align} \end{lemma} \begin{proof} The equations \eqref{eq:mu0 +} are true by their definition. Since \[ \begin{aligned}
\diag(\ell_-) \mathbf{1} + M^T \mathbf{1}
&= \diag(\ell_-) \mathbf{1} + (P_-^T Y_-^T)^{-1} P_-^T Y_+^T \diag(\ell_+) \mathbf{1} \\
&= (P_-^T Y_-^T)^{-1} P_-^T [Y_-^T \diag(\ell_-) \mathbf{1} + Y_+^T \diag(\ell_+) \mathbf{1}]
\stackrel{\eqref{eq:Lagrange Y}}{=} \mathbf{0}, \end{aligned} \] the equations \eqref{eq:mu0 -} are also true. Notice $P_-^T (Y_-^T M^T - Y_+^T \diag(\ell_+)) = \mathbf{0}$ by the definition of $M$, and $\nu I - H^\star \stackrel{\eqref{eq:Hstar+-}}{=} \nu (P_+P_+^T + P_-P_-^T) - \nu (P_+P_+^T - P_-P_-^T) = 2\nu P_-P_-^T$. Following these two equations, we have for all $i \in \cI_+$, \[ \begin{aligned}
(\nu I-H^\star) \left[\sum_{j \in \cI_-} \mu_{ij} \mathbf{x}_j - \ell_i \mathbf{x}_i\right]
&\leftstackrel{\eqref{eq:mu0 +}}{=} (\nu I-H^\star) \left[\sum_{j \in \cI_-} \mu_{ij} (\mathbf{x}_j-\mathbf{x}) - \ell_i [\mathbf{x}_i-\mathbf{x}]\right] \\
&= (\nu I-H^\star) (Y_-^T M^T - Y_+^T \diag(\ell_+)) \mathbf{e}_i \\
&= 2\nu P_- P_-^T (Y_-^T M^T - Y_+^T \diag(\ell_+)) \mathbf{e}_i \\
&= 2\nu P_- \mathbf{0} \mathbf{e}_i = \mathbf{0}, \end{aligned} \] which proves \eqref{eq:mu1 +}. To prove \eqref{eq:mu1 -}, we use $G$ and its eigendecomposition. The diagonal matrix of the eigenvalues $\Lambda$ is \[ \begin{bmatrix} \Lambda_- &\mathbf{0}\\ \mathbf{0} &\Lambda_+ \end{bmatrix}
= \begin{bmatrix} P_-^T Y_-^T &P_-^T Y_+^T\\ P_+^T Y_-^T &P_+^T Y_+^T \end{bmatrix}
\begin{bmatrix} \diag(\ell_-) &~\\ ~ &\diag(\ell_+) \end{bmatrix}
\begin{bmatrix} Y_- P_- &Y_- P_+\\ Y_+ P_- &Y_+ P_+ \end{bmatrix}, \] which contains two equivalent block equalities with zero left-hand side. They are $P_+^T Y_-^T \diag(\ell_-) Y_- P_- + P_+^T Y_+^T \diag(\ell_+) Y_+ P_- = \mathbf{0}$, so \[ P_+^T Y_-^T \diag(\ell_-) + P_+^T Y_+^T \diag(\ell_+) Y_+ P_- (Y_- P_-)^{-1} = P_+^T Y_-^T \diag(\ell_-) + P_+^T Y_+^T M = \mathbf{0}. \] Then with $\nu I+H^\star \stackrel{\eqref{eq:Hstar+-}}{=} \nu(P_+P_+^T + P_-P_-^T) + \nu(P_+P_+^T - P_-P_-^T) = 2\nu P_+ P_+^T$, we obtain \[ (\nu I + H^\star) (Y_-^T \diag(\ell_-) + Y_+^T M)
= 2\nu P_+ P_+^T (Y_-^T \diag(\ell_-) + Y_+^T M)
= 2\nu P_+ \mathbf{0} = \mathbf{0}, \] which proves \eqref{eq:mu1 -} for all $j \in \cI_- \setminus \{0\}$; and \[ \begin{aligned}
(\nu I+H^\star) \left( \ell_0 \mathbf{x} + \sum_{i\in\cI_+} \mu_{i0} \mathbf{x}_i \right)
&\leftstackrel{\eqref{eq:mu0 -}}{=} 2\nu P_+P_+^T \sum_{i\in\cI_+} \mu_{i0} (\mathbf{x}_i-\mathbf{x}) \\
&= 2\nu P_+P_+^T \sum_{i\in\cI_+} \left(\ell_i - \sum_{j\in\cI_-\setminus\{0\}} \mu_{ij}\right) (\mathbf{x}_i-\mathbf{x}) \\
&= 2\nu P_+P_+^T Y_+^T (\ell_+ - M\mathbf{1}) \\
&= 2\nu P_+P_+^T (Y_+^T \ell_+ + Y_-^T \ell_-)
\stackrel{\eqref{eq:Lagrange Y}}{=} \mathbf{0}, \end{aligned} \] which proves \eqref{eq:mu1 -} for $j = 0$. \end{proof}
The function $\psi$ is defined and proved non-positive in Lemma~\ref{lem:psi}. It will be used to prove Theorem~\ref{thm:phase2} in conjunction with the parameters $\{\mu_{ij}\}$. \begin{lemma} \label{lem:psi} Assume $f \in C^{1,1}_\nu(\R^n)$. For any $\mathbf{u}, \mathbf{v} \in \R^n$ and any matrix $H \in \R^{n \times n}$, we have \begin{equation} \label{eq:Lipscthiz stronger H} \begin{aligned}
\psi(\mathbf{u},\mathbf{v},H) \stackrel{\rm def}{=} &f(\mathbf{u}) - f(\mathbf{v}) - \frac{1}{2\nu} [(\nu I-H)(\mathbf{u}-\mathbf{v})] \cdot Df(\mathbf{u}) \\
&- \frac{1}{2\nu} [(\nu I+H) (\mathbf{u}-\mathbf{v})] \cdot Df(\mathbf{v})
- \frac{1}{4\nu} \|H (\mathbf{u} - \mathbf{v})\|^2 - \frac{\nu}{4} \|\mathbf{u} - \mathbf{v}\|^2 \le 0. \end{aligned} \end{equation} \end{lemma} \begin{proof} For the purpose of contradiction, assume \eqref{eq:Lipscthiz stronger H} is false. Then we have \[ \begin{aligned}
- f(\mathbf{u}) <& - f(\mathbf{v}) - \frac{1}{2\nu} [(\nu I-H) (\mathbf{u}-\mathbf{v})] \cdot Df(\mathbf{u}) \\
&- \frac{1}{2\nu} [(\nu I+H)(\mathbf{u}-\mathbf{v})] \cdot Df(\mathbf{v}) - \frac{1}{4\nu} \|H (\mathbf{u} - \mathbf{v})\|^2 - \frac{\nu}{4} \|\mathbf{u} - \mathbf{v}\|^2. \end{aligned} \] Add this inequality to \eqref{eq:Lipschitz stronger} and we arrive at
\[ \frac{1}{4\nu} \|H (\mathbf{u} - \mathbf{v}) - (Df(\mathbf{u}) - Df(\mathbf{v}))\|^2 < 0, \] which leads to contradiction. \end{proof}
Finally, we prove the main result of this section, Theorem~\ref{thm:phase2}, which states \eqref{eq:phase2} is a sharp bound when $\{\mu_{ij}\}$ are all non-negative.
\begin{proof}[proof of Theorem~\ref{thm:phase2}] We only provide the proof for the case when $\hat{f}(\mathbf{x}) - f(\mathbf{x}) \ge 0$. When $\mu_{ij} \ge 0$ for all $(i,j) \in \cI_+\times\cI_-$, the following inequality holds \begin{equation} \label{eq:phase2 summation}
\sum_{i \in \cI_+} \sum_{j \in \cI_-} \mu_{ij} \psi(\mathbf{x}_i,\mathbf{x}_j,H^\star) \stackrel{\eqref{eq:Lipscthiz stronger H}}{\le} 0. \end{equation} The zeroth-order term in the summation \eqref{eq:phase2 summation} is \[ \begin{aligned}
\sum_{i \in \cI_+} \sum_{j \in \cI_-} \mu_{ij} (f(\mathbf{x}_i) - f(\mathbf{x}_j))
&= \left[\sum_{i \in \cI_+} \sum_{j \in \cI_-} \mu_{ij} f(\mathbf{x}_i)\right] - \left[\sum_{i \in \cI_+} \sum_{j \in \cI_-} \mu_{ij} f(\mathbf{x}_j)\right] \\
&\leftstackrel{\eqref{eq:mu0 +}\eqref{eq:mu0 -}}{=} \left[ \sum_{i \in \cI_+} \ell_i f(\mathbf{x}_i)\right] + \left[\sum_{j \in \cI_-} \ell_j f(\mathbf{x}_j)\right] \\
&\leftstackrel{\eqref{eq:Lagrange m}}{=} \hat{f}(\mathbf{x}) - f(\mathbf{x}). \end{aligned} \] The sum of the first-order terms is $-1/(2\nu)$ times \[ \begin{aligned}
&\sum_{i \in \cI_+} \sum_{j\in\cI_-} \mu_{ij} \big( [(\nu I-H^\star)(\mathbf{x}_i-\mathbf{x}_j)] \cdot Df(\mathbf{x}_i) + [(\nu I+H^\star)(\mathbf{x}_i-\mathbf{x}_j)] \cdot Df(\mathbf{x}_j) \big) \\
&= \left[\sum_{i \in \cI_+} \sum_{j\in\cI_-} \mu_{ij} [(\nu I-H^\star)\mathbf{x}_i] \cdot Df(\mathbf{x}_i) \right]
- \left[\sum_{i \in \cI_+} \sum_{j\in\cI_-} \mu_{ij} [(\nu I+H^\star)\mathbf{x}_j] \cdot Df(\mathbf{x}_j) \right] \\
&\quad -\left[\sum_{i \in \cI_+} \sum_{j\in\cI_-} \mu_{ij} [(\nu I-H^\star)\mathbf{x}_j] \cdot Df(\mathbf{x}_i) \right] + \left[\sum_{i \in \cI_+} \sum_{j\in\cI_-} \mu_{ij} [(\nu I+H^\star)\mathbf{x}_i] \cdot Df(\mathbf{x}_j) \right] \\
&= \left[\sum_{i \in \cI_+} \ell_i [(\nu I-H^\star)\mathbf{x}_i] \cdot Df(\mathbf{x}_i) \right]
+ \left[\sum_{j\in\cI_-} \ell_j [(\nu I+H^\star)\mathbf{x}_j] \cdot Df(\mathbf{x}_j) \right] \\
&\quad -\left[\sum_{i \in \cI_+} \ell_i [(\nu I-H^\star)\mathbf{x}_i] \cdot Df(\mathbf{x}_i) \right] - \left[\sum_{j\in\cI_-} \ell_j [(\nu I+H^\star)\mathbf{x}_j] \cdot Df(\mathbf{x}_j) \right]
= 0, \end{aligned} \] where the second equality holds because of \eqref{eq:mu0 +}, \eqref{eq:mu0 -}, \eqref{eq:mu1 +}, and \eqref{eq:mu1 -}, respectively, for the four terms. Notice $H^{\star T} H^\star = \nu^2I$. The constant term in the summation \eqref{eq:phase2 summation} is $-1/2$ times \[ \begin{aligned}
&\sum_{i \in \cI_+} \sum_{j\in\cI_-} \mu_{ij} \left(\frac{1}{2\nu}\|H^\star(\mathbf{x}_i-\mathbf{x}_j)\|^2 + \frac{\nu}{2} \|\mathbf{x}_i-\mathbf{x}_j\|^2\right) \\
&= \nu \left[ \sum_{i \in \cI_+} \sum_{j\in\cI_-} \mu_{ij} (\mathbf{x}_i-\mathbf{x}_j) \cdot \mathbf{x}_i \right] - \nu \left[ \sum_{i \in \cI_+} \sum_{j\in\cI_-} \mu_{ij} (\mathbf{x}_i-\mathbf{x}_j) \cdot \mathbf{x}_j \right] \\
&\stackrel{\mathmakebox[\widthof{=}]{\scriptsize \begin{array}{c}\eqref{eq:mu0 +}\\\eqref{eq:mu0 -}\end{array}}}{=} \sum_{i \in \cI_+} \nu \left(\ell_i \mathbf{x}_i- \sum_{j\in\cI_-} \mu_{ij} \mathbf{x}_j\right) \cdot \mathbf{x}_i - \sum_{j\in\cI_-} \nu \left(\sum_{i \in \cI_+} \mu_{ij} \mathbf{x}_i + \ell_j \mathbf{x}_j\right) \cdot \mathbf{x}_j \\
&\leftstackrel{\mathmakebox[\widthof{=}]{\scriptsize \begin{array}{c}\eqref{eq:mu1 +}\\ \eqref{eq:mu1 -}\end{array}}}{=} \sum_{i \in \cI_+} \left[H^\star \left(\ell_i \mathbf{x}_i- \sum_{j\in\cI_-} \mu_{ij} \mathbf{x}_j\right)\right] \cdot \mathbf{x}_i + \sum_{j\in\cI_-} \left[H^\star \left(\sum_{i \in \cI_+} \mu_{ij} \mathbf{x}_i + \ell_j \mathbf{x}_j\right)\right] \cdot \mathbf{x}_j \\
&= \left[ \sum_{i \in \cI_+} \ell_i [H^\star \mathbf{x}_i] \cdot \mathbf{x}_i \right] + \left[ \sum_{j\in\cI_-} \ell_j [H^\star \mathbf{x}_j] \cdot \mathbf{x}_j \right]
= G \cdot H^\star. \end{aligned} \] Combining the three groups of terms, the inequality \eqref{eq:phase2 summation} yields \eqref{eq:phase2} when $\hat{f}(\mathbf{x}) - f(\mathbf{x}) \ge 0$. \end{proof}
\section{Sharp Error Bounds for Bivariate Extrapolation} \label{sec:phase3}
We investigate in this section the sharp error bounds when $\mathbf{x}$ is in the four areas shown in Figure~\ref{fig:phase2}. This investigation is not just for the completeness of our analysis of the sharp error bound, but also to understand what type of function can be more difficult for linear interpolation to approximate than quadratic functions.
We first notice that the case where $\mathbf{x}$ is in the shaded triangle on the left in Figure~\ref{fig:phase2} is symmetric to the case where $\mathbf{x}$ is in the triangle on the right, and they are essentially the same. The same argument applies to the two shaded cones. This reduces the cases that need to be studied to the two in Figure~\ref{fig:phase3}. Furthermore, after we obtain a formula for the sharp error bound for the case in Figure~\ref{fig:phase3 triangle}, a formula for the case in Figure~\ref{fig:phase3 cone} can be obtained by switching the roles of $\mathbf{x}$ and $\mathbf{x}_2$ and applying Proposition~\ref{thm:swap}. Therefore, the only case that needs to be studied is the one in Figure~\ref{fig:phase3 triangle}.
\begin{figure}
\caption{Two configurations of $\Theta$ and $\mathbf{x}$ where \eqref{eq:phase2} is an invalid error bound for bivariate extrapolation.}
\label{fig:phase3 triangle}
\label{fig:phase3 cone}
\label{fig:phase3}
\end{figure}
The case in Figure~\ref{fig:phase3 triangle} can be defined mathematically as $\ell_2>0, \ell_3<0$, and $\ell_1[\mathbf{x}_2-\mathbf{x}_1] \cdot [\mathbf{x}_3-\mathbf{x}_1] - \ell_3[\mathbf{x}_2-\mathbf{x}_3] \cdot [\mathbf{x}_1-\mathbf{x}_3] < 0$. The following lemma shows the point $\mathbf{w}$, as defined in \eqref{eq:phase3 w}, is the intersection of the line going through $\mathbf{x}_1$ and $\mathbf{x}_3$ and the line going through $\mathbf{x}$ and $\mathbf{x}_2$.
\begin{lemma} Assume $-\ell_0-\ell_2 \stackrel{\eqref{eq:Lagrange 0}}{=} \ell_1+\ell_3\neq 0$ for some affinely independent $\Theta\subset\R^2$ and $\mathbf{x}\in\R^2$. Let \begin{equation} \label{eq:phase3 w}
\mathbf{w} = \frac{-\ell_0\mathbf{x}+\ell_1\mathbf{x}_1-\ell_2\mathbf{x}_2+\ell_3\mathbf{x}_3}{-\ell_0+\ell_1-\ell_2+\ell_3}. \end{equation} Then \[ \mathbf{w} = \frac{\ell_1\mathbf{x}_1+\ell_3\mathbf{x}_3}{\ell_1+\ell_3} = \frac{\ell_0\mathbf{x}+\ell_2\mathbf{x}_2}{\ell_0+\ell_2}, \] and \begin{subequations} \label{eq:phase3 w y} \begin{align}
\ell_0[\mathbf{x}-\mathbf{w}] + \ell_2[\mathbf{x}_2-\mathbf{w}] &= 0, \\
\ell_1[\mathbf{x}_1-\mathbf{w}] + \ell_3[\mathbf{x}_3-\mathbf{w}] &= 0. \end{align} \end{subequations} \end{lemma} \begin{proof} These equalities are direct results of \eqref{eq:Lagrange 0} and \eqref{eq:Lagrange Y}. \end{proof}
We define in the following lemma an $H^\star$, which is different from the one defined in \eqref{eq:Hstar} and is asymmetric. \begin{lemma} Assume for some affinely independent $\Theta\subset\R^2$ and $\mathbf{x}\in\R^2$ that $\ell_2>0, \ell_3<0$, and $\ell_1[\mathbf{x}_2-\mathbf{x}_1] \cdot [\mathbf{x}_3-\mathbf{x}_1] - \ell_3 [\mathbf{x}_2-\mathbf{x}_3] \cdot [\mathbf{x}_1-\mathbf{x}_3] < 0$. Let \begin{equation} \label{eq:Hstar 3}
H^\star = P \begin{bmatrix} +\nu &0\\ 0 &-\nu \end{bmatrix} P^{-1} \text{ with } P = \begin{bmatrix} \mathbf{x}_2-\mathbf{x} &\mathbf{x}_1-\mathbf{x}_3 \end{bmatrix}. \end{equation} Let $\mathbf{w}$ be defined as \eqref{eq:phase3 w}. Then \begin{equation} \label{eq:phase3 Hstar eigvector} \begin{aligned}
H^\star(\mathbf{x}_i-\mathbf{w}) &= \nu(\mathbf{x}_i-\mathbf{w}) \text{ for } i\in\{0,2\}, \\
H^\star(\mathbf{x}_i-\mathbf{w}) &= -\nu(\mathbf{x}_i-\mathbf{w}) \text{ for } i\in\{1,3\}. \end{aligned} \end{equation} \end{lemma} \begin{proof} It is clear from Figure~\ref{fig:phase3 triangle} that the assumption guarantees the invertibility of $P$ and $-\ell_0-\ell_2 = \ell_1+\ell_3\neq 0$. Notice by the definition of $H^\star$, we have $H^\star(\mathbf{x}_2-\mathbf{x}) = \nu(\mathbf{x}_2-\mathbf{x})$ and $H^\star(\mathbf{x}_1-\mathbf{x}_3) = -\nu(\mathbf{x}_1-\mathbf{x}_3)$. The lemma holds true because $\mathbf{x}_i-\mathbf{w}$ is parallel to $\mathbf{x}_2-\mathbf{x}$ for $i\in\{0,2\}$ and to $\mathbf{x}_1-\mathbf{x}_3$ for $i\in\{1,3\}$. \end{proof}
Now we are ready to show that $G \cdot H^\star/2$, with $H^\star$ defined in \eqref{eq:Hstar 3}, is an upper bound on the function approximation error for the case in Figure~\ref{fig:phase3 triangle}. \begin{theorem} \label{thm:phase3} Assume $f \in C^{1,1}_\nu(\R^2)$. Let $\hat{f}$ be the affine function that interpolates $f$ at any set of three affinely independent vectors $\Theta = \{\mathbf{x}_1,\mathbf{x}_2,\mathbf{x}_3\}\subset \R^2$ such that $(\mathbf{x}_2-\mathbf{x}_1) \cdot (\mathbf{x}_3-\mathbf{x}_1) < 0$. Let $\mathbf{x}$ be any vector in $\R^2$ whose barycentric coordinates satisfy $\ell_2>0, \ell_3<0$, and $\ell_1 [\mathbf{x}_2-\mathbf{x}_1] \cdot [\mathbf{x}_3-\mathbf{x}_1] - \ell_3 [\mathbf{x}_2-\mathbf{x}_3] \cdot [\mathbf{x}_1-\mathbf{x}_3] < 0$. Let $G$ and $H^\star$ be the matrices defined in \eqref{eq:G} and \eqref{eq:Hstar 3}. Then the function approximation error of $\hat{f}$ at $\mathbf{x}$ is bounded as \begin{equation} \label{eq:phase3}
|\hat{f}(\mathbf{x}) - f(\mathbf{x})| \le \frac{1}{2} G \cdot H^\star. \end{equation} \end{theorem}
\begin{proof} We only provide the proof for the case when $\hat{f}(\mathbf{x})-f(\mathbf{x}) \ge 0$. We use the function $\psi$ defined in \eqref{eq:Lipscthiz stronger H} again. Since $\ell_3<0$, $(\mathbf{x}_2-\mathbf{x}_1) \cdot (\mathbf{x}_3-\mathbf{x}_1) < 0$, and \[ \begin{aligned}
0 &> \ell_1 [\mathbf{x}_2-\mathbf{x}_1] \cdot [\mathbf{x}_3-\mathbf{x}_1] - \ell_3 [\mathbf{x}_2-\mathbf{x}_3] \cdot [\mathbf{x}_1-\mathbf{x}_3] \\
&\leftstackrel{\eqref{eq:Lagrange 0}}{=} (1-\ell_2-\ell_3) [\mathbf{x}_2-\mathbf{x}_1] \cdot [\mathbf{x}_3-\mathbf{x}_1] - \ell_3 [\mathbf{x}_2-\mathbf{x}_3] \cdot [\mathbf{x}_1-\mathbf{x}_3] \\
&= (1-\ell_2) [\mathbf{x}_2-\mathbf{x}_1] \cdot [\mathbf{x}_3-\mathbf{x}_1] - \ell_3 \|\mathbf{x}_1-\mathbf{x}_3\|^2, \end{aligned} \] we have $1-\ell_2 > 0$, and thus the following inequalities hold: \begin{subequations} \label{eq:phase3 sum} \begin{align}
(1-\ell_2) \psi(\mathbf{x}_1, \mathbf{x}, H^\star) &\le 0, \\
\ell_2 \psi(\mathbf{x}_2, \mathbf{x}, H^\star) &\le 0, \\
-\ell_3 \psi(\mathbf{x}_1, \mathbf{x}_3, H^\star) &\le 0. \end{align} \end{subequations} Similar to the previous proofs, we add these inequalities together. The sum of their zeroth-order terms is \[ \begin{aligned} &\hspace{-1em} (1-\ell_2) [f(\mathbf{x}_1) - f(\mathbf{x})] + \ell_2 [f(\mathbf{x}_2) - f(\mathbf{x})] - \ell_3 [f(\mathbf{x}_1) - f(\mathbf{x}_3)] \\ &= (1-\ell_2-\ell_3) f(\mathbf{x}_1) + \ell_2 f(\mathbf{x}_2) + \ell_3 f(\mathbf{x}_3) - f(\mathbf{x}) \stackrel{\eqref{eq:Lagrange m}\eqref{eq:Lagrange 0}}{=} \hat{f}(\mathbf{x}) - f(\mathbf{x}). \end{aligned} \] The sum of their first-order terms is $-1/(2\nu)$ multiplies \[ \begin{aligned} &\hspace{-2em} (1-\ell_2) \left\{ [(\nu I-H^\star) (\mathbf{x}_1-\mathbf{x})] \cdot Df(\mathbf{x}_1) + [(\nu I+H^\star) (\mathbf{x}_1-\mathbf{x})] \cdot Df(\mathbf{x}) \right\} \\ &\hspace{-2em} + \ell_2 \left\{ [(\nu I-H^\star) (\mathbf{x}_2-\mathbf{x})] \cdot Df(\mathbf{x}_2) + [(\nu I+H^\star) (\mathbf{x}_2-\mathbf{x})] \cdot Df(\mathbf{x}) \right\} \\ &\hspace{-2em} -\ell_3 \left\{ [(\nu I-H^\star) (\mathbf{x}_1-\mathbf{x}_3)] \cdot Df(\mathbf{x}_1) + [(\nu I+H^\star) (\mathbf{x}_1-\mathbf{x}_3)] \cdot Df(\mathbf{x}_3) \right\} \\ &= \{(\nu I-H^\star) [(1-\ell_2)(\mathbf{x}_1-\mathbf{x}) - \ell_3(\mathbf{x}_1-\mathbf{x}_3)]\} \cdot Df(\mathbf{x}_1) \\ &\quad + \ell_2 [(\nu I-H^\star) (\mathbf{x}_2-\mathbf{x})] \cdot Df(\mathbf{x}_2) - \ell_3 [(\nu I+H^\star) (\mathbf{x}_1-\mathbf{x}_3)] \cdot Df(\mathbf{x}_3) \\ &\quad + \left\{ (\nu I+H^\star) [(1-\ell_2)(\mathbf{x}_1-\mathbf{x}) + \ell_2(\mathbf{x}_2-\mathbf{x})] \right\} \cdot Df(\mathbf{x}) \\ &\leftstackrel{\eqref{eq:Lagrange 0}\eqref{eq:Lagrange Y}}{=} \ell_2 [(\nu I-H^\star) (\mathbf{x} - \mathbf{x}_2)] \cdot Df(\mathbf{x}_1) + \ell_2 [(\nu I-H^\star) (\mathbf{x}_2-\mathbf{x})] \cdot Df(\mathbf{x}_2) \\ &\quad - \ell_3 [(\nu I+H^\star) (\mathbf{x}_1-\mathbf{x}_3)] \cdot Df(\mathbf{x}_3) + \ell_3 [(\nu I+H^\star) (\mathbf{x}_1 - \mathbf{x}_3)] \cdot Df(\mathbf{x}) \\ &\leftstackrel{\eqref{eq:phase3 Hstar eigvector}}{=} \mathbf{0}. \end{aligned} \] Let $\mathbf{w}$ be defined as \eqref{eq:phase3 w}. The sum of the constant terms is $-1/2$ times \[ \begin{aligned}
&\hspace{-1em} (1-\ell_2) \left[\frac{1}{2\nu} \|H^\star (\mathbf{x}_1-\mathbf{x})\|^2 + \frac{\nu}{2} \|\mathbf{x}_1-\mathbf{x}\|^2 \right] + \ell_2 \left[\frac{1}{2\nu} \|H^\star (\mathbf{x}_2-\mathbf{x})\|^2 \right. \\
&\hspace{-1em} \left.+ \frac{\nu}{2} \|\mathbf{x}_2-\mathbf{x}\|^2 \right] - \ell_3 \left[\frac{1}{2\nu} \|H^\star (\mathbf{x}_1-\mathbf{x}_3)\|^2 + \frac{\nu}{2} \|\mathbf{x}_1-\mathbf{x}_3\|^2 \right] \\ &\leftstackrel{\eqref{eq:phase3 Hstar eigvector}}{=} (1-\ell_2)\left\{ -H^\star(\mathbf{x}_1-\mathbf{w}) \cdot (\mathbf{x}_1-\mathbf{w}) + H^\star(\mathbf{x}-\mathbf{w}) \cdot (\mathbf{x}-\mathbf{w}) \right\} \\ &\qquad + \ell_2 H^\star(\mathbf{x}_2-\mathbf{x}) \cdot (\mathbf{x}_2-\mathbf{x}) + \ell_3 H^\star(\mathbf{x}_1-\mathbf{x}_3) \cdot (\mathbf{x}_1-\mathbf{x}_3) \\ &\leftstackrel{\eqref{eq:Lagrange 0}}{=} H^\star[\ell_3(\mathbf{x}_1-\mathbf{x}_3)-(\ell_1+\ell_3)(\mathbf{x}_1-\mathbf{w})] \cdot (\mathbf{x}_1-\mathbf{w}) - \ell_3 H^\star(\mathbf{x}_1-\mathbf{x}_3) \cdot (\mathbf{x}_3-\mathbf{w}) \\ &\qquad + H^\star[(1-\ell_2)(\mathbf{x}-\mathbf{w}) - \ell_2(\mathbf{x}_2-\mathbf{x})] \cdot (\mathbf{x}-\mathbf{w}) + \ell_2 H^\star(\mathbf{x}_2-\mathbf{x}) \cdot (\mathbf{x}_2-\mathbf{w}) \\ &\leftstackrel{\eqref{eq:Lagrange 0}\eqref{eq:Lagrange Y}}{=} 0 - \ell_3 [H^\star(\mathbf{x}_1-\mathbf{w}) - H^\star(\mathbf{x}_3-\mathbf{w})] \cdot (\mathbf{x}_3-\mathbf{w}) \\ &\quad + 0 + \ell_2 [H^\star(\mathbf{x}_2-\mathbf{w}) - H^\star(\mathbf{x}-\mathbf{w})] \cdot (\mathbf{x}_2-\mathbf{w}) \\ &\leftstackrel{\eqref{eq:phase3 w y}}{=} \sum_{i=0}^{3} \ell_i H^\star (\mathbf{x}_i-\mathbf{w}) \cdot (\mathbf{x}_i-\mathbf{w}) \stackrel{\eqref{eq:G recenter}}{=} G \cdot H^\star. \end{aligned} \] Thus, the sum of the inequalities in \eqref{eq:phase3 sum} is \eqref{eq:phase3} when $\hat{f}(\mathbf{x}) - f(\mathbf{x}) \ge 0$. \end{proof}
We show in Theorem~\ref{thm:phase3 sharp} that the upper bound \eqref{eq:phase3} can be achieved by a piecewise quadratic function, and therefore \eqref{eq:phase3} is sharp. \begin{theorem} \label{thm:phase3 sharp} Under the setting of Theorem~\ref{thm:phase3}, the bound \eqref{eq:phase3} is sharp and can be achieved by \[ f(\mathbf{u}) = \left\{ \begin{aligned}
&\frac{\nu}{2} \|\mathbf{u}-\mathbf{w}\|^2 - \frac{\nu[(\mathbf{x}_1-\mathbf{x}_3) \cdot (\mathbf{u}-\mathbf{w})]^2}{\|\mathbf{x}_1-\mathbf{x}_3\|^2} &&\text{if } (\mathbf{u}-\mathbf{w}) \cdot (\mathbf{x}_1-\mathbf{x}_3) \le 0, \\
&\frac{\nu}{2} \|\mathbf{u}-\mathbf{w}\|^2 &&\text{if } (\mathbf{u}-\mathbf{w}) \cdot (\mathbf{x}_1-\mathbf{x}_3) \ge 0. \end{aligned} \right. \] where $\mathbf{w}$ is defined in \eqref{eq:phase3 w}. \end{theorem} \begin{proof} The function approximation error for this piecewise quadratic function is \[ \begin{aligned} \hat{f}(&\mathbf{x}) - f(\mathbf{x}) \stackrel{\eqref{eq:Lagrange m}}{=} \sum_{i=0}^{3} \ell_i f(\mathbf{x}_i) \\
&= \frac{\nu}{2} \sum_{i=0}^3 \ell_i \|\mathbf{x}_i-\mathbf{w}\|^2 - \frac{\nu\ell_1 [(\mathbf{x}_1-\mathbf{x}_3) \cdot (\mathbf{x}_1-\mathbf{w})]^2}{\|\mathbf{x}_1-\mathbf{x}_3\|^2} - \frac{\nu\ell_3 [(\mathbf{x}_1-\mathbf{x}_3) \cdot (\mathbf{x}_3-\mathbf{w})]^2}{\|\mathbf{x}_1-\mathbf{x}_3\|^2} \\
&= \frac{\nu}{2} \sum_{i=0}^3 \ell_i \|\mathbf{x}_i-\mathbf{w}\|^2 - \nu\ell_1\|\mathbf{x}_1-\mathbf{w}\|^2 - \nu\ell_3\|\mathbf{x}_3-\mathbf{w}\|^2 \\
&= \frac{\nu}{2} \left(\ell_0\|\mathbf{x}-\mathbf{w}\|^2 - \ell_1\|\mathbf{x}_1-\mathbf{w}\|^2 + \ell_2\|\mathbf{x}_2-\mathbf{w}\|^2 - \ell_3\|\mathbf{x}_3-\mathbf{w}\|^2\right) \\
&\leftstackrel{\eqref{eq:phase3 Hstar eigvector}}{=} \frac{1}{2} \sum_{i=0}^3 \ell_i [H^\star(\mathbf{x}_i-\mathbf{w})]\cdot(\mathbf{x}_i-\mathbf{w}) \stackrel{\eqref{eq:G recenter}}{=} \frac{1}{2} G \cdot H^\star. \end{aligned} \] Now we prove $f \in C_\nu^{1,1}(\R^2)$. Firstly, it is clear that $f$ is continuous on $\R^2$ and differentiable on the two half spaces $\{\mathbf{u}:~ (\mathbf{u}-\mathbf{w}) \cdot (\mathbf{x}_1-\mathbf{x}_3) < 0\}$ and $\{\mathbf{u}:~ (\mathbf{u}-\mathbf{w}) \cdot (\mathbf{x}_1-\mathbf{x}_3) > 0\}$. Then given any $\mathbf{u}$ such that $(\mathbf{u}-\mathbf{w}) \cdot (\mathbf{x}_1-\mathbf{x}_3) = 0$, it can be calculated for any $\mathbf{v}\in\R^2$ that \begin{multline*}
f(\mathbf{u}+\mathbf{v}) - f(\mathbf{u}) - \nu (\mathbf{u}-\mathbf{w})\cdot\mathbf{v} \\
= \left\{ \begin{aligned}
&\frac{\nu}{2}\|\mathbf{v}\|^2 - \frac{\nu [(\mathbf{x}_1-\mathbf{x}_3)\cdot \mathbf{v}]^2}{\|\mathbf{x}_1-\mathbf{x}_3\|^2} &&\text{if } (\mathbf{u}+\mathbf{v}-\mathbf{w}) \cdot (\mathbf{x}_1-\mathbf{x}_3) \le 0, \\
&\frac{\nu}{2}\|\mathbf{v}\|^2 &&\text{if } (\mathbf{u}+\mathbf{v}-\mathbf{w}) \cdot (\mathbf{x}_1-\mathbf{x}_3) \ge 0.
\end{aligned} \right. \end{multline*} Both branches are of order $\|\mathbf{v}\|^2$, and thus
\[ \lim_{\mathbf{v}\rightarrow\mathbf{0}} \frac{|f(\mathbf{u}+\mathbf{v}) - f(\mathbf{u}) - \nu(\mathbf{u}-\mathbf{w})\cdot\mathbf{v}|}{\|\mathbf{v}\|} = 0, \] which shows $f$ is differentiable with gradient $\nu(\mathbf{u}-\mathbf{w})$ on $\{\mathbf{u}:~ (\mathbf{u}-\mathbf{w}) \cdot (\mathbf{x}_1-\mathbf{x}_3) = 0\}$. The condition \eqref{eq:Lipschitz} is clearly satisfied if $\mathbf{u}_1$ and $\mathbf{u}_2$ are in the same half space. Now assume $(\mathbf{u}_1-\mathbf{w})\cdot(\mathbf{x}_1-\mathbf{x}_3) < 0$ and $(\mathbf{u}_2-\mathbf{w})\cdot(\mathbf{x}_1-\mathbf{x}_3) > 0$. Then, we have \[ \begin{aligned}
&\|Df(\mathbf{u}_1) - Df(\mathbf{u}_2)\|^2 \\
&= \|\nu(\mathbf{u}_1-\mathbf{w}) - 2\nu\left[(\mathbf{x}_1-\mathbf{x}_3) \cdot (\mathbf{u}_1-\mathbf{w})/\|\mathbf{x}_1-\mathbf{x}_3\|^2\right] (\mathbf{x}_1-\mathbf{x}_3)- \nu(\mathbf{u}_2-\mathbf{w})\|^2 \\
&= \nu^2\|\mathbf{u}_1-\mathbf{u}_2\|^2 + 4\nu^2 [(\mathbf{u}_1-\mathbf{w}) \cdot (\mathbf{x}_1-\mathbf{x}_3)] [(\mathbf{u}_2-\mathbf{w})\cdot(\mathbf{x}_1-\mathbf{x}_3)] /\|\mathbf{x}_1-\mathbf{x}_3\|^2 \\
&< \nu^2\|\mathbf{u}_1-\mathbf{u}_2\|^2, \end{aligned} \] which shows \eqref{eq:Lipschitz} always holds. Therefore $f \in C_\nu^{1,1}(\R^2)$. \end{proof}
\section{Discussion} \label{sec:discussion} We presented a numerical approach to calculate the sharp bound on the function approximation error of linear interpolation and extrapolation and proved several conditionally sharp analytical bounds along with their conditions for sharpness. These analytical bounds include one that improves the existing ones to better cover the extrapolation case \eqref{eq:phase1}, a sharp bound for quadratic functions \eqref{eq:phase2}, and one for bivariate extrapolation \eqref{eq:phase3}. The two bounds \eqref{eq:phase2} and \eqref{eq:phase3} together provide the sharp error bound for bivariate linear interpolation under any configuration of $\mathbf{x}$ and an affinely independent $\Theta$. These bounds can provide an important theoretical foundation for the design and analysis of derivative-free optimization methods and any other numerical methods that utilize linear interpolation.
While our results are developed under the condition that $f\in C_\nu^{1,1}(\R^n)$, they still hold under weaker conditions (at the cost of a more complicated analysis).
In existing literature, the condition often used is that $\| |D^2 f| \|_{L_\infty(Q)} \le \nu$, where $Q$, for example, is the star-shaped set that connects $\mathbf{x}$ to each point in $\Theta$ in \cite{ciarlet1972general} and $\conv(\Theta)$ in \cite{waldron1998error}. Our results do not necessarily require the twice-differentiability of $f$ and only need $f\in C_\nu^{1,1}(Q)$ for some $Q\subset\R^n$. For \eqref{eq:phase1}, $Q$ at least needs to cover (almost everywhere, same hereafter) the star-shaped set $\cup_{i=0}^{n+1} \{\alpha \mathbf{x}_i + (1-\alpha)\mathbf{w}:~ 0\le\alpha\le1\}$. For \eqref{eq:phase2}, we need $Q$ to cover \[ \bigcup_{(i,j)\in\cI_+\times\cI_-} \left( \begin{aligned} &\{\alpha \mathbf{x}_i + (1-\alpha)[(\mathbf{u}_i+\mathbf{u}_j)/2+H^\star(\mathbf{u}_i-\mathbf{u}_j)/(2\nu)]:~ 0\le\alpha\le1 \} \\ &\quad \cup \{\alpha \mathbf{x}_j + (1-\alpha)[(\mathbf{u}_i+\mathbf{u}_j)/2+H^\star(\mathbf{u}_i-\mathbf{u}_j)/(2\nu)]:~ 0\le\alpha\le1 \} \end{aligned} \right), \] where $H^\star$ is defined as \eqref{eq:Hstar}. For \eqref{eq:phase3}, we need \[ Q \supseteq \{\alpha \mathbf{x}_2 + (1-\alpha)\mathbf{w}:~ 0\le\alpha\le1 \} \cup \{\alpha \mathbf{x}_3 + (1-\alpha)\mathbf{w}:~ 0\le\alpha\le1 \}, \] where $\mathbf{w}$ is defined as \eqref{eq:phase3 w}.
We proposed to compute $\{\mu_{ij}\}$ and check their signs to determine whether \eqref{eq:phase2} is a sharp bound and proved in Theorem~\ref{thm:phase2} that $\{\mu_{ij}\}$ being all non-negative is a sufficient condition. We want to mention that one of our numerical experiments seems to indicate that it is also a necessary condition.
This experiment involves generating many different $\Theta$ and $\mathbf{x}$ with various $n$ and calculating the corresponding $\{\mu_{ij}\}$. From this experiment, we also observed a geometric pattern in the signs of $\{\mu_{ij}\}$, which we present in the following conjecture. \begin{conjecture}
Assume $f \in C^{1,1}_\nu(\R^n)$.
Let $\hat{f}$ be the linear function that interpolates $f$ at any set of $n+1$ affinely independent vectors $\Theta = \{\mathbf{x}_1,\dots,\mathbf{x}_{n+1}\}\subset \R^n$.
Let $\mathbf{x}$ be any vector in $\R^n$.
Let $\{\mu_{ij}\}_{i \in \cI_+,~ j \in \cI_-}$ be the set of parameters defined in Theorem~\ref{thm:phase2}. Then the following statements are true.
\begin{enumerate}
\item When there is no obtuse angle at the vertices of the simplex $\conv(\Theta)$, that is, when
\begin{equation} \label{eq:acute simplex}
(\mathbf{x}_j-\mathbf{x}_i) \cdot (\mathbf{x}_k-\mathbf{x}_i) \ge 0 \text{ for all } i,j,k = 1,2,\dots,n+1,
\end{equation}
the parameters $\{\mu_{ij}\}$ are all non-negative for any $\mathbf{x} \in \R^n$.
\item If there is at least one obtuse angle at the vertices of the simplex $\conv(\Theta)$, then there is a non-empty subset of $\R^n$ such that, whenever $\mathbf{x}$ belongs to it, at least one element of $\{\mu_{ij}\}$ is negative.
\end{enumerate} \end{conjecture}
A general formula for the sharp bound on the function approximation error of linear interpolation and extrapolation remains an open question. It would appear that $G\cdot H^\star/2$ is a good candidate, since all the bounds developed in this paper can be written in this form, but the matrix $H^\star$ depends on the geometry of $\Theta$ and $\mathbf{x}$. Using $G\cdot H^\star/2$ as the general formula, we would need five different definitions of $H^\star$ even for the bivariate case (\eqref{eq:Hstar} and four variants of \eqref{eq:Hstar 3} corresponding to the four shaded areas in Figure~\ref{fig:phase2}). Note that the matrix $H^\star$ is tied to $\{\mu_{ij}\}$ in \eqref{eq:mu1 +} and \eqref{eq:mu1 -}, and we believe that, even when some of the $\{\mu_{ij}\}$ are negative, $H^\star$ is still tied in the same manner to a version of $\{\mu_{ij}\}$ that is modified to be all non-negative. In fact, \eqref{eq:mu0 +} - \eqref{eq:mu1 -} all hold true under the setting of Theorem~\ref{thm:phase3} if $H^\star$ is defined as \eqref{eq:Hstar 3} and $\{\mu_{ij}\}$ is defined as \[ \begin{aligned} \mu_{10} &= 1-\ell_2, &\mu_{13} &= -\ell_3, &\mu_{20} &= \ell_2, &\mu_{23} &= 0, \end{aligned} \] which are the coefficients in \eqref{eq:phase3 sum}. Considering the difficulty of analyzing the signs of $\{\mu_{ij}\}$, it is unlikely that $G\cdot H^\star/2$ can serve as such a general formula in practice. Whether there even exists a concise analytical form of the sharp error bound that fits all the geometric configurations of $\Theta$ and $\mathbf{x}$ is still unclear to us.
\section*{Acknowledgment} We would like to acknowledge the help from Dr. Xin Shi and Yunze Sun in solving \eqref{prob:quadratic}. We would like to thank Dr. Shuonan Wu and Dr. Katya Scheinberg for carefully reading this paper and providing their suggestions.
\end{document} |
\begin{document}
\title{Mathematical modeling and numerical analysis for the higher order Boussinesq system} \date{\today}
\author{Bashar Khorbatly} \address{Lebanese American University (LAU), Graduate Studies and Research (GSR) office, School of Arts and Sciences, Computer Science and Mathematics Department, Byblos, Lebanon} \email{[email protected]} \author{Ralph Lteif} \address{Lebanese American University (LAU), Graduate Studies and Research (GSR) office, School of Arts and Sciences, Computer Science and Mathematics Department, Beirut, Lebanon} \email{Corresponding author, [email protected]} \author{Samer Israwi} \address{Lebanese University, Laboratory of Mathematics-EDST, Department of Mathematics, Faculty of Sciences 1, Beirut, Lebanon} \email{s$\[email protected]} \author{St\'ephane Gerbi} \address{Laboratoire de Math\'ematiques UMR 5127 CNRS \& Universit\'e de Savoie Mont Blanc, Campus scientifique, 73376 Le Bourget du Lac Cedex, France} \email{[email protected]}
\subjclass[2010]{35Q35, 35L45, 35L60, 76B45, 76B55, 35C07, 65L99} \keywords{Water waves, Boussinesq system, higher-order asymptotic model, well-posedness, traveling waves, explicit solution, numerical validation.}
\date{\today}
\begin{abstract} This study deals with higher-order asymptotic equations for the water-waves problem. We consider the higher-order/extended Boussinesq equations over a flat bottom topography in the well-known long wave regime. We prove existence and uniqueness of the solution on a relevant time scale of order $1/\sqrt{\varepsilon}$ and show that its behavior is close to the solution of the water waves equations, with a better precision, for corresponding initial data; hence the asymptotic model is well-posed in the sense of Hadamard. We then compare several solitary wave solutions of the water waves equations with the numerical solution of our model. At last, we solve this model explicitly and validate the results numerically. \end{abstract} \maketitle \tableofcontents
\section{Introduction}
\subsection{The water-wave equations.} In this paper, we investigate the one-dimensional flow of the free surface of a homogeneous, immiscible fluid moving above a flat topography $z=-h_0$.
The horizontal and vertical variables are denoted respectively by $x \in \mathbb R$ and $z \in \mathbb R$ and $t \geq 0$ stands for the time variable.
The free surface is parametrized by the graph of the function $\zeta(t,x)$ denoting the variation with respect to its rest state $z=0$ (see Figure~\ref{flattopdom}).
The fluid occupies the domain $\Omega_t$, which stays connected thanks to the non-vanishing depth condition $\zeta(t,x) + h_0 >0$, and is given at time $t\geq 0$ by:
$$ \Omega_t= \{ (x,z) \in \mathbb R^2; \ -h_0 \leq z \leq \zeta(t,x) \}.$$
\begin{figure}
\caption{One-dimensional flat bottom fluid domain.}
\label{flattopdom}
\end{figure}
\noindent The fluid is considered to be perfect, that is with no viscosity and only affected by the force of gravity.
We also assume the fluid to be incompressible and the flow to be irrotational so that the velocity field is divergence and curl free.
We denote by $\rho$ the constant density of the fluid and by $V$ its velocity field. The first boundary condition at the free surface expresses a balance of forces.
Kinematic boundary conditions are considered, assuming that both the surface and the bottom are impenetrable, that is, no fluid particle can cross them.
The set of equations describing the flow is now complete and is commonly known as the \emph{full Euler} equations:
\begin{equation}
\left\{
\begin{array}{lcl}
\displaystyle\partial_t V+V\cdot\nabla_{x,z} V = -g\overrightarrow{e}_z-\dfrac{\nabla_{x,z} P}{\rho} & \hbox{in} & (x,z)\in \Omega_t, \ t\geq 0
,\\
\displaystyle\nabla_{x,z}\cdot V=0 & \hbox{in} & (x,z)\in \Omega_t, \ t\geq 0
,\\
\displaystyle\nabla_{x,z}\times V=0 & \hbox{in} & (x,z)\in \Omega_t, \ t\geq 0
,\\
P|_{z=\zeta(t,x)}=0 & \hbox{for} & t\geq 0,\ x \in \mathbb R,\\
\displaystyle\partial_t \zeta-\sqrt{1+\vert \partial_x \zeta \vert^2} n_{\zeta}\cdot V|_{z=\zeta(t,x)} =0 & \hbox{for} & t \geq 0
, \ x \in \mathbb R,\\
\displaystyle -V\cdot \overrightarrow{e_z} = 0 & \hbox{at} & z=-h_0
, \ t\geq 0
,\\ \displaystyle \lim_{\vert(x,z)\vert\to\infty}\vert\zeta(t,x)\vert+\vert V(t,x,z)\vert=0 & \hbox{for} & t\geq 0 \; .
\end{array}
\right.
\label{euler}
\end{equation}
where $n_{\zeta}=\dfrac{1}{\sqrt{1+|\partial_x \zeta|^2}} (-\partial_x \zeta, 1)^T$ denotes the upward normal vector to the free surface.
The theoretical study of the above system of equations is extremely difficult due to its large number of unknowns and its time-dependent moving domain $\Omega_t$.
In fact, we have a free boundary problem; in other words, the domain is itself one of the unknowns. Using the irrotationality of the velocity field, one can express the latter as the gradient of a potential function $\varphi$.
This potential satisfies the Laplace equation inside the fluid, $\Delta_{x,z} \varphi =0$ in $(x,z) \in \Omega_t$.
Consequently, the evolution of the velocity potential can now be written using Bernoulli's equation.
Although the system is now simpler, a free boundary problem still exists. To get over this obstacle, Craig and Sulem~\cite{CS93,CSS92} had an interesting idea, following Zakharov's work~\cite{Zakharov68}, consisting of a reformulation of the system of equations~\eqref{euler} through the introduction of a Dirichlet-Neumann operator,
thus reducing the dimension of the considered space and the number of unknowns. Denoting by $\psi$ the trace of the velocity potential at the free surface, $\psi(t,x) =\varphi (t,x,\zeta(t,x))=\varphi _{| z=\zeta}$, the Dirichlet-Neumann operator is introduced
\begin{equation*}\label{diriclet}
\mathcal{G}[\zeta]\psi = -\big(\partial_x\zeta\big)\cdot\big(\partial_x\varphi\big)_{\mid_{z=\zeta}} + \big(\partial_z \varphi\big)_{\mid_{z=\zeta}} = \sqrt{1 + \big\vert\partial_x\zeta\big\vert ^2}\big(\partial_n\varphi\big)_{\mid_{z=\zeta}}
\end{equation*}
where $\varphi$ is defined uniquely from $(\zeta,\psi)$ as a solution of the following Laplace problem (see~\cite{Lannes2013} for a complete and accurate analysis):
\begin{equation*}
\left\{
\begin{array}{lcl}\label{BVP1}
\displaystyle\partial_x^2 \varphi+ \partial_z^2 \varphi = 0 & \hbox{in} & - h_0 < z < \zeta(t,x),\\
\displaystyle\partial_z\varphi_{\mid_{z=-h_0}}=0,\\
\displaystyle\varphi_{\mid_{z=\zeta}}=\psi(t,x) .
\end{array}
\right.
\end{equation*}
with $\partial_n = n\cdot \nabla_{x,z}$
the normal derivative in the direction of the vector $n$. Thus, the evolution of only the two variables $(\zeta,\psi)$, located at the free surface, characterizes the flow. This system is known as the Zakharov/Craig-Sulem formulation of the water-waves equations: \begin{equation}\label{Za} \left\{ \begin{array}{lcl} \displaystyle\partial_t \zeta-\mathcal{G} [ \zeta]\psi= 0 \; ,\\ \displaystyle\partial_t\psi+g\zeta+\frac{1}{2}\vert\partial_x \psi\vert^2 - \displaystyle\frac{( \mathcal{G}[ \zeta]\psi+\partial_x\zeta\cdot\partial_x\psi)^2}{2(1+ \vert\partial_x\zeta\vert^2)}= 0 \; . \end{array} \right. \end{equation} The above system of equations has a particularly rich structure, and depending on the physical properties of the flow, it is possible to obtain solutions to \eqref{Za} with different qualitative properties. Nonlinear effects, for example, become more important as wave amplitude increases. Although Zakharov's reformulation resulted in a reduced system of equations, the description of these solutions from a qualitative and quantitative point of view remains very complex.
A remedy for this situation requires the construction of simplified asymptotic models whose solutions are approximate solutions of the full system.
These approximate models allow to describe in a fairly precise way the behavior of the complete system in a specific physical regime.
This requires a rescaling of the system in order to reveal small dimensionless parameters which allow one to perform asymptotic expansions of the non-local (Dirichlet-Neumann) operator,
thus ignoring the terms whose influence is minimal.
The order of magnitude of these parameters makes it possible to identify the considered physical regime.
We start by introducing respectively the commonly known nonlinear and shallowness parameters: \begin{equation*} \varepsilon=\frac{a}{h_0}=\frac{\text{amplitude of the wave}}{\text{reference depth}} \; , \qquad\qquad\sqrt{\mu}=\frac{h_0}{\lambda}=\frac{\text{reference depth}}{\text{wave-length of the wave}} \; , \end{equation*} where $0\leq\varepsilon\leq 1$ is often called nonlinearity parameter, while $0\leq\mu \leq 1$ is called the shallowness parameter. In this manner, the dimensionless formulation of \eqref{Za} reads: \begin{equation}\label{Zakharovv} \left\{ \begin{array}{lcl} \displaystyle\partial_t \zeta-\frac{1}{\mu}\mathcal{G}_{\mu}[\varepsilon\zeta]\psi= 0 \; ,\\ \displaystyle\partial_t\psi+\zeta+\frac{\varepsilon}{2}\vert\partial_x \psi\vert^2 -\varepsilon\mu\displaystyle\frac{(\frac{1}{\mu}\mathcal{G}_{\mu}[\varepsilon\zeta]\psi+\partial_x(\varepsilon\zeta)\cdot\partial_x\psi)^2}{2(1+\varepsilon^2\mu\vert\partial_x\zeta\vert^2)}= 0 \; , \end{array} \right. \end{equation} where $\psi (t,x) =\varphi_{\mid_{z=\varepsilon \zeta}}$ and $\mathcal{G}_\mu[\varepsilon\zeta]\psi = \sqrt{1 + \mu\varepsilon^2\big\vert\partial_x\zeta\big\vert ^2}\big(\partial_n\varphi\big)_{\mid_{z=\varepsilon\zeta}}$.
Let us now identify the asymptotic geophysical shallow-water ($\mu\ll1$) category (or sub-regime) associated with our work. An additional assumption is made on the nonlinearity parameter, from which a diverse set of asymptotic models can be derived. More precisely, it is possible to deduce from \eqref{Zakharovv} a (much simpler) asymptotic model that is more amenable to numerical simulations and has more transparent properties. For instance, when $\varepsilon\sim\mu$, the flow under consideration is said to be in a small amplitude regime.
\subsection{Shallow-water, flat bottom, small amplitude variations $(\mu\ll1, \varepsilon\sim\mu)$.}
In this paper, we restrict our work to the well-known long waves regime with a flat topography, for which the ``original'' or ``standard'' Boussinesq system can be derived. Defining the depth-averaged horizontal velocity by: \begin{equation}\label{defvelocity} v(t,x)=\frac{1}{1 + \varepsilon\zeta (t,x)}\int_{-1}^{\varepsilon\zeta (t,x)}\partial_x\varphi(t,x,z)\hspace{0.1cm}dz \; , \end{equation} under the extra assumption $\varepsilon\sim\mu$, we can neglect the terms which are of order $\mathcal{O}(\mu^2)$ in the Green-Naghdi equations (we refer to \cite{GN76,GLN74} for the formal derivation and to \cite{Israwi2011,Israwi2010,Khorbatly2021,AMBP_2018__25_1_21_0} for well-posedness); then the standard Boussinesq equations read: \begin{equation}\label{standard-bunsq} \left\{ \begin{array}{lcl} \displaystyle\partial_t\zeta+\partial_x\big( (1+\varepsilon\zeta )v \big)=0
\; ,\\ \displaystyle ( 1 - \varepsilon \frac{1}{3} \partial_x^2 ) \partial_t v + \partial_x\zeta + \varepsilon v\partial_x v =\mathcal{O} (\varepsilon^2) \; . \end{array} \right. \end{equation} Many strategies exist to study the water-wave problem, especially by deriving equivalent models with a better mathematical structure (well-posedness, conservation of energy, solitary waves) or better physical properties (see for instance \cite{BBM72,LPS2012,BCL2005,Chazel2007,MSZ2012,SX2012,SWX2017,Burtea2016-1,Burtea2016-2,Lannes2013,Saut-Li,SAUT20202627,saut2021}).
It is worth noticing that well-posedness results for such models exist on a time scale of order $1/\sqrt{\varepsilon}$ (methods based on dispersive estimates in \cite{Zakharov68})
and $1/\varepsilon$ (energy estimate method in \cite{Lannes2013}). A better precision is obtained when the $\mathcal{O}(\mu^2)$
terms are kept in the equations: only $\mathcal{O}(\mu^3)$ terms are dropped. Following the work in a series of papers on the extended Green-Naghdi equations
\cite{Matsuno2015,Matsuno2016,KZI2018,KZI2021}, one may write the extended Boussinesq equations by incorporating higher order dispersive effects as follows: \begin{equation}\label{ex-boussinesq} \left\{ \begin{array}{lcl} \displaystyle\partial_t\zeta+\partial_x(hv)=0
\; ,\\ \displaystyle ( 1+\varepsilon\mathcal{T}[\zeta]+\varepsilon^2\mathfrak{T} )\partial_t v + \partial_x\zeta+\varepsilon v \partial_x v +\varepsilon^2 \mathcal{Q}v =\mathcal{O} (\varepsilon^3) \; , \end{array} \right. \end{equation} where $h=1+\varepsilon\zeta$ is the non-dimensionalised height of the fluid and we denote the three operators : \begin{equation*} \mathcal{T}[\zeta]w =-\frac{1}{3h}\partial_x\big((1+3\varepsilon\zeta)\partial_xw\big), \quad\mathfrak{T} w = -\frac{1}{45}\partial_x^4w , \quad\mathcal{Q}v = -\frac{1}{3}\partial_x\big(vv_{xx}-v_x^2\big) \; . \end{equation*}
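For readers who want to experiment with these operators numerically, the following Python sketch (a purely illustrative pseudo-spectral discretization on a periodic grid with sample Gaussian profiles; it is our own choice and not the numerical scheme used later in this paper) evaluates $\mathcal{T}[\zeta]w$, $\mathfrak{T}w$ and $\mathcal{Q}v$.
\begin{verbatim}
import numpy as np

N, L, eps = 256, 50.0, 0.1
x = np.linspace(0.0, L, N, endpoint=False)
k = 2.0 * np.pi * np.fft.fftfreq(N, d=L / N)
dx = lambda u, p=1: np.real(np.fft.ifft((1j * k) ** p * np.fft.fft(u)))

zeta = 0.3 * np.exp(-((x - L / 2) ** 2) / 4.0)   # sample surface elevation
v = 0.2 * np.exp(-((x - L / 2) ** 2) / 6.0)      # sample averaged velocity
h = 1.0 + eps * zeta

T = lambda w: -dx((1.0 + 3.0 * eps * zeta) * dx(w)) / (3.0 * h)  # T[zeta]w
frak_T = lambda w: -dx(w, 4) / 45.0                              # \mathfrak{T} w
Q = lambda u: -dx(u * dx(u, 2) - dx(u) ** 2) / 3.0               # Q u
print(T(v)[:3], frak_T(v)[:3], Q(v)[:3])
\end{verbatim}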
\subsection{Presentation of the results} As mentioned before, we will first derive the extended Boussinesq equations in the same way as the extended Green-Naghdi equations are derived: we keep all terms up to the third order in $\varepsilon$. This is done in the next section, section \ref{model}.
Section \ref{justification} is devoted to the full justification of the extended Boussinesq system. We first write, in subsection \ref{quasilinear}, the extended Boussinesq system in a quasilinear form. The linear analysis performed in subsection \ref{linear-analysis} then allows us, by the energy estimate method, to state in subsection \ref{mainresults} the main results on well-posedness, stability and convergence of the proposed extended Boussinesq system.
As for the usual Green-Naghdi and Boussinesq models, we are interested in the construction of solitary wave solutions. We prove in section \ref{solitary-approx} that the profile of such a solitary wave is a solution of a third-order nonlinear ordinary differential equation (ODE), so that finding an explicit form of this profile seems out of reach. We therefore compute an approximate profile using the Matlab ODE solver \texttt{ode45}. We compare the obtained solutions with the solutions of the water-waves equations and find that this solution is a better approximation than the solitary wave solution of the original Green-Naghdi equations.
Lastly, instead of looking for an exact analytical solitary wave, we construct an explicit solution with correctors in section \ref{explicit-solitary}.
\subsection{Comments on the results.}
In this section we try to highlight the potential need for higher-order models and their benefits over the classical asymptotic ones. Despite having a more complicated structure than classical models, higher-order models may still be considered simpler than the original full Euler system~\eqref{euler}. In fact, as opposed to the full Euler system, these higher-order models enjoy a reduced structure in terms of the number of equations, the number of unknowns and the space dimension, which makes them more suitable for theoretical and numerical study. Moreover, higher-order approximations may enjoy well-posedness results similar to the classical ones on the relevant time scales, obtained with standard mathematical tools. Based on section \ref{justification} and the previous works \cite{KZI2018,KZI2021}, this can be concluded at least in the one-dimensional case. However, the advantage is obvious in terms of the precision of the approximation error with respect to the Euler equations (see in particular Theorem \ref{convergence} of section \ref{justification}).\\ On the other hand, while the solitary wave profile cannot be derived explicitly for higher-order approximations, its numerical solution fits the corresponding solution of the original Euler system much better than the classical models do (as shown in figure \ref{SWcomp}). Computing the numerical solution only requires a simple discretization of a third-order nonlinear ODE using the Matlab \texttt{ode45} solver. Furthermore, it is noteworthy that by removing the $\varepsilon^2$ terms from the extended Boussinesq ODE, the Green-Naghdi ODE is recovered.
\subsection{Notation.} We denote by $C(\lambda_1, \lambda_2,...)$ a constant depending on the parameters $\lambda_1$, $\lambda_2$, ... and \emph{whose dependence on the $\lambda_j$ is always assumed to be nondecreasing}. The notation $a\lesssim b$ means that $a\leq Cb$, for some non-negative constant $C$ whose exact expression is of no importance (\emph{in particular, it is independent of the small parameters involved}).
We denote the $L^2$ norm $\vert\cdot\vert_{L^2}$ simply by $\vert\cdot\vert_2$. The inner product of any functions $f_1$ and $f_2$ in the Hilbert space $L^2(\mathbb R^d)$ is denoted by $ (f_1,f_2)=\int_{\mathbb R^d}f_1(X)f_2(X) dX. $ The space $L^\infty=L^\infty(\mathbb R^d)$ consists of all essentially bounded, Lebesgue-measurable functions $f$ with the norm $ \vert f\vert_{L^\infty}= \hbox{ess}\sup \vert f(X)\vert<\infty $. We denote by $W^{1,\infty}(\mathbb R)=\big\lbrace f\in L^\infty, f_x\in L^{\infty}\big\rbrace$ endowed with its canonical norm.
For any real constant $s$, $H^s=H^s(\mathbb R^d)$ denotes the Sobolev space of all tempered distributions $f$ with the norm $\vert f\vert_{H^s}=\vert \Lambda^s f\vert_2 < \infty$, where $\Lambda^s$ is the pseudo-differential operator $\Lambda^s=(1-\partial_x^2)^{s/2}$.
For any functions $u=u(t,X)$ and $v(t,X)$ defined on $[0,T)\times\mathbb R^d$ with $T>0$, we denote the inner product, the $L^p$-norm and especially the $L^2$-norm, as well as the Sobolev norm, with respect to the spatial variable, by $(u,v)=(u(\cdot,t),v(\cdot,t))$, $\vert u \vert_{L^p}=\vert u(\cdot,t)\vert_{L^p}$, $\vert u \vert_{L^2}=\vert u(\cdot,t)\vert_{L^2}$, and $ \vert u \vert_{H^s}=\vert u(\cdot,t)\vert_{H^s}$, respectively.
Let $C^k(\mathbb R^d)$ denote the space of $k$-times continuously differentiable functions. For any closed operator $T$ defined on a Banach space $Y$ of functions, the commutator $[T,f]$ is defined by $[T,f]g=T(fg)-fT(g)$ with $f$, $g$ and $fg$ belonging to the domain of $T$.
\section{The higher-order/extended Boussinesq equations}\label{model}
When the surface elevation is of small amplitude, that is, when an additional assumption is made on the nonlinearity parameter, the extended Green-Naghdi equations \cite{Matsuno2015, Matsuno2016, KZI2018, KZI2021} can be greatly simplified. Based on this, the extended Boussinesq system with $\varepsilon\sim\mu$ reads, for one-dimensional small-amplitude surfaces: \begin{equation}\label{original-bous} \left\{ \begin{array}{lcl} \displaystyle\partial_t\zeta+\partial_x(hv)=0
\; ,\\ \displaystyle ( h +\varepsilon\mathcal{T}[h]+\varepsilon^2\mathfrak{T} )\partial_t v + h\partial_x\zeta+\varepsilon h v \partial_x v +\varepsilon^2 \mathcal{Q}v =\mathcal{O} (\varepsilon^3) \; , \end{array} \right. \end{equation} where the right-hand side is of order $\varepsilon^3$ and the dependence on $\varepsilon^2$ appears in the left-hand side. Here $h=1+\varepsilon\zeta$ and we denote by \begin{equation*} \mathcal{T}[h]w =-\frac{1}{3}\partial_x\big( h^3 \partial_xw\big) \; , \qquad\mathfrak{T} w = -\frac{1}{45}\partial_x^4w \; , \qquad\mathcal{Q}v = -\frac{1}{3}\partial_x\big(vv_{xx}-v_x^2\big) \; . \end{equation*} \begin{remark} Some of the terms in the left-most factor of the second equation are of size $\mathcal{O}(\varepsilon^3)$. They are kept in order to preserve the good properties of the operator $\Im= h+\varepsilon\mathcal{T}[h]-\varepsilon^2\mathfrak{T}$; dropping them would disrupt these properties (see section \ref{invert-op}). \end{remark} \subsection{The modified system.} First of all, let us factorize all higher order derivatives (third and fifth) in the left-most term of the above system \eqref{original-bous}. In fact, we only have to factorize the third-order derivatives, which is possible by introducing $\pm\varepsilon^2\mathcal{T}[h](vv_x)$ in the second equation. An inconvenient feature then appears in this left-most term: the positive sign in front of the elliptic fourth-order linear operator $\mathfrak{T}$ obstructs the path towards well-posedness by the energy estimate method. This obviously affects the invertibility of the factorized operator, as we will see in section \ref{invert-op}. For this reason we proceed as in \cite{KZI2018,KZI2021} by using a BBM trick, represented by the approximate equation $\partial_tv +\varepsilon vv_x= -\zeta_x+O(\varepsilon)$, to overcome this difficulty.
At this stage, it is noteworthy that one may directly conclude from \cite{KZI2018,KZI2021} the well-posedness of such a system when the effect of surface tension is taken into consideration, with an existence time scale of order $1/\varepsilon$. The presence of surface tension was essential there for controlling the higher order derivatives produced by the BBM trick (see the remarks in \cite{KZI2021}). In our case, the surface tension is neglected and we thus have to proceed differently. The idea is to replace the capillary terms by a vanishing term $\pm\varepsilon^2\zeta_{xxx}$, which will play a similar role. The term with the negative sign is used for a convenient definition of the energy space (see Definition \ref{defispace}) in such a way that the other term can be controlled. As a consequence, the existence time becomes smaller than in the case with surface tension, \textit{i.e.} the time scale reached is of order $1/\sqrt{\varepsilon}$. In view of the above notes (we refer to remarks \ref{rem1} and \ref{rem2} for more details), the modified system reads: \begin{equation}\label{boussinesq} \left\{ \begin{array}{lcl} \displaystyle\partial_t\zeta+\partial_x(hv)=0
\; ,\\ \displaystyle ( h+\varepsilon\mathcal{T}[h]-\varepsilon^2\mathfrak{T} ) \big(\partial_t v+\varepsilon vv_x\big) + h\partial_x\zeta- \varepsilon^{2} \zeta_{xxx}+\frac{2}{45}\varepsilon^2 \zeta_{xxxxx} + \varepsilon^{2} \zeta_{xxx}+\varepsilon^2 \mathcal{Q}[U]v_x =\mathcal{O} (\varepsilon^3) \; , \end{array} \right. \end{equation} where $U=(\zeta,v)$, $h(t,x)=1+\varepsilon\zeta(t,x)$ and denote by \begin{equation}\label{exp1} \mathcal{T}[ h ]w =-\frac{1}{3}\partial_x( h^3 \partial_xw), \qquad\qquad \mathfrak{T} w = -\frac{1}{45}\partial_x^4w , \qquad\qquad \mathcal{Q}[U]f = \frac{2}{3}\partial_x\big(v_xf\big) \; . \end{equation}
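For the reader's convenience, let us sketch the formal computation behind the sign change in front of $\mathfrak{T}$ and the appearance of the fifth-order term $\frac{2}{45}\varepsilon^2\zeta_{xxxxx}$ in \eqref{boussinesq}; this is only an illustration of the BBM trick, using $\partial_t v=-\zeta_x+\mathcal{O}(\varepsilon)$ and the definition of $\mathfrak{T}$ in \eqref{exp1}:
\begin{equation*}
\varepsilon^2\mathfrak{T}\,\partial_t v
= -\varepsilon^2\mathfrak{T}\big(\partial_t v+\varepsilon vv_x\big) + 2\varepsilon^2\mathfrak{T}\,\partial_t v + \mathcal{O}(\varepsilon^3)
= -\varepsilon^2\mathfrak{T}\big(\partial_t v+\varepsilon vv_x\big) - 2\varepsilon^2\mathfrak{T}\,\zeta_x + \mathcal{O}(\varepsilon^3)
= -\varepsilon^2\mathfrak{T}\big(\partial_t v+\varepsilon vv_x\big) + \frac{2}{45}\varepsilon^2\zeta_{xxxxx} + \mathcal{O}(\varepsilon^3) \; .
\end{equation*}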
\begin{remark} An equivalent formulation of system~\eqref{boussinesq} has recently been studied numerically in~\cite{LG2021}. This formulation is obtained by dividing the second equation of system~\eqref{boussinesq} by the water height function $h$ and removing the time dependency from the left-most factorized operator, while keeping the same precision of the model. During the numerical computations this operator has to be inverted at each time step in order to solve system~\eqref{boussinesq}; removing its time dependency thus reduces the computational time.
\end{remark} We state here that the solution of~\eqref{Zakharovv} is also a solution to the extended Boussinesq system \eqref{boussinesq} up to terms of order $\mathcal{O}(\varepsilon^3)$. \begin{proposition}[Consistency]\label{consistency} Suppose that the full Euler system \eqref{Zakharovv} has a family of solutions $U^{euler}=(\zeta,\psi)^T$ such that there exist $T > 0$, $s>3/2$ for which $(\zeta,\psi' )^T$ is bounded in $L^{\infty}([0, T);H^{s+N})^2$ with $N$ sufficiently large, uniformly with respect to $\varepsilon\in(0,1)$. Define $v$ as in \eqref{defvelocity}. Then $(\zeta,v)^T$ satisfies \eqref{boussinesq} up to a remainder $R$, bounded by \begin{equation}\label{R} \Vert R\Vert_{L^{\infty}([0,T[;H^s)}\le \varepsilon^3 C \; , \end{equation} where $C=C(h_{min}^{-1}, \Vert \zeta \Vert_{L^{\infty}([0,T[;H^{s+N})}, \Vert \psi' \Vert_{L^{\infty}([0,T[;H^{s+N})})$. \end{proposition} \begin{proof} The first equation of \eqref{boussinesq} exactly coincides with that of \eqref{Zakharovv}. It remains to check that the second equation is satisfied up to a remainder $R$ such that \eqref{R} holds. For this sake, we need an asymptotic expansion of $\psi'$ in terms of $v$, which can be deduced from the work done in \cite{KZI2018} as follows: \begin{equation}\label{psi'} \psi'= v -\frac{1}{3}\varepsilon\partial_x\big((1+3\varepsilon\zeta)v_x\big) + \varepsilon^2\frac{1}{3}\zeta\partial_x^2v + \varepsilon^2\mathfrak{T} v + \varepsilon^3R_{3}^{\varepsilon} \; . \end{equation} We then proceed using the same arguments as the ones used in Lemmas 5.4 and 5.11 in \cite{Lannes2013} to give some control on $R_3^{\varepsilon}$ as follows: \begin{equation}\label{control-of-R3} \vert R_3^{\varepsilon}\vert_{H^s}\le C(h_{min}^{-1}, \vert\zeta\vert_{H^{s+6}}) \vert \psi'\vert_{H^{s+6}} \qquad\text{ and }\qquad \vert \partial_t R_3^{\varepsilon}\vert_{H^s}\le C(h_{min}^{-1}, \vert\zeta\vert_{H^{s+8}}, \vert \psi'\vert_{H^{s+8}} ) \; . \end{equation} Then we take the derivative of the second equation of \eqref{Zakharovv} and substitute $\mathcal{G}[\varepsilon\zeta]\psi$ and $\psi'$ by $ - \varepsilon \partial_x (hv) $ and \eqref{psi'} respectively. Therefore, taking advantage of the estimates \eqref{control-of-R3} provides the control of all terms of order $\varepsilon^3$ as in \eqref{R} with $N$ large enough (mainly greater than $8$). \end{proof}
\section{Full justification of the extended Boussinesq system $(\mu^3<\mu^2<\mu\ll1, \varepsilon\sim\mu)$}\label{justification}
The two main issues regarding the validity of an asymptotic model are the following: \begin{itemize} \item Are the Cauchy problems for both the full Euler system and the asymptotic model well-posed for a given class of initial data, and over the relevant time scale? \item Can the solutions of the asymptotic model be compared to the solutions of the full Euler system when the corresponding initial data are close? If yes, can we estimate how close they are? \end{itemize} When an asymptotic model answers these two questions positively, it is said to be fully justified. In the sequel, after the linear analysis of our model, we refer to section \ref{mainresults} for the answers to these questions. Existence and uniqueness of our solution on a time scale of order $1/\sqrt{\varepsilon}$ is given by Theorem \ref{localexistence}, while a stability property is provided by Theorem \ref{stability}. Finally, the convergence Theorem \ref{convergence} is stated, and thus the full justification of our model is obtained.
Let us first state some preliminary results in the subsection below.
\subsection{Properties of the two operators $\Im$ and $\Im^{-1}$.}\label{invert-op} Assume the nonzero-depth condition, which expresses the fact that the height of the liquid never vanishes, \textit{i.e.}: \begin{equation}\label{depthcond} \exists\quad h_{min} >0, \qquad \inf_{x\in \mathbb R} h\ge h_{min} \;\quad \text{ where } \; \quad h(t,x)=1+\varepsilon\zeta(t,x) \; . \end{equation} Under the above condition, let us introduce the operator $\Im$, on which much of the modification performed in the previous section hinges: \begin{equation}\label{op-I} \Im = h+\varepsilon\mathcal{T}[h]-\varepsilon^2\mathfrak{T} =h-\frac{1}{3}\varepsilon\partial_x( h^3 \partial_x\cdot) +\frac{1}{45}\varepsilon^2\partial_x^4\cdot \; . \end{equation} The following lemma states the invertibility of the operator $\Im$ on well chosen functional spaces. \begin{lemma}\label{lema1} Suppose that the depth condition (\ref{depthcond}) is satisfied by the scalar function $\zeta(t,\cdot)\in L^{\infty}(\mathbb R)$. Then, the operator $$ \Im\colon H^4(\mathbb R)\longrightarrow L^2(\mathbb R) $$ is well defined, one-to-one and onto. \end{lemma} \begin{proof} We refer to the recent works of two of the authors, \cite[Lemma 1]{KZI2018} and \cite[Lemma 1]{KZI2021}, for the proof of this lemma. \end{proof} Some functional properties of the operator $\Im^{-1}$ are given by the lemma below. \begin{lemma}\label{lemma2} Let $t_0>\frac{1}{2}$ and $\zeta\in H^{t_0+1}(\mathbb R)$ be such that (\ref{depthcond}) is satisfied. Then, we have the following$\colon$ \begin{enumerate} \item[(i)] For all $0\leq s\leq t_0+1$, it holds $$ \vert \Im^{-1}f\vert_{H^s}+\sqrt{\varepsilon}\vert\partial_x \Im^{-1}f\vert_{H^s}+\varepsilon\vert\partial_x^2 \Im^{-1}f\vert_{H^s}\leq C\big(\frac{1}{h_{min}},\vert h-1\vert_{H^{t_0+1}}\big)\vert f\vert_{H^{s}} \; . $$ and $$ \sqrt{\varepsilon}\vert \Im^{-1}\partial_xf\vert_{H^s} + \varepsilon\vert \Im^{-1}\partial_x^2f\vert_{H^s} \leq C\big(\frac{1}{h_{min}},\vert h-1\vert_{H^{t_0+1}}\big)\vert f\vert_{H^{s}} \; . $$ \item[(ii)] For all $s\geq t_0+1$, it holds $$ \Vert\Im^{-1}\Vert_{H^s(\mathbb R)\rightarrow H^s(\mathbb R)}+\sqrt{\varepsilon}\Vert\Im^{-1}\partial_x\Vert_{H^s(\mathbb R)\rightarrow H^s(\mathbb R)}+\varepsilon\Vert\Im^{-1}\partial_x^2\Vert_{H^s(\mathbb R)\rightarrow H^s(\mathbb R)}\leq C_s \; , $$ and $$ \sqrt{\varepsilon}\Vert \Im^{-1} \partial_x \Vert_{H^s(\mathbb R)\rightarrow H^s(\mathbb R)}+ \varepsilon \Vert \Im^{-1}\partial_x^2\Vert_{H^s(\mathbb R)\rightarrow H^s(\mathbb R)}\leq C_s \; , $$ \end{enumerate} where $C_s$ is a constant depending on $1/h_{min}$, $\vert h-1\vert_{H^s}$ and independent of $\varepsilon\in(0,1)$. \end{lemma} \begin{proof} We refer to the recent works of two of the authors, \cite[Lemma 2]{KZI2018} and \cite[Lemma 2]{KZI2021}, for the proof of this lemma. \end{proof}
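Although we do not reproduce the proofs here, it is instructive to record the formal coercivity computation that underlies these invertibility properties (a sketch only; the rigorous statements are those of Lemmas \ref{lema1} and \ref{lemma2}): for $v$ smooth and decaying at infinity, integration by parts gives
\begin{equation*}
\big(\Im v, v\big)
= \big(h v, v\big) + \frac{\varepsilon}{3}\big(h^3 v_x, v_x\big) + \frac{\varepsilon^2}{45}\big(v_{xx}, v_{xx}\big)
\;\ge\; h_{min}\vert v\vert_2^2 + \frac{\varepsilon}{3}\, h_{min}^3\,\vert v_x\vert_2^2 + \frac{\varepsilon^2}{45}\vert v_{xx}\vert_2^2 \; ,
\end{equation*}
so that $\Im$ controls exactly the $\varepsilon$-weighted derivatives of $v$ appearing in the energy space introduced in Definition \ref{defispace} below; with the opposite sign in front of $\mathfrak{T}$ this lower bound would be lost.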
\subsection{Quasilinear form.} \label{quasilinear}
In order to rewrite the extended Boussinesq system in a condensed form and for the sake of clarity, let us introduce the elliptic fourth-order operator $T[h]$ defined as follows: \begin{equation}\label{J}
T[h] (\cdot) = h-\varepsilon^2\partial_x^2 (\cdot ) + \frac{2}{45}\varepsilon^2\partial_x^4 (\cdot) \; . \end{equation} The first equation of the system \eqref{boussinesq} can be written as follows: $$ \partial_t\zeta +\varepsilon v\partial_x\zeta+h\partial_xv = 0 . $$ Then we apply $\Im^{-1}$ to both sides of the second equation of the system \eqref{boussinesq}, to get: \begin{equation*} \partial_tv+\varepsilon vv_x+\Im^{-1}\big(T[h] \zeta_x\big) + \varepsilon^2\Im^{-1}\big(\partial_x^2\zeta_{x}\big) +\varepsilon^2\Im^{-1}\big(\mathcal{Q}[U]v_x\big) = \mathcal{O} (\varepsilon^3) \; . \end{equation*} Hence the higher order Boussinesq system can be written in the form: \begin{equation}\label{nonlinear} \partial_tU+A[U]\partial_xU = 0 \; , \end{equation} where the operator $A$ is defined by: \begin{equation}\label{AU} A[U]=\left( \begin{array}{cc} \varepsilon v &h\\ \Im^{-1}\big(T[h] \cdot\big) + \varepsilon^2 \Im^{-1}\big(\partial_x^2\cdot\big)& \varepsilon v+\varepsilon^2\Im^{-1}\big(\mathcal{Q}[U]\cdot\big) \end{array} \right) \; . \end{equation}
\subsection{Linear analysis.}\label{linear-analysis} We consider the following linearized system around a reference state $\underline{U}=(\underline{\zeta},\underline{v})^T$: \begin{equation}\label{LGN}
\left\lbrace
\begin{array}{l}
\displaystyle\partial_t U+A[\underline{U}]\partial_x U=0
,
\\
\displaystyle U_{\vert_{t=0}}=U_0.
\end{array}\right. \end{equation} The energy estimate method needs to define a suitable energy space for the problem we are considering here. This will permit the convergence of an iterative scheme to construct a solution to the extended Boussinesq system \eqref{boussinesq} for the initial value problem (\ref{LGN}).
\begin{definition}[Energy space]\label{defispace}
For all $s\ge 0$ and $T>0$, we denote by $X^s$ the vector space $H^{s+2}(\mathbb R)\times H^{s+2}(\mathbb R)$ endowed with the norm: \begin{eqnarray*} \textrm{ for } U=(\zeta,v) \in X^s \,,\, \vert U\vert^2_{X^s}&:=& \vert \zeta\vert^2 _{H^s}+ \varepsilon^2\vert\zeta_x\vert_{H^s}^2+\varepsilon^2\vert \zeta_{xx}\vert^2 _{H^s}+\vert v\vert^2 _{H^s}+\varepsilon\vert v_{x}\vert_{H^{s}}^2+\varepsilon^2\vert v_{xx}\vert_{H^{s}}^2 \; . \end{eqnarray*} $X^s_T$ stands for $C([0,\frac{T}{\sqrt{\varepsilon}}];X^{s})$ endowed with its canonical norm. \end{definition} \begin{remark}\label{rem2} It is worth noticing that in the presence of surface tension the second term of the energy norm, $\vert \zeta_x\vert _{H^s}^2$, comes with a factor $\varepsilon$ in front of it, which is sufficient to give an existence time scale of order $1/\varepsilon$. In fact, the second term here in $\vert \cdot\vert _{X^s}$ is due to the consideration of the vanishing term, which is important for Definition \ref{defispace} itself and for controlling higher order terms (see Proposition \ref{prop1}). \end{remark} Now we remark that a good choice of a pseudo-symmetrizer for $A[\underline{U}]$ first requires the introduction of a fourth-order linear operator $J[h]$ as follows: $$
J[h](\cdot) =1- \varepsilon ^2 \partial_x\big(h^{-1}\partial_x\cdot\big)+\frac{2}{45}\varepsilon^2\partial_x^2\big(h^{-1}\partial_x^2\cdot\big) \; , $$ where $\underline{h}=1+\varepsilon\underline{\zeta}$. Thus a pseudo-symmetrizer for $A[\underline{U}]$ is given by: \begin{equation}\label{pseudo-symmetrizer} S=\left( \begin{array}{cc} J[ \underline{h}] & 0 \\\\ 0& \underline{\Im} \end{array} \right)=\left( \begin{array}{cc} 1- \varepsilon ^2 \partial_x\big(\underline{h}^{-1}\partial_x\cdot\big)+\frac{2}{45}\varepsilon^2\partial_x^2\big(\underline{h}^{-1}\partial_x^2\cdot\big) & 0 \\\\ 0& \underline{h}+\varepsilon \mathcal{T}[\underline{h}]-\varepsilon^2\mathfrak{T} \end{array} \right) \; . \end{equation} \begin{remark}\label{rem1} Introducing the operator $J[h]$ is of great interest for defining a suitable pseudo-symmetrizer for \eqref{AU}. Since the higher order derivative in $T[h]$ is not multiplied by $h$ (if this were the case, the vanishing term considered might be $\pm \varepsilon^2h\zeta_{xxx}$), the operator $J[h]$ must replace $T[h]$ in the first entry of \eqref{pseudo-symmetrizer}. This is clearly necessary for controlling $A_2+A_3$ (see Proposition \ref{prop1}). \end{remark} Also, a natural energy for the initial value problem (\ref{LGN}) is suggested as follows: \begin{equation}\label{es}
E^s(U)^2=(\Lambda^sU,S\Lambda^sU) \; . \end{equation} \begin{lemma}[Equivalence of $ E^s(U)$ and the $X^s$-norm]\label{lemmaes} Let $s\geq 0$ and suppose that $ \underline{\zeta}\in L^{\infty}(\mathbb R)$ satisfies condition \eqref{depthcond}. Then the norm $\vert \cdot\vert_{X^s}$ and the natural energy $E^s(U)$ are uniformly equivalent with respect to $\varepsilon \in (0,1)$, in the sense that: $$ E^s(U) \leq C\big(h_{min}, \vert\underline{h}\vert_{\infty}\big)\vert U\vert_{X^s} \quad \text{ and } \quad \vert U \vert_{X^s}\leq C\big(h_{min},\vert \underline{h}\vert_{\infty}\big) E^s(U). $$ \end{lemma} \begin{proof} We refer to the recent work of two of the authors \cite[Lemma 3]{KZI2018} for the proof of this important property. \end{proof} The well-posedness and a first energy estimate for the linear system are given in the following proposition. \begin{proposition}[Well-posedness \& energy estimate of the linear system]\label{prop1} For $t_0>\frac{1}{2}$, $s\geq t_0+1$ and under the depth condition (\ref{depthcond}), suppose that $\underline{U}=(\underline{\zeta}, \underline{v})^T$ $\in X^{s}_{T}$
and $\partial_t \underline{U} \in X^{s-1}_{T}$
at any time in $[0,\frac{T}{\sqrt{\varepsilon}}]$. Then, there exists a unique solution $U=(\zeta, v)^T$ $\in X^{s}_{T} $ to (\ref{LGN}) for any initial data $U_0$ in $X^s$ and for all $0\leq t\leq\frac{T}{\sqrt{\varepsilon}}$ it holds that: \begin{equation}\label{energy} \displaystyle E^s\big(U(t)\big)\displaystyle\leq \big(e^{\sqrt{\varepsilon}\lambda_{T} t}\big)^{1/2}E^s(U_0) \; , \end{equation} for some $\lambda_{T}$ depending only on $ h_{min}^{-1}, \sup_{0\leq \sqrt{\varepsilon} t\leq T}E^s(\underline{U}(t))$ and $\sup_{0\leq \sqrt{\varepsilon} t \leq T}\vert\partial_t\underline{h}(t) \vert_{L^{\infty}}$ . \end{proposition} \begin{proof} For the proof of the existence and uniqueness of the solution, we refer to the proof found in \cite[Appendix A]{Israwi2011} which can be directly adapted to the problem we are considering here.
Thereafter, we will focus our attention on the proof of the energy estimate \eqref{energy}. First of all, fix $\lambda\in\mathbb R$. The proof of the energy estimate is centered on bounding from above by zero the expression $ e^{\sqrt{\varepsilon}\lambda t}\partial_t(e^{-\sqrt{\varepsilon}\lambda t}E^s(U)^2). $ For this sake, we use the fact that $\underline{\Im}$ and $J[\underline{h}]$ are symmetric to evaluate the expression under the form: \begin{align*} \frac{1}{2}e^{\sqrt{\varepsilon}\lambda t}\partial_t(e^{-\sqrt{\varepsilon}\lambda t}E^s(U)^2)&=-\frac{\lambda}{2} \sqrt{\varepsilon} E^s(U)^2 -\big(SA[\underline{U}]\Lambda^s\partial_x U,\Lambda^s U\big)- \big(\big[\Lambda^s,A[\underline{U}]\big] \partial_xU,S\Lambda^s U\big)\\ &\quad+\frac{1}{2}\big(\Lambda^s\zeta,[\partial_t,J[\underline{h}]]\Lambda^s\zeta\big)+\frac{1}{2}(\Lambda^sv,[\partial_t,\underline{\Im}]\Lambda^sv) \; . \end{align*} Now it remains to control the r.h.s components of the above equation. To do so, we firstly recall the commutator estimate we shall use due to Kato-Ponce \cite{KP88} and recently improved by Lannes \cite{Lannes2006}: in particular, for any $s>3/2$, and $ q \in H^s(\mathbb R),p\in H^{s-1}(\mathbb R)$, one has: \begin{equation}\label{cest} \big\vert [\Lambda^s, q]p\vert_{2} \lesssim \vert \nabla q\vert_{H^{s-1}}\vert p\vert_{H^{s-1}} \; . \end{equation} Also we shall use intensively the classical product estimate (see \cite{AG91,Lannes2006,KP88}): in particular, for any $p,q\in H^s(\mathbb R^2)$, $s>3/2$, one has: \begin{equation}\label{mest} \vert pq\vert_{H^s}\lesssim \vert q\vert_{H^s}\vert p\vert_{H^s} \; . \end{equation} $\bullet$ Estimation of $(SA[\underline{U}]\Lambda^s\partial_x U,\Lambda^s U).$ We have: \begin{equation*} SA[\underline{U}]=\left( \begin{array}{cc}
\varepsilon J[\underline{h}](\underline{v}\cdot)& J[\underline{h}](\underline{h}\cdot)
\\ T[\underline{h}] \cdot +\varepsilon^2 \partial_x^2 \cdot \hspace{ 1mm} & \varepsilon\underline{\Im}(\underline{v}\cdot)+\varepsilon^2\mathcal{Q}[\underline{U}]\cdot \end{array} \right), \end{equation*} then it holds that: \begin{align*} \big(SA[\underline{U}]\Lambda^s\partial_x U,\Lambda^s U\big) &=\varepsilon\big(J[\underline{h}](\underline{v}\Lambda^s\zeta_x) , \Lambda^s\zeta\big)+\big(J[\underline{h}](\underline{h}\Lambda^sv_x),\Lambda^s\zeta\big)+\big(T[\underline{h}]\Lambda^s\zeta_x , \Lambda^sv\big) \\ & +\varepsilon^{2}\big(\Lambda^s\zeta_{xxx},\Lambda^sv\big)+\varepsilon\big(\underline{\Im}(\underline{v}\Lambda^sv_x),\Lambda^sv\big) +\varepsilon^2\big(\mathcal{Q}[\underline{U}]\Lambda^sv_x,\Lambda^sv\big) =A_1+A_2+...+A_{6} \; . \end{align*} To control $A_1$, by integration by parts, we have: \begin{align*} A_1&=\varepsilon\big(\underline{v}\Lambda^s\zeta_x,\Lambda^s\zeta\big)+\varepsilon^3 \big( \underline{h}^{-1} \partial_x(\underline{v}\Lambda^s\zeta_x),\Lambda^s\zeta_x\big)+\frac{2}{45}\varepsilon^3\big( \underline{h}^{-1} \partial_x^2(\underline{v}\Lambda^s\zeta_x),\Lambda^s\zeta_{xx}\big)=A_{11}+A_{12}+A_{13} \; . \end{align*} Clearly, it holds that: \begin{align*} \vert A_{11}\vert&= \frac{1}{2}\varepsilon\vert\big(\Lambda^s\zeta,\underline{v}_x\Lambda^s\zeta\big)\vert\leq \varepsilon C\big(\vert\underline{v}\vert_{W^{1,\infty}}\big)E^s(U)^2. \end{align*} By integrating by parts, it holds that: \begin{align*} \vert A_{12}\vert= \varepsilon^3 \big(\underline{h}^{-1} \underline{v}_x\Lambda^s\zeta_{x},\Lambda^s\zeta_x\big) + \varepsilon^3 \big(\underline{h}^{-1} \underline{v}\Lambda^s\zeta_{xx},\Lambda^s\zeta_x\big) \leq \varepsilon C\big(h_{min}^{-1} , \vert\underline{v}_x\vert_{\infty}\big)E^s(U)^2. \end{align*} Now using the fact that: \begin{equation}\label{deriv} \partial_x^2(MN)=N\partial_x^2M+2M_xN_x+M\partial_x^2N \; , \end{equation} for any differentiable functions $M$, $N$ and by integration by parts, we have: \begin{align*}
A_{13} &=\frac{2}{45}\varepsilon^3\big[\big( \underline{h}^{-1} \underline{v}_{xx}\Lambda^s\zeta_x,\Lambda^s\zeta_{xx}\big)+2\big(\underline{h}^{-1}\underline{v}_x\Lambda^s\zeta_{xx},\Lambda^s\zeta_{xx}\big)
+\frac{1}{2}\big(\underline{h}^{-2}\underline{h}_x\underline{v}\Lambda^s\zeta_{xx},\Lambda^s\zeta_{xx}\big)-\frac{1}{2}\big(\underline{h}^{-1}\underline{v}_x\Lambda^s\zeta_{xx},\Lambda^s\zeta_{xx}\big)\big]\\
&=A_{131}+...+A_{134} \; .
\end{align*}
Although $A_{131}$ can be controlled directly with $\sqrt{\varepsilon}$ in front of the constant, one may improve this to $\varepsilon$ instead. Indeed by integration by parts one has: $$ A_{131}= \frac{2}{45}\varepsilon^3\big(\underline{h}^{-2}\underline{h}_x\underline{v}_{xx}\Lambda^s\zeta_x,\Lambda^s\zeta_{x}\big) -\frac{2}{45}\varepsilon^3\big(\underline{h}^{-1}\underline{v}_{xxx}\Lambda^s\zeta_x,\Lambda^s\zeta_{x}\big)=A_{1311}+A_{1312}. $$ Remark that $\underline{h}_x=\varepsilon\underline{\zeta}_x$, so that $A_{1311}$ possesses sufficiently many $\varepsilon$'s, unlike $A_{1312}$ on which we have to work a little more. Indeed, in view of \eqref{depthcond} we have $\underline{h}^{-1}>0$, so it holds: \begin{align*} A_{1312}=-\frac{2}{45}\varepsilon^3\big(\underline{h}^{-1}\underline{v}_{xxx},(\Lambda^s\zeta_x)^2\big)\leq \frac{2}{45}\varepsilon^3\vert \underline{v}_{xxx}\vert_{\infty}\big(\underline{h}^{-1},(\Lambda^s\zeta_x)^2\big). \end{align*} Again by integration by parts, we get: $\big(\underline{h}^{-1},(\Lambda^s\zeta_x)^2\big)= (\underline{h}^{-2}\underline{h}_x\Lambda^s\zeta,\Lambda^s\zeta_x)-(\underline{h}^{-1}\Lambda^s\zeta,\Lambda^s\zeta_{xx})$. Therefore one may control $A_{1312}$ by $\varepsilon C(h_{min}^{-2}, \vert\zeta\vert_{W^{1,\infty}},\mu\vert \underline{v}_{xxx}\vert_{\infty})E^s(U)^2$. Consequently, it holds: $$ A_{1311}+A_{132}+...+A_{134}\leq \varepsilon C\big(h_{min}^{-2}, \vert\zeta\vert_{W^{1,\infty}},\vert\underline{v}\vert_{W^{1,\infty}},\sqrt{\varepsilon}\vert\underline{v}_{xx}\vert_{\infty}\big) E^s(U)^2. $$ Collecting the information provided above we get: $$ \vert A_1\vert\leq \varepsilon C\big(h_{min}^{-2}, \vert\underline{\zeta}\vert_{W^{1,\infty}},\vert\underline{v}\vert_{W^{1,\infty}},\sqrt{\varepsilon}\vert\underline{v}_{xx}\vert_{\infty}\big) E^s(U)^2 \; . $$ To control $A_2+ A_3$, by first remarking that $J[\underline{h}]$ and $T[\underline{h}]$ are symmetric, and then by integration by parts after performing some algebraic calculations and using (\ref{deriv}), we have: \begin{equation*}
A_2+A_3= -\big(\Lambda^sv,\underline{h}_x\Lambda^s\zeta\big)
+\varepsilon^2\big(\underline{h}^{-1} \underline{h}_{x} \Lambda^s\zeta_{x},\Lambda^sv_{x}\big) +\frac{4}{45}\varepsilon^2\big(\underline{h}^{-1}\underline{h}_{x}\Lambda^sv_{xx},\Lambda^s\zeta_{xx}\big) -\frac{2}{45}\varepsilon^2\big( \underline{h}^{-1}\underline{h}_{xx} \Lambda^s\zeta_{xx},\Lambda^sv_{x}\big) \; . \end{equation*} Unfortunately, an inconvenient term appears in $A_2+A_3$: it is the term $\varepsilon^2\big(\underline{h}^{-1}\underline{h}_{xx}\Lambda^s\zeta_{xx},\Lambda^sv_x\big)$. This term won't be controlled without gaining $\sqrt{\varepsilon}$ taken from $\underline{h}_{xx}=\varepsilon\underline{\zeta}_{xx}$ and the other $\sqrt{\varepsilon}$ sits in front of the constant. Due to this fact, it follows that:
$$ \vert A_2+A_3\vert\le \sqrt{\varepsilon} C\big(h_{min}^{-1},\vert\underline{\zeta}\vert_{W^{1,\infty}},\vert\underline{v}\vert_{W^{1,\infty}},\varepsilon\vert\underline{\zeta}_{xx}\vert_{H^s}\big) E^s(U)^2. $$ To control $A_4$, by integration by parts, it holds: \begin{align*} A_4 =- \varepsilon^2 (\Lambda^s\zeta_{xx},\Lambda^sv_x) \leq \sqrt{\varepsilon} E^s(U)^2 \; . \end{align*} To control $A_5$, by integration by parts, we have: \begin{align*} A_5&=\varepsilon\big(\underline{h}\underline{v}\Lambda^sv_x,\Lambda^sv\big)+\frac{\varepsilon^2}{3}\big( \underline{h} ^3 \partial_x(\underline{v}\Lambda^sv_x),\Lambda^sv_x\big)+\frac{\varepsilon^3}{45}\big(\partial_x^2(\underline{v}\Lambda^sv_x),\Lambda^sv_{xx}\big)=A_{51}+A_{52}+A_{53} \end{align*} where $$ \big\vert A_{51}\big\vert=\big\vert-\frac{\varepsilon}{2}\big(\underline{h}_x\underline{v}\Lambda^sv,\Lambda^sv\big)-\frac{\varepsilon}{2}\big(\underline{h}\underline{v}_x\Lambda^sv,\Lambda^sv\big)\big\vert\leq \varepsilon C\big(\vert\underline{\zeta}_x\vert_{\infty},\vert\underline{v}_x\vert_{\infty}\big)E^s(U)^2 $$ with $$ \big\vert A_{52}\big\vert=\big\vert-\frac{\varepsilon^2}{2}\big(\underline{h}_x^3\underline{v}\Lambda^sv_x,\Lambda^sv_x\big)-\frac{\varepsilon^2}{6}\big(\underline{h}^3\underline{v}\Lambda^sv_x,\Lambda^sv_x\big)\big\vert\leq\varepsilon C\big(\vert\underline{\zeta}\vert_{W^{1,\infty}}\big)E^s(U)^2 $$ and \begin{align*} \big\vert A_{53}\big\vert&=\frac{\varepsilon^2}{45}\big\vert\big(\underline{v}_{xx}\Lambda^sv_x,\Lambda^sv_{xx}\big)+2\big(\underline{v}_x\Lambda^sv_{xx},\Lambda^sv_{xx}\big)-\frac{1}{2}\big(\underline{v}_x\Lambda^sv_{xx},\Lambda^sv_{xx}\big) \big\vert \leq \varepsilon C\big(\vert\underline{\zeta}\vert_{W^{1,\infty}},\sqrt{\varepsilon}\vert\underline{v}_{xx}\vert_{\infty}\big)E^s(U)^2. \end{align*} Therefore, it holds that: $$ \vert A_{5}\vert\leq\varepsilon C\big(\vert\underline{\zeta}\vert_{W^{1,\infty}},\vert\underline{v}_x\vert_{\infty},\sqrt{\varepsilon}\vert\underline{v}_{xx}\vert_{\infty}\big)E^s(U)^2. $$ Finally, by integration by parts, $A_6$ is controlled by $\varepsilon C\big( \vert\underline{v}_x\vert_{\infty}\big)E^s(U)^2$. Therefore, it holds: $$ \big\vert \big(SA[\underline{U}]\Lambda^s\partial_x U,\Lambda^s U\big)\big\vert\leq \sqrt{\varepsilon} C\big(\vert\zeta\vert_{W^{1,\infty}},\varepsilon\vert\underline{\zeta}_{xx}\vert_{H^s},\vert\underline{v}\vert_{W^{1,\infty}},\sqrt{\varepsilon}\vert\underline{v}_{xx}\vert_{\infty}\big) E^s(U)^2 \; . $$ $\bullet$ Estimation of $\big(\big[\Lambda^s,A[\underline{U}]\big]\partial_xU,S\Lambda^sU\big)$. Let us remark that: \begin{align*} \big(\big[\Lambda^s,A[\underline{U}]\big]\partial_xU,S\Lambda^sU\big)&=\varepsilon\big([\Lambda^s,\underline{v}]\zeta_x,J[\underline{h}]\Lambda^s\zeta\big) + \big([\Lambda^s,\underline{h}]v_x,J[\underline{h}]\Lambda^s\zeta\big) +\big([\Lambda^s,\underline{\Im}^{-1}(T[\underline{h}]\cdot)]\zeta_x,\underline{\Im}\Lambda^sv\big)\\ & + \varepsilon^2 \big([\Lambda^s,\underline{\Im}^{-1}(\partial_x^2\cdot)]\zeta_x,\underline{\Im}\Lambda^sv\big)+\varepsilon\big([\Lambda^s,\underline{v}]v_x,\underline{\Im}\Lambda^sv\big)+\varepsilon^2\big([\Lambda^s,\underline{\Im}^{-1}(\mathcal{Q}[\underline{U}]\cdot)]v_x,\underline{\Im}\Lambda^sv\big) \\& =B_1+B_2+...+B_{6}. 
\end{align*} To control $B_1$, we use the expression of $J[\underline{h}]$ to write: $$ B_1 =\varepsilon\big([\Lambda^s,\underline{v}]\zeta_x, \Lambda^s\zeta\big) + \varepsilon^3\big(\partial_x[\Lambda^s,\underline{v}]\zeta_x, \frac{1}{\underline{h}} \Lambda^s\zeta_{x}\big) +\frac{2}{45}\varepsilon^3\big(\partial_x^2[\Lambda^s,\underline{v}]\zeta_x, \underline{h}^{-1} \Lambda^s\zeta_{xx}\big) \; . $$ Then by using the fact that: \begin{equation}\label{MN} \partial_x[\Lambda^s,M]N=[\Lambda^s,M_x]N + [\Lambda^s,M]N_x \; \text{ and }\; \partial_x^2[\Lambda^s,M]N = [\Lambda^s,M_{xx}]N+2[\Lambda^s,M_x]N_x+[\Lambda^s,M]N_{xx} \; , \end{equation} and using \eqref{cest}, it holds that: \begin{align*} B_1 & =\varepsilon\big([\Lambda^s,\underline{v}]\zeta_x, \Lambda^s\zeta\big) + \varepsilon^3\big([\Lambda^s,\underline{v}_x]\zeta_x, \underline{h}^{-1} \Lambda^s\zeta_{x}\big) + \varepsilon^3\big([\Lambda^s,\underline{v}]\zeta_{xx}, \underline{h}^{-1} \Lambda^s\zeta_{x}\big)\\ & \quad+\frac{2}{45}\varepsilon^3 \Big\lbrace\big([\Lambda^s,\underline{v}_{xx}]\zeta_x, \underline{h}^{-1} \Lambda^s\zeta_{xx}\big) + 2\big([\Lambda^s,\underline{v}_x]\zeta_{xx}, \underline{h}^{-1} \Lambda^s\zeta_{xx}\big) +\big([\Lambda^s,\underline{v}]\zeta_{xxx},\underline{h}^{-1} \Lambda^s\zeta_{xx}\big)\Big\rbrace\\ &\leq \sqrt{\varepsilon} C\big(h_{min}^{-1} ,\vert\underline{v}\vert_{H^s},\varepsilon\vert\underline{v}_{xx}\vert_{H^s}\big)E^s(U)^2 \; . \end{align*} The $\sqrt{\varepsilon}$ in front of the constant is due to the inconvenient term represented by $\varepsilon^3 \big([\Lambda^s,\underline{v}_x]\zeta_{xx},\underline{h}^{-1}\Lambda^s\zeta_{xx}\big)$. \\ To control $B_2$, by the expression of $J[\underline{h}]$ and \eqref{MN}, we have: \begin{multline*}
B_2= \big([\Lambda^s,\underline{h}-1]v_x, \Lambda^s\zeta\big)
+ \varepsilon^3 \big([\Lambda^s,\underline{\zeta}_x]v_x, \underline{h}^{-1} \Lambda^s\zeta_{x}\big)
+ \varepsilon^2 \big([\Lambda^s,\underline{h}-1]v_{xx}, \underline{h}^{-1} \Lambda^s\zeta_{x}\big)\\
+\frac{2}{45}\varepsilon^2\Big\lbrace\big([\Lambda^s,(\underline{h}-1)_{xx}]v_x, \underline{h}^{-1} \Lambda^s\zeta_{xx}\big) +2\big([\Lambda^s,(\underline{h}-1)_{x}]v_{xx}, \underline{h}^{-1}\Lambda^s\zeta_{xx}\big) +\big([\Lambda^s,\underline{h}-1]v_{xxx}, \underline{h}^{-1}\Lambda^s\zeta_{xx}\big)\Big\rbrace. \end{multline*} Then, clearly the following estimate holds: $$ \vert B_2\vert\leq \varepsilon C\big(h_{min}^{-1} ,\vert\underline{h}-1\vert_{H^s},\varepsilon\vert\underline{\zeta}_{xx}\vert_{H^s}\big)E^s(U)^2. $$ To control $B_3$, we have that $\underline{\Im}$ is symmetric and that: \begin{equation*} \underline{\Im}[\Lambda^s, \underline{\Im}^{-1}]T[\underline{h}]\zeta_x=\underline{\Im}[\Lambda^s, \underline{\Im}^{-1}T[(\underline{h}]\cdot)]\zeta_x-[\Lambda^s, T[\underline{h}]]\zeta_x \; . \end{equation*} Moreover, since $[\Lambda^s,\underline{\Im}^{-1}]=-\underline{\Im}^{-1}[\Lambda^s,\underline{\Im}]\underline{\Im}^{-1}$, one gets: \begin{equation*} \underline{\Im}[\Lambda^s, \underline{\Im}^{-1}\;T[\underline{h}] \cdot ]\zeta_x=-[\Lambda^s, \underline{\Im}] \underline{\Im}^{-1}T[\underline{h}]\zeta_x+[\Lambda^s, T[\underline{h}] ]\zeta_x \; . \end{equation*} Therefore, one may write: \begin{align*} B_3 &=\big([\Lambda^s,\underline{\Im}]\underline{\Im}^{-1}(T[\underline{h}]\zeta_x),\Lambda^sv\big) +\big([\Lambda^s,T[\underline{h}]]\zeta_x,\Lambda^sv\big) \; . \end{align*} At this point, using the expressions of $T[\underline{h}]$ and $J[\underline{h}]$, it holds: $$ \frac{2}{45}\varepsilon^2 \partial_x^4\zeta_x = 2\underline{\Im}\zeta_x-2\underline{h}\zeta_x+\frac{2}{3} \varepsilon \partial_x(\underline{h}^3\zeta_{xx}) \; . $$ Therefore, it holds that: $$ \underline{\Im}^{-1}(T[\underline{h}]\zeta_x)=2\zeta_x - \underline{\Im}^{-1}(\underline{h}\zeta_x)- \varepsilon^2\underline{\Im}^{-1}( \zeta_{xxx})+\frac{2}{3}\varepsilon\underline{\Im}^{-1}\partial_x(\underline{h}^3\zeta_{xx}) \; , $$ which implies that: \begin{align*} B_3 &=2\big([\Lambda^s,\underline{\Im}]\zeta_x,\Lambda^sv\big) -\big([\Lambda^s,\underline{\Im}]\underline{\Im}^{-1}(\underline{h}\zeta_x),\Lambda^s v\big)+\frac{2}{3}\varepsilon\big([\Lambda^s,\underline{\Im}]\underline{\Im}^{-1}\partial_x(\underline{h}_3\zeta_{xx}),\Lambda^sv\big)\\ & \quad -\varepsilon^2 \big([\Lambda^s,\underline{\Im}]\underline{\Im}^{-1}( \zeta_{xxx}),\Lambda^sv\big) +\big([\Lambda^s,T[\underline{h}]]\zeta_x,\Lambda^sv\big)\\ &=B_{31}+B_{32}+B_{33}+B_{34}+B_{35}. \end{align*} Thanks to the fact that, for all $k\in\mathbb N, \underline{h}^{k}-1=\mathcal{O}(\varepsilon\underline{\zeta})$ and using the explicit expression of $\underline{\Im}$ combined with the identities: \begin{equation}\label{commu} [\Lambda^s,\partial_x(M\partial_x\cdot)]N = \partial_x[\Lambda^s,M]N_x \qquad\text{ and }\qquad [\Lambda^s, \partial_x^m]N=0 \; \quad\forall \; m\in\mathbb N^* \; , \end{equation}
then by integration by parts and \eqref{cest}, it holds that: $$ B_{31} = 2 \big([\Lambda^s,\underline{h}-1]\zeta_x,\Lambda^sv\big)+\frac{2}{3}\varepsilon\big([\Lambda^s,\underline{h}^3-1]\zeta_{xx},\Lambda^sv_x\big) \le \sqrt{\varepsilon} C\big(\vert\underline{h}-1\vert_{H^s})E^s(U)^2 \; . $$ Also, by \eqref{cest} it holds: \begin{multline*} \vert B_{32}\vert\leq\big\vert \big([\Lambda^s,\underline{h}]\underline{\Im}^{-1}(\underline{h}\zeta_x),\Lambda^sv\big)+\frac{1}{3}\varepsilon\big([\Lambda^s,\underline{h}^3]\partial_x\underline{\Im}^{-1}(\underline{h}\zeta_x),\Lambda^sv_x\big) \big\vert\leq \varepsilon C\big(\vert\underline{h}-1\vert_{H^s},C_s)E^s(U)^2 \; , \end{multline*} with \begin{multline*} \vert B_{33}\vert\leq\big\vert \frac{2}{3}\varepsilon\big([\Lambda^s,\underline{h}]\underline{\Im}^{-1}\partial_x(\underline{h}^3\zeta_{xx}),\Lambda^sv\big)+\frac{2}{9}\varepsilon^2\big([\Lambda^s,\underline{h}^3]\partial_x\underline{\Im}^{-1}\partial_x(\underline{h}^3\zeta_{xx}),\Lambda^sv_x\big) \big\vert\leq\varepsilon C\big(\vert\underline{h}-1\vert_{H^s},C_s)E^s(U)^2 \; , \end{multline*} and \begin{multline*} \vert B_{34}\vert\leq \varepsilon^2\big\vert \big([\Lambda^s,\underline{h}]\underline{\Im}^{-1}(\zeta_{xxx}),\Lambda^sv\big)+\frac{1}{3}\varepsilon^3\big([\Lambda^s,\underline{h}^3]\partial_x\underline{\Im}^{-1}(\zeta_{xxx}),\Lambda^sv_x\big) \big\vert\leq\varepsilon C\big(\vert\underline{h}-1\vert_{H^s},C_s)E^s(U)^2 \; . \end{multline*} For controlling $B_{35}$, the explicit expression of $T[\underline{h}]$ and \eqref{commu} gives that: \begin{equation*} B_{35}=\big([\Lambda^s,\underline{h}-1]\zeta_x,\Lambda^sv\big) \le \varepsilon C\big(\vert\underline{h}-1\vert_{H^s},C_s)E^s(U)^2 \; . \end{equation*} Thus, as a conclusion, it holds that: $$ \vert B_3\vert\leq \sqrt{\varepsilon} C\big(\vert\underline{h}-1\vert_{H^s},\vert\underline{\zeta}\vert_{\infty},\varepsilon\vert\underline{\zeta}_{xxx}\vert_{H^{s-1}},C_s\big)E^s(U)^2. $$ To control $B_4$, as for $B_3$ and using \eqref{commu} one may write: \begin{align*} B_4 &= -\varepsilon^2\big([\Lambda^s, \underline{\Im}]\underline{\Im}^{-1}\zeta_{xxx},\Lambda^s v\big)\\ & = -\varepsilon^2\big([\Lambda^s, \underline{h} ]\underline{\Im}^{-1}\zeta_{xxx},\Lambda^s v\big) -\frac{1}{3}\varepsilon^3\big([\Lambda^s, \underline{h}^3]\partial_x\underline{\Im}^{-1}\zeta_{xxx},\Lambda^s v_x\big) \leq\varepsilon C\big(\vert\underline{h}-1\vert_{H^s},C_s)E^s(U)^2 \; . \end{align*} To control $B_5$, using the expression of $\underline{\Im}$, \eqref{cest} and (\ref{MN}) with integration by parts and the fact that $\partial_x[\Lambda^s,M]N=[\Lambda^s,M_x]N+[\Lambda^s,M]N_x$, it holds: \begin{align*} &\vert B_5\vert=\varepsilon\big\vert\big([\Lambda^s,\underline{v}]v_x,\underline{h}\Lambda^sv\big)+\frac{1}{3}\varepsilon\big([\Lambda^s,\underline{v}_x]v_x,\underline{h}^3\Lambda^sv_x\big)+\frac{1}{3}\varepsilon\big([\Lambda^s,\underline{v}]v_{xx},\underline{h}^3\Lambda^sv_x\big)+\frac{1}{45}\varepsilon^2\big([\Lambda^s,\underline{v}_{xx}]v_x, \Lambda^sv_{xx}\big)\\ &+\frac{2}{45}\varepsilon^2\big([\Lambda^s,\underline{v}_x]v_{xx}, \Lambda^sv_{xx}\big)+\frac{1}{45}\varepsilon^2\big([\Lambda^s,\underline{v}]v_{xxx}, \Lambda^sv_{xx}\big)\big\vert\leq\varepsilon C\big(\vert\underline{h}\vert_{\infty},\vert\underline{v}\vert_{H^s},\sqrt{\varepsilon}\vert\underline{v}_{xx}\vert_{H^{s-1}},\varepsilon\vert\underline{v}_{xxx}\vert_{H^{s-1}}\big)E^s(U)^2. 
\end{align*} To control $B_6$, using the same arguments as the ones used to control $B_3$, using expression of $\underline{\Im}$, \eqref{cest} and \eqref{commu}, it follows that: \begin{equation*} B_6=-\varepsilon^2\big([\Lambda^s,\underline{h}]\underline{\Im}^{-1}\mathcal{Q}[\underline{U}]v_x,\Lambda^sv\big)-\frac{\varepsilon^3}{3}\big([\Lambda^s,\underline{h}^3]\partial_x\underline{\Im}^{-1}\mathcal{Q}[\underline{U}]v_x,\Lambda^sv_x\big) +\varepsilon^2\big([\Lambda^s,\mathcal{Q}[\underline{U}]]v_x,\Lambda^sv\big). \end{equation*} Now, using the expression of $\mathcal{Q}$ with the help of Lemma \ref{lemma2}, estimate \eqref{cest}, in addition to (\ref{commu}) and the fact that $[\Lambda^s, \partial_x(M\cdot)]N= \partial_x[\Lambda^s, M]N$, it holds: $$ \vert B_{6}\vert\leq \varepsilon C\big(\vert\underline{h}-1\vert_{H^s}, \sqrt{\varepsilon}\vert\underline{v}_x\vert_{H^{s}},C_s\big)E^s(U)^2 \; . $$ Eventually, as a conclusion, one gets: $$ \big\vert\big(\big[\Lambda^s,A[\underline{U}]\big]\partial_xU,S\Lambda^sU\big)\big\vert\leq \sqrt{\varepsilon} C\big( h_{min}^{-1}, \vert\underline{h}-1\vert_{H^s},\vert\underline{\zeta}\vert_{H^{s}},\varepsilon\vert\underline{\zeta}_{xx}\vert_{H^s},\vert\underline{v}\vert_{H^{s}},\sqrt{\varepsilon}\vert\underline{v}_x\vert_{H^s},\varepsilon\vert\underline{v}_{xx}\vert_{H^s},C_s\big)E^s(U)^2. $$ It is worth noticing that $\sqrt{\varepsilon}$ in front of the constant is due to $B_1$ and $B_{31}$. \\ $\bullet$ Estimation of $\big(\Lambda^s\zeta,[\partial_t, J[\underline{h}]]\Lambda^s\zeta\big)$. Using the expression of $J[\underline{h}]$ and by integration by parts, it holds that: $$ \big(\Lambda^s\zeta,[\partial_t, J[\underline{h}]]\Lambda^s\zeta\big)\big\vert= \varepsilon^2\big(\underline{h}^{-2} \partial_t\underline{h} \Lambda^s\zeta_{x},\Lambda^s\zeta_{x}\big) + \frac{2}{45}\varepsilon^2\big(\underline{h}^{-2} \partial_t\underline{h} \Lambda^s\zeta_{xx},\Lambda^s\zeta_{xx}\big) \leq \varepsilon C(h_{min}^{-2} , \vert\partial_t\underline{\zeta}\vert_{\infty})E^s(U)^2 . $$ $\bullet$ Estimation of $\big(\Lambda^sv,[\partial_t,\underline{\Im}]\Lambda^sv\big)$. It holds that: $$
[\partial_t , \underline{h}]\Lambda^sv = \partial_t \underline{h}\Lambda^sv \qquad\text{and }\qquad
[\partial_t , \partial_x(\underline{h}^3\partial_x\cdot)]\Lambda^sv = \partial_x(\partial_t \underline{h}^3\Lambda^sv_x) \; , $$ then by integration by parts: \begin{equation*} \big\vert\big(\Lambda^sv,[\partial_t,\underline{\Im}]\Lambda^sv\big)\big\vert=\big\vert\big(\partial_t\underline{h}\Lambda^sv,\Lambda^sv\big)+\frac{\varepsilon}{3}\big(\partial_t\underline{h}^3\Lambda^sv_x,\Lambda^sv_x\big) \big\vert\leq \varepsilon C(\vert\partial_t\underline{\zeta}\vert_{\infty},E^s(\underline{U}))E^s(U)^2. \end{equation*} Finally, combining the above estimates together with the fact that $H^s(\mathbb R)$ is continuously embedded in $W^{1,\infty}(\mathbb R)$, it holds that: $$ \frac{1}{2}e^{\sqrt{\varepsilon}\lambda t}\partial_t (e^{-\sqrt{\varepsilon}\lambda t}E^s(U)^2) \leq \sqrt{\varepsilon}\big(C(h_{min}^{-1},E^s(\underline{U}))-\lambda\big)E^s(U)^2. $$ Taking $\lambda=\lambda_T$ large enough (how large depending on $\displaystyle \sup_{t\in [0,\frac{T}{\sqrt{\varepsilon}}]}C(h_{min}^{-1},E^s(\underline{U}))$) such that the right hand side of the inequality above is negative for all $t\in [0,\frac{T}{\sqrt{\varepsilon}}]$, it holds that: $$ \forall\hspace{0.1cm}t\in \Big[0,\frac{T}{\sqrt{\varepsilon}}\Big ]\hspace{0.1cm},\quad\qquad \frac{1}{2}e^{\sqrt{\varepsilon}\lambda t}\partial_t \big(e^{-\sqrt{\varepsilon}\lambda t}E^s(U)^2\big) \leq0. $$ Gr$\ddot{\text{o}}$nwall's inequality then yields \begin{equation*} \forall\hspace{0.1cm}t\in \Big[0,\frac{T}{\sqrt{\varepsilon}}\Big]\hspace{0.1cm},\quad\qquad E^s\big(U(t)\big)\displaystyle\leq \big(e^{\sqrt{\varepsilon}\lambda_{T} t}\big)^{1/2}E^s(U_0) \;, \end{equation*} and hence the desired energy estimate is obtained. \end{proof}
\subsection{Main results.}\label{mainresults}
\subsubsection{Well-posedness of the extended Boussinesq system.} Theorem \ref{localexistence} states the well-posedness of the extended Boussinesq system \eqref{boussinesq}, which holds in $X^s=H^{s+2}(\mathbb R)\times H^{s+2}(\mathbb R)$ as soon as $s>3/2$, on a time interval of size $1/\sqrt{\varepsilon}$. \begin{theorem}[Local existence]\label{localexistence} Let $t_0>\frac{1}{2}$ and $s\geq t_0+1$, and suppose that $U_0=(\zeta_0,v_0)\in X^s$ satisfies (\ref{depthcond}). Then there exists a maximal time $T_{max}=T(\vert U_0\vert_{X^s})>0$ and a unique solution $U=(\zeta,v)^T\in X^s_{T_{max}}$ to the extended Boussinesq system \eqref{boussinesq} with initial condition $(\zeta_0,v_0)$ such that the non-vanishing depth condition (\ref{depthcond}) is satisfied for any $t\in [0,\frac{T_{max}}{\sqrt{\varepsilon}})$. In particular if $T_{max}<\infty$ one has $$ \vert U(t,\cdot)\vert_{X^s}\longrightarrow\infty\quad\hbox{as}\quad t\longrightarrow \frac{T_{max}}{\sqrt{\varepsilon}},\qquad\text{ or } \qquad \inf_{\mathbb R} h(t,\cdot)=\inf_{\mathbb R}\big(1+\varepsilon\zeta(t,\cdot)\big)\longrightarrow 0 \quad\hbox{as}\quad t\longrightarrow \frac{T_{max}}{\sqrt{\varepsilon}} \; . $$ \end{theorem} \begin{proof} The proof follows the same lines as \cite[Theorem 1]{KZI2018}, using the energy estimate proved in Proposition \ref{prop1}. This is due to the fact that in \cite{KZI2018} a more general case is considered (\textit{i.e.} the extended Green-Naghdi equations). Remark that the proof itself is an adaptation of the proof of the well-posedness of hyperbolic systems (see \cite{AG91} for general details). \end{proof}
\subsubsection{A stability property.} Theorem \ref{localexistence} is complemented by the following result that shows the stability of the solution with respect to perturbations, which is very useful for the justification of asymptotic approximations of the exact solution. (The solution $U=(\zeta,v)^{T}$ and time $T_{max}$ that appear in the statement below are those furnished by Theorem \ref{localexistence}). \begin{theorem}[Stability]\label{stability} Suppose that the assumption of Theorem \ref{localexistence} is satisfied and moreover assume that there exists $\widetilde{U}=(\widetilde{\zeta},\widetilde{v})^{T}\in C\left([0,\frac{T_{max}}{\sqrt{\varepsilon}}], X^{s+1}(\mathbb R)\right)$ such that \begin{equation*} \left\{ \begin{array}{lcl} \displaystyle\partial_t\widetilde{\zeta}+\partial_x(\widetilde{h}\widetilde{v})=f_1
,\\ \displaystyle \tilde{\Im} \big(\partial_t\tilde{ v}+\varepsilon \tilde{v}\tilde{v}_x\big) + \tilde{h}\partial_x\tilde{\zeta} - \varepsilon^{2} \tilde{\zeta}_{xxx}+\frac{2}{45}\varepsilon^2 \tilde{\zeta}_{xxxxx} + \varepsilon^{2} \tilde{\zeta}_{xxx}+\varepsilon^2 \mathcal{Q}[\tilde{U}]\tilde{v}_x = f_2 \; , \end{array} \right. \end{equation*} with $\widetilde{h}(t,x)=1+\varepsilon\widetilde{\zeta}(t,x)$ and $\widetilde{F}=(f_1,f_2)^{T}\in L^{\infty}\left([0,\frac{T_{max}}{\sqrt{\varepsilon }}],X^s(\mathbb R)\right)$. Then for all $t\in[0,\frac{T_{max}}{\sqrt{\varepsilon} }]$, the error ${\bf U}=U-\widetilde{U}=(\zeta,v)^{T}-(\widetilde{\zeta},\widetilde{v})^{T}$ with respect to $U$ given by Theorem \ref{localexistence} satisfies for all $0\leq t\leq T_{max}/\sqrt{\varepsilon}$ the following inequality $$ \big\vert {\bf U} \big\vert_{L^{\infty}([0,t],X^s(\mathbb R))}\displaystyle\leq \sqrt{\varepsilon}\widetilde{C} \Big( \big\vert {\bf U}_{\mid_{t = 0}} \big\vert_{X^s(\mathbb R)}+ t\big\vert\widetilde{F}\big\vert_{L^{\infty}([0,t],X^s(\mathbb R))}\Big), $$ where the constant $\widetilde{C}$ is depending on $\vert U\vert_{L^{\infty}([0,T_{max}/\sqrt{\varepsilon}],X^s(\mathbb R))}$ and $\vert \widetilde{U}\vert_{L^{\infty}([0,T_{max}/\sqrt{\varepsilon}],X^{s+1}(\mathbb R))}$. \end{theorem} \begin{proof} The proof consists on the evaluation of $\frac{1}{2}\frac{d}{dt}\big\vert{\bf U}\big\vert^2_{X^s(\mathbb R)}$. Knowing that fact, by subtracting the equations satisfied by $U=(\zeta,v)^{T}$ and $\widetilde{U}=(\widetilde{\zeta},\widetilde{v})^{T}$, we obtain: \begin{equation*} \left\{ \begin{array}{lcl} \displaystyle \partial_t{\bf U} + A[U]\partial_x{\bf U}= -\big(A[U]-A[\widetilde{U}]\big)\partial_x\widetilde{U} -\widetilde{F},\\ \displaystyle {\bf U}_{\mid_{t = 0}} = U_0-\widetilde{U}_0 \; . \end{array} \right. \end{equation*} Consequently, a similar energy estimate evaluation as in Proposition \ref{prop1} yields the desired result. \end{proof}
\subsubsection{Convergence.} As a conclusion, the following convergence result states that the solutions of the full Euler system remain close to those of the system we are considering, namely system \eqref{boussinesq}, with a precision that improves as $\varepsilon^3$ becomes smaller.
\begin{theorem}[Convergence]\label{convergence} Let $\varepsilon\in(0,1)$, $s>3/2$, and $U_0=(\zeta_0,\psi_0)^T\in {H^{s+N}}(\mathbb R)^2$ satisfying condition \eqref{depthcond}, where $N$ is large enough, uniformly with respect to $\varepsilon\in(0,1)$. Moreover, assume $U^{euler}=(\zeta,\psi)^T$ to be a solution to the full Euler system \eqref{Zakharovv} that satisfies the assumption of Proposition \ref{consistency}. Then there exist $C$, $T>0$, independent of $\varepsilon$, such that \begin{itemize} \item Our new model \eqref{boussinesq} admits a unique solution $U_{xB}=(\zeta_{xB},v_{xB})^T$, defined on $[0, \frac{T}{\sqrt{\varepsilon}}]$ with corresponding initial data $(\zeta_0,v_0)^T$; \item The error estimate below holds, at any time $0\le t\le T/\sqrt{\varepsilon}$, $$ \vert (\zeta, v) - (\zeta_{xB},v_{xB}) \vert _{L^{\infty}([0,t];X^s)} \le C \varepsilon^3 t \lesssim \varepsilon^{5/2} \; . $$ \end{itemize} \end{theorem} \begin{proof} The first point is provided by the local existence result, Theorem \ref{localexistence}. Thanks to Proposition \ref{consistency}, the solution of the water wave equations $(\zeta,v)^T$ solves our model \eqref{boussinesq} up to a residual $R$ of order $\varepsilon^3$. The error estimate then follows from the stability result, Theorem \ref{stability}. \end{proof}
\section{Solitary Waves}\label{solitary-approx}
\subsection{Explicit Solitary Wave Solution of the extended Boussinesq system} Solitary waves were initially discovered in shallow water by J.S. Russell during his experiments to design a more efficient canal boat~\cite{OD03}. Many partial differential equations have been derived in the literature to model the solitary wave observed by Russell. Among such models are the Korteweg-de Vries (KdV) scalar equation for unidirectional flow and the coupled Boussinesq and Green-Naghdi evolution equations. These famous nonlinear and dispersive models describe shallow water waves and admit explicit families of solitary wave solutions~\cite{Bous1872,Ray1876,KDV1895,Serre53,Chen98}. The explicit solitary solutions of various nonlinear PDEs can be calculated using many methods. One of these methods consists in replacing the partial differential equation by an ordinary one (ODE), for which one can look for explicit solutions in terms of particular functions. This replacement is done by fixing a reference traveling wave, hence looking for traveling-wave solutions. In this section, we seek explicit traveling-wave solutions of the extended Boussinesq system. Let us recall that the extended Boussinesq system that we are considering can be written as: \begin{equation}\label{boussinesq2} \left\{ \begin{array}{lcl} \displaystyle\partial_t\zeta+\partial_x(hv)=0
\; ,\\ \displaystyle ( 1+\varepsilon\mathcal{T}[\zeta]+\varepsilon^2\mathfrak{T} )\partial_t v + \partial_x\zeta+\varepsilon v \partial_x v +\varepsilon^2 \mathcal{Q}v =\mathcal{O} (\varepsilon^3) \; , \end{array} \right. \end{equation} where $h(t,x)=1+\varepsilon\zeta(t,x)$ and denote by \begin{equation}\label{exp2} \mathcal{T}[\zeta]w =-\frac{1}{3h}\partial_x\big((1+3\varepsilon\zeta)\partial_x w\big)=-\frac{1}{3}(1-\varepsilon\zeta)\partial_x\big((1+3\varepsilon\zeta)\partial_x w\big)+\mathcal{O}(\varepsilon^3), \; \mathfrak{T} w = -\frac{1}{45}\partial_x^4w , \; \mathcal{Q}v = -\frac{1}{3}\partial_x\big(vv_{xx}-v_x^2\big) \; . \end{equation}
In order to find solitary wave solutions of the extended Boussinesq system~\eqref{boussinesq2}, we seek solutions in the form of the traveling wave $\zeta(t,x)=\zeta_c(x-ct)$ and $v(t,x)=v_c(x-ct)$ with $\displaystyle{\lim_{|x| \rightarrow \infty} |(\zeta_c,v_c)|(x)=0}$ where the constant $c \in \mathbb{R}$ is the velocity of the solitary wave. Plugging the above Ansatz into eq.~\eqref{boussinesq2} yields: \begin{equation}\label{boussinesq3} \left\{ \begin{array}{lcl} \displaystyle -c\zeta^{'}_c+(h_c v_c)'=0
\; ,\\ \displaystyle -c v^{'}_c +\frac{\varepsilon c}{3}\Big((1+3\varepsilon \zeta_c)v^{''}_c\Big)'-\frac{\varepsilon^2 c}{3}\zeta_c v^{'''}_c +\frac{\varepsilon^2 c}{45} v_c^{(5)} +\zeta^{'}_c +\frac{\varepsilon}{2} (v_c^2)' = \frac{\varepsilon^2}{3}\big(v_c v_c^{''} -(v^{'}_c)^2\big)' \ \; . \end{array} \right. \end{equation} We may now integrate and, using the vanishing condition at infinity to set the integration constant, we deduce from the first equation: \begin{equation}\label{soleq1} -c\zeta_c+ h_c v_c=0\;. \end{equation} Using~\eqref{soleq1}, one can deduce that $v_c^{'''}=c \zeta_c^{'''} + \mathcal{O} (\varepsilon)$.
Indeed, solving \eqref{soleq1} for $v_c$ gives $v_c=\dfrac{c\,\zeta_c}{1+\varepsilon\zeta_c}=c\,\zeta_c-\varepsilon c\,\zeta_c^{2}+\mathcal{O}(\varepsilon^{2})$, which also yields \eqref{v''exp} below. One can also check the identity $\zeta_c \zeta_c^{'''}=(\zeta_c \zeta_c^{''})'-\dfrac{1}{2}\big((\zeta_c^{'})^2\big)'$. Substituting the latter identities into the second equation of~\eqref{boussinesq3}, we may now integrate and, using the vanishing condition at infinity to set the integration constant, one can deduce: \begin{equation}\label{soleq2} -c v_c +\frac{\varepsilon}{2} v_c^2 +\zeta_c=-\frac{\varepsilon c}{3}v^{''}_c-\varepsilon^2 c\zeta_c v_c^{''}+\frac{\varepsilon^2 c^2}{3}\zeta_c \zeta^{''}_c -\frac{\varepsilon^2 c^2}{6} (\zeta_c^{'})^2-\frac{\varepsilon^2 c}{45} v_c^{(4)}+ \frac{\varepsilon^2}{3}v_c v_c^{''} -\frac{\varepsilon^2}{3}(v^{'}_c)^2. \end{equation} One can deduce from~\eqref{soleq1} the following identity: \begin{equation}\label{v''exp} v_c=c \zeta_c -\varepsilon c \zeta_c^2 + \mathcal{O}(\varepsilon^2)\;. \end{equation} Using~\eqref{soleq1} in the \emph{l.h.s} of~\eqref{soleq2} and~\eqref{v''exp} in the \emph{r.h.s} of~\eqref{soleq2}, and discarding all terms of order $\mathcal{O}(\varepsilon^3)$, one can deduce the following equation: \begin{equation}\label{soleq22} \zeta_c-\dfrac{c^2\zeta_c}{2(1+\varepsilon\zeta_c)^2}(2+\varepsilon\zeta_c)=-\frac{\varepsilon c^2}{3}\zeta_c^{''} + \frac{\varepsilon^2 c^2}{6}(\zeta_c^{'})^2 +\frac{\varepsilon^2 c^2}{3} \zeta_c \zeta_c^{''}-\frac{\varepsilon^2 c^2}{45} \zeta_c^{(4)}. \end{equation} Multiplying~\eqref{soleq22} by $\zeta_c^{'}$ and integrating once again yields \begin{equation}\label{soleq23} \dfrac{\zeta_c^2}{2} \Big( 1-\dfrac{c^2}{1+\varepsilon \zeta_c} \Big)= \frac{\varepsilon c^2}{6} (\varepsilon\zeta_c -1)(\zeta_c^{'})^2-\frac{\varepsilon^2 c^2}{45} \zeta_c^{'''}\zeta_c^{'} + \frac{\varepsilon^2 c^2}{90} (\zeta_c^{''})^2. \end{equation} Equation~\eqref{soleq23} is a third-order nonlinear ordinary differential equation. When dropping the $\varepsilon^2$ terms on the \emph{r.h.s} of~\eqref{soleq23}, one gets the analogous ODE for the Green-Naghdi equations, which exhibits the analytical solitary wave solution given in~\eqref{solGN}. A careful examination reveals that equation~\eqref{soleq23} does not seem to admit an explicit solution by any standard method. In~\cite{Matsuno2015}, the author studied solitary wave solutions of the Hamiltonian formulation of the extended Green-Naghdi equations by performing a singular perturbation analysis. In the latter paper, Matsuno mentioned that his inspection also reveals that the obtained third-order nonlinear differential equation would not have analytical solutions. The aim was to find an exact solitary wave solution of equation~\eqref{soleq23}; however, analytical approaches cannot be applied to many nonlinear problems, and an explicit solution of the extended Boussinesq system~\eqref{boussinesq2} remains an open problem. An alternative approach is to consider the numerical solution of equation~\eqref{soleq23}. Therefore, we validate the asymptotic extended Boussinesq model~\eqref{boussinesq2} by comparing its travelling wave solution (computed numerically) with the corresponding solution of the full Euler equations, computed using fast and accurate algorithms~\cite{DC14,Tanaka86}. \subsection{Numerical Solitary Wave Solution of the extended Boussinesq system}\label{NumSWSec} In the previous section, the emphasis was on finding an analytic solitary wave solution for the extended Boussinesq system of equations. However, many differential equations, especially nonlinear ones of high order, do not admit exact explicit solutions.
Instead, numerical solutions must be considered as an alternative way of dealing with these equations. To this end, we compute the solution of~\eqref{soleq23} numerically by employing the Matlab solver \texttt{ode45}. We compare the obtained solutions with the solutions of the water-waves equations. The latter are computed using the Matlab script of Clamond and Dutykh~\cite{CD2013}, where they introduce a fast and precise approach for computing solitary wave solutions. We compute the solitary waves for our model for three values of the velocity, namely $c=1.025, \ c=1.01$ and $c=1.002$. In fact, the Matlab script in~\cite{CD2013} offers fast and accurate results but is limited to relatively small velocities. We compare the obtained solutions with the ones corresponding to the full Euler system (numerically computed), the original Green-Naghdi system ($\zeta_{GN}$), the Boussinesq system ($\zeta_{B}$) and the KdV equation ($\zeta_{KdV}$). The explicit solution of the original Green-Naghdi model was first obtained by Serre in~\cite{Serre53} and later on by Su and Gardner~\cite{SuGardner69}: \begin{equation}\label{solGN} \varepsilon \zeta_{GN}(x)=(c^2-1)\ \text{sech}^2 \Big(\sqrt{\dfrac{3(c^2-1)}{4c^2 \varepsilon}}\ x\Big)=\varepsilon c^2 \zeta_{KdV}(x)=\varepsilon c^2 \zeta_{B}(x)\;. \end{equation} The waves are rescaled so that the Korteweg-de Vries and Boussinesq solutions do not depend on $c$. Consistently, we set $\varepsilon=1$. By the convergence theorem, the above solutions provide good approximations of the traveling waves of the exact water-waves equations when $c-1 \approx \varepsilon \ll 1$, that is, in the weakly nonlinear regime.
In fact, in figure~\ref{SWcomp}, one can clearly see that, as $c-1 \rightarrow 0$ and after re-scaling, the solitary waves tend towards the KdV solution $(\zeta_{KdV})$. Moreover, when zooming in, one can see that the full Euler system (water-waves) solution is in better agreement with the solution of the extended Boussinesq model than with the Green-Naghdi one. \begin{figure}
\caption{Comparison of the solitary wave solutions.}
\label{SWcomp}
\end{figure}
In figure~\ref{convratefig}, we plot in a log-log scale the normalized $l^2$-norm of the difference between the solitary wave solutions of the approximate models and the water-waves solution. The error is computed for different values of $c$. The extended Boussinesq model exhibits a better convergence rate (quadratic) than the original Green-Naghdi model (linear). This highlights the fact that the extended Boussinesq model provides a better approximation. \begin{figure}
\caption{Errors as a function of $c-1$ (log-log plot).}
\label{convratefig}
\end{figure}
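For readers who wish to reproduce a rough version of this computation, the following Python sketch is an illustrative outline only: it is not the Matlab/\texttt{ode45} script used for the results above, and the parameter values, the seeding strategy and the integration window are our own assumptions. It recasts~\eqref{soleq23} as a first-order system, seeds it in the far tail with the Green-Naghdi profile~\eqref{solGN}, and integrates towards the crest with the \texttt{solve\_ivp} routine of \texttt{scipy} (an analogue of \texttt{ode45}).
\begin{verbatim}
import numpy as np
from scipy.integrate import solve_ivp

eps, c = 1.0, 1.01            # assumed values, same regime as in the text

def rhs(x, y):
    # y = (zeta, zeta', zeta''); eq. (soleq23) solved for zeta'''
    z, zp, zpp = y
    lhs = 0.5 * z**2 * (1.0 - c**2 / (1.0 + eps * z))
    num = (eps * c**2 / 6.0) * (eps * z - 1.0) * zp**2 \
        + (eps**2 * c**2 / 90.0) * zpp**2 - lhs
    return [zp, zpp, 45.0 * num / (eps**2 * c**2 * zp)]

# Seed with the Green-Naghdi solitary wave (solGN) in the far tail,
# where zeta' is bounded away from zero.
a = (c**2 - 1.0) / eps
k = np.sqrt(3.0 * (c**2 - 1.0) / (4.0 * c**2 * eps))
gn = lambda x: a / np.cosh(k * x)**2
x0, h = -15.0, 1e-5
y0 = [gn(x0),
      (gn(x0 + h) - gn(x0 - h)) / (2.0 * h),
      (gn(x0 + h) - 2.0 * gn(x0) + gn(x0 - h)) / h**2]

# Integrate up to (but not through) the crest, where zeta' -> 0
sol = solve_ivp(rhs, [x0, -0.5], y0, rtol=1e-10, atol=1e-12)
print(sol.t[-1], sol.y[0, -1])
\end{verbatim}
A shooting or continuation step is still required to single out the solitary wave itself, and the right-hand side above is singular at the crest ($\zeta_c'=0$); this is why the integration is stopped before $x=0$ in this toy example.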
\section{Explicit solution with correctors of order $\mathcal{O}(\varepsilon^3)$ for the extended Boussinesq equations}\label{explicit-solitary}
Another approach to dealing with nonlinear PDEs, when an exact analytical solution is out of reach, is to find instead an explicit solution with correctors. Explicit solutions with correctors for asymptotic water-waves models have been obtained in~\cite{IM14,HAI20}. There, $H^s$-consistent solutions are obtained for the models in the variable-topography case using the analytic solution of the model in the flat-topography configuration. In what follows, we find an explicit solution with correctors of order $\mathcal{O}(\varepsilon^3)$ for the extended Boussinesq model~\eqref{boussinesq2} and validate the result numerically.
We start by defining an $H^s$-consistent solution, in other words an explicit solution with correctors of order $\mathcal{O}(\varepsilon^3)$. \begin{definition} A family $(\zeta,v)$ is $H^s$-consistent on $[0, T/\sqrt{\varepsilon}]$ for the extended Boussinesq equations~\eqref{boussinesq2} if \begin{equation}\label{boussHsdef} \left\{ \begin{array}{lcl} \displaystyle\partial_t\zeta+\partial_x(hv)=\varepsilon^3 r_1
\; ,\\ \displaystyle ( 1+\varepsilon\mathcal{T}[h]+\varepsilon^2\mathfrak{T} )\partial_t v + \partial_x\zeta+\varepsilon v \partial_x v +\varepsilon^2 \mathcal{Q}v =\varepsilon^3 r_2 \; , \end{array} \right. \end{equation} with $(r_1, r_2)$ bounded in $\Big(L^{\infty} \big([0,\frac{T}{\sqrt{\varepsilon}}], H^s(\mathbb{R})\big)\Big)^2$. \end{definition} The standard Boussinesq system can be easily obtained from the extended Boussinesq system~\eqref{boussinesq2} by dropping all terms of order $\mathcal{O}(\varepsilon^2)$. Thus the standard Boussinesq system can be written as: \begin{equation}\label{standbouss} \left\{ \begin{array}{lcl} \displaystyle\partial_t\zeta+\partial_x(hv)=0
\; ,\\ \displaystyle \partial_t v -\frac{\varepsilon}{3} \partial_x^2 \partial_t v+ \partial_x\zeta+\varepsilon v \partial_x v =\mathcal{O} (\varepsilon^2) \; . \end{array} \right. \end{equation} \subsection{Explicit solution of the standard Boussinesq system~\eqref{standbouss}} The standard Boussinesq system enjoys a well-known explicit solitary traveling wave solution $(\zeta_1,v_1)$ of the form: \begin{equation}\label{stdbousssol} \left\{ \begin{array}{lcl} \displaystyle\zeta_{1}(t,x)= \alpha \ \text{sech}^2 \Big(k \ (x-ct)\Big)
\; ,\\ \displaystyle v_{1}(t,x)=\dfrac{ c \zeta_1(t,x)}{1+\varepsilon \zeta_1(t,x)}\; , \end{array} \right. \end{equation} where $k=\sqrt{\dfrac{3\alpha}{4}}$, $c=\sqrt{\dfrac{1}{1-\alpha\varepsilon}}$ and $\alpha$ is an arbitrarily chosen constant. This explicit solitary wave was already introduced in equation \eqref{solGN} in the previous section~\ref{NumSWSec}. As shown in figure~\ref{SWcomp}, this solution is in good agreement with the water-waves solutions in the weakly nonlinear regime. \begin{theorem}\label{Hsconstheo} Let $(\zeta_1, v_1)$ be a solution of the standard Boussinesq system~\eqref{standbouss} and $(\zeta_2,v_2)$ a solution of the linear equations below: \begin{equation}\label{linsyst} \left\{ \begin{array}{lcl} \displaystyle\partial_t \zeta_2 +\partial_x v_2=0
\; ,\\ \displaystyle \partial_t v_2 +\partial_x \zeta_2=f(\zeta_1,v_1) \; , \end{array} \right. \end{equation} with \begin{equation}\label{gdef} f(\zeta_1,v_1)=\partial_x \zeta_1 \partial_x \partial_t v_1 +\dfrac{2}{3}\zeta_1 \partial_x^2 \partial_t v_1 + \dfrac{1}{45}\partial_x^4 \partial_t v_1 +\dfrac{1}{3} \partial_x\big(v_1 (v_1)_{xx}-(v_1)_x^2\big),\end{equation} then $(\zeta,v)=(\zeta_1,v_1)+\varepsilon^2(\zeta_2,v_2)$ is $H^s$-consistent with the extended Boussinesq system~\eqref{boussinesq2}. \end{theorem} \begin{proof} First, we would like to mention that we denote by $\mathcal{O}(\varepsilon)$ any family of functions $(f_\varepsilon)_{0<\varepsilon<1}$ such that $(\dfrac{1}{\varepsilon} f_{\varepsilon})_{0<\varepsilon<1}$ remains bounded in $L^{\infty} \big([0,\frac{T}{\sqrt{\varepsilon}}], H^r(\mathbb{R})\big)$, for possibly different values of $r$. We may now proceed in proving the stated result.
If $\zeta$ and $v$ are such that $(\zeta,v)=(\zeta_1,v_1)+\varepsilon^2(\zeta_2,v_2)$ solves the first equation of~\eqref{boussinesq2} up to $\mathcal{O}(\varepsilon^3)$ terms, then \begin{equation*} \partial_t \zeta_{1} +\partial_x ((1+\varepsilon \zeta_1) v_1) + \varepsilon^2 \partial_t \zeta_2 +\varepsilon^2 \partial_x v_2 =\mathcal{O}(\varepsilon^3). \end{equation*} The first equation of~\eqref{boussinesq2} is satisfied up to $\mathcal{O}(\varepsilon^3)$ terms if and only if: \begin{equation*} \varepsilon^2 \partial_t \zeta_2 +\varepsilon^2 \partial_x v_2 =\mathcal{O}(\varepsilon^3). \end{equation*} Therefore one can take: \begin{equation*} \partial_t \zeta_2 + \partial_x v_2 =0. \end{equation*} Now, let us recall that the second equation of~\eqref{boussinesq2} can be written as: \begin{equation*} \partial_t v -\frac{\varepsilon}{3} \partial_x^2\partial_t v-\varepsilon^2\partial_x \zeta \partial_x \partial_t v - \frac{2\varepsilon^2}{3}\zeta \partial_x^2 \partial_t v - \frac{\varepsilon^2}{45}\partial_x^4 \partial_t v+ \partial_x\zeta+\varepsilon v \partial_x v -\frac{\varepsilon^2}{3} \partial_x\big(vv_{xx}-v_x^2\big) = \mathcal{O}(\varepsilon^3). \end{equation*} We seek $(\zeta_2,v_2)$ such that if $(\zeta,v)=(\zeta_1,v_1)+\varepsilon^2(\zeta_2,v_2)$ and $(\zeta_1,v_1)$ solve the standard Boussinesq equations~\eqref{standbouss}, then the second equation of~\eqref{boussinesq2} is satisfied up to $\mathcal{O}(\varepsilon^3)$ terms if and only if: \begin{equation*} \varepsilon^2 \partial_t v_2 +\varepsilon^2 \partial_x \zeta_2 =\varepsilon^2 f(\zeta_1,v_1), \end{equation*} with $f(\zeta_1,v_1)=\partial_x \zeta_1 \partial_x \partial_t v_1 +\dfrac{2}{3}\zeta_1 \partial_x^2 \partial_t v_1 + \dfrac{1}{45}\partial_x^4 \partial_t v_1 +\dfrac{1}{3} \partial_x\big(v_1 (v_1)_{xx}-(v_1)_x^2\big)$. Therefore, this yields \begin{equation*} \partial_t v_2 + \partial_x \zeta_2 = f(\zeta_1,v_1). \end{equation*} Hence, the result is directly obtained given the conditions on $\zeta_2$ and $v_2$ in the theorem statement. \end{proof} \subsection{Analytic solution for the linear system~\eqref{linsyst}} In this section, we find the analytic solution for the two transport equations of system~\eqref{linsyst}. Let us first consider the initial value problem of~\eqref{linsyst}: \begin{equation}\label{IVPlinsyst} \left\{ \begin{array}{lcl} \displaystyle\partial_t \zeta_2 +\partial_x v_2=0
\; , \hspace{3cm} \text{if} \ x \in \mathbb{R}, t >0,\\ \displaystyle \partial_t v_2 +\partial_x \zeta_2=f(t,x), \; \hspace{2.25cm} \text{if} \ x \in \mathbb{R}, t >0,\\ \zeta_2(0,x)=\zeta_2^0(x), \ \ v_2(0,x)=v_2^0(x) \hspace{0.45cm} \text{if} \ x \in \mathbb{R}, \end{array} \right. \end{equation} where $\zeta_2^0$ and $v_2^0$ are both given in $C^\infty(\mathbb{R})$. The system can be equivalently rewritten as: \begin{equation}\label{IVPlinsyst2} \left\{ \begin{array}{lcl} \displaystyle\partial_t (\zeta_2+v_2) +\partial_x (\zeta_2+v_2)=f(t,x)
\; , \hspace{1.95cm} \text{if} \ x \in \mathbb{R}, t >0,\\ \displaystyle \partial_t (\zeta_2-v_2) -\partial_x (\zeta_2-v_2)=-f(t,x), \; \hspace{1.7cm} \text{if} \ x \in \mathbb{R}, t >0,\\ \zeta_2(0,x)=\zeta_2^0(x), \ \ v_2(0,x)=v_2^0(x) \hspace{2.2cm} \text{if} \ x \in \mathbb{R}, \end{array} \right. \end{equation} The analytical solution of both transport equations of system~\eqref{IVPlinsyst2} are: \begin{equation*} \zeta_2+v_2=(\zeta_2^0+v_2^0)(x-t)+ \int_0^t f(s,x-t+s) ds, \end{equation*} and \begin{equation*} \zeta_2-v_2=(\zeta_2^0-v_2^0)(x+t)- \int_0^t f(s,x+t-s) ds. \end{equation*} Thus, one can easily deduce that the analytic solutions of system~\eqref{IVPlinsyst} are given by \begin{equation}\label{zeta2def} \zeta_2=\dfrac{1}{2}\Big[(\zeta_2^0+v_2^0)(x-t)+(\zeta_2^0-v_2^0)(x+t)+ \int_0^t f(s,x-t+s) ds-\int_0^t f(s,x+t-s) ds\Big], \end{equation} and \begin{equation}\label{v2def} v_2=\dfrac{1}{2}\Big[(\zeta_2^0+v_2^0)(x-t)-(\zeta_2^0-v_2^0)(x+t)+ \int_0^t f(s,x-t+s) ds+ \int_0^t f(s,x+t-s) ds \Big]. \end{equation} \subsection{Explicit solution with correctors for the system of equations~\eqref{boussinesq2}.} In what follows, we prove that the extended Boussinesq system~\eqref{boussinesq2} enjoys an explicit solution with correctors of order $\mathcal{O}(\varepsilon^3)$. \begin{theorem}\label{Theo} Let $(\zeta_1,v_1)$ given by the expressions in~\eqref{stdbousssol} and $f(t,x)$ as defined in~\eqref{gdef}. Lets also consider the initial condition $( \zeta_0,v_0)=(\zeta_1(0,x),v_1(0,x))+\varepsilon^2(\zeta_2^0,v_2^0)$ where $\zeta_2^0$ and $v_2^0$ are both given in $C^\infty(\mathbb{R})$. Then, the family ($\zeta,v)$ with \begin{equation}\label{zetadef} \zeta=\zeta_1 + \dfrac{\varepsilon^2}{2}\Big[(\zeta_2^0+v_2^0)(x-t)+(\zeta_2^0-v_2^0)(x+t)+ \int_0^t f(s,x-t+s) ds-\int_0^t f(s,x+t-s) ds\Big], \end{equation} and \begin{equation}\label{vdef} v= v_1+\dfrac{\varepsilon^2}{2}\Big[(\zeta_2^0+v_2^0)(x-t)-(\zeta_2^0-v_2^0)(x+t)+ \int_0^t f(s,x-t+s) ds+ \int_0^t f(s,x+t-s) ds \Big], \end{equation} is an explicit solution with correctors of order $\mathcal{O}(\varepsilon^3)$ on $[0,\frac{T}{\sqrt{\varepsilon}}]$ for the extended Boussinesq system~\eqref{boussinesq2}. \end{theorem} \begin{proof} Theorem~\ref{Hsconstheo}, gives the $H^s$ consistency result of $(\zeta,v)=(\zeta_1,v_1)+\varepsilon^2(\zeta_2,v_2)$ with the extended Boussinesq system~\eqref{boussinesq2}, where $(\zeta_2,u_2)$ as given in~\eqref{zeta2def} and~\eqref{v2def} is a solution of the linear system~\eqref{linsyst}. Hence the result can be obtained easily. \end{proof}
\section{Numerical validation}
In this section, we numerically validate the result of Theorem~\ref{Theo}. We consider the equations given by system~\eqref{boussinesq2} and compute explicitly the solutions given by~\eqref{zetadef} and~\eqref{vdef}. Then, we compute the residues of both equations after substituting~\eqref{zetadef} and~\eqref{vdef}, respectively. First, we set the initial conditions $\zeta_2^0=v_2^0=\exp\Big(-\Big(\dfrac{3\pi x}{10}\Big)^2 \Big)$. We also choose the constant $\alpha=1$. The residues $R_1(\varepsilon)$ and $R_2(\varepsilon)$ of the first and second equations of the system~\eqref{boussinesq2}, respectively, are defined as follows: \begin{equation}\label{Residuesdef} \left\{ \begin{array}{lcl}
\displaystyle R_1^p(\varepsilon) = \| \partial_t \zeta +\partial_x(hv)\|_{p}
\; ,\\
R_2^p(\varepsilon) =\| \displaystyle ( 1+\varepsilon\mathcal{T}[h]+\varepsilon^2\mathfrak{T} )\partial_t v + \partial_x\zeta+\varepsilon v \partial_x v +\varepsilon^2 \mathcal{Q}v \|_{p} \; . \end{array} \right. \end{equation} where $p \in \{2,\infty\}$. The residues $ R_1^p(\varepsilon)$ and $ R_2^p(\varepsilon)$ for $p=2$ and $p=\infty$ are computed for several values of $\varepsilon$, namely $\varepsilon=10^{-1}, \ 10^{-2},\ 10^{-3}, \ 10^{-4}$ and $10^{-5}$, at time $t=1$. The results are summarized in Table~\ref{ResTable} and Figures~\ref{R2curves} and~\ref{Rinfcurves} where we plot in a log-log scale the residues $R_1^{p}$ and $R_2^{p}$ for $p=2$ and $p=\infty$ in terms of $\varepsilon$. \begin{center} \begin{table}[H]
\begin{tabular}{ | c | c | c || c | c | c |} \hline $\varepsilon$ & $R_1^2(\varepsilon)$ & $R_2^2(\varepsilon)$ & $\varepsilon$ & $R_1^{\infty}(\varepsilon)$ & $R_2^{\infty}(\varepsilon)$\\ [0.5ex] \hline 1E-1 & 2.70E-02 & 3.80E-03 & 1E-1 & 4.30E-03 & 4.81E-04 \\ 1E-2 & 2.58E-05 & 2.96E-06 & 1E-2 & 4.17E-06 & 4.10E-07 \\ 1E-3 & 2.57E-08 & 2.89E-09 & 1E-3 & 4.16E-09 & 4.12E-10 \\ 1E-4 & 2.57E-11 & 2.88E-12 & 1E-4 & 4.16E-12 & 4.13E-13\\ 1E-5 & 2.58E-14 & 2.90E-15 & 1E-5 & 4.33E-15 & 5.22E-16 \\ \hline \end{tabular} \vspace*{5mm} \caption{The residues $R_1(\varepsilon)$ and $R_2(\varepsilon)$ for $p=2$ (left) and $p=\infty$ (right)} \label{ResTable} \end{table} \end{center} \begin{figure}
\caption{The residues $R_1^{2}$ and $R_2^{2}$ as a function of $\varepsilon$.}
\label{R2curves}
\end{figure} \begin{figure}
\caption{The residues $R_1^{\infty}$ and $R_2^{\infty}$ as a function of $\varepsilon$.}
\label{Rinfcurves}
\end{figure} One clearly sees that the residue curves for both $p=2$ and $p=\infty$ are parallel to the $\varepsilon^3$ reference line. This shows that the convergence rate of the residues is $\mathcal{O}(\varepsilon^3)$, which is in total agreement with our theoretical result.
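The cubic rate can also be read off directly from Table~\ref{ResTable} by a least-squares fit in log-log coordinates. The short Python check below, using the tabulated values of $R_1^{2}$, is only a sanity check of the slope and is not part of the computation itself.
\begin{verbatim}
import numpy as np

eps = np.array([1e-1, 1e-2, 1e-3, 1e-4, 1e-5])
R12 = np.array([2.70e-2, 2.58e-5, 2.57e-8, 2.57e-11, 2.58e-14])

slope = np.polyfit(np.log10(eps), np.log10(R12), 1)[0]
print(slope)   # close to 3, i.e. the residue scales as eps^3
\end{verbatim}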
\end{document} |
\begin{document}
\newcommand\p[1]{
\paragraph{#1}\hspace{-1em}}
\newtheorem{thm}{Theorem} \newtheorem{lem}{Lemma} \newtheorem{cor}{Corollary}
\newtheorem{prp}{Proposition} \newtheorem{dfn} {Definition}
\newtheorem{rmr} {Remark}\newcommand\BR{\begin{rmr}}\newcommand\ER {\end{rmr}}
\newcommand\BD {\begin{dfn}} \newcommand\ED {\end{dfn}}
\newcommand\BL {\begin{lem}} \newcommand\EL {\end{lem}}
\newcommand\BT {\begin{thm}} \newcommand\ET {\end{thm}}
\newcommand\BP {\begin{prp}} \newcommand\EP {\end{prp}}
\newcommand\BC {\begin{cor}} \newcommand\EC {\end{cor}}
\newcommand\BPR {\begin{proof}} \newcommand\EPR {\end{proof}}
\newcommand\BE {\begin{enumerate}} \newcommand\EE {\end{enumerate}}
\newcommand\hreff[1] {{\footnotesize\href{https://#1}{https://#1}}}
\newcommand\emm[1]{{\ensuremath{#1}}} \newcommand\trm[1]{{\bf\em #1}}
\newcommand\emb[1]{{\ensuremath{\mathbf{#1}}}}\frenchspacing
\newcommand\ov[1]{{\overline{#1}}} \newcommand\un[1]{{\underline{#1}}}
\newcommand\floor[1]{{\lfloor#1\rfloor}} \newcommand\ceil[1]{{\lceil#1\rceil}}
\newcommand\edf{{\raisebox{-3pt}{$\,\stackrel{\text{\tiny df}}{=}\,$}}}
\newcommand\tld[1]{{\raisebox{-1pt}{$\widetilde{#1}$}}}
\newcommand\wht[1]{{\raisebox{-1pt}{$\widehat{#1}$}}}
\newcommand\lea{\prec}\newcommand\gea{\succ} \newcommand\eqa{\asymp}
\newcommand\lel{\lesssim}\newcommand\gel{\gtrsim}\newcommand\eql{\sim}
\renewcommand\i {{\emb i}} \renewcommand\d {{\emb d}}
\newcommand\M {{\emb M}} \newcommand\tb {{\emb t}}
\newcommand\St {{\emb S}} \newcommand\I {{\emb I}}
\newcommand\T {{\emb T}} \newcommand\m {{\emb m}}
\newcommand\KM {{\emb{KM}}} \newcommand\one {{\emb 1}}
\newcommand\K {{\emb K}} \newcommand\Ki {{\wht\K}}
\newcommand\mf {{\wht\m}} \newcommand\If {{\wht\i}}
\newcommand\N {{\emm{\mathbb N}}} \newcommand\Q {{\emm{\mathbb Q}}}
\newcommand\R {{\emm{\mathbb R}}} \newcommand\Ks {{\raisebox{2pt}{\emm\chi}}}
\newcommand\Es{{\emm{\bf\cal E}}} \newcommand\Ess {{\tld\Es}}
\renewcommand\a {{\emm\alpha}} \renewcommand\b {{\emm\beta}}
\renewcommand\l {{\emm\lambda}} \renewcommand\r {{\emm\rho}}
\newcommand\g {{\emm\gamma}} \newcommand\dl { {\emm\delta}}
\newcommand\w {{\emm\omega}} \newcommand\W {{\emm\Omega}}
\newcommand\ph {{\emm\varphi}} \newcommand\ie {{\em i.e., }}
\newcommand\eg {{\em e.g., }} \newcommand\re {{\em r.e. }}
\title {\vspace*{-6pc}\ttl} \date{} \author{\aut\\
Boston University\thanks {Computer Science dept., 111 Cummington Mall,
Boston, MA 02215; Home page: \hreff{www.cs.bu.edu/fac/Lnd}}}\maketitle
\vspace*{-3pc}\begin{flushright}\parbox{1pc}{\begin{tabbing}
С этой безмерностью в мире мер.\\*\em -- Марина Цветаева \footnotemark\end{tabbing}}\end{flushright}\footnotetext
{Measureless in this world of measures. -- Marina Tsvetaeva}
\vspace*{-2pc}\begin{abstract}\noindent Mutual information $\I$ in infinite sequences (and in their finite prefixes) is essential in theoretical analysis of many situations. Yet its right definition has been elusive for a long time. I address it by generalizing Kolmogorov Complexity theory from measures to {\bf semimeasures} \ie infimums of sets of measures. Being concave rather than linear functionals, semimeasures are quite delicate to handle. Yet, they adequately grasp various theoretical and practical scenaria.
A simple lower bound $\i(\a:\b)\edf\sup_{x\in\N}(\K(x)- \K(x|\a)-\K(x|\b))$ of information turns out tight for Martin-L\"of random $\a,\b\in\{0,1\}^\N$. For all sequences $\I(\a:\b)$ is characterized by the minimum of $\i(\a':\b')$ over random $\a',\b'$ with $U(\a'){=}\a$, $U(\b'){=}\b$.\end{abstract}
\section {Introduction}
Kolmogorov Information theory applies to individual objects, in contrast to Shannon theories that apply to the models of processes that generated such objects. It thus has a much wider domain since many objects (\eg Shakespeare plays) have no realistic generation models. For completed objects, such as integers, the concept is simple and robust: $\I(x:y)=\K(x)+\K(y)-\K(x,y)$.
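(An illustrative aside, not part of the original exposition: two extreme cases follow at once. Since $(x,x)$ and $x$ are computable from each other, $\K(x,x)\eqa\K(x)$, so $\I(x:x)\eqa\K(x)$; and since $\K(x,y)\gea\max\{\K(x),\K(y)\}$, always $\I(x:y)\lea\min\{\K(x),\K(y)\}$: an object carries full information about itself and cannot share more information than either party contains.)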
Yet, the concept is also needed for emerging objects, such as, \eg prefixes of infinite sequences. Encoding prefixes as integers distorts the information by specifying their (arbitrary) cut-off point. This cut-off information is not a part of the original sequence and can be smaller in a longer prefix. In fact, this distortion can overwhelm the actual mutual information between the sequences.
This issue complicates many studies, forcing one to use (as, \eg in \cite{fi}) concepts of information that are merely lower bounds, differ between applications, and are known not to be tight.
For the related concept of rarity (randomness deficiency) Per Martin-L\"of proposed an extension that works well for infinite sequences under computable distributions. Yet, computability of distributions requires a running time limit for the processes generating them. Such limits then must be accounted for in all formulas, obscuring the simplicity of purely informational values, at a great cost to elegance and transparency. Without such a limit many important distributions are only lower-enumerable (r.e.). For instance, universal probability {\M} is the largest within a constant factor \re distribution. It is extraordinarily flat: all sequences are random with respect to it.
Yet {\M} is instrumental in defining other interesting distributions. In particular, Mutual Information in two sequences is their \trm {dependence}, \ie rarity with respect to the distribution $\M\otimes\M$ generating them independently with universal probability each. R.e. distributions are of necessity semimeasures: concave rather than linear functionals. Semimeasures also are relevant in more mundane and widespread situations where the specific probability distribution is not fully known (\eg due to interaction with a party that cannot be modeled). They require much more delicate handling than measures. This article considers many subtleties that arise in such generalization of complexity theory. The concept of rarity for such distribution considered here respects randomness conservation inequalities and is the strongest (\ie largest) possible such definition. The definition of mutual information arising from this concept is shown to allow rather simple descriptions.
\section {Conventions and Background}
Let \R, \Q, \N, $\St{=}\{0,1\}^*$, $\W{=}\{0,1\}^\N$ be, respectively, the sets of reals, rationals, integers, finite, and infinite binary sequences;
$x_{[n]}$ is the $n$-bit prefix and $\|x\|$ is the bit-length of $x{\in}\St$; for $a{\in}\Re^+$, $\|a\|{\edf}|\,\ceil{\log a}{-}1|$. A function $f$ and its values are \trm {enumerable} or \trm\re ($-f$ is \trm{co-r.e.}) if its subgraph $\{(x,t):t<f(x)\}$ is r.e., i.e. a union of an \re set of open balls. $X^+$ means $X\cap\{x{\ge}0\}$. \trm {Elementary} ($f{\in}\Es$) are functions $f:\W\to\Q$ depending on a finite number of digits; $\one\in\Es$ is their unity: $\one(\a)=1$. $\tld E$~is the set of all supremums of subsets of $E$. $f{\uparrow}$ for $f:\W\to\R$, denotes $\sup\{g:f>g\in\Es\}$.
\trm {Majorant} is an \re function largest, up to constant factors, among \re functions in its class.\\ ${\lea}f$, ${\gea}f$, ${\eqa}f$, and ${\lel}f$,
${\gel} f$, ${\sim}f$ denote ${\le}f{+}O(1)$, ${\ge}f{-}O(1)$, ${=}f{\pm}O(1)$, and ${\le}f{+}O(\|f{+}1\|)$,\\ ${\ge}f{-}O(\|f{+}1\|)$, ${=}f{\pm}O(\|f{+}1\|)$, respectively. $[A]\edf1$ if statement $A$ holds, else $[A]\edf0$.
When unambiguous, I identify objects in clear correspondence: \eg prefixes with their codes or their sets of extensions, sets with their characteristic functions, etc.
\subsection {Integers: Complexity, Randomness, Rarity}
Let us define Kolmogorov \trm {complexity} $\K(x)$ as $\|\m(x)\|$ where $\m:\N\to\R$ is the \trm {universal distribution}, \ie a majorant \re function with $\sum_x\m(x){\le}1$. It was introduced in \cite{ZL}, and noted in \cite {L73,L74,g74} to be a modification of the least length of binary programs for $x$ defined in \cite {K65}. The modification restricts the domain $D$ of the universal algorithm $u$ to be prefixless. While technically different, {\m} relies on intuition similar to that of \cite {Sol}. The proof of the existence of a majorant function was a direct modification of \cite {Sol, K65} proofs which have been a keystone of the informational complexity theory.
For $x{\in}\N,y{\in}\N$ or $y{\in}\W$, similarly, $\m(\cdot|\cdot)$ is a majorant \re real function with $\sum_x\m(x|y){\le}1$; $\K(x|y)\edf\|\m(x|y)\|$ ($=$ the least length of prefixless programs transforming $y$ into $x$).
\cite {K65} considers \trm {rarity} $\d(x)\edf\|x\|{-}\K(x)$ of uniformly distributed $x{\in}\{0,1\}^n$.\\ Our modified {\K} allows extending this to other measures $\mu$ on~$\N$. A $\mu$-test is $f:\N\to\R$ with mean
$\mu(f){\le}1$ (and, thus, small values $f(x)$ on randomly chosen~$x$). For computable $\mu$, a majorant \re test is $\tb(x)\edf\m(x)/\mu(x)$. This suggests defining $\d_\mu(x)$ as $\|\ceil{\tb(x)}\|\eqa \|\mu(x)\|-\K(x)$.
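(A quick check, ours rather than the original's: for the uniform distribution $\mu(x){=}2^{-n}$ on $\{0,1\}^n$ this gives $\d_\mu(x)\eqa\|\mu(x)\|{-}\K(x)\eqa n{-}\K(x)\eqa\d(x)$, recovering the deficiency of \cite{K65} above.)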
\subsection {Integers: Information}
In particular, $x{=}(a,b)$ distributed with $\mu{=}\m\otimes\m$, is a pair of two independent, but otherwise completely generic, finite objects. Then, $\I(a:b)\edf\d_{\m\otimes\m}((a,b)){\eqa}\K(a){+}\K(b){-}\K (a,b)$ measures their \trm {dependence} or \trm {mutual information}. It was shown (see \cite{ZL}) by Kolmogorov and Levin to be close (within
${\pm}O(\log\K(a,b))$) to the expression $\K(a){-}\K(a|b)$ of \cite{K65}. Unlike\\ this earlier expression (see \cite {g74}), our {\I} is symmetric and monotone: $\I(a:b)\lea\I((a,x):b)$ (which will allow extending {\I} to $\W$); it equals $\eqa\K(a)-\K(a|\ov b)$, where by $\ov b$ we will denote $(b,\K(b))$. \\ (The $\I_z$ variation of $\I$ with all algorithms accessing oracle $z$, works similarly.)\\ $\I$ satisfies the following Independence Conservation Inequalities \cite{L74,L84}:\\ For any computable transformation $A$ and measure $\mu$, and some family $t_{a,b}$ of $\mu$-tests
\[(1)\ \I(A(a):b)\lea \I(a:b); \hspace{4pc} (2)\ \I((a,w):b)\lea
\I(a:b)+\log t_{a,b}(w).
\]
(The $O(1)$ error terms reflect the constant complexities of $A,\mu$.)
So, independence of $a$ from $b$ is preserved in random processes,
in deterministic computations, their combinations, etc. These inequalities are not obvious (and false for the original 1965 expression $\I(a:b){=}\K(a){-}\K(a/b)$~) even with $A$, say, simply cutting off half of $a$. An unexpected aspect of $\I$ is that $x$ contains all information about $k{=}\K(x)$, $\I(x:k)\eqa\K(k)$, despite
$\K(k|x)$ being ${\sim}\|k\|$, or ${\sim}\log\|x\|$ in the worst case \cite{g74}. One can view this as an "Occam Razor'' effect: with no initial information about it, $x$ is as hard to obtain as its simplest ($k$-bit) description.
\subsection {Reals: Measures and Rarity}\label{ML}
\p {A measure} on $\W$ is a function $\mu(x){=}\mu(x0){+}\mu (x1)$, for $x{\in}\St$. Its mean $\mu(f)$ is a functional on \Es, linear: $\mu(cf{+}g){=}c\mu(f){+}\mu(g)$ and \trm {normal:} $\mu(\pm\one){=}\pm1$,
$\mu(|f|)\ge0$. It extends to other functions, as usual. An example is $\l(x\W)\edf 2^{-\|x\|}$ (or $\l(x)$ for short).
I use $\mu_{(\a)}(A)$ to treat the expression $A$ as a
function of $\a$, taking other variables as parameters.
$\mu$-\trm{tests} are functions $f\in\Ess$, $\mu(f){\le}1$; computable $\mu$ have \trm {universal} (\ie majorant {\em r.e.}) tests $\T_\mu(\a) {=}\sum_i\m(\a_{[i]})/\mu(\a_{[i]})$, called \trm {Martin-L\"of tests.}\footnote
{The condition $\mu(\T_\mu){\le}1$, slightly stronger (in log scale)
than the original one of \cite {ML}, was\\ required in \cite{L76}
in order to satisfy conservation of randomness. Both types of tests
diverge simultaneously.\\ \cite {Schn73} (for divergence of $\T_\l$),
\cite {L73}, \cite {g80} characterized the tests in complexity terms.}
Indeed, let $t$ be an \re $\mu$-test, and $S_k$ be an \re family of prefixless subsets of $\St$ such that $\cup_{x\in S_k}x\W=\{\a:t (\a){>}2^{k+1}\}$. Then $t(\a)=\Theta(\sum_{k,x{\in}S_k}(2^k[\a{\in}x\W])) =\Theta(\sup_{k,x{\in}S_k}(2^k[\a{\in}x\W]))$. Now, $\sum_{k,x{\in}S_k} (2^k\mu(x)) <\mu(t)\le1$, so $2^k\mu(x){=}O(\m(x))$ for $x{\in}S_k$ and $t(\a){=}O(\sup_{k,x{\in}S_k}([\a{\in}x\W]\m(x)/\mu(x))){=} O(\sup_i(\m(\a_{[i]})/\mu(\a_{[i]})))$.
\trm{Martin-L\"of random} are $\a$ with finite \trm{rarity}
$\d_\mu(\a)\edf\|\ceil {\T_\mu(\a)}\|\eqa\sup_i(\|\mu(\a_{[i]})\|-\K(\a_{[i]}))$
and we also use $\d_\mu(\a|x)\edf\sup_i(\|\mu(\a_{[i]})\|-\K(\a_{[i]}|x))$.
\p {Continuous transformations} $A:\W{\to}\W$ induce normal linear operators $A^*:f{\mapsto}g$ over $\Es$, where $g(\w){=}f(A(\w))$. So obtained, $A^*$ are \trm {deterministic}: $A^*(\min\{f,f'\})=\min\{A^*(f),A^*(f')\}$. Operators that are not, correspond to probabilistic transformations (their inclusion is the benefit of the dual representation), and $g(\w)$ is then the expected value of $f(A(\w))$. Such $A$ also induce $A^{**}$ transforming input distributions $\mu$ to output distributions $\ph=A^{**}(\mu):\ph(f)=\mu(A^*(f))$.
I treat $A,A^*,A^{**}$ as one function $A$ acting as $A^*$, or $A^{**}$ on the respective (disjoint) domains. Same for partial transformations below and their concave duals. I also identify $\w{\in}\W$ with measures $f\mapsto f(\w)$.
\section {Partial Operators, Semimeasures, Complexity of Prefixes}
Not all algorithms are total: narrowing down the output to a single sequence may go slowly and fail (due to divergence or missing information in the input), leaving a compact set of eligible results:
\BD\label{op}\BE \item {\em Partial} continuous transformations \trm {(PCT)} are compact subsets $A\subset\W{\times}\W$ with $A(\a)\edf\{\b:(\a,\b){\in} A\}\ne\emptyset$. When not confusing I identify singletons $\{\b\}$ with $\b{\in}\W$.\\ \trm{Computable} PCT are r.e., \ie enumerate the open complement of $A$;
\item a PCT $A$ is \trm{clopen} if co-images $A^{-1}(s)=\{\a: A(\a)\subset s\}$
of all clopen $s\subset\W$ are clopen.\\ $A$ is $t$-clopen
if $A^{-1}(x\W)$ depend only on $\a_{[t(x_{[i]})]}$ for some $i$.
\item\trm {Dual} of PCT $A$ is the operator $A^*:\Es\to\Ess$,
where $A^*(f)=g:\a\mapsto\min_{\b{\in}A(\a)}f(\b)$. \EE\ED
An important example is a \trm {universal} algorithm $U$. It enumerates all algorithms $A_i$ with a prefixless set $P$ of indexes $i$ and sets $(i\a,\b)\in U$ iff $(\a,\b){\in}A_i,i{\in}P$.
\BR\label{cmp} Composing PCT with linear operators produces normal concave operators, all of them by the Hahn--Banach theorem. Indeed, each such $C(f)$ is a composition $A(R(B(f)))$: Here a PCT $A(\a)$ relates each $\a$ to the binary encodings $\{\mu\}$ of measures $\mu\ge C(\a)$; $R$ transforms $\{\mu\}$ into a distribution $\{\mu\}\otimes\l$; and $B(\{\mu\}, \b)$ relates $\l$-distributed $\b$ to $\mu$-distributed $\g$ with $\mu[0,\g)\le\b\le\mu[0, \g]$.\ER
Normal concave operators transform measures into \trm {semimeasures}:
\BD\label{sm}\BE\item A \trm {semimeasure} $\mu:\Es{\to}\R$ is a normal ($\mu(\pm\one){=}{\pm}1,\,\mu(|f|){\ge}0$) functional\\ that is concave: $\mu(cf{+}g)\ge c\mu(f){+}\mu(g),\,c\in\R^+$, \eg $\mu(x)\ge\mu(x0){+}\mu(x1)$, for $x\in\St$.\\ $\mu$ extends to $f{\in}{-}\Ess$ as $\inf\{\mu(g):f\le g{\in}\Es\}$, and to other functions as $\sup\{\mu(g): f\ge g{\in}{-}\Ess\}$, as is usual for inner measures. $\mu$ is \trm {deterministic} if $\mu(\min \{f,g\})=\min\{\mu(f),\mu(g)\}$.
\item Normal ($A(\pm\one)=\pm\one$, $A(|f|)\ge0$) concave operators $A:\Es\to\tld\Es$ transform input points $\a$ and distributions $\ph$ (measures or semimeasures) into their output distributions $A(\ph):f{\mapsto}\ph(A(f))$. Operators $A$ are deterministic if semimeasures $A(\a)$ are.\\ \trm {Regular} are semimeasures $A(\l)$ for deterministic \re $A$; $t$-regular for a $t$-clopen $A$. \EE\ED
\BP\label{id}\BE\itemsep0pt\item\label{id1} Each deterministic $\mu$ is $\mu(f)=\min_{\w\in S}f(\w)$ for some compact $S\subset\W$. \item\label{id2} Dual of PCT are those and only those operators that are normal, concave, and deterministic. \item\label{id3} Each $f{\in}\Es$ has a unique form $f{=}\sum r_if_i$ with distinct boolean $f_i{\ge}f_{i+1},f_0{=}\one$, $r_i{>}0$ for $i{>}0$.\\ Then $\un\mu(f)\edf\sum_i r_i\mu(f_i)$. $\mu{=}\un\mu$
if $\mu$ is regular. All \re measures are regular.
\item\label{id4} Each \re semimeasure $\mu$ has a regular
\re $\mu'{\le}\mu$ with $\mu'(x){=}\mu(x)$ for all $x\in\St$.\\ $\mu'$ is $t$-regular for a computable $t$ if $\mu(x)$ have $<t(x)$ bits.\EE\EP
\BPR \ref{id1}: Note, $p(\b)\edf\inf_{g:\mu(g){\ge}1}|g(\b)|\in\{0,1\}$. Indeed, if $\mu(f){-}f(\b)=t{>}0$ and $g=(f{-}f(\b)\one)/t$ then $g(\b){=}0$, $\mu(g){\ge}1$. Then $S$ is $\{\b:p(\b){=}1\}$. \ref{id2}: $\mu{=}A(\a)$ are deterministic, so $\mu(f){=}\min_{\b\in S}f(\b)$. \ref{id3} is since regular $\mu$ are averages of deterministic ones. \ref{id4} is by Theorem 3.2 of \cite{ZL}.\EPR
\subsection {Complexity: General Case}
\BP\label{um} There exists a \trm {universal}, \ie majorant (on $\Es^+$) \re semimeasure {\M}. The values $\M(x)$ can have $\K(x)$ bits. (Thus $t$-clopen PCT can generate $\M$ for any computable $t(x){>}\K(x)$).\footnote
{For $t(x)\sim\|x\|$ shown in \cite{L71}, Th.13; also mentioned in \cite {ZL}, Prp.3.2.}\EP
\BPR For an \re family $\mu_i$ of all \re semimeasures, take $\M(x)= \sum_i\mu_i(x)/2i^2$. $\M(x)$ can be rounded-up to $\K(x)$ bits after adding $\sum_{y\ne\{\}}\m(xy)$ (to keep $\M(x)\ge\M(x0)+\M(x1)$).\EPR
As in \cite {ZL}, $\KM(x)\edf\|\M(x\W)\|$.
Same for $\M_\a$, \re w.r.t. $\a$ and $\KM(x|\a)\edf\|\M_\a(x\W)\|$.
$\K(x|y)$, $\KM(x)$ are examples of the many types of complexity measures on $\St$.\\ \cite{L76b} gives the general construction of Kolmogorov-like complexities $\K_v$. I summarize it here.
$\K_v$ are associated with classes $v$ of functions $m{:}\;\St{\to}[0,1]$, in linear scale, and their logarithmic scale projections $\ov v\edf\{K=\|m\|:
m{\in}v\}$. Thus, $\K(x|y)$ is $\K_v$ for $v=\{m:\sup_y\sum_xm(x|y)\le1\}$.
These $v$ are closed-down, weakly compact, and decidable on tables with finite support. $\ov v$ will have a minimal, up to $\eqa$, co-\re function $\K_v$. This justifies the logarithmic scale where the values of $\K_v$ are well defined up to $O(1)$ adjacent integers. (Though linear scale is often clearer analytically.)
$\K_v$ minimality requires $\min\{K',K''\}{+} O(1)\in\ov v$ for any $K',K''$ in $\ov v$. In the linear scale of $m$ this comes to $(m'{+}m'')/c\in v$ for some $c{=}O(1)$. I tightened this to convexity with $c{=}2$; this changes $K$ in $\ov v$ by just $\Theta(1)$ factors: a matter of choosing bits as units of complexity.
Similarly to Proposition~\ref{um}, this condition suffices for $\ov v$ to have a minimal, up to $\eqa$, co-\re $\K_v$. Each such $\K_v{<}\infty$ has a computable lower bound $B_v(x)=\min_{K{\in}\ov v}K(x)$, largest up to $\eqa$, among \re bounds. And $\K_v{-}B_v$, too, is such a $\K_{v'}$; I call $v'$ \trm {normal}, as $B_{v'}{=}0$. Let $\Es_1{=}\Es^+\cap\{f{:}\:\max_\a f(\a){=}1\}$.
$\KM(f){=}\|\M(f)\|,\,f{\in}\Es_1$ is a normal complexity measure and all others are its special cases:
\BP For each normal $v$ a computable representation $t_x\in\Es_1$ for $x\in\St$ exists such that $\K_v(x)\eqa\KM(t_x)\lea\K(x)$.\EP
\BPR $\K_v(x){\lea}\K(x)$ follows from normality ($B_v{\eqa}0$) and convexity of $v$. Thus $\m_v(x)$ needs $\lea\K(x)\lea2\|x\|$ bits. Let $m'$ be $m{\in}v$ so rounded-down. For $m{\in}v$, let $m_x$ be a prefixless code of $(x,m'(x))$, and $m_{[x]}$ be $m_1m_2\ldots m_x$. Then $t_x(\a)\edf m'(x)$ if $\a{=}m_{[x]}\b,m{\in}v$; otherwise $t_x(\a)\edf0$.
The measure concentrated in a single $\a$ has some $m{\in}v$ for which it maps each $t_x$ to $m'(x)$.\\ Other measures $\mu$ also have $\tau_\mu:x\mapsto\mu (t_x)$ in $v$ by convexity of $v$.\\ As $v$ is closed down, $\tau_\M\in v$, too, and so, $\tau_\M=O(\m_v)$. Conversely, some measure $\a$ has $\tau_\a{=}\m_v$. As $\m_v$ is r.e., the minimal semimeasure $\mu$ with $\tau_\mu\ge\m_v$ is r.e., too, and so, $\m_v\le\tau_\mu=O(\tau_\M)$.\EPR
\section {Complete Sequences} \label{cmpl}
\cite{L76a} calls \trm {complete} sequences $\a$ that are $\mu$-random for a computable $\mu$. This class is closed under all total recursive operators. Here I use this term \trm {complete} also for $\a'$ Turing-equivalent to such \a. This is identical to $\a'$ being either recursive or Turing-equivalent to a $\l$-random sequence.
By \cite {ku,g86,BL}, each $\a{\in}\W$ is w.t.t.-reducible to a \l-random~\w. Indeed, for $P(x,\a)=x\a$, let measure $\r$ be $\l$-integral of $\T_\l$: $\r=P(\m\otimes\l)$. Let $R=\{\a:\T_\l(\a)\le c\}$ for a convenient constant $c$. When $A(\l)$ generates $\M(x)$, the co-images of all prefixes intersect $R$. (Otherwise $A(\r)$ would exceed $\M=A(\l)$.) But for clopen $A$ (see Prp.~\ref{um}), co-image of any $\a{\in}\W$ is the intersection of (non-empty in $R$) clopen co-images of its prefixes $\a_n$, so intersects $R$, too.
Yet partial algorithms can generate incomplete sequences with positive probability: \cite {Vyugin}.
I extend $\K(\b|\a)$ to $\a,\b\in\W$ using a universal PCT $U(p,\a)$ that runs on $\a$ a program $p$ given on a separate tape; $\a_p$ combines bits of $p,\a$ in order read by $U$. $p$ must be prefixless: $U$ diverges and $\a_p$ is undefined unless $U$ detects the end of $p$ and does not try to move beyond its end of tape.
\BD Here $\a,\b\in\W$. $\K(\b|\a)\,\edf\,\min_p\{\|p\|:U(p,\a){=}\b\}$.\\ The \trm {codeset} $R_\a$ for $\a$ is $\{\b:U(\b){=}\a,\,\d_\l(\b){<}c\}$ where $c$ is a constant such that\\ the \trm {incompleteness}\footnote
{For some applications of $\Ks$ its lower bound $\|\M_\a(R_\a)\|$ may suffice.}
$\Ks(\a)\edf\min_{\b\in R_\a}\K(\b|\a)$ of
any $\a$ is $\lel\|\d_\l(\a)\|$\footnote
{By finding $p$ to replace a prefix $q{=}U(p)$ where $\|q\|{-}\|p\|$
is the rarity. \newcounter{cmpr}\setcounter{cmpr}\thefootnote}.\\
\trm {Tight complexity} $\Ki(x|\a)$ is $\|\mf(x|\a)\|$ where $x{\in}\N$,
$\widehat\m_x(\a)\edf\min_{\b\in R_\a}\m(x|\b)$,
$\mf(x|\a)\edf\widehat\m_x{\uparrow}(\a)$.\ED
These concepts satisfy many properties similar to
those given (for integers) in \cite {g74,L74}:
\BP\label{kfs}\BE\itemsep0pt \item\label{kf1} $\K(\b|\a)\sim\KM(\b|\a)$.
\item\label{kf2} $\d_\l(\b_q)\eqa\d_\l(\b)+\|q\|{-}\K(q|\b,\d_\l(\b))$.
\item\label{kf3} $\Ks(\a)\eqa\min_{\b}\{\K(\a|\b){+}\K(\b|\a){+}\d_\l(\b)\}$.
\item\label{kf4} $\Ki(x|\a)\eqa\Ki(\ov x|\a)$. (Recall: $\ov x$ is $(x,\K(x))$.)
\item\label{kf5} $\If(\a:x)\edf\K(x)-\Ki(x|\a)\lea\If(\a:(x,y))$.
\item\label{kf6} $\If(\a:x)\eqa(\min_{\b\in R_\a}\d_\l(\b|\ov x)){\uparrow}
\eqa(\min_{\b\in U^{-1}(\a)}\d_\l(\b|\ov x)){\uparrow}$.\EE\EP
\BPR\ref{kf1}. Let $k{=}\KM(\b|\a)$, $s_{k,\a}\edf\{x0,x1:\KM(x0\W|\a){<}k,\KM(x1\W|\a){<}k\}$,
so, $|s_{k,\a}|<2^k$.\\ Let $x$ be the longest prefix of $\b$ in $s_{k,\a}$.
Then $\K(x|\a,k)\lea k$, and $\b$ can be computed from $x,k,\a$.
\ref{kf2}. "$\d_\l(\b_q)\gea$'' is by $t_{\b_q}\edf\T_\l(\b)2^{\|q\|}
\m(q|\b,\d_\l(\b))$ being \re with $\l_{(\b_q)}(t_{\b_q})\le1$. For "$\lea$''
take a distribution $\mu_{\b,d}(q)\edf\T_\l(\b_q)/2^{\|q\|+d}$ enumerated
for each $\b,d$ only while $\dl_\b\edf\|\sum_q 2^d\mu_{\b,d}(q)\|{\le}d$;\\
so enumeration of $\mu_{\b,\dl_\b}$ is not stopped.
Now, $\dl_\b\eqa\d_\l(\b)$ since $\l_{(\b)}(2^{\dl_\b}){\le}1$.
Also, $\sum_q\mu_{\b,d}(q){=}O(1)$, so $\mu_{\b,d}(q){=}O(\m(q|\b,d))$. Thus,
$\d_\l(\b)+\|q\|-\d_\l(\b_q)\gea\|\mu_{\b,\dl_\b}(q)\|\gea\K(q|\b,\d_\l(\b))$.
\ref{kf3}. Take $p,q,\b{=}U(p,\a)$ with $U(q,\b){=}\a$, $\Ks(\a){\eqa}
\|p\|{+}\|q\|{+}\d_\l(\b)$.\\ Then $\d_\l(\b)\eqa0$, $\K(q|\b)\eqa\|q\|$,
else $\b$ or $q$ could be shrunk decreasing $\Ks(\a)$.\\
Then $\d_\l(\b_q)\eqa0$ by \ref{kf2}, and the claim follows
by appending $q$ to $p$ to map $\a\mapsto(q,\b){\mapsto\b_q}$.
\ref{kf4}. Let $\b{=}v\w,\,\d_\l(\b){\eqa}0,\,\|p\|{=}\K(x|\b)$
(and so, ${\eqa}\K(p|\b)$), and $U(p,\b){=}x$ reads only $p,v$, so,\\
$\K(p,v){\lea}\|pv\|$. Then $\|pv\|{-}\K(p,v)\lea\d_\l(v_p){\eqa}0$
by \ref{kf2}. So, $\K(x){+} \K((p,v)|\ov x)\eqa\K(p,v)\eqa\|pv\|$.\\
Thus, finding $i,j$ with $\K(x){<}i,\K((p,v)|x,i){<}j$,
$i{+}j{\lea}\|pv\|$ computes $\K(x)\eqa i$ from $p,v$.
\ref{kf5}. By \ref{kf4} and $\K(\ov x|\ov{(x,y)}){\eqa}0$, we can replace $x$
with $\ov x$. Let $\d_\l(\b){\eqa}0$.\\ Then $\K(\ov x)-\K(\ov x|\b)-\K(\ov x,y)
+\K((\ov x,y)|\b)\eqa\K(y|\b,\ov x,\K(\ov x|\b)){-}\K(y|\ov x)\lea0$.
\ref{kf6}. For $\b{\in}R_\a$, $\K(x)-\K(x|\b)\gea\d_\l(\b|x)$, \ie
$\m(x)\T_\l(\b|x)=O(\m(x|\b))$.\\ Indeed, the \re $\sum_x\m(x)\T_\l(\b|x)$
is $O(\T_\l(\b)){=}O(1)$ since $\l_{(\b)}(\sum_x\m(x)\T_\l(\b|x))=\\
\sum_x\m(x)\l_{(\b)}(\T_\l(\b|x))\le\sum_x\m(x){\le}1$.
Also for all $\b$, $x{=}U(p)$ with $\K(x){=}\|p\|{\eqa}\K(p)$, the \re\\
$\m(U(p)|\b)p$ is $O(\T_\l(\b|p))$ since $\l_{(\b)}\m(x|\b)/\m(x){=}O(1)$.
So and $\K(x)-\K(x|\b)\lea\d_\l(\b|\ov x)$.\\
And any $\b\in U^{-1}(\a)$ can be compressed\footnotemark[\thecmpr]
to $\b'{\in}R_\a$ with $\d_\l(\b'|\ov x)\lea\d_\l(\b|\ov x)$.\EPR
\section {Rarity}
\subsection {Non-algorithmic Distributions}
\cite {L73} considered a definition of rarity $\T_\mu(\a)$ for arbitrary measures $\mu$ where $\T_\mu$ is \re only relative to $\mu$ used as an oracle. This concept gives interesting results on testing for co-\re classes of measures such as, \eg Bernoulli measures. Yet, for individual $\mu$ it is peculiar in its strong dependence on insignificant digits of $\mu$ that have little effect on probabilities. \cite {L76,g80} confronted this aspect by restrictions making $1/\T_\mu(\a)$ monotone, homogeneous, and concave in $\mu$.\footnote
{The Definition in \cite {L76} has a typo: "$Q(f)$'' meant to be "$Q(g)$''.
Also, in English version "concave relative to $P$''
would be clearer as "for any measure $Q$ concave over $P$''.
So, its $\T_\mu(\a)$ is $\sup_{f,g\in\Es}(t(f|g)f(\a)/\mu(g))$,
for\\ a $t$ majorant among \re functions that keep $\T_\mu(\mu)\le1$
for all measures $\mu$, where $\T_\mu(\ph)\edf\ph_{(\a)}(\T_\mu(\a))$.\par
Restrictions on $t$ (\eg $t\subset\St{\times}\Es$, $\T_\mu(\a)\edf\sup_ {(f,g){\in}t}f(\a)/\mu(g)$) can reduce redundancy with no loss of generality.}
\cite {L84} used another construction for $\T_\mu(\a)$. It generates $\mu$-tests by randomized algorithms and averages their values on $\a$. For computable $\mu$ the tests' ${\le}1$-mean can be forced by the generating algorithm, so the definition agrees with the standard one. But for other $\mu$ the ${\le}1$-mean needs to be imposed externally. \cite {L84} does this by just replacing the tests of higher mean with $\one$ (thus tarnishing the purity of the algorithmic generation aspect). That definition respects the conservation inequalities, so for \re semimeasures it gives a lower bound for our $\d_\mu(\a)$ below (by Prop.\ref{mx}).
\subsection{R.E. Semimeasures}
\p {Coarse Graining.} I use $\l$ as a typical continuous computable measure on \W, though any of them can be equivalently used instead. Also, any recursive tree of clopen subsets can serve in place of $\St$.
Restricting inputs $\w$ of a PCT $A$ to those with converging outputs (\ie a singletons $A(\w)\in\W$) truncate the output semimeasure to a smaller {\em
linear} functional: a maximal measure $\mu^\Es\le\mu{=}A(\l)$. Yet, much information is lost this way: \eg $\|\M^\Es(x)\|,x{\in}\St$ has no recursive in
$\|\M(x)\|$ upper bound. To keep information about generated prefixes, I will require linearity of $\mu^E$ only on a subspace $E{\subset}\Es$. $E$ will play a role of space of $\mu^E$-measurable functions. E.g., relaxing $A(\w)$ restriction from singletons to sets of radius ${\le}2^{-n}$, produces a semimeasure linear on the subspace of $f$ with $f(\a)$ dependent only on $\a_{[n]}$. Subspaces $E\subset\Es$ used below are generated by subtrees\footnote
{If a non-binary tree is used instead of $\St$ then any
$x{\in}S$ must have either all its children in $S$ or none.}
$S{\subset}\St$, \ie are spaces of linear combinations of functions in $S$.
By \trm {$E$-measures} I call semimeasures linear on such $E$.
\BP\label{cg} Each semimeasure $\mu$, for each $E$,
has the largest (on $\Es^+$) $E$-measure $\mu^E\le\mu$.\EP
\BPR Let $X$ be the set of all measures $\ph$ which, for some $F{\subset}E^+$ with $\sum_{f\in F}f>0$\\ and all $g\in\Es^+$, $g\le f\in F$, have $\ph(g)\ge\mu(g)$. Then $\mu^E(f)=\inf_{\ph\in X}\ph(f)$.\EPR
Now, I will extend the concept of rarity $\T_\mu$, $\d{\edf}\|\ceil{\T}\|$ from computable measures $\mu$ to r.e. semimeasures. The idea is for $\d_\mu(\a)$ to be bounded by $\d_\l(\w)$ if $\a{=}A(\w)$, $\mu{\ge}A(\l)$. Coarse graining on a space rougher than the whole $\Es$, allows to define rarity not only for $\a{\in}\W$ but also for its prefixes. For semimeasures, rarity of extensions does not determine the rarity of a prefix.
$\T_\mu$ for a computable measure $\mu$ is a single \re function $\W\to\R^+$ with $\le1$ mean. It is obtained by averaging the \re family of all such functions. This fails if $\mu$ is a semimeasure: its mean of sum can exceed the sum of means. So, our extended $\T_\mu$ will be refined with a subspace $E{\subset}\Es$ parameter.
\BD\label{d1} For an $E{\subset}\Es$ and a PCT $A$, $t^E_A$ is $\sup\{f{\in}E:A(f)\le\T_\l\}$.\ED
\BP\label{uo} Each \re $\mu$, among all \re PCT $A$ with $A(\l)\le\mu$, has a universal one $U_\mu$, \ie such that $t^E_{U_\mu}=O(t^E_A)$ for each $A$ and all $E$. $\mu(f)\le\l(2U_\mu(f))$ if $f\in\St$ or $\mu$ is regular.\EP
\BPR $U(i\w)\edf A_i(\w)$ for a prefixless enumeration $A_i$ of all such $A$. \EPR
\BD\label{rm} $\T_\mu^E(\ph)$ for semimeasures $\ph$, \re $\mu$ is the mean: $\ph^E(t^E_{2U_\mu})$ for $U_\mu$ defined in Prop.\ref{uo}.\ED
\BL\label{dM} (1) $\d_\mu^\Es\eqa\d_\mu$ for computable measures $\mu$.
(So, if $E=\Es$, we omit $E$ in $\d_\mu^E\edf\|\ceil{\T_\mu^E}\|$.)\\
(2) $\d_\mu^E(\mu){=}0$.\hspace{3pc}
(3) $\d_\M\eqa0$ for the universal semimeasure $\M$.\EL
\BPR (1) follows from \cite {ZL} Th.~3.1 and enumerability of $\T_\mu$.
(2) Let $A{=}U_\mu$. By Prop.\ref{uo}, $\mu^E(f)/2\le\l(A(f))$ for $f{\in}\St$, and thus for $f{\in}E^+$. Also any $f{<}t_A^E$ is $<\sum_if_i$ where $f_i{\in}E^+$, $f_if_{j{\ne}i}=0$, and $A(f_i)\le\T_\l$. Now, $\T_\mu^E(\mu)=\sup_{f{\in}\Es^+,f{<}t_A^E}\mu^E(f)/2$,\\ and $\mu^E(f)/2 \le\sum_i\mu^E(f_i)/2\le\l(\sum_i A(f_i))=\l(\sup_i A(f_i))\le\l(\T_\l)\le1$.
(3) By \cite{g86,ku}, an \re PCT $A$ exists such that any {\a} is $A(\w)$ with $d_\l(\w){=}0$. Then $g{=}A(f)\le\T_\l$ means $g(\w){=}f(A(\w))=f(\a)\le\T_\l(\w) \le2$. For a universal $\M$, $\d_\M\lea\d_{A(\l)}\eqa0$.\EPR
Let the semimeasure $\nu{=}\mu{\otimes}\ph$ on $\W^2$ be the minimum of $\mu'{\otimes}\ph'$ over all measures $\mu'{\ge}\mu,\,\ph'{\ge}\ph$. Then $\nu(h){=}\mu(f)\ph(g)$ for $h(\a,\b){=}f(\a)g(\b)$, and for all $h$, if $\ph$ is a measure, $\nu(h){=}$ $\mu(\ph_{(\b)}(h(\a,\b)))$. Let $E\otimes\Es$ be the space generated by $\{f(\a)g(\b),\,g{\in}E,f{\in}\Es\}$. Adding coin-flips preserves randomness:
\BL\label{cir} $\d_{\mu{\otimes}\l}^{E\otimes\Es}(\ph{\otimes}\l)\lea \d_\mu^E(\ph)$ for all $\ph$, \re $\mu$, space $E{\subset}\Es$.\EL
\BPR Let $\phi\edf\ph{\otimes}\l$, $\nu\edf\mu{\otimes}\l$, $E'\edf E{\otimes}\Es$, $A(\a,\b)\edf(U_\mu(\a),\b)$, $t\edf\T_\nu^{E'}(\phi)= \phi^{E'}(t^{E'}_{U_\nu})$. Then\\ for some $c{\in}\Q^+$, $t/c<\phi^{E'}(t^{E'}_{A})=\phi^{E'}(\sup H)$ where $H=\{h{\in}E':A(h) {\le}\T_{\l^2}\}$. So $t/c<\phi^{E'} (\sup G)$ for a finite set $G=\{f_i(\a)g_i(\b)\}\subset H$ with $\l(g_i){=}1$ and $f_if_{j\ne i}{=}0$, thus $\sup G=\sum G$.\\ Now, $U_\mu(f_i)g_i<\T_{\l^2}$,
thus $U_\mu(f_i)<\l_{(\b)}(\T_{\l^2}(\a,\b))=O(\T_\l(\a))$.
Then, $t/c<\phi^{E'}(\sum_if_i g_i)=\sum_i\phi^{E'}(f_i g_i)=\sum_i\ph^E(f_i)= \ph^E(\sum_if_i)=\ph^E(\sup_if_i)=O(\ph^E(t^E_{U_\mu}))=O(\T_\mu^E(\ph))$.\EPR
Let $A(E)$ be $\{f{\in}\Es:A(f){\in}\tld E{\subset}\Ess\}$. Deterministic processing preserves randomness, too:
\BL\label{cid} $\d_{A(\mu)}^{A(E)}(A(\ph))\lea\d_\mu^E(\ph)$ for
each \re PCT $A$, all $\ph$, \re $\mu$, space $E{\subset}\Es$.\EL
\BPR Let $E'\edf A(E)$, $\phi\edf A(\ph)^{E'}\le A(\ph^E)$,
$A_\mu(f)\edf U_\mu(A(f))$. So, $t\edf\T_{A(\mu)}^{E'}(A(\ph))=\\
\phi(t^{E'}_{U_{A(\mu)}})<c\,\phi(t^{E'}_{A_\mu})<c\,\phi(\sup F)$
for $F\edf\{f{\in}E'^+:U_\mu(A(f))\le\T_\l\}$ and some $c\in\Q^+$.\\
Then $t<c\phi (\sup G)$ for a finite set $G\subset F$ that can
be made disjoint, \ie $gg'=0$\\ for $g{\ne}g'$ in $G$ (and thus
$A(g)A(g')=0$ as $A$ is deterministic), so $\sup G=\sum G$.\\
Now, $U_\mu(h){\le}\T_\l$ for $h\edf\sup\{A(f):f{\in}F\}{\in}\tld E^+$,
so $h\le t^E_{U_\mu}$. Then $t/c<\phi(\sup G)= \phi(\sum G)=\\
\sum_{g{\in G}}\phi(g)\le \sum_{g{\in G}}\ph^E(A(g))= \ph^E(\sum_{g{\in G}}A(g))=
\ph^E(\sup_{g{\in G}}A(g)) \le \ph^E(h) \le 2\T_\mu^E(\ph)$. \EPR
By the remark~\ref{cmp}, Lemmas~\ref{cir}, \ref{cid} imply the following theorem:
\BT[Randomness Conservation]\label{t1} The test $\d$ satisfies
$\d_{A(\mu)}^{A(E)}(A(\ph))\lea \d_\mu^E(\ph)$\\ for each normal concave
\re operator $A$, all $\ph$, \re $\mu$, space $E{\subset}\Es$.\ET
These tests $\d_\mu^E$ are the strongest (largest)
extensions of Martin-L\"of tests for computable $\mu$:
\BP $\T_\mu^E(\w)$ is majorant among extensions $\tau_\mu {\in}\tld E^+$
of Martin-L\"of test $\T_\l=\tau_\l$\\ that are non-increasing on $\mu$ and obey Lemma~\ref{cid} for $\|\ceil\tau\|$ with $\tau^E_\mu(\ph)\edf\ph^E(\tau_\mu)$.\label{mx}\EP
\BPR With $A{\edf}U^*_\mu$, $A(\tau_\mu){\le}A(\tau_{A(\l)})$ and Lemma~\ref
{cid} for $\|\ceil\tau\|$ gives $A(\tau_{A(\l)})(\w)=\tau_{A(\l)}(A^*(\w))\le c\,\tau_\l(\w){=}c\T_\l(\w)$ for some $c\,{\in}\Q^+$. If $\tau_\mu{>}2c\,f{\in} E^+$ then $2c\,A(f){<}A(\tau_\mu){\le}c\T_\l$, so $\T_\mu^E{>}f$ as defined.\EPR
\section {Information and its Bounds}
Now, like for the integer case, mutual information $\I(\a:\b)$ can be defined as the deficiency of independence, \ie rarity for the distribution where $\a,\b$ are assumed each universally distributed (a vacuous assumption, see \eg Lemma~\ref{dM}(3)) but independent of each other: \[\I(\a:\b)\edf\d_{\M\otimes\M}((\a,\b)).\]
Its conservation inequalities are just special cases of Theorem~\ref{t1} and supply $\I(\a:\b)$ with lower bounds $\I(A(\a):B(\b))$
for various operators $A,B$. In particular transforming $\a,\b$ into distributions $\m(\cdot|\a),\m(\cdot|\b)$, gives $\I(\a:\b)\gea\i(\a:\b)\edf
\|\ceil{\sum_{x,y\in\N}\m(x|\a)\m(y|\b)2^{\I(x:y)}}\|$.\footnote
{This $\i$ was used as the definition of information in \cite {L74}.}
Same for $\If(\a:\b)\edf\|\ceil{\sum_{z\in\N}
\mf(z|\a)\mf(z|\b)/\m(z)}\|\gea\i(\a:\b)$.\footnote
{$\If\gea\i$ since for $z{=}(x,y)$, by Prop.\ref{kfs}.\ref{kf4},
$\Ki(z|\a)\lea\Ki(\ov y|\a){+}\K(x|\ov y)\eqa
\Ki(y|\a){+}\K(x|\ov y)\lea\K(y|\a){+}\K(x,y){-}\K(y)$.} These bounds also satisfy the conservation inequalities, and agree with $\I(\a:\b)$ for $\a, \b\in\N$. While $\I$ is the largest such extension from $\N$, $\i$ is the smallest one. Interestingly, not only for integers, but also for all complete sequences this simple bound $\i$ is tight, as is an even simpler one
$\i'(\a:\b)\edf\sup_{x\in\N}(\K(x){-}\K(x|\a){-}\K(x|\b))\lea\i(\a{:}\b)$:
\BP\label{cmpl-i} For $\a,\b\,{\in}\W,\,b\,{\in}\N$:
(1) $\I(\a:b)\eqa\K(b){-}\Ki(b|\a)$ (follows from Prop.\ref{kfs}.\ref{kf6});\\
(2) $\I(\a:\b)\lea(\min_{\a'\in R_\a,\b'\in R_\b}\i'(\a':\b')){\uparrow}
\lea\i'(\a:\b)+\Ks(\a){+}\Ks(\b)$.\EP
In particular, this can be used for $\a$ being the Halting Problem sequence (which is complete, being Turing-equivalent to any random \re real, such as, \eg one constructed in sec.~4.4 of \cite {ZL}).
\BPR We can replace $\a,\b$ with $\a'{\in}R_\a,\b'{\in}R_\b$.
Let~$h_n\edf(\a_{[n]},\b_{[n]})$.\\ $\l^2\edf\l{\otimes}\l\,{=}\,O(\M^2)$, so $\I(\a{:}\b){\lea}\d_{\l^2}((\a,\b)){\eqa}\|\ceil{\sup_n4^n\m(h_n)}\| \eqa\sup_n(\K(h_n)-2(\K(h_n){-}n))$.\\ Also $t\edf\sum_{n,v}2^n\m((\a_n,v))=
\Theta(\T_\l(\a))$, so $2^n\m((\a_n,v))/t=O(\m((n,v)|\a,\|t\|))$, and\\
$\K(h_n|\a)-(\K(h_n)-n)\lel\|t\|\eqa0$. Thus $\K(h_n|\a)\lea\K(h_n)-n$ and
$\K(h_n|\b)\lea\K(h_n)-n$.\\ Then $\I(\a:\b)\lea\sup_n(\K(h_n)-2(\K(h_n){-}n))
\lea\sup_n(\K(h_n)-\K(h_n|\a){-}\K(h_n|\b))\lea\i'(\a:\b)$.\EPR
\BP\label{M} Let $A\subset\W$. Then $\M^\Es(A)=0$ iff
$\exists\a\forall\b_{\in A}\I(\b:\a)=\infty$.\EP
\BPR "If'' is by Theorem\ref{t1}. Now, any $A$ with $\M^\Es(A){=}0$ has a
sequence $\a$ of clopen sets $\a_i\subset\W$ with shrinking $\M(\a_i)$, \ie
$\l(\{\g:\exists x\, U(\g)\subset x\W{\subset}\a_i\})<2^{-i}$, and s.t. each
$\b{\in}A$ is in infinitely many $\a_i$. Then, by Prop.\ref{kfs}.\ref{kf6},
$\If(\b:(i,\a_i))\gea(\min_{\g\in U^{-1}(\b)}\d_\l(\g|i,\a_i)){\uparrow}\gea i$
and so $\I(\b:\a)\gea\If(\b:\a)=\infty$.\EPR
\end{document} |
\begin{document}
\flushbottom \title{All-optical implementation of collision-based evolutions of open quantum systems}
\section{Introduction}
Precise control of quantum states is a crucial requirement for future quantum technologies \cite{qcomp,computation_dots}. Their processing protocols should preserve and distribute microscopic correlations in macroscopic scenarios, where countable quantum systems are subjected to environmental noise. It is essential in this context to understand how robust the possible quantum dynamical processes are, and how best to control the information permeability between the systems and their environment \cite{markovianity_on_demand,markovianity_control,markovianity_photonics,non_markovianity_dephasing,reviewNM3,applNM1}.
Quantum dynamical processes do not act merely on the sample system; rather, they act in an extended Hilbert space where the system and its surrounding environment are in contact \cite{computation_markov, markovianity_environment}. The non-isolated sample system $s$ is called an \textit{open quantum system} (OQS), and is characterized by a state $\rho_{s}\in\mathcal{H}_{s}$. Similarly, the environment $e$ is characterized by a state $\rho_{e}\in\mathcal{H}_{e}$. Without loss of generality, one can assume that the extended system $s-e$, which lives in $\mathcal{H}=\mathcal{H}_{s}\otimes\mathcal{H}_{e}$, is closed, so that no information can be lost but only distributed inside $\mathcal{H}$ \cite{markovianity_memory,markovianity_witness}.
The dynamics of an OQS are called Markovian if each continuous or discrete section of the total evolution is independent of the previous ones; otherwise, they are called non-Markovian \cite{markovian_comparison}. In the quantum scenario, three different approaches are widely used to quantify the degree of non-Markovianity of a process \cite{quantum_markovianity,reviewNM2}. The first method is based on the presence of information back-flow towards the system from the environment, which acts in this case as a reservoir of information \cite{markovianity_measure,flow2,markovianity_information}. In the OQS framework the total system-environment state $\rho_{s,e}\in\mathcal{H}$ evolves according to a quantum process generating a communication link between $\mathcal{H}_{s}$ and $\mathcal{H}_{e}$. Here the strength of the flow of information between system and environment during their interaction can be used to discriminate the level of non-Markovianity of the process. The second approach studies the divisibility of the process into \textit{Completely Positive} (CP) maps, defining the evolution as non-Markovian if this decomposition fails at some time \cite{quantum_markovianity,markovianity_detection}. In cases where this CP divisibility is valid, master equations can also be well defined \cite{master_equation}. The third method, which has been used in this work, studies the evolution of the entanglement between the system and an isolated ancilla, and it is closely related to the two approaches mentioned above. In order to explain it, we refer to the next section and to \cite{quantum_markovianity,NM_ent}.
If the environment is represented by an ensemble of spaces $\mathcal{H}_{e}=\mathcal{H}_{e_{1}}\otimes...\otimes \mathcal{H}_{e_{k}}$, and the system space $\mathcal{H}_{s}$ interacts sequentially with each of them at discrete times, we obtain the so-called \textit{collisional model} (CM) \cite{collisional1,collisional2,collisional3,collisional10,markovianity_collisional}. It represents a powerful tool to approximate continuous-time quantum dynamics and to analyze non-Markovian dynamics of OQSs \cite{collisional4,collisional5,collisional6,collisional7,collisional8,collisional9}. Linear optics platforms have been thoroughly analyzed for the implementation of CMs \cite{markovianity_simulator,collisional11}. A simple and effective implementation has been proposed by some of us \cite{markovianity_stroboscopic}. There the authors consider an initial photon state $\rho_{s}(t_{0})\in\mathcal{H}_{s}$, whose spatial mode collides sequentially with the modes of an environment ensemble, which can be considered as a double-space environment $\mathcal{H}_{e}=\mathcal{H}_{e_{1}}\otimes\mathcal{H}_{e_{2}}$ with one subspace always prepared in a certain generic state $\rho_{e_{2}}\in\mathcal{H}_{e_{2}}$ at any step-k of the process. The evolution of $\rho_{s}(t_{k})$ is mainly controlled by the interaction involving the Hilbert spaces $\mathcal{H}_{s}$ and $\mathcal{H}_{e_{1}}$, while the memory effects are due to the inter-environment collisions written in $\mathcal{H}_{e_{1}}$ and $\mathcal{H}_{e_{2}}$, which also produce an effective evolution in $\rho_{e_{1}}(t_{k})\in\mathcal{H}_{e_{1}}$. \begin{figure}
\caption{\textbf{Linear optics scheme for the CM:} Each step-k begins with the $s-e_{1}$ collision in $BS_{1}$ (in blue) and it ends with the $e_{1}-e_{2}$ collision in $BS_{2}$ (in green). The phase factor $\theta_{k}$ mediates the collision of the step-(k+1) by optical interference.}
\label{fig:collisional_1}
\end{figure}
The optical implementation described in \cite{markovianity_stroboscopic} is realized by a sequence of Mach-Zehnder interferometers (MZIs) as shown in Fig.~\ref{fig:collisional_1}, where the continuous trajectory is associated with $\rho_{s}$, the segmented trajectories with $\rho_{e_{1}}$ and the dotted trajectory with $\rho_{e_{2}}$. At step-k of this process $\rho_{s}(t_{k-1})$ interferes with $\rho_{e_{1}}(t_{k-1})$ in the \textit{beam splitter} $BS_{1}$, while the inter-environment collision with $\rho_{e_{2}}$ occurs in $BS_{2}$, which possesses a variable reflectivity $R_{BS}\in[0,1]$ to control the environment memory.
\section{Theoretical Model}
In our proposal (shown in Fig.~\ref{fig:collisional_2}a) we consider that all $BS_{2}$ have reflectivity $r_{2}=1$, so that they can be substituted with perfectly reflective mirrors $M$. Here the continuous trajectories correspond to the system ($s$-mode) while the segmented ones correspond to the first environment subspace ($e_{1}$-mode), as in Fig.~\ref{fig:collisional_1}. However, the second environment subspace ($e_{2}$-mode) has no defined path (not present in Fig.~\ref{fig:collisional_2}), since it represents the ``absorption environment'' after the action of a polarization-independent neutral filter $F_{k}$ placed in the $e_{1}$-mode. As seen in Fig.~\ref{fig:collisional_2}b), the super-operator process $\epsilon(t_{k},t_{k-1})$ is composed of a \textit{quarter wave plate} (QWP) in the $s$-mode and a \textit{half wave plate} (HWP) in the $e_{1}$-mode, both at fixed rotation angle $\phi=0$. The environment memory is controlled by the transmissivity factor $T_{k}\in[0,1]$ of $F_{k}$, which gives access to the vacuum state stored by $\rho_{e_{2}}=\ket{0}\bra{0}$, hence effectively mimicking the interaction with the dotted lines of Fig.~\ref{fig:collisional_1}. The phase factor $\theta_{k}$ mediates the collision mechanism by controlling the optical interference. Accordingly, in this setting a purely Markovian dynamics corresponds to the minimum information backflow from $\mathcal{H}_{e_{1}}$ to $\mathcal{H}_{s}$, which is achieved by the maximum loss of information from $\mathcal{H}_{e_{1}}$ to $\mathcal{H}_{e_{2}}$ ($T_{k}=0$). Non-Markovian dynamics can instead arise whenever $T_{k}\neq0$. \begin{figure}\label{fig:collisional_2}
\end{figure} Let us suppose that the s-mode is initially prepared in a maximally entangled state with an external ancillary system (a-mode) as $\ket{\Psi^{\pm}_{a,s}}=\frac{1}{\sqrt{2}}\left(\ket{H}_{a}\ket{V}_{s}\pm\ket{V}_{a}\ket{H}_{s}\right)$, where $\ket{H}$ ($\ket{V}$) represents the horizontal (vertical) polarization of a photon qubit. Since both e-modes are initialized in a vacuum state $\ket{0}$, the actual complete initial state corresponds to $\rho_{a,s,e_{1},e_{2}}(t_{0})=\ket{\Psi^{\pm}}\bra{\Psi^{\pm}}$, with \begin{equation} \ket{\Psi^{\pm}}=\frac{1}{\sqrt{2}} (\hat{a}_{a,H}^{\dagger}\hat{a}_{s,V}^{\dagger}\pm\hat{a}_{a,V}^{\dagger}\hat{a}_{s,H}^{\dagger})\ket{0}_{a,s,e_{1},e_{2}}\equiv\frac{1}{\sqrt{2}}\left(\ket{1_{h}}_{a}\ket{1_{v}}_{s}\pm\ket{1_{v}}_{a}\ket{1_{h}}_{s}\right)\otimes\ket{0}_{e_{1}}\otimes\ket{0}_{e_{2}}, \end{equation}
where $\hat{a}_{x}^{\dagger}$ are the photon creation operators on each x-mode. It is worth stressing that, due to the possibility of losing the s-photon during the propagation after its interaction with the $e_{2}$-mode, our scheme effectively describes the evolution of a qutrit system (with canonical basis given by the states $\ket{1_{h}}_{s}$, $\ket{1_{v}}_{s}$ and $\ket{0}_{s}$), where information is only stored in the bidimensional subspace associated with the one-photon sector.
In our prepared scenario the system-environment interactions are controlled by a series of operations, such as the BS one, \begin{eqnarray} \hat{BS}_{s,e_{1}}\cdot\left[\ket{1}_{s}\otimes\ket{0}_{e_{1}}\right]&\longrightarrow& i\sqrt{r}\ket{1}_{s}\otimes\ket{0}_{e_{1}}+\sqrt{1-r}\ket{0}_{s}\otimes\ket{1}_{e_{1}},\nonumber\\ \hat{BS}_{s,e_{1}}\cdot\left[\ket{0}_{s}\otimes\ket{1}_{e_{1}}\right]&\longrightarrow& i\sqrt{r}\ket{0}_{s}\otimes\ket{1}_{e_{1}}+\sqrt{1-r}\ket{1}_{s}\otimes\ket{0}_{e_{1}},\\ \hat{BS}_{s,e_{1}}\cdot\left[\ket{0}_{s}\otimes\ket{0}_{e_{1}}\right]&\longrightarrow&\ket{0}_{s}\otimes\ket{0}_{e_{1}},\nonumber \end{eqnarray}
with $\ket{1}=\left(\alpha\ket{1_{h}}+\beta\ket{1_{v}}\right)/\sqrt{|\alpha|^{2}+|\beta|^{2}}$ and $r$ as its reflectivity factor. The wave plates act according to \begin{equation} \hat{HWP}_{s,e_{1}}=\mathbb{I}_{s}\otimes\sigma^{z}_{e_{1}}\quad\text{and}\quad\hat{QWP}_{s,e_{1}}=\sigma^{z/2}_{s}\otimes\mathbb{I}_{e_{1}}, \end{equation}
with $\sigma^{z}=\ket{1_{h}}\bra{1_{h}}-\ket{1_{v}}\bra{1_{v}}+\ket{0}\bra{0}$, $\sigma^{z/2}=\ket{1_{h}}\bra{1_{h}}+i\ket{1_{v}}\bra{1_{v}}+\ket{0}\bra{0}$ and $\mathbb{I}=\ket{1_{h}}\bra{1_{h}}+\ket{1_{v}}\bra{1_{v}}+\ket{0}\bra{0}$.
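In the ordered basis $\left\{ \ket{1_{h}},\ket{1_{v}},\ket{0}\right\}$ used above, these single-mode operators take the explicit diagonal form (written here only to fix the notation):
\begin{equation*}
\sigma^{z}=\begin{pmatrix}1&0&0\\0&-1&0\\0&0&1\end{pmatrix},\qquad
\sigma^{z/2}=\begin{pmatrix}1&0&0\\0&i&0\\0&0&1\end{pmatrix},\qquad
\mathbb{I}=\begin{pmatrix}1&0&0\\0&1&0\\0&0&1\end{pmatrix}.
\end{equation*}
Similarly, for each fixed polarization the beam-splitter relations above act on the single-photon subspace spanned by $\left\{ \ket{1}_{s}\ket{0}_{e_{1}},\ket{0}_{s}\ket{1}_{e_{1}}\right\}$ as the unitary matrix $\begin{pmatrix} i\sqrt{r} & \sqrt{1-r}\\ \sqrt{1-r} & i\sqrt{r}\end{pmatrix}$, while the vacuum component $\ket{0}_{s}\ket{0}_{e_{1}}$ is left unchanged.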
The attenuation operation applied by the filter $F$ connects the environment space of the remaining light ($\mathcal{H}_{e_{1}}$) with the space of the absorbed light ($\mathcal{H}_{e_{2}}$) according to \begin{eqnarray} \hat{F}_{e_{1},e_{2}}\cdot \left[\ket{1}_{e_{1}}\otimes\ket{0}_{e_{2}}\right]&\longrightarrow&\sqrt{T}\ket{1}_{e_{1}}\otimes\ket{0}_{e_{2}}+\sqrt{1-T}\ket{0}_{e_{1}}\otimes\ket{1}_{e_{2}},\nonumber\\ \hat{F}_{e_{1},e_{2}}\cdot \left[\ket{0}_{e_{1}}\otimes\ket{1}_{e_{2}}\right]&\longrightarrow&\ket{0}_{e_{1}}\otimes\ket{1}_{e_{2}},\\ \hat{F}_{e_{1},e_{2}}\cdot \left[\ket{0}_{e_{1}}\otimes\ket{0}_{e_{2}}\right]&\longrightarrow&\ket{0}_{e_{1}}\otimes\ket{0}_{e_{2}},\nonumber \end{eqnarray}
which generates the effective inter-environment collisions that can reset the $e_{1}$-mode to the vacuum state depending on the absorption factor $1-T$. Finally the phase control acts as \begin{equation} \hat{\theta}_{s,e_{1}}=\ket{0}_{s}\bra{0}_{s}\otimes\ket{0}_{e_{1}}\bra{0}_{e_{1}}+\ket{0}_{s}\bra{0}_{s}\otimes\ket{1}_{e_{1}}\bra{1}_{e_{1}}+e^{i\theta}\ket{1}_{s}\bra{1}_{s}\otimes\ket{0}_{e_{1}}\bra{0}_{e_{1}}. \end{equation}
Then, the super-operator can be written as follows: \begin{equation} \epsilon(t_{k},{t_{k-1}})=\mathbb{I}_{s}\otimes\hat{F}_{e_{1},e_{2}}\circ\left(\left(\hat{\theta}_{s,e_{1}}\circ\hat{HWP}_{s,e_{1}}\circ\hat{QWP}_{s,e_{1}}\circ\hat{BS}_{s,e_{1}}\right)\otimes\mathbb{I}_{e_{2}}\right). \end{equation}
According to our CM represented in Fig.~\ref{fig:collisional_2}, the input state $\rho_{a,s,e_{1},e_{2}}(t_{0})$ evolves as \begin{equation}
\rho_{a,s,e_{1},e_{2}}(t_{1},t_{0})=\left(\mathbb{I}_{a}\otimes\epsilon(t_{1},t_{0})\right)\cdot\rho_{a,s,e_{1},e_{2}}(t_{0})\cdot \left(\mathbb{I}_{a}\otimes\epsilon(t_{1},t_{0})\right)^{\dagger}, \end{equation}
at the first step of the evolution. For consecutive steps, the process can be repeated with variations on $\epsilon(t_{k},t_{k-1})$ or by using the same operation. Finally, one can extract the ancilla-system state as $\rho_{a,s}(t_{k})=Tr_{e_{1},e_{2}}[\rho_{a,s,e_{1},e_{2}}(t_{k})]$ or the ancilla-environment state $\rho_{a,e_{1}}(t_{k})=Tr_{s,e_{2}}[\rho_{a,s,e_{1},e_{2}}(t_{k})]$ by tracing out the undesired spaces and performing bipartite tomographies after $k$ applications of the single-step process.
A characterization of the non-Markovianity of the process can then be obtained by studying the evolution of the concurrence $C_{a,s}$ between the ancilla $a$ and the system $s$ at the various steps of the interferometric propagation. From the results of \cite{quantum_markovianity,markovianity_topology} we know that whenever the relation $C_{a,s}(t_{k})>C_{a,s}(t_{k-1})$ holds for some $k>1$, a back-flow of information from $e_{1}$ to $s$ has occurred, which is a clear indication of the non-Markovian character of the system dynamics. Conversely, the absence of an increase of $C_{a,s}(t_{k})$ cannot be used as an indication of Markovianity.
The magnitude of all information backflows between two steps of the evolution gauges the degree of non-Markovianity, which can be estimated by considering the integral of the concurrence variation \cite{concurrence,qutrits} over the time intervals in which it increases, i.e. the quantity \begin{equation}\label{eq:measure} \mathcal{N}=\int_{\dot{C}_{a,s}>0} \dot{C}_{a,s}(t) dt. \end{equation}
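Since the dynamics considered here proceeds by discrete steps, a natural discretization of Eq.~\ref{eq:measure} (quoted only as a sketch of how $\mathcal{N}$ can be evaluated from step-by-step concurrence values; the actual numerical procedure may differ in its details) is
\begin{equation*}
\mathcal{N}\simeq \sum_{k\geq 1}\max \left\{ 0,\ C_{a,s}(t_{k})-C_{a,s}(t_{k-1})\right\} ,
\end{equation*}
i.e. the sum of all positive concurrence increments between consecutive steps.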
As already mentioned, our system $s$ is intrinsically 3-dimensional. Accordingly, the $C_{a,s}$ appearing in Eq.~\ref{eq:measure} should be the qutrit concurrence \cite{qutrits} instead of the standard qubit one \cite{concurrence}. However, for the sake of simplicity, in the experimental implementation which we present in the following sections, we shall restrict the analysis only to the entanglement between the single-photon sectors of $s$ and $a$, by properly post-selecting our data. Accordingly, our measurements do not completely capture the full non-Markovian character implicit in Eq.~\ref{eq:measure}.
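For reference, we recall the standard two-qubit (Wootters) concurrence \cite{concurrence} that applies to the post-selected single-photon sectors:
\begin{equation*}
C(\rho )=\max \left\{ 0,\ \lambda _{1}-\lambda _{2}-\lambda _{3}-\lambda _{4}\right\} ,
\end{equation*}
where $\lambda _{1}\geq \lambda _{2}\geq \lambda _{3}\geq \lambda _{4}$ are the square roots of the eigenvalues of $\rho \left( \sigma _{y}\otimes \sigma _{y}\right) \rho ^{\ast }\left( \sigma _{y}\otimes \sigma _{y}\right) $ and $\rho ^{\ast }$ denotes complex conjugation in the polarization basis.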
\section{Experimental Implementation}
The experimental setup is based on two concatenated bulk-optics Sagnac interferometers (SIs) as described in Fig.~\ref{fig:setup}a). They are initially prepared in a collinear configuration, which, by displacing a mirror in $SI_{1}$, is transformed into a displaced multipass scheme that replicates the CM of Fig.~\ref{fig:collisional_2}. Here we exploit a geometry endowed with high intrinsic phase stability, where the different BSs (present in the scheme of Fig.~\ref{fig:collisional_1}) are substituted by different transversal points on a single BS. In this scheme the odd steps circulate in $SI_{1}$, while the even ones circulate in $SI_{2}$. The configuration is equivalent to the model of Fig.~\ref{fig:collisional_2} since we can choose the s-modes and the $e_1$-modes as the clockwise and counter-clockwise trajectories inside each SI, respectively. For the sake of simplicity, from now on we will use the label ``e-environment'' only for the non-absorbed part of the environment, because its complementary part cannot be measured in our configuration. \begin{figure}
\caption{\textbf{a) Multipass scheme on a double SI.} The s-modes and e-modes circulate in the clockwise and counter-clockwise trajectories inside each SI, respectively. \textbf{b) Complete setup for the collisional model.} One can extract $\rho_{a,s}(t_{k})$ or $\rho_{a,e}(t_{k})$ by selecting the direction of the trajectories, the odd or even steps by choosing the SI, and the step number by using the external moving mirrors (MM) with translational stages. We use a single filter $F^{s}$ and a single filter $F^{e}$ for all odd and even steps. The phase factor $\theta_{k}$ is set by tilting the glass plate (TG) with respect to the fixed glass (FG). Any output qubit can be measured in the tomography stage together with the external ancillary qubit. Here the blue beams correspond to the first step, the red beams to the second step, the green ones to the third step and the yellow ones to the fourth step.}
\label{fig:setup}
\end{figure}
The relative phase factors $\theta_{k}$ are implemented by a fixed glass plate intersecting all the e-modes inside each SI, while thin glass plates are placed in every s-mode and tilted independently (see Fig.~\ref{fig:setup}b). The transmissivity factors $T_{k}=\frac{T^{e}_{k}}{T^{s}_{k}}$ are implemented by a single neutral density filter $F^{e}$ with transmissivity $T^{e}$ that intersects all the e-modes inside each SI, while another filter $F^{s}$ with transmissivity $T^{s}$ intersects all the s-modes for time compensation between both optical paths. In this configuration both filters introduce only a controlled absorption, which represents an intrinsic degree of Markovianity in any regime. Even so, the s-e absorptions can be mapped by the relative absorption factor $T_{k}$. Analogously, a single QWP intersects all s-modes of each SI, while a single HWP intersects all the e-modes. Since the s-mode and the e-mode contain the same kind of optical elements, we ensure temporally compensated trajectories with an uncertainty of $<30\mu m$ per step. The superposition of the $2^{k}$ trajectories at step $k$ is collected by a single-mode optical fiber (SMF) after the tomography stage of the s-e modes. Analogously, another SMF collects the external a-mode.
In this work we focus our attention on the case where all the steps are identical, namely by using a unique filtering factor $T_{k}=T$ and phase factor $\theta_{k}=\theta$. This regime can be described by \begin{equation} \epsilon(t_{k},t_{0})=(\epsilon(t_{1},t_{0}))^{k}, \end{equation}
and corresponds to the case of a \textit{stroboscopic evolution} (SE) \cite{markovianity_stroboscopic}. \begin{figure}\label{fig:surfaces}
\end{figure}
The entangled state $\rho_{a,s}(t_{0})=\ket{\Psi^{\pm}_{a,s}}\bra{\Psi^{\pm}_{a,s}}$ is prepared by two indistinguishable processes of Type-II \textit{spontaneous parametric down conversion} (SPDC) inside a high-brilliance, high-purity Sagnac source based on a periodically-poled KTP (PPKTP) non-linear crystal \cite{source}. Here a single-mode continuous-wave laser at $405nm$ is converted into pairs of photons with orthogonal polarizations at $810nm$ of wavelength and $0.42nm$ of line-width (measured by techniques described in \cite{polariton}). One photon is injected in the s-mode of the setup, while the other travels through the external a-mode. Finally, we reconstruct the post-selected state associated with the single-photon sectors of the density matrices $\rho_{a,s}(t_{k})$ or $\rho_{a,e}(t_{k})$ by bipartite hyper-complete tomographies between their associated modes (see Fig.~\ref{fig:collisional_2}b).
\begin{figure}\label{fig:comparison}
\end{figure}
In Fig.~\ref{fig:surfaces} we show a simulation of the possible non-Markovian dynamics under the SE with maximum environment memory ($T=1$) and variable phase factor $\Phi$. These predicted scenarios were obtained by considering an ideal Bell input state $\ket{\Psi^{\pm}}$ and ideal optical elements, e.g. symmetric BSs and lossless elements. They are useful to understand and identify the flows and back-flows of information, which can be exploited in the analysis of engineered $s$-$e$ couplings and their permeability, or of temporally localized communications for noise avoidance. Besides the s-e collision, the e-mode also suffers inter-environment collisions with the absorption space of the environment. Thus, there is a complex information exchange in which it is difficult to identify particular correlations exclusively between the a-s and a-e concurrence behaviours.
In Fig.~\ref{fig:comparison} we show a comparison between three SEs considering ideal optical elements, the actual experimental input state $\ket{\Psi}_{exp}$, a phase factor $\theta=\pi/2$ and different degrees of memory $T$. In the case $T=1$ a fast entanglement fluctuation results, with a non-Markovianity degree of $\mathcal{N}=0.475$ up to the sixth step. In the case $T=1/4$ one obtains a slower entanglement fluctuation that gives $\mathcal{N}=0.185$, while in the case $T=1/16$ an even slower fluctuation emerges, with $\mathcal{N}=0.005$ (all values of $\mathcal{N}$ reported here are computed on the post-selected single-photon sectors).
\section{Experimental Results}
The experimental test was restricted to the case of a SE with $\Phi=0$, as seen in the light-blue lines of Fig.~\ref{fig:surfaces}a) and Fig.~\ref{fig:surfaces}b), but considering real optical elements. The prepared entangled state $\Omega_{a,s}$ showed a measured fidelity $F=|\bra{\Psi_{a,s}^{\pm}}\Omega_{a,s}\ket{\Psi_{a,s}^{\pm}}|=0.9712\pm 0.0004$; hence the simulated data for the imperfect evolution refer to a Werner-like mixed input state \cite{werner,werner_optics,det_qcc} $\Omega_{a,s}=\frac{4F-1}{3}\ket{\Psi^{\pm}_{a,s}}\bra{\Psi^{\pm}_{a,s}}+\frac{1-F}{3}\mathbb{I}_{a}\otimes\mathbb{I}_{s}$ (its input concurrence is recalled below). \begin{figure}
\caption{\textbf{Non-Markovian dynamics with maximum memory ($T=1$). a)} Concurrence of $\rho_{a,s}(t_{k})$. \textbf{b)} Concurrence of $\rho_{a,e}(t_{k})$. All error bars were calculated from the propagation of 100 Monte-Carlo simulations with Poisson statistics, while theoretical data were simulated by considering the actual optical elements of the interferometric setup.}
\label{fig:results_1}
\end{figure} \begin{figure}
\caption{\textbf{Evolution of Polarization Entanglement with reduced memory ($T=0$ and $T=0.209$).} Concurrence of the single-photon sectors of the post-selected density matrix $\rho_{a,s}(t_{k})$. All error bars were calculated from the propagation of 100 Monte-Carlo simulations with Poisson statistics, while theoretical data were simulated by considering the actual optical elements of the interferometric setup.}
\label{fig:results_2}
\end{figure}
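Concerning the Werner-like input state $\Omega_{a,s}$ introduced above, we recall (a known property of Werner states, used here only as a consistency check) that its concurrence is
\begin{equation*}
C\left( \Omega_{a,s}\right) =\max \left\{ 0,\ 2F-1\right\} ,
\end{equation*}
which for the measured fidelity $F=0.9712$ gives an input concurrence of about $0.94$, slightly below the unit value of the ideal state $\ket{\Psi^{\pm}_{a,s}}$.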
In Fig.~\ref{fig:results_1} we present the concurrence fluctuations of the single-photon sectors expressed in the post-selected density matrices $\rho_{a,s}(t_{k})$ and $\rho_{a,e}(t_{k})$ during the SE. These states are reconstructed by normalizing the remaining non-absorbed coincident photons, and as a consequence the associated concurrence values become invariant under losses. Nevertheless, both dynamics behave according to the simulation for the Werner-like input state $\Omega_{a,s}$. In the case $T=1$ of Fig.~\ref{fig:results_1}a) we obtained the highest possible non-Markovianity, where the large concurrence fluctuations give us $\mathcal{N}=0.3232$. As seen in Fig.~\ref{fig:results_2}, in the case $T=0.209$ we obtained reduced concurrence revivals and a non-Markovianity of $\mathcal{N}=0.1442$, while in the case $T=0$ we confirmed the lowest possible non-Markovianity by obtaining a value close to zero, $\mathcal{N}=0.0044$.
For our particular CM, these results confirm that entanglement revivals are strictly connected to the environment memory. In fact, they show with high precision that decreasing values of $T$ reduce the information back-flows to the s-mode. The slight deviation from the theoretical simulations originates from the imperfect superposition of all possible photon trajectories. Even so, this error is strongly minimized by the use of SMFs as final spatial filters.
\section{Conclusions}
In this work we presented a linear optics setup that allows one to simulate different open quantum system dynamics. It is based on a novel interferometric structure that guarantees high phase stability and a multipass evolution in a compact setup, which makes it possible to study the dynamics up to at least 6 steps. The dynamics studied here represents the first implementation of the so-called \textit{collisional model} for open quantum systems \cite{markovianity_stroboscopic}, and our results correspond to a particular case of it. The setup is able to simulate a wide variety of stroboscopic evolutions, from strictly Markovian all the way up to strongly non-Markovian dynamics, where quantum memory effects show their contribution. We can experimentally track the role of system-environment and intra-environment interactions in the emergence of non-Markovian features and characterize the transition between the two regimes. As the field of quantum technologies spreads, more and more attention is being devoted to the study of non-Markovian dynamics. It can, in principle, be used for efficient information processing \cite{appl1,appl2,violaLloyd,liu2016efficient,dong2018non}, as well as for engineering novel interesting quantum states \cite{appl3,appl4,appl5}. In this perspective, our scheme can be of great interest, thanks to its stability, modular nature and direct access to the environmental degrees of freedom.
\section*{Author Contributions}
V. G. and P. M. proposed the theoretical frame and the optical scheme presented in Fig. \ref{fig:collisional_2}, \'A. C. proposed and coordinated the experimental multipass implementation, A. G., C. L. and L. D. B. performed and analysed the experimental measurements, A. D. P. and F. S. contributed to the interpretation of the results. All authors contributed to the writing of the manuscript.
\section*{Additional Information}
\textbf{Competing interests:} The authors declare that they have no competing interests.
\end{document} |
\begin{document}
\title[]{Some Remarks on Nonlinear Hyperbolic Equations} \author{Kamal N. Soltanov} \address{{\small Department of Mathematics, }\\ {\small Faculty of Sciences, Hacettepe University, }\\ {\small Beytepe, Ankara, TR-06532, TURKEY} } \email{[email protected]} \date{} \subjclass[2010]{Primary 35G25, 35B65, 35L70; Secondary 35K55, 35G20} \keywords{Nonlinear hyperbolic and parabolic equations, Neumann problem, a priori estimation, smoothness}
\begin{abstract} Here a mixed problem for a nonlinear hyperbolic equation with a Neumann boundary condition is investigated, and a priori estimates for the possible solutions of the considered problem are obtained. These results demonstrate that any solution of this problem possesses certain smoothness properties. \end{abstract}
\maketitle
\section*{Introduction}
In this article we consider a mixed problem for a nonlinear hyperbolic equation and study, in a certain sense, the smoothness of possible solutions of the problem. We obtain some new a priori estimates for solutions of the considered problem.
It is known that, up to now, the problem of the solvability of a nonlinear hyperbolic equation with a nonlinearity of this type has not been solved when $ \Omega \subset R^{n}$, $n\geq 2$. It should also be noted that it is not possible to use the a priori estimates obtainable by the known methods to prove the solvability in this case. Consequently, there are no known results on the solvability of a mixed problem for equations of the following type
\begin{equation*} \frac{\partial ^{2}u}{\partial t^{2}}-\overset{n}{\underset{i=1}{\sum }}D_{i} \left[ a_{i}\left( t,x\right) \left\vert D_{i}u\right\vert ^{p-2}D_{i}u \right] =h\left( t,x\right) ,\quad \left( t,x\right) \in Q_{T} \end{equation*} \begin{equation*} Q_{T}\equiv \left( 0,T\right) \times \Omega ,\quad \Omega \subset R^{n},\quad T>0,\quad p>2. \end{equation*} As is known, the investigation of a mixed problem for nonlinear hyperbolic equations of this type in Sobolev-type spaces when $\Omega \subset R^{n}$, $n\geq 2$, is connected with many difficulties (see, for example, the works of Leray, Courant, Friedrichs, Lax, F. John, Garding, Ladyzhenskaya, J.-L. Lions, H. Levine, Rozdestvenskii and also [2, 7 - 11, 14 - 16, 18, 19], etc.). Furthermore, the possible solutions of this problem may exhibit a gradient catastrophe. Only in the case $n=1$ has it been possible to prove solvability theorems for problems of this type (and essentially by using the Riemann invariants).
However, recently certain classes of nonlinear hyperbolic equations were investigated and results on the solvability of the considered problems in a more generalized sense were obtained (see, for example, [13] and its references); a certain result on dense solvability was also obtained ([20]). Furthermore, there are special classes of nonlinear hyperbolic equations for which solvability was studied under some additional conditions (see [3, 4, 13, 17, 23] and their references), for example under some geometrical conditions.
Here, we investigate a mixed problem for equations of a certain class with Neumann boundary-value conditions. First, a mixed problem for a nonlinear parabolic equation with a similar nonlinearity and similar conditions is studied, and the existence of strong solutions of this problem is proved. Then some a priori estimates for a possible solution of the considered problem are obtained in the hyperbolic case, with the use of the result on the parabolic problem studied before. These results demonstrate that any solution of our main problem possesses certain smoothness properties, which might help in the proof of some existence theorems.\footnote{ Unfortunately, I could not use the obtained estimates for this aim.}
\section{Formulation of Problem}
Consider the problem \begin{equation} \frac{\partial ^{2}u}{\partial t^{2}}-\overset{n}{\underset{i=1}{\sum }} D_{i}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) =h\left( t,x\right) ,\quad \left( t,x\right) \in Q_{T},\quad p>2, \tag{1.1} \end{equation} \begin{equation} u\left( 0,x\right) =u_{0}\left( x\right) ,\quad \frac{\partial u}{\partial t} \ \left\vert ~_{t=0}\right. =u_{1}\left( x\right) ,\quad x\in \Omega \subset R^{n},\quad n\geq 2, \tag{1.2} \end{equation} \begin{equation} \frac{\partial u}{\partial \widehat{\nu }}\left\vert {}~_{\Gamma }\right. \equiv \overset{n}{\underset{i=1}{\sum }}\left\vert D_{i}u\right\vert ^{p-2}D_{i}u\cos \left( \nu ,x_{i}\right) =0,\quad \left( x,t\right) \in \Gamma \equiv \partial \Omega \times \left[ 0,T\right] , \tag{1.3} \end{equation} here $\Omega \subset R^{n}$, $n\geq 2$, is a bounded domain with sufficiently smooth boundary $\partial \Omega $; $u_{0}\left( x\right) $, $u_{1}\left( x\right) $, $h\left( t,x\right) $ are functions such that $u_{0},u_{1}\in W_{p}^{1}\left( \Omega \right) $, $h\in L_{p}\left( 0,T;W_{p}^{1}\left( \Omega \right) \right) $, and $\nu $ denotes the unit outward normal to $\partial \Omega $ (see [10, 12]).
Introduce the class of functions $u:Q\longrightarrow R$ (here and below $Q\equiv Q_{T}$) \begin{equation*} V\left( Q\right) \equiv W_{2}^{1}\left( 0,T;L_{2}\left( \Omega \right) \right) \cap L^{\infty }\left( 0,T;W_{p}^{1}\left( \Omega \right) \right) \cap L_{p-1}\left( 0,T;\widetilde{S}_{1,2\left( p-2\right) ,2}^{1}\left( \Omega \right) \right) \cap \end{equation*} \begin{equation*} \left\{ u\left( t,x\right) \left\vert \ \frac{\partial ^{2}u}{\partial t^{2}} ,\ \underset{i=1}{\overset{n}{\sum }}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}^{2}u\right) \in L_{1}\left( 0,T;L_{2}\left( \Omega \right) \right) \right. \right\} \cap \end{equation*} \begin{equation*} \left\{ u\left( t,x\right) \left\vert \ \overset{n}{\underset{i=1}{\sum }} \underset{0}{\overset{t}{\int }}\left\vert D_{i}u\right\vert ^{p-2}D_{i}ud\tau \in W_{\infty }^{1}\left( 0,T;L_{q}\left( \Omega \right) \right) \cap L^{\infty }\left( 0,T;W_{2}^{1}\left( \Omega \right) \right) \right. \right\} \cap \end{equation*} \begin{equation} \left\{ u\left( t,x\right) \left\vert \ u\left( 0,x\right) =u_{0}\left( x\right) ,\ \frac{\partial u}{\partial t}\ \left\vert ~_{t=0}\right. =u_{1}\left( x\right) ,\ \frac{\partial u}{\partial \widehat{\nu }} \left\vert ~_{\Gamma }\right. =0\right. \right\} \tag{DS} \end{equation}
where \begin{equation*} \widetilde{S}_{1,\alpha ,\beta }^{1}\left( \Omega \right) \equiv \left\{ u\left( t,x\right) \left\vert ~\left[ u\right] _{S_{1,\alpha ,\beta }^{1}}^{\alpha +\beta }=\left\Vert u\right\Vert _{\alpha +\beta }^{\alpha +\beta }+\underset{i=1}{\overset{n}{\sum }}\left\Vert D_{i}u\right\Vert _{\alpha +\beta }^{\alpha +\beta }+\right. \right. \end{equation*}
\begin{equation*} \left. \left\Vert \underset{i,j=1}{\overset{n}{\sum }}\left\vert D_{i}u\right\vert ^{\frac{\alpha }{\beta }}D_{j}D_{i}u\right\Vert _{\beta }^{\beta }<\infty \right\} ,\quad \alpha \geq 0,\ \beta \geq 1. \end{equation*}
Thus, we understand a solution of the problem in the following sense: A function $u\left( t,x\right) \in V\left( Q\right) $ is called a solution of problem (1.1) - (1.3) if $u\left( t,x\right) $ satisfies the following equality \begin{equation*} \left[ \frac{\partial ^{2}u}{\partial t^{2}},v\right] -\left[ \overset{n}{ \underset{i=1}{\sum }}D_{i}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) ,v\right] =\left[ h,v\right] \end{equation*} for any $v\in W_{q}^{1}\left( 0,T;L_{2}\left( \Omega \right) \right) \cap L^{\infty }\left( Q\right) $, where $\left[ \circ ,\circ \right] \equiv \underset{Q}{\int }\circ \times \circ ~dxdt$.
Our aim in this article is to prove
\begin{theorem} Under the conditions of this section each solution of problem (1.1)-(1.3) belongs to a bounded subset of the space $V\left( Q\right) $ defined in (DS). \end{theorem}
For the investigation of the posed problem we first study two problems which are connected with the considered problem. One of these problems follows immediately from problem (1.1)-(1.3) and has the form: \begin{equation} \frac{\partial u}{\partial t}-\overset{n}{\underset{i=1}{\sum }}D_{i}\overset {t}{\underset{0}{\int }}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) d\tau =H\left( t,x\right) +u_{1}\left( x\right) , \tag{1.4} \end{equation} where $H\left( t,x\right) =\underset{0}{\overset{t}{\int }}h\left( \tau ,x\right) d\tau $.
Consequently, if $u\left( t,x\right) $ is a solution of problem (1.1) - (1.3), then $u\left( t,x\right) $ is a solution of equation (1.4) such that the following conditions are fulfilled: \begin{equation} u\left( 0,x\right) =u_{0}\left( x\right) ,\quad \frac{\partial u}{\partial \widehat{\nu }}\left\vert _{\Gamma }\right. =0. \tag{1.5} \end{equation}
Conversely, differentiating equation (1.4) with respect to $t$ returns equation (1.1), and setting $t=0$ in (1.4) gives the condition $\frac{\partial u}{\partial t}\ \left\vert ~_{t=0}\right. =u_{1}\left( x\right) $. From here it follows that problems (1.1) - (1.3) and (1.4) - (1.5) are equivalent.
The other problem is the nonlinear parabolic problem \begin{equation} \frac{\partial u}{\partial t}-\ \overset{n}{\underset{i=1}{\sum }} D_{i}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) =h\left( t,x\right) ,\quad \left( t,x\right) \in Q,\ p>2,\ n\geq 2, \tag{1.6} \end{equation} \begin{equation} u\left( 0,x\right) =u_{0}\left( x\right) ,\quad x\in \Omega ,\ \quad \frac{ \partial u}{\partial \widehat{\nu }}\left\vert ~_{\Gamma }\right. =0, \tag{1.7} \end{equation} where $u_{0}\in W_{p}^{1}\left( \Omega \right) $, $h\in L_{2}\left( 0,T;W_{2}^{1}\left( \Omega \right) \right) $ and $p>2$.
First the solvability of this problem is studied, for which a general solvability result is used; therefore we begin with this result.
\section{Some General Solvability Results}
Let $X,Y$ be locally convex vector topological spaces, $B\subseteq Y$ be a Banach space and $g:D\left( g\right) \subseteq X\longrightarrow Y$ be a mapping. Introduce the following subset of $\ X$ \begin{equation*} \mathcal{M}_{gB}\equiv \left\{ x\in X\left\vert ~g\left( x\right) \in B,\right. \func{Im}g\cap B\neq \varnothing \right\} . \end{equation*}
\begin{definition} A subset $\mathcal{M}\subseteq X$ is called a $pn-$space (i.e. pseudonormed space) if $\mathcal{M}$ is a topological space and there is a function $ \left[ \cdot \right] _{\mathcal{M}}:\mathcal{M}\longrightarrow R_{+}^{1}\equiv \left[ 0,\infty \right) $ (which is called the $p-$norm of $\mathcal{M}$) such that
qn) $\left[ x\right] _{\mathcal{M}}\geq 0$, $\forall x\in \mathcal{M}$ and $ 0\in \mathcal{M}$, $x=0\Longrightarrow \left[ x\right] _{\mathcal{M}}=0$;
pn) \ $\left[ x_{1}\right] _{\mathcal{M}}\neq \left[ x_{2}\right] _{\mathcal{ M}}\Longrightarrow x_{1}\neq x_{2}$, for $x_{1},x_{2}\in \mathcal{M}$, and $ \left[ x\right] _{\mathcal{M}}=0\Longrightarrow x=0$; \end{definition}
The following conditions are often fulfilled in the spaces $\mathcal{M}_{gB}$.
N) There exist a convex function $\nu :R^{1}\longrightarrow \overline{ R_{+}^{1}}$ and a number $K\in \left( 0,\infty \right] $ such that $\left[ \lambda x\right] _{\mathcal{M}}\leq \nu \left( \lambda \right) \left[ x \right] _{\mathcal{M}}$ for any $x\in \mathcal{M}$ and $\lambda \in R^{1}$, $ \left\vert \lambda \right\vert <K$; moreover $\underset{\left\vert \lambda \right\vert \longrightarrow \lambda _{j}}{\lim }\frac{\nu \left( \lambda \right) }{\left\vert \lambda \right\vert }=c_{j}$, $j=0,1$, where $\lambda _{0}=0$, $\lambda _{1}=K$ and $c_{0}=c_{1}=1$ or $c_{0}=0$, $c_{1}=\infty $; i.e. if $K=\infty $ then $\lambda x\in \mathcal{M}$ for any $x\in \mathcal{M}$ and $ \lambda \in R^{1}.$
Let $g:D\left( g\right) \subseteq X\longrightarrow Y$ be such a mapping that $\mathcal{M}_{gB}\neq \varnothing $ and the following conditions are fulfilled
(g$_{\text{1}}$) $g:D\left( g\right) \longleftrightarrow \func{Im}g$ is a bijection and $g\left( 0\right) =0$;
(g$_{\text{2}}$) there is a function $\nu :R^{1}\longrightarrow \overline{ R_{+}^{1}}$ satisfying condition N such that \begin{equation*} \left\Vert g\left( \lambda x\right) \right\Vert _{B}\leq \nu \left( \lambda \right) \left\Vert g\left( x\right) \right\Vert _{B},\ \forall x\in \mathcal{ M}_{gB},\ \forall \lambda \in R^{1}. \end{equation*} If the mapping $g$ satisfies conditions (g$_{1}$) and (g$_{2}$), then $\mathcal{M }_{gB}$ is a $pn-$space with the $p-$norm defined in the following way: there is a one-to-one function $\psi :R_{+}^{1}\longrightarrow R_{+}^{1}$, $\psi \left( 0\right) =0$, $\psi ,\psi ^{-1}\in C^{0}$, such that $\left[ x\right] _{\mathcal{M}_{gB}}\equiv \psi ^{-1}\left( \left\Vert g\left( x\right) \right\Vert _{B}\right) $. In this case $\mathcal{M}_{gB}$ is a metric space with the metric $d_{\mathcal{M}}\left( x_{1};x_{2}\right) \equiv \left\Vert g\left( x_{1}\right) -g\left( x_{2}\right) \right\Vert _{B}$. In what follows we consider only $pn-$spaces of this type.
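As a simple illustration of this construction (an elementary example, not used in the sequel), let $X=Y\equiv L_{1}\left( \Omega \right) $ for a bounded domain $\Omega $, $B\equiv L_{q}\left( \Omega \right) $ for some $q\geq 1$, and $g\left( u\right) \equiv \left\vert u\right\vert ^{p-2}u$ with $p>2$. Then
\begin{equation*}
\mathcal{M}_{gB}=\left\{ u\ \left\vert \ \left\vert u\right\vert ^{p-2}u\in L_{q}\left( \Omega \right) \right. \right\} =L_{\left( p-1\right) q}\left( \Omega \right) ,\qquad \left\Vert g\left( u\right) \right\Vert _{L_{q}}=\left\Vert u\right\Vert _{L_{\left( p-1\right) q}}^{p-1},
\end{equation*}
so that, choosing $\psi \left( \xi \right) =\xi ^{p-1}$, one obtains the $p-$norm $\left[ u\right] _{\mathcal{M}_{gB}}=\left\Vert u\right\Vert _{L_{\left( p-1\right) q}}$.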
\begin{definition} The $pn-$space $\mathcal{M}_{gB}$ is called weakly complete if $g\left( \mathcal{M}_{gB}\right) $ is weakly closed in $B.$ The pn-space $\mathcal{M} _{gB}$ is "reflexive" if each bounded weakly closed subset of $\mathcal{M} _{gB}$ is weakly compact in $\mathcal{M}_{gB}$. \end{definition}
It is clear that if $B$ is a reflexive Banach space and $\mathcal{M}_{gB}$ is a $pn-$space, then $\mathcal{M}_{gB}$ is "reflexive". Moreover, if $B$ is a separable Banach space, then $\mathcal{M}_{gB}$ is separable (see, for example, [21, 22] and their references).
Now, consider a nonlinear equation in general form. Let $X,Y$ be Banach spaces with dual spaces $X^{\ast },Y^{\ast }$, respectively, let $\mathcal{M} _{0}\subseteq X$ be a weakly complete $pn-$space, and let $f:D\left( f\right) \subseteq X\longrightarrow Y$ be a nonlinear operator. Consider the equation \begin{equation} f\left( x\right) =y,\quad y\in Y. \tag{2.1} \end{equation}
\begin{notation} It is clear that (2.1)$\mathit{\ }$is equivalent to the following functional equation: \begin{equation} \left\langle f\left( x\right) ,y^{\ast }\right\rangle =\left\langle y,y^{\ast }\right\rangle ,\quad \forall y^{\ast }\in Y^{\ast }. \tag{2.2} \end{equation} \end{notation}
Let $f:D\left( f\right) \subseteq X\longrightarrow Y$ be a nonlinear bounded operator and the following conditions hold
1) $f:\mathcal{M}_{0}\subseteq D\left( f\right) \longrightarrow Y$ is a weakly compact (weakly "continuous") mapping, i.e. for any weakly convergent sequence $\left\{ x_{m}\right\} _{m=1}^{\infty }\subset \mathcal{ M}_{0}$ in $\mathcal{M}_{0}$ (i.e. $x_{m}\overset{\mathcal{M}_{0}}{ \rightharpoonup }x_{0}\in \mathcal{M}_{0}$) there is a subsequence $\left\{ x_{m_{k}}\right\} _{k=1}^{\infty }\subseteq \left\{ x_{m}\right\} _{m=1}^{\infty }$ such that $f\left( x_{m_{k}}\right) \overset{Y}{ \rightharpoonup }f\left( x_{0}\right) $ weakly in $Y$ (or for a general sequence if $\mathcal{M}_{0}$ is not a separable space), and $\mathcal{M}_{0}$ is a weakly complete $pn-$space;
2) there exist a mapping $g:X_{0}\subseteq X\longrightarrow Y^{\ast }$ and a continuous function $\varphi :R_{+}^{1}\longrightarrow R^{1}$, nondecreasing for $\tau \geq \tau _{0}\geq 0$ and with $\varphi \left( \tau _{1}\right) >0$ for some number $\tau _{1}>0,$ such that $g$ generates a "coercive" pair with $f$ in a generalized sense on the topological space $X_{1}\subseteq X_{0}\cap \mathcal{M}_{0}$, i.e. \begin{equation*} \left\langle f\left( x\right) ,g\left( x\right) \right\rangle \geq \varphi \left( \lbrack x]_{\mathcal{M}_{0}}\right) [x]_{\mathcal{M}_{0}},\quad \forall x\in X_{1}, \end{equation*} where $X_{1}$ is a topological space such that $\overline{X_{1}} ^{X_{0}}\equiv X_{0}$ and $\overline{X_{1}}^{\mathcal{M}_{0}}\equiv \mathcal{ M}_{0}$, and $\left\langle \cdot ,\cdot \right\rangle $ is the dual form of the pair $\left( Y,Y^{\ast }\right) $. Moreover, one of the following conditions $\left( \alpha \right) $ or $\left( \beta \right) $ holds:
$\left( \alpha \right) $ if $g\equiv L$ is a linear continuous operator, then $\mathcal{M}_{0}$ is a \textquotedblright reflexive\textquotedblright\ space (see [21, 22]), $X_{0}\equiv X_{1}\subseteq \mathcal{M}_{0}$ is a separable topological vector space which is dense in $\mathcal{M}_{0}$ and $ \ker L^{\ast }=\left\{ 0\right\} $.
$\left( \beta \right) $ if $g$ is a bounded operator (in general, nonlinear), then $Y$ is a reflexive separable space, $g\left( X_{1}\right) $ contains an everywhere dense linear manifold of $Y^{\ast }$ and $g^{-1}$ is a weakly compact (weakly continuous) operator from $Y^{\ast }$ to $\mathcal{M }_{0}$.
\begin{theorem} \textit{Let conditions 1 and 2 hold. Then equation (2.1) (or (2.2))\ is solvable in }$\mathcal{M}_{0}$\textit{\ for any} $y\in Y$ \textit{satisfying the following condition: there exists} $r>0$ \textit{such that the inequality} \begin{equation} \varphi \left( \lbrack x]_{\mathcal{M}_{0}}\right) [x]_{\mathcal{M}_{0}}\geq \left\langle y,g\left( x\right) \right\rangle \quad \text{for all}\quad x\in X_{1}\quad \text{with}\quad \lbrack x]_{\mathcal{M}_{0}}\geq r \tag{2.3} \end{equation} holds. \end{theorem}
\begin{proof} Assume that conditions 1 and 2 ($\alpha $)\textit{\ }are fulfilled and let $y\in Y$ be such that (2.3) holds. We are going to use Galerkin's approximation method. Let $\left\{ x^{k}\right\} _{k=1}^{\infty }$ be a complete system in the (separable) space $X_{1}\equiv X_{0}$. Then we look for approximate solutions in the form $x_{m}=\overset{m}{\underset{k=1}{\sum }} c_{mk}x^{k},$ where $c_{mk}$ are unknown coefficients, which can be determined from the system of algebraic equations \begin{equation} \Phi _{k}\left( c_{m}\right) :=\left\langle f\left( x_{m}\right) ,g\left( x^{k}\right) \right\rangle -\left\langle y,g\left( x^{k}\right) \right\rangle =0,\quad k=1,2,...,m \tag{2.4} \end{equation} where $c_{m}\equiv \left( c_{m1},c_{m2},...,c_{mm}\right) $.
We observe that the mapping $\Phi \left( c_{m}\right) :=\left( \Phi _{1}\left( c_{m}\right) ,\Phi _{2}\left( c_{m}\right) ,...,\Phi _{m}\left( c_{m}\right) \right) $ is continuous by virtue of condition 1. (2.3) implies the existence of $r=r\left( \left\Vert y\right\Vert _{Y}\right) >0$ such that the \textquotedblleft acute angle\textquotedblright\ condition is fulfilled for all $x_{m}$ with $\left[ x_{m}\right] _{\mathcal{M}_{0}}\geq r$, i.e. for any $c_{m}\in S_{r_{1}}^{R^{m}}\left( 0\right) \subset R^{m}$, $r_{1}\geq r$, the inequality \begin{equation*} \overset{m}{\underset{k=1}{\sum }}\left\langle \Phi _{k}\left( c_{m}\right) ,c_{mk}\right\rangle \equiv \left\langle f\left( x_{m}\right) ,g\left( \overset{m}{\underset{k=1}{\sum }}c_{mk}x^{k}\right) \right\rangle -\left\langle y,g\left( \overset{m}{\underset{k=1}{\sum }}c_{mk}x^{k}\right) \right\rangle = \end{equation*} \begin{equation*} \left\langle f\left( x_{m}\right) ,g\left( x_{m}\right) \right\rangle -\left\langle y,g\left( x_{m}\right) \right\rangle \geq 0,\quad \forall c_{m}\in \mathbb{R}^{m},\ \left\Vert c_{m}\right\Vert _{\mathbb{R}^{m}}=r_{1}, \end{equation*} holds. The solvability of system (2.4) for each $m=1,2,\ldots $ follows from the well-known \textquotedblleft acute angle\textquotedblright\ lemma ([10, 21 - 23]), which is equivalent to Brouwer's fixed-point theorem. Thus, $\left\{ x_{m}\left\vert ~m\geq \right. 1\right\} $ is a sequence of approximate solutions contained in a bounded subset of the space $\mathcal{M}_{0}$. Further arguments are analogous to those from [10, 23], therefore we omit them. It remains to pass to the limit in (2.4)\ as $m\rightarrow \infty $ and use the weak convergence of a subsequence of the sequence $\left\{ x_{m}\left\vert ~m\geq \right. 1\right\} $, the weak compactness of the mapping $f$, and the completeness of the system $\left\{ x^{k}\right\} _{k=1}^{\infty }$ in the space $X_{1}$.
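For the reader's convenience we recall the form of the \textquotedblleft acute angle\textquotedblright\ lemma used here (a standard statement, equivalent to Brouwer's fixed-point theorem): if $\Phi :\mathbb{R}^{m}\longrightarrow \mathbb{R}^{m}$ is continuous and
\begin{equation*}
\left\langle \Phi \left( c\right) ,c\right\rangle \geq 0\quad \text{for all }c\in \mathbb{R}^{m}\ \text{with }\left\Vert c\right\Vert _{\mathbb{R}^{m}}=r,
\end{equation*}
then there exists $c_{0}$ with $\left\Vert c_{0}\right\Vert _{\mathbb{R}^{m}}\leq r$ such that $\Phi \left( c_{0}\right) =0$.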
Hence we get the limit element $x_{0}=w-\underset{j\nearrow \infty }{\lim } x_{m_{j}}\in \mathcal{M}_{0}$ which is a solution of the equation \begin{equation} \left\langle f\left( x_{0}\right) ,g\left( x\right) \right\rangle =\left\langle y,g\left( x\right) \right\rangle ,\quad \forall x\in X_{0}, \tag{2.5} \end{equation} or of the equation \begin{equation} \left\langle g^{\ast }\circ f\left( x_{0}\right) ,x\right\rangle =\left\langle g^{\ast }\circ y,x\right\rangle ,\quad \forall x\in X_{0}. \tag{2.5'} \end{equation}
In the second case, i.e. when conditions 1 and 2 ($\beta $)\ are fulfilled and $y\in Y$ is such that (2.3) holds, we seek the approximate solutions in the form \begin{equation} x_{m}=g^{-1}\left( \overset{m}{\underset{k=1}{\sum }}c_{mk}y_{k}^{\ast }\right) \equiv g^{-1}\left( y_{\left( m\right) }^{\ast }\right) , \tag{2.6} \end{equation} where $\left\{ y_{k}^{\ast }\right\} _{k=1}^{\infty }\subset Y^{\ast }$ is a complete system in the (separable) space $Y^{\ast }$ that belongs to $g\left( X_{1}\right) $. In this case the unknown coefficients $c_{mk}$ are determined from the system of algebraic equations \begin{equation} \widetilde{\Phi }_{k}\left( c_{m}\right) :=\left\langle f\left( x_{m}\right) ,y_{k}^{\ast }\right\rangle -\left\langle y,y_{k}^{\ast }\right\rangle =0,\quad k=1,2,...,m \tag{2.7} \end{equation} with $c_{m}\equiv \left( c_{m1},c_{m2},...,c_{mm}\right) $, from which under our conditions we get \begin{equation} \left\langle f\left( x_{m}\right) ,y_{k}^{\ast }\right\rangle -\left\langle y,y_{k}^{\ast }\right\rangle =\left\langle f\left( g^{-1}\left( y_{\left( m\right) }^{\ast }\right) \right) ,y_{k}^{\ast }\right\rangle -\left\langle y,y_{k}^{\ast }\right\rangle =0, \tag{2.7'} \end{equation} for $k=1,2,...,m$.
As above, we observe that the mapping \begin{equation*} \widetilde{\Phi }\left( c_{m}\right) :=\left( \widetilde{\Phi }_{1}\left( c_{m}\right) ,\widetilde{\Phi }_{2}\left( c_{m}\right) ,...,\widetilde{\Phi } _{m}\left( c_{m}\right) \right) \end{equation*} is continuous by virtue of conditions 1 and 2($\beta $). (2.3) implies the existence of $\widetilde{r}>0$ such that the \textquotedblleft acute angle\textquotedblright\ condition is fulfilled for all $y_{\left( m\right) }^{\ast }$ with $\left\Vert y_{\left( m\right) }^{\ast }\right\Vert _{Y^{\ast }}\geq \widetilde{r}$, i.e. for any $c_{m}\in S_{\widetilde{r}_{1}}^{R^{m}}\left( 0\right) \subset R^{m}$, $\widetilde{r}_{1}\geq \widetilde{r}$, the inequality \begin{equation*} \overset{m}{\underset{k=1}{\sum }}\left\langle \widetilde{\Phi }_{k}\left( c_{m}\right) ,c_{mk}\right\rangle \equiv \left\langle f\left( x_{m}\right) , \overset{m}{\underset{k=1}{\sum }}c_{mk}y_{k}^{\ast }\right\rangle -\left\langle y,\overset{m}{\underset{k=1}{\sum }}c_{mk}y_{k}^{\ast }\right\rangle = \end{equation*} \begin{equation*} \left\langle f\left( g^{-1}\left( y_{\left( m\right) }^{\ast }\right) \right) ,y_{\left( m\right) }^{\ast }\right\rangle -\left\langle y,y_{\left( m\right) }^{\ast }\right\rangle =\left\langle f\left( x_{m}\right) ,g\left( x_{m}\right) \right\rangle -\left\langle y,g\left( x_{m}\right) \right\rangle \geq 0,\quad \forall c_{m}\in \mathbb{R}^{m},\ \left\Vert c_{m}\right\Vert _{\mathbb{R}^{m}}=\widetilde{r}_{1}, \end{equation*} holds by virtue of the conditions. Consequently, the solvability of system (2.7) (or (2.7')) for each $m=1,2,\ldots $ follows from the \textquotedblleft acute angle\textquotedblright\ lemma, as above. Thus, $\left\{ y_{\left( m\right) }^{\ast }\left\vert ~m\geq \right. 1\right\} $ is a sequence of approximate solutions of system (2.7') contained in a bounded subset of $Y^{\ast }$. From here it follows that there is a subsequence $\left\{ y_{\left( m_{j}\right) }^{\ast }\right\} _{j=1}^{\infty }$ of the sequence $\left\{ y_{\left( m\right) }^{\ast }\left\vert ~m\geq \right. 1\right\} $ which converges weakly in $Y^{\ast }$, and consequently the sequence $\left\{ x_{m_{j}}\right\} _{j=1}^{\infty }\equiv \left\{ g^{-1}\left( y_{\left( m_{j}\right) }^{\ast }\right) \right\} _{j=1}^{\infty }$ converges weakly in the space $\mathcal{M}_{0}$ by condition 2($\beta $) (possibly after passing to a further subsequence). It remains to pass to the limit in (2.7')\ as $j\rightarrow \infty $ and use the weak convergence of a subsequence of the sequence $\left\{ y_{\left( m\right) }^{\ast }\left\vert ~m\geq \right. 1\right\} $, the weak compactness of the mappings $f$ and $g^{-1}$, and the completeness of the system $\left\{ y_{k}^{\ast }\right\} _{k=1}^{\infty }$ in the space $Y^{\ast }$.
Hence we get the limit element $x_{0}=w-\underset{j\nearrow \infty }{\lim } x_{m_{j}}=w-\underset{j\nearrow \infty }{\lim }g^{-1}\left( y_{\left( m_{j}\right) }^{\ast }\right) \in \mathcal{M}_{0}$, which is a solution of the equation \begin{equation} \left\langle f\left( x_{0}\right) ,y^{\ast }\right\rangle =\left\langle y,y^{\ast }\right\rangle ,\quad \forall y^{\ast }\in Y^{\ast }. \tag{2.8} \end{equation} Q.E.D.\footnote{ See also, Soltanov K.N., On Noncoercive Semilinear Equations, Journal- NA: Hybrid Systems, (2008), 2, 2, 344-358.} \end{proof}
\begin{remark} \textit{It is obvious that if there exists a function }$\psi :R_{+}^{1}\longrightarrow R_{+}^{1}$, $\psi \in C^{0}$, \textit{such that }$ \psi \left( \xi \right) =0\Longleftrightarrow \xi =0$\textit{\ and if the inequality }$\left\Vert x_{1}-x_{2}\right\Vert _{X}\leq \psi \left( \left\Vert f\left( x_{1}\right) -f\left( x_{2}\right) \right\Vert _{Y}\right) $\textit{\ is fulfilled for all }$x_{1},x_{2}\in \mathcal{M}_{0}$\textit{, then the solution of equation (2.2) is unique.} \end{remark}
\begin{notation} It should be noted that spaces of the $pn-$space type often arise from nonlinear problems with nonlinear main parts; for example,
1) the equation of nonlinear filtration or diffusion: \begin{equation*} \frac{\partial u}{\partial t}-\nabla \cdot \left( g\left( u\right) \nabla u\right) +h\left( t,x,u\right) =0,\quad u\left\vert \ _{\partial \Omega \times \left[ 0,T\right] }\right. =0, \end{equation*} \begin{equation*} u\left( 0,x\right) =u_{0}\left( x\right) ,\quad x\in \Omega \subset \mathbb{R}^{n},\quad n\geq 1, \end{equation*} where $g:\mathbb{R}\longrightarrow \mathbb{R}_{+}$ is a convex function (e.g. $g\left( s\right) \equiv \left\vert s\right\vert ^{\rho }$, $\rho >0$) and $h\left( t,x,s\right) $ is a Caratheodory function; in this case it is necessary to investigate \begin{equation*} S_{1,\rho ,2}\left( \Omega \right) \equiv \left\{ u\in L^{1}\left( \Omega \right) \left\vert \ \underset{\Omega }{\dint }g\left( u\left( x\right) \right) \left\vert \nabla u\right\vert ^{2}dx\right. <\infty ;\ \ u\left\vert \ _{\partial \Omega }\right. =0\right\} ;
\end{equation*}
2) the equation of Prandtl-von Mises type: \begin{equation*} \frac{\partial u}{\partial t}-\left\vert u\right\vert ^{\rho }\Delta u+h\left( t,x,u\right) =0,\quad u\left\vert \ _{\partial \Omega \times \left[ 0,T\right] }\right. =0, \end{equation*} \begin{equation*} u\left( 0,x\right) =u_{0}\left( x\right) ,\quad x\in \Omega \subset \mathbb{R}^{n},\quad n\geq 1, \end{equation*} where $\rho >0$ and $h\left( t,x,s\right) $ is a Caratheodory function; in this case it is necessary to investigate spaces of the type $S_{1,\mu ,q}\left( \Omega \right) $ ($\mu \geq 0,q\geq 1$) and \begin{equation*} S_{\Delta ,\rho ,2}\left( \Omega \right) \equiv \left\{ u\in L^{1}\left( \Omega \right) \left\vert \ \underset{\Omega }{\dint }\left\vert u\left( x\right) \right\vert ^{\rho }\left\vert \Delta u\right\vert ^{2}dx\right. <\infty ;\ \ u\left\vert \ _{\partial \Omega }\right. =0\right\} \end{equation*} etc. \footnote{ Theorems and spaces of this type were used earlier in many works; see, for example, [10, 21], and also the following articles and the references therein: \par Ju. A. Dubinskii - Weak convergence in nonlinear elliptic and parabolic equations, Matem. Sborn., (1965), 67, n. 4; Soltanov K.N. - On solvability of some nonlinear parabolic problems with nonlinearity growing faster than polynomial functions. Matematicheskie Zametki, 32, 6, 1982. \par Soltanov K.N. : Some embedding theorems and their applications to nonlinear equations. Differentsial'nye Uravneniya, 20, 12, 1984; On nonlinear equations of the form $F\left( x,u,Du,\Delta u\right) =0$. Matem. Sb. Ac. Sci. Russ, 1993, v.184, n.11 (Russian Acad. Sci. Sb. Math., 80, (1995), 2); Solvability of nonlinear equations with operators in the form of a sum of pseudomonotone and weakly compact operators, Soviet Math. Dokl., 1992, v.324, n.5, 944-948; Nonlinear equations in nonreflexive Banach spaces and fully nonlinear equations. Advances in Mathematical Sciences and Applications, 1999, v.9, n 2, 939-972 (joint with J. Sprekels); On some problem with free boundary. Trans. Russian Ac. Sci., ser. Math., 2002, 66, 4, 155-176 (joint with Novruzov E.).} \end{notation}
\begin{corollary} Assume that the conditions of Theorem 2 are fulfilled and that \textit{there is a continuous function }$\varphi _{1}:R_{+}^{1}\longrightarrow R_{+}^{1}$ such that $\left\Vert g\left( x\right) \right\Vert _{Y^{\ast }}\leq \varphi _{1}\left( [x]_{\mathcal{M}_{0}}\right) $ for any $x\in X_{0}$, and that\textit{\ }$\varphi \left( \tau \right) \nearrow +\infty $\textit{\ }and $\frac{\varphi \left( \tau \right) \tau }{\varphi _{1}\left( \tau \right) }\nearrow +\infty $\textit{\ as }$\tau \nearrow +\infty $. \textit{Then equation (2.2) is solvable in }$\mathcal{M}_{0}$ \textit{for any }$y\in Y$. \end{corollary}
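The statement of the corollary follows immediately from Theorem 2 (a one-line justification, added for clarity): for any $y\in Y$ and $x\in X_{1}$,
\begin{equation*}
\left\langle y,g\left( x\right) \right\rangle \leq \left\Vert y\right\Vert _{Y}\left\Vert g\left( x\right) \right\Vert _{Y^{\ast }}\leq \left\Vert y\right\Vert _{Y}\ \varphi _{1}\left( \left[ x\right] _{\mathcal{M}_{0}}\right) ,
\end{equation*}
and since $\varphi \left( \tau \right) \tau /\varphi _{1}\left( \tau \right) \nearrow +\infty $ as $\tau \nearrow +\infty $, inequality (2.3) is satisfied for all $x\in X_{1}$ with $\left[ x\right] _{\mathcal{M}_{0}}$ sufficiently large.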
\section{Solvability of Problem (1.6) - (1.7)}
We understand a solution of problem (1.6) - (1.7) in the following sense.
\begin{definition} A function $u\left( t,x\right) $ of the space ${\Large P}_{1,\left( p-2\right) q,q,2}^{1}\left( Q\right) $ is called a solution of problem (1.6) - (1.7) if $u\left( t,x\right) $ satisfies the following equality \begin{equation} \left[ \frac{\partial u}{\partial t},v\right] -\left[ \overset{n}{\underset{ i=1}{\sum }}D_{i}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) ,v \right] =\left[ h,v\right] ,\quad \forall v\in L_{p}\left( Q\right) , \tag{3.1} \end{equation} where \begin{equation*} {\Large P}_{1,\left( p-2\right) q,q,2}^{1}\left( Q\right) \equiv L_{p}\left( 0,T;\overset{0}{S}{}_{1,\left( p-2\right) q,q}^{1}\left( \Omega \right) \right) \cap W_{2}^{1}\left( 0,T;L_{2}\left( \Omega \right) \right) \end{equation*} \begin{equation*} S_{1,\alpha ,\beta }^{1}\left( \Omega \right) \equiv \left\{ u\left( t,x\right) \left\vert ~\left[ u\right] _{S_{1,\alpha ,\beta }^{1}}^{\alpha +\beta }=\underset{i=1}{\overset{n}{\sum }}\left\Vert D_{i}u\right\Vert _{\alpha +\beta }^{\alpha +\beta }+\right. \right. \end{equation*} \begin{equation*} \left. \underset{i,j=1}{\overset{n}{\sum }}\left\Vert \left\vert D_{i}u\right\vert ^{\frac{\alpha }{\beta }}D_{j}D_{i}u\right\Vert _{\beta }^{\beta }<\infty \right\} ,\quad \alpha \geq 0,\ \beta \geq 1. \end{equation*} and $\left[ \cdot ,\cdot \right] $ denotes dual form for the pair $\left( L_{q}\left( Q\right) ,L_{p}\left( Q\right) \right) $ as in the section 1. \end{definition}
For the study of problem (1.6) - (1.7) we use Theorem 2 and Corollary 1 of the previous section. For applying these results to problem (1.6) - (1.7), we will choose the corresponding spaces and mappings: \begin{equation*} \mathcal{M}_{0}\equiv {\Large P}_{1,\left( p-2\right) q,q,2}^{1}\left( Q\right) \equiv L_{p}\left( 0,T;\overset{0}{S}{}_{1,\left( p-2\right) q,q}^{1}\left( \Omega \right) \right) \cap W_{2}^{1}\left( 0,T;L_{2}\left( \Omega \right) \right) , \end{equation*} \begin{equation*} \Phi \left( u\right) \equiv -\ \overset{n}{\underset{i=1}{\sum }}D_{i}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) ,\quad \gamma _{0}u\equiv u\left( 0,x\right) , \end{equation*} \noindent {} \begin{equation*} f\left( \cdot \right) \equiv \left\{ \frac{\partial \cdot }{\partial t}+\Phi \left( \cdot \right) ;\ \gamma _{0}\cdot \right\} ,\quad g\left( \cdot \right) \equiv \left\{ \frac{\partial \cdot }{\partial t}-\Delta \cdot ;\quad \gamma _{0}\cdot \right\} , \end{equation*} \begin{equation*} X_{0}\equiv W_{p}^{1}\left( 0,T;L_{p}\left( \Omega \right) \right) \cap \widetilde{X};\ \end{equation*} \begin{equation*} X_{1}\equiv X_{0}\cap \left\{ u\left( t,x\right) \left\vert \frac{\partial u }{\partial \widehat{\nu }}\left\vert \ _{\Gamma }\right. =0\right. \right\} ; \end{equation*} \begin{equation*} Y\equiv L_{q}\left( Q\right) ,\ q=p^{\prime },\widetilde{X}\equiv L_{p}\left( 0,T;W_{p}^{2}\left( \Omega \right) \right) \cap \left\{ u\left( t,x\right) \left\vert \frac{\partial u}{\partial \nu }\left\vert \ _{\Gamma }\right. =0\right. \right\} \end{equation*} here \begin{equation*} \overset{0}{S}{}_{1,\left( p-2\right) q,q}^{1}\left( \Omega \right) \equiv S_{1,\left( p-2\right) q,q}^{1}\left( \Omega \right) \cap \left\{ u\left( t,x\right) \left\vert \frac{\partial u}{\partial \widehat{\nu }}\left\vert \ _{\partial \Omega }\right. =0\right. \right\} \end{equation*}
Thus, as we can see from the above notation, the mapping $f$ is defined by problem (1.6)-(1.7) and the mapping $g$ is defined by the following problem \begin{equation} \frac{\partial u}{\partial t}-\Delta u=v\left( t,x\right) ,\quad \left( t,x\right) \in Q, \tag{3.2} \end{equation} \begin{equation} \gamma _{0}u\equiv u\left( 0,x\right) =u_{0}\left( x\right) ,\ \frac{ \partial u}{\partial \nu }\left\vert \ _{\Gamma }\right. =0. \tag{3.3} \end{equation}
As is known (see [1, 5, 6, 12]), problem (3.2)-(3.3) is solvable in the space \begin{equation*} X_{0}\equiv W_{p}^{1}\left( 0,T;L_{p}\left( \Omega \right) \right) \cap L_{p}\left( 0,T;W_{p}^{2}\left( \Omega \right) \right) \cap \left\{ u\left( t,x\right) \left\vert \frac{\partial u}{\partial \nu }\left\vert \ _{\Gamma }\right. =0\right. \right\} \end{equation*} for any $v\in L_{p}\left( Q\right) $, $u_{0}\in W_{p}^{1}\left( \Omega \right) $.
Now we will demonstrate that all conditions of Theorem 2 and also of Corollary 1 are fulfilled.
\begin{proposition} The mappings $f$ and $g$ defined above generate a "coercive" pair on $X_{1}$ in the generalized sense, and moreover the statement of Corollary 1 is valid. \end{proposition}
\begin{proof} Let $u\in X_{1}$, i.e. \begin{equation*} u\in X_{0}\cap \left\{ u\left( t,x\right) \left\vert \frac{\partial u}{ \partial \widehat{\nu }}\left\vert \ _{\Gamma }\right. =0\right. \right\} . \end{equation*}
Consider the dual form $\left\langle f\left( u\right) ,g\left( u\right) \right\rangle $ for any $u\in X_{1}$. More precisely, it is enough to consider the dual form \begin{equation} \underset{0}{\overset{t}{\int }}\underset{\Omega }{\int }f\left( u\right) \ g\left( u\right) \ dxd\tau \equiv \left[ f\left( u\right) ,\ g\left( u\right) \right] _{t} \tag{*} \end{equation} Hence, evaluating the above expression, after some standard computations and in view of the boundary conditions we get \begin{equation*} \left[ f\left( u\right) ,\ g\left( u\right) \right] _{t}\equiv \left[ \frac{ \partial u}{\partial t},\ \frac{\partial u}{\partial t}\right] _{t}\ +\left[ \Phi \left( u\right) ,\ \frac{\partial u}{\partial t}\right] _{t}- \end{equation*} \begin{equation*} \left[ \frac{\partial u}{\partial t},\ \Delta u\right] _{t}-\left[ \Phi \left( u\right) ,\ \Delta u\right] _{t}+\underset{\Omega }{\int }u_{0}\ u_{0}\ dx= \end{equation*} \begin{equation*} \underset{0}{\overset{t}{\int }}\left\Vert \frac{\partial u}{\partial t} \right\Vert _{2}^{2}d\tau +\overset{n}{\underset{i=1}{\sum }}\left[ \frac{1}{ p}\left\Vert D_{i}u\right\Vert _{p}^{p}\left( t\right) +\frac{1}{2} \left\Vert D_{i}u\right\Vert _{2}^{2}\left( t\right) \right] + \end{equation*} \begin{equation*} \left\Vert u_{0}\right\Vert _{2}^{2}+\left( p-1\right) \overset{n}{\underset{ i,j=1}{\sum }}\ \underset{0}{\overset{t}{\int }}\left\Vert \left\vert D_{i}u\right\vert ^{\frac{p-2}{2}}D_{i}D_{j}u\right\Vert _{2}^{2}- \end{equation*} \begin{equation} \overset{n}{\underset{i=1}{\sum }}\left[ \frac{1}{p}\left\Vert D_{i}u_{0}\right\Vert _{p}^{p}+\frac{1}{2}\left\Vert D_{i}u_{0}\right\Vert _{2}^{2}\right] \tag{3.4} \end{equation} Here and in (3.5) we denote $\left\Vert \cdot \right\Vert _{p_{1}}\equiv \left\Vert \cdot \right\Vert _{L_{p_{1}}\left( \Omega \right) }$, $p_{1}\geq 1$.
From here it follows that \begin{equation*} \left[ f\left( u\right) ,\ g\left( u\right) \right] \geq c\left( \left\Vert \frac{\partial u}{\partial t}\right\Vert _{L_{2}\left( Q\right) }^{2}+ \overset{n}{\underset{i=1}{\sum }}\left\Vert \left\vert D_{i}u\right\vert ^{ \frac{p-2}{2}}D_{i}u\right\Vert _{L_{2}\left( Q\right) }^{2}\right) - \end{equation*} \begin{equation*} c_{1}\left\Vert u_{0}\right\Vert _{W_{p}^{1}}^{p}-c_{2}\geq \widetilde{c} \left( \left\Vert \frac{\partial u}{\partial t}\right\Vert _{L_{2}\left( Q\right) }^{2}+\left[ u\right] _{L_{p}\left( S_{1,\left( p-2\right) q,q}\right) }^{p}\right) - \end{equation*} \begin{equation*} c_{1}\left\Vert u_{0}\right\Vert _{W_{p}^{1}}^{p}-c_{2}\geq \widetilde{c} \left[ u\right] _{\mathbf{P}_{1,\left( p-2\right) q,q,2}^{1}\left( Q\right) }^{2}-c_{1}\left\Vert u_{0}\right\Vert _{W_{p}^{1}}^{p}-\widetilde{c}_{2}, \end{equation*} which demonstrates that the statement of Corollary 1 is fulfilled\footnote{ From the definitions of these spaces it is easy to see that $S_{1,p-2,2}^{1}\left( \Omega \right) \subset S_{1,\left( p-2\right) q,q}\left( \Omega \right) $}. Consequently, Proposition 1 is true.
Further, for the right-hand side of the dual form we obtain, under the conditions of Proposition 1 (proceeding in the same way as in the above proof), \begin{equation*} \left\vert \underset{0}{\overset{t}{\int }}\underset{\Omega }{\int }h\left( \frac{\partial u}{\partial t}-\Delta u\right) \ dxd\tau \right\vert \leq C\left( \varepsilon \right) \underset{0}{\overset{t}{\int }}\left\Vert h\right\Vert _{2}^{2}\ d\tau + \end{equation*} \begin{equation} \varepsilon \underset{0}{\overset{t}{\int }}\left\Vert \frac{\partial u}{ \partial t}\right\Vert _{2}^{2}\ d\tau +C\left( \varepsilon _{1}\right) \underset{0}{\overset{t}{\int }}\left\Vert h\right\Vert _{W_{q}^{1}}^{q}\ d\tau +\varepsilon _{1}\overset{n}{\underset{i=1}{\sum }}\underset{0}{ \overset{t}{\int }}\left\Vert D_{i}u\right\Vert _{p}^{p}\ d\tau . \tag{3.5} \end{equation}
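For clarity, we indicate how the term containing $\Delta u$ can be estimated here (a sketch: integrating by parts, using the boundary condition $\frac{\partial u}{\partial \nu }\left\vert \ _{\Gamma }\right. =0$ contained in the definition of $X_{0}$, and applying Young's inequality with the exponents $p$ and $q$):
\begin{equation*}
\left\vert \underset{\Omega }{\int }h\,\Delta u\ dx\right\vert =\left\vert \underset{\Omega }{\int }\overset{n}{\underset{i=1}{\sum }}D_{i}h\,D_{i}u\ dx\right\vert \leq C\left( \varepsilon _{1}\right) \overset{n}{\underset{i=1}{\sum }}\left\Vert D_{i}h\right\Vert _{q}^{q}+\varepsilon _{1}\overset{n}{\underset{i=1}{\sum }}\left\Vert D_{i}u\right\Vert _{p}^{p},
\end{equation*}
which, after integration over $\left( 0,t\right) $, gives (up to constants) the last two terms of (3.5); the terms containing $\frac{\partial u}{\partial t}$ are estimated by the Cauchy inequality in the same way.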
It is not difficult to see that the mapping $g$ defined by problem (3.2)-(3.3) satisfies the conditions of Theorem 2, i.e. $g\left( X_{1}\right) $ contains an everywhere dense linear manifold of $L_{p}\left( Q\right) $ and $g^{-1}$ is a weakly compact operator from $L_{p}\left( Q\right) $ to $\mathcal{M}_{0}\equiv L_{p}\left( 0,T;\overset{0}{S}{}_{1,\left( p-2\right) q,q}^{1}\left( \Omega \right) \right) \cap W_{2}^{1}\left( 0,T;L_{2}\left( \Omega \right) \right) $.
Thus we have that all conditions of Theorem 2 and Corollary 1 are fulfilled for the mappings and spaces corresponding to the studied problem. Consequently, using Theorem 2 and Corollary 1 we obtain the solvability of problem (1.6)-(1.7) in the space ${\Large P}_{1,\left( p-2\right) q,q,2}^{1}\left( Q\right) $ for any $h\in L_{2}\left( 0,T;W_{2}^{1}\left( \Omega \right) \right) $ and $u_{0}\in W_{p}^{1}\left( \Omega \right) $.
Furthermore, from here it follows that the solution of this problem possesses complementary smoothness, i.e. $\overset{n}{\underset{i=1}{\sum }}D_{i}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) \in L_{2}\left( Q\right) $, since $\frac{\partial u}{\partial t}\in L_{2}\left( Q\right) $ and $h\in L_{2}\left( 0,T;W_{2}^{1}\left( \Omega \right) \right) $ by virtue of the conditions of the considered problem.
So the following result is proved.
\begin{theorem} Under the conditions of this section, problem (1.6) - (1.7) is solvable in $ {\Large P}\left( Q\right) $ for any $u_{0}\in W_{p}^{1}\left( \Omega \right) $ and $h\in L_{2}\left( 0,T;W_{2}^{1}\left( \Omega \right) \right) $ where \begin{equation*} {\Large P}\left( Q\right) \equiv L_{p}\left( 0,T;\overset{0}{\widetilde{S}} {}_{1,p-2,2}^{1}\left( \Omega \right) \right) \cap W_{2}^{1}\left( 0,T;L_{2}\left( \Omega \right) \right) \cap {\Large P}_{1,\left( p-2\right) q,q,2}^{1}\left( Q\right) . \end{equation*} \end{theorem}
\section{A priori Estimates for Solutions of Problem (1.4)-(1.5)}
Now we can investigate the main problem of this article, posed for problem (1.4)-(1.5). We denote by $A$ and $f$ the mappings generated by problems (1.4)-(1.5) and (1.6)-(1.7), respectively.
\begin{theorem} Under the conditions of Section 1, any solution $u\left( t,x\right) $ of problem (1.4)-(1.5) belongs to a bounded subset of the function class $\widetilde{P}\left( Q\right) $ defined in the form
\begin{equation*}
u\in L^{\infty }\left( 0,T;W_{p}^{1}\left( \Omega \right) \right) ;\quad \frac{\partial u}{\partial t}\in L^{\infty }\left( 0,T;L_{2}\left( \Omega \right) \right) ;
\end{equation*}
\begin{equation}
\overset{n}{\underset{i=1}{\sum }}\overset{t}{\underset{0}{\int }}\left\vert D_{i}u\right\vert ^{p-2}D_{i}ud\tau \in W_{\infty }^{1}\left( 0,T;L_{q}\left( \Omega \right) \right) \cap L^{\infty }\left( 0,T;W_{2}^{1}\left( \Omega \right) \right) \tag{4.1}
\end{equation}
that satisfies the conditions determined by the data of problem (1.4)-(1.5). \end{theorem}
\begin{proof} Consider the dual form $\left\langle A\left( u\right) ,f\left( u\right) \right\rangle $ for any $u\in {\Large P}\left( Q\right) $ that is defined by virtue of Theorem 3. We proceed as in the proof of Proposition 1 and consider only the integral with respect to $x$. Then, after standard calculations, we have
\begin{equation*}
\underset{\Omega }{\int }\frac{\partial u}{\partial t}\ \frac{\partial u}{\partial t}\ dx\ +\underset{\Omega }{\int }\ \left( \overset{t}{\underset{0}{\int }}\overset{n}{\underset{i=1}{\sum }}D_{i}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) d\tau \right) \left( \overset{n}{\underset{j=1}{\sum }}D_{j}\left( \left\vert D_{j}u\right\vert ^{p-2}D_{j}u\right) \right) \ dx-
\end{equation*}
\begin{equation*}
\underset{\Omega }{\int }\ \left( \overset{t}{\underset{0}{\int }}\overset{n}{\underset{i=1}{\sum }}D_{i}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) d\tau \right) \frac{\partial u}{\partial t}dx-\underset{\Omega }{\int }\frac{\partial u}{\partial t}\left( \overset{n}{\underset{i=1}{\sum }}D_{i}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) \right) dx\geq
\end{equation*}
\begin{equation*}
\frac{1}{2}\left\Vert \frac{\partial u}{\partial t}\right\Vert _{L_{2}\left( \Omega \right) }^{2}+\frac{1}{2}\frac{\partial }{\partial t}\left\Vert \overset{t}{\underset{0}{\int }}\overset{n}{\underset{i=1}{\sum }}D_{i}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) d\tau \right\Vert _{L_{2}\left( \Omega \right) }^{2}+
\end{equation*}
\begin{equation}
\frac{1}{p}\frac{\partial }{\partial t}\overset{n}{\underset{i=1}{\sum }}\left\Vert D_{i}u\right\Vert _{L_{p}\left( \Omega \right) }^{p}-\frac{1}{2}\left\Vert \overset{t}{\underset{0}{\int }}\overset{n}{\underset{i=1}{\sum }}D_{i}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) d\tau \right\Vert _{L_{2}\left( \Omega \right) }^{2}\left( t\right) \tag{4.2}
\end{equation}
Now consider the right-hand side of the dual form, i.e. $\left\langle H,f\right\rangle $, in order to determine the bounded subset to which the solutions of the problem belong (and to obtain the a priori estimates). Then we get
\begin{equation*}
\left\vert \underset{\Omega }{\int }H\ \frac{\partial u}{\partial t}\ dx-\underset{\Omega }{\int }H\overset{n}{\underset{j=1}{\sum }}D_{j}\left( \left\vert D_{j}u\right\vert ^{p-2}D_{j}u\right) \ dx\right\vert \leq C\left( \varepsilon \right) \left\Vert H\right\Vert _{L_{2}\left( Q\right) }^{2}+
\end{equation*}
\begin{equation}
\varepsilon \left\Vert \frac{\partial u}{\partial t}\right\Vert _{L_{2}\left( \Omega \right) }^{2}\left( t\right) +C\left( \varepsilon _{1}\right) \left\Vert H\right\Vert _{L_{p}\left( W_{p}^{1}\right) }^{p}+\varepsilon _{1}\overset{n}{\underset{j=1}{\sum }}\left\Vert D_{j}u\right\Vert _{L_{p}\left( \Omega \right) }^{p}\left( t\right) . \tag{4.3}
\end{equation}
From (4.2) and (4.3) it follows that
\begin{equation*}
0=\underset{\Omega }{\int }\left( A\left( u\right) -H\right) \ f\left( u\right) \ dx\geq \frac{1}{2}\left\Vert \frac{\partial u}{\partial t}\right\Vert _{L_{2}\left( \Omega \right) }^{2}+\frac{1}{p}\frac{\partial }{\partial t}\overset{n}{\underset{j=1}{\sum }}\left\Vert D_{j}u\right\Vert _{L_{p}\left( \Omega \right) }^{p}+
\end{equation*}
\begin{equation*}
\frac{1}{2}\frac{\partial }{\partial t}\left\Vert \overset{t}{\underset{0}{\int }}\overset{n}{\underset{i=1}{\sum }}D_{i}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) d\tau \right\Vert _{L_{2}\left( \Omega \right) }^{2}-\frac{1}{2}\left\Vert \overset{t}{\underset{0}{\int }}\overset{n}{\underset{i=1}{\sum }}D_{i}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) d\tau \right\Vert _{L_{2}\left( \Omega \right) }^{2}-
\end{equation*}
\begin{equation*}
\varepsilon _{1}\overset{n}{\underset{i=1}{\sum }}\left\Vert D_{i}u\right\Vert _{L_{p}\left( \Omega \right) }^{p}-C\left( \varepsilon \right) \left\Vert H\right\Vert _{L_{2}\left( Q\right) }^{2}-\varepsilon \left\Vert \frac{\partial u}{\partial t}\right\Vert _{L_{2}\left( \Omega \right) }^{2}-C\left( \varepsilon _{1}\right) \left\Vert H\right\Vert _{L_{p}\left( W_{p}^{1}\right) }^{p}
\end{equation*}
or, choosing the parameters $\varepsilon >0$ and $\varepsilon _{1}>0$ sufficiently small, we have
\begin{equation*}
c\left\Vert \frac{\partial u}{\partial t}\right\Vert _{L_{2}\left( \Omega \right) }^{2}+\frac{1}{p}\frac{\partial }{\partial t}\overset{n}{\underset{i=1}{\sum }}\left\Vert D_{i}u\right\Vert _{L_{p}\left( \Omega \right) }^{p}+
\end{equation*}
\begin{equation*}
\frac{1}{2}\frac{\partial }{\partial t}\left\Vert \overset{t}{\underset{0}{\int }}\overset{n}{\underset{i=1}{\sum }}D_{i}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) d\tau \right\Vert _{L_{2}\left( \Omega \right) }^{2}\leq C\left( \left\Vert H\right\Vert _{L_{2}\left( Q\right) },\left\Vert H\right\Vert _{L_{p}\left( W_{p}^{1}\right) }\right) +
\end{equation*}
\begin{equation}
\frac{1}{p}\overset{n}{\underset{i=1}{\sum }}\left\Vert D_{i}u\right\Vert _{L_{p}\left( \Omega \right) }^{p}+\frac{1}{2}\left\Vert \overset{t}{\underset{0}{\int }}\overset{n}{\underset{i=1}{\sum }}D_{i}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) d\tau \right\Vert _{L_{2}\left( \Omega \right) }^{2}. \tag{4.4}
\end{equation}
Inequality (4.4) shows that the Gronwall lemma can be applied; consequently, we get
\begin{equation*}
\overset{n}{\underset{i=1}{\sum }}\left\Vert D_{i}u\right\Vert _{L_{p}\left( \Omega \right) }^{p}\left( t\right) +\left\Vert \overset{t}{\underset{0}{\int }}\overset{n}{\underset{i=1}{\sum }}D_{i}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) d\tau \right\Vert _{L_{2}\left( \Omega \right) }^{2}\left( t\right) \leq
\end{equation*}
\begin{equation}
C\left( \left\Vert H\right\Vert _{L_{2}\left( Q\right) },\left\Vert H\right\Vert _{L_{p}\left( W_{p}^{1}\right) },\left\Vert u_{0}\right\Vert _{W_{p}^{1}\left( \Omega \right) }\right) \tag{4.5}
\end{equation}
holds for a.e. $t\in \left[ 0,T\right] $.
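Here we have used the following standard differential form of the Gronwall lemma, recalled for the reader's convenience: if a nonnegative absolutely continuous function $y$ satisfies $y^{\prime }\left( t\right) \leq C_{0}+C_{1}y\left( t\right) $ for a.e. $t\in \left[ 0,T\right] $ with constants $C_{0},C_{1}\geq 0$, then
\begin{equation*}
y\left( t\right) \leq e^{C_{1}t}\left( y\left( 0\right) +C_{0}t\right) ,\quad t\in \left[ 0,T\right] ;
\end{equation*}
it is applied here with $y\left( t\right) =\frac{1}{p}\overset{n}{\underset{i=1}{\sum }}\left\Vert D_{i}u\right\Vert _{L_{p}\left( \Omega \right) }^{p}\left( t\right) +\frac{1}{2}\left\Vert \overset{t}{\underset{0}{\int }}\overset{n}{\underset{i=1}{\sum }}D_{i}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) d\tau \right\Vert _{L_{2}\left( \Omega \right) }^{2}\left( t\right) $.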
Thus, by virtue of inequalities (4.2)-(4.5), any solution of problem (1.4)-(1.5) satisfies the following estimates
\begin{equation*}
\left\Vert u\right\Vert _{W_{p}^{1}\left( \Omega \right) }\left( t\right) \leq C\left( \left\Vert H\right\Vert _{L_{p}\left( W_{p}^{1}\right) },\left\Vert u_{0}\right\Vert _{W_{p}^{1}\left( \Omega \right) }\right) ,
\end{equation*}
\begin{equation*}
\left\Vert \overset{t}{\underset{0}{\int }}\overset{n}{\underset{i=1}{\sum }}D_{i}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) d\tau \right\Vert _{L_{2}\left( \Omega \right) }\left( t\right) \leq C\left( \left\Vert H\right\Vert _{L_{p}\left( W_{p}^{1}\right) },\left\Vert u_{0}\right\Vert _{W_{p}^{1}\left( \Omega \right) }\right) ,
\end{equation*}
\begin{equation*}
\left\Vert \frac{\partial u}{\partial t}\right\Vert _{L_{2}\left( \Omega \right) }\left( t\right) \leq C\left( \left\Vert H\right\Vert _{L_{p}\left( W_{p}^{1}\right) },\left\Vert u_{0}\right\Vert _{W_{p}^{1}\left( \Omega \right) }\right)
\end{equation*}
for a.e. $t\in \left[ 0,T\right] $. In other words, any solution of problem (1.4)-(1.5) belongs to a bounded subset of the following class
\begin{equation*}
u\in L^{\infty }\left( 0,T;W_{p}^{1}\left( \Omega \right) \right) ;\quad \frac{\partial u}{\partial t}\in L^{\infty }\left( 0,T;L_{2}\left( \Omega \right) \right) ;
\end{equation*}
\begin{equation*}
\frac{\partial }{\partial t}\left( \overset{n}{\underset{i=1}{\sum }}\overset{t}{\underset{0}{\int }}\left\vert D_{i}u\right\vert ^{p-2}D_{i}ud\tau \right) \in L^{\infty }\left( 0,T;L_{q}\left( \Omega \right) \right)
\end{equation*}
\begin{equation}
\overset{t}{\underset{0}{\int }}\overset{n}{\underset{i=1}{\sum }}D_{i}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) d\tau \in L^{\infty }\left( 0,T;L_{2}\left( \Omega \right) \right) , \tag{4.6}
\end{equation}
for each given $u_{0},u_{1}\in W_{p}^{1}\left( \Omega \right) $, $h\in L_{p}\left( 0,T;W_{p}^{1}\left( \Omega \right) \right) $.
From here it follows that all solutions of this problem belong to a bounded subset of the space $\widetilde{P}\left( Q\right) $, which is defined by (4.1).
Indeed, first it is easy to see that the following inequality holds
\begin{equation*}
\left\Vert \overset{n}{\underset{i=1}{\sum }}\overset{t}{\underset{0}{\int }}\left\vert D_{i}u\right\vert ^{p-2}D_{i}ud\tau \right\Vert _{L_{q}\left( \Omega \right) }^{q}\leq C\overset{n}{\underset{i=1}{\sum }}\left\Vert \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right\Vert _{L_{q}\left( \Omega \right) }^{q}\leq
\end{equation*}
\begin{equation*}
C\left( T,mes\ \Omega \right) \left\Vert u\right\Vert _{W_{p}^{1}\left( \Omega \right) }^{q}\left( t\right) \Longrightarrow \overset{t}{\underset{0}{\int }}\overset{n}{\underset{i=1}{\sum }}\left\vert D_{i}u\right\vert ^{p-2}D_{i}ud\tau \in L^{\infty }\left( 0,T;L_{q}\left( \Omega \right) \right) ,
\end{equation*}
and secondly, the following equalities hold
\begin{equation*}
\underset{\Omega }{\int }\left( \overset{t}{\underset{0}{\int }}\overset{n}{\underset{i=1}{\sum }}D_{i}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) d\tau \right) ^{2}dx\equiv \left\Vert \overset{t}{\underset{0}{\int }}\overset{n}{\underset{i=1}{\sum }}D_{i}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) d\tau \right\Vert _{2}^{2}\equiv
\end{equation*}
\begin{equation*}
\left\langle \overset{t}{\underset{0}{\int }}\overset{n}{\underset{i=1}{\sum }}D_{i}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) d\tau ,\overset{t}{\underset{0}{\int }}\overset{n}{\underset{j=1}{\sum }}D_{j}\left( \left\vert D_{j}u\right\vert ^{p-2}D_{j}u\right) d\tau \right\rangle =
\end{equation*}
\begin{equation*}
\overset{n}{\underset{i,j=1}{\sum }}\left\langle \overset{t}{\underset{0}{\int }}D_{j}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) d\tau ,\overset{t}{\underset{0}{\int }}D_{i}\left( \left\vert D_{j}u\right\vert ^{p-2}D_{j}u\right) d\tau \right\rangle =
\end{equation*}
\begin{equation*}
\overset{n}{\underset{i,j=1}{\sum }}\left\langle D_{j}\overset{t}{\underset{0}{\int }}\left\vert D_{i}u\right\vert ^{p-2}D_{i}ud\tau ,D_{i}\overset{t}{\underset{0}{\int }}\left\vert D_{j}u\right\vert ^{p-2}D_{j}ud\tau \right\rangle ,
\end{equation*}
and also
\begin{equation*}
\overset{n}{\underset{j=1}{\sum }}\left\Vert D_{j}\overset{t}{\underset{0}{\int }}~\overset{n}{\underset{i=1}{\sum }}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) d\tau \right\Vert _{2}^{2}=
\end{equation*}
\begin{equation*}
\overset{n}{\underset{j=1}{\sum }}\left\langle D_{j}\overset{t}{\underset{0}{\int }}~\overset{n}{\underset{i=1}{\sum }}\left\vert D_{i}u\right\vert ^{p-2}D_{i}ud\tau ,D_{j}\overset{t}{\underset{0}{\int }}~\overset{n}{\underset{i=1}{\sum }}\left\vert D_{i}u\right\vert ^{p-2}D_{i}ud\tau \right\rangle .
\end{equation*}
These demonstrate that the function
\begin{equation*}
v\left( t,x\right) \equiv \overset{t}{\underset{0}{\int }}\ \overset{n}{\underset{i=1}{\sum }}\left\vert D_{i}u\right\vert ^{p-2}D_{i}u\ d\tau
\end{equation*}
belongs to a bounded subset of the space
\begin{equation*}
L^{\infty }\left( 0,T;L_{q}\left( \Omega \right) \right) \cap \left\{ v\left( t,x\right) \left\vert ~Dv\in \right. L^{\infty }\left( 0,T;L_{2}\left( \Omega \right) \right) \right\} .
\end{equation*}
Therefore, in order to prove the correctness of (4.1), it remains to use the following inequality, i.e. 
the Nirenberg-Gagliardo-Sobolev inequality
\begin{equation}
\left\Vert D^{\beta }v\right\Vert _{p_{2}}\leq C\left( \underset{\left\vert \alpha \right\vert =m}{\sum }\left\Vert D^{\alpha }v\right\Vert _{p_{0}}^{\theta }\right) \left\Vert v\right\Vert _{p_{1}}^{1-\theta },\quad 0\leq \left\vert \beta \right\vert =l\leq m-1, \tag{4.7}
\end{equation}
which holds for each $v\in W_{p_{0}}^{m}\left( \Omega \right) $, $\Omega \subset R^{n}$, $n\geq 1$, $C\equiv C\left( p_{0},p_{1},p_{2},l,s\right) $ and $\theta $ such that $\frac{1}{p_{2}}-\frac{l}{n}=\left( 1-\theta \right) \frac{1}{p_{1}}+\theta \left( \frac{1}{p_{0}}-\frac{m}{n}\right) $. Indeed, in inequality (4.7) it is enough to choose $p_{2}=2$, $l=0$, $p_{1}=q$, $p_{0}=2$; then we get
\begin{equation*}
\frac{1}{2}=\left( 1-\theta \right) \frac{p-1}{p}+\theta \left( \frac{1}{2}-\frac{1}{n}\right) \Longrightarrow \theta \left( \frac{1}{2}-\frac{1}{n}-\frac{p-1}{p}\right) =\frac{1}{2}-\frac{p-1}{p}\Longrightarrow
\end{equation*}
$\theta =\frac{n\left( p-2\right) }{n\left( p-2\right) +2p}$ for $p>2$, and so (4.1) is correct. \end{proof}
\begin{corollary} Under the conditions of Theorem 4, each solution of problem (1.1)-(1.3) belongs to a bounded subset of the class $\mathbf{V}\left( Q\right) $ defined in (DS). \end{corollary}
\begin{proof} From (4.1) it follows that
\begin{equation*}
\overset{n}{\underset{i=1}{\sum }}\overset{t}{\underset{0}{\int }}\left\vert D_{i}u\right\vert ^{p-2}D_{i}ud\tau \in L^{\infty }\left( 0,T;\overset{0}{W}{}_{2}^{1}\left( \Omega \right) \right) \cap W_{\infty }^{1}\left( 0,T;L_{q}\left( \Omega \right) \right) ,
\end{equation*}
and moreover
\begin{equation*}
\overset{t}{\underset{0}{\int }}\overset{n}{\underset{i,j=1}{\sum }}D_{j}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) d\tau \in L^{\infty }\left( 0,T;L_{2}\left( \Omega \right) \right) ,
\end{equation*}
and it is bounded in this space. Then, taking into account the properties of the Lebesgue integral, we obtain
\begin{equation*}
\overset{t}{\underset{0}{\int }}\ \left\{ \underset{\Omega }{\int }\left[ \overset{n}{\underset{i,j=1}{\sum }}D_{j}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) \right] ^{2}dx\right\} ^{\frac{1}{2}}d\tau \leq C,\quad C\neq C\left( t\right)
\end{equation*}
from which we get
\begin{equation*}
\overset{n}{\underset{i,j=1}{\sum }}D_{j}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) \in L_{1}\left( 0,T;L_{2}\left( \Omega \right) \right) ,
\end{equation*}
and so
\begin{equation}
\overset{n}{\underset{i=1}{\sum }}D_{i}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) \in L_{1}\left( 0,T;L_{2}\left( \Omega \right) \right) \tag{4.8}
\end{equation}
and it is bounded in this space.
If we consider equation (1.1), and take into account that it is solvable in the generalized sense and $\frac{\partial u}{\partial t}\in W_{\infty }^{1}\left( 0,T;L_{2}\left( \Omega \right) \right) $ (by (4.1)) then from Definition 1 it follows that \begin{equation*} \left[ \frac{\partial ^{2}u}{\partial t^{2}},v\right] -\left[ \overset{n}{ \underset{i=1}{\sum }}D_{i}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) ,v\right] =\left[ h,v\right] \end{equation*} holds for any $v\in W_{\widetilde{p}}^{1}\left( 0,T;L_{2}\left( \Omega \right) \right) $, $\widetilde{p}>1$.
Hence \begin{equation} \left[ \frac{\partial ^{2}u}{\partial t^{2}},v\right] =\left[ \overset{n}{ \underset{i=1}{\sum }}D_{i}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) +h,v\right] \tag{4.9} \end{equation} holds for any $v\in L^{\infty }\left( Q\right) $.
Thus we obtain $\frac{\partial ^{2}u}{\partial t^{2}}\in L_{1}\left( 0,T;L_{2}\left( \Omega \right) \right) $ by virtue of (4.1) and (4.8), since
\begin{equation*}
\overset{n}{\underset{i=1}{\sum }}D_{i}\left( \left\vert D_{i}u\right\vert ^{p-2}D_{i}u\right) +h\in L_{1}\left( 0,T;L_{2}\left( \Omega \right) \right) .
\end{equation*}
\end{proof}
\end{document}
\begin{document}
\title{Matrix-Free Approximate Equilibration}
\renewcommand{\thefootnote}{\fnsymbol{footnote}}
\footnotetext[1]{Dept.~of Geophysics, Stanford University ({\tt
[email protected]}).} \footnotetext[2]{Inst.~for Comp.~and Math.~Eng., Stanford University ({\tt
[email protected]}).} \footnotetext[3]{Supported by a National Science Foundation Graduate Research
Fellowship and a Scott A.~and Geraldine D.~Macomber Stanford Graduate
Fellowship.} \footnotetext[4]{Additional support from the Office of Naval Research and the
Army High Performance Computing Research Center.}
\begin{abstract} The condition number of a diagonally scaled matrix, for appropriately chosen scaling matrices, is often less than that of the original. Equilibration scales a matrix so that the scaled matrix's row and column norms are equal. Scaling can be approximate. We develop approximate equilibration algorithms for nonsymmetric and symmetric matrices having signed elements that access a matrix only by matrix-vector products. \end{abstract}
\begin{keywords}
binormalization, doubly stochastic, matrix equilibration, matrix-free algorithms \end{keywords}
\begin{AMS} 15A12, 15B51, 65F35 \end{AMS}
\pagestyle{myheadings} \thispagestyle{plain}
\markboth{A.M.~BRADLEY AND W.~MURRAY} {MATRIX-FREE APPROXIMATE EQUILIBRATION}
\section{Introduction} For a square, nonnegative, real, nonsymmetric matrix $B$, equilibration in the 1-norm finds $x, y > 0$ such that $X B y = e$ and $Y B^T x = e$, where $X = \text{diag}(x)$ and similarly for other vectors, and $e$ is the vector of all ones. Hence $X B Y$ is doubly stochastic. For a symmetric matrix, symmetric equilibration finds $x > 0$ such that $X B x = e$. If $B = A \circ A$ for $A$ a real, possibly signed, matrix, where $\circ$ denotes the element-wise product, then these equations equilibrate $A$ in the 2-norm. Equilibration in the 2-norm is often called \emph{binormalization}. Approximate equilibration scales a matrix so that its row and column norms are almost equal. Both the exactly and approximately equilibrated matrices often have smaller condition numbers than the original. In this paper we always use the 2-norm condition number. Equilibration is particularly usefully applied to matrices for which simpler diagonal scaling methods fail: for example, to indefinite symmetric matrices. In Section \ref{sec:dscale}, we compare equilibration with Jacobi scaling when applied to symmetric matrices.
In some problems, accessing elements of a matrix is expensive. What are often called \emph{matrix-free} algorithms access a matrix only by matrix-vector products. If $A$ is a matrix having nonnegative elements, then many algorithms already exist to equilibrate $A$ using only matrix-vector products: for example, the Sinkhorn-Knopp iteration. But if $A$ has signed elements, then one must obtain $|A|$ to use these algorithms, which requires accessing the elements of $A$. In Section \ref{sec:algs}, we develop matrix-free approximate equilibration algorithms for square nonsymmetric and symmetric matrices having signed elements, and we report the results of numerical experiments with these algorithms in Section \ref{sec:numexp}.
\section{Diagonal scaling of symmetric matrices} \label{sec:dscale} \begin{figure}
\caption{Numerical study of conditioning of symmetric matrices. The four plots
show condition number of the Jacobi-scaled (top) and binormalized (bottom)
symmetric positive definite (left) and indefinite (right) matrix as a function
of the condition number of the unscaled matrix.}
\label{fig:def}
\end{figure}
Jacobi scaling pre- and post-multiplies a square, usually symmetric positive definite (spd) matrix by a diagonal matrix such that the scaled matrix has unit diagonal elements.
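In code, Jacobi scaling is a single diagonal scaling; the following lines are our own illustrative sketch (the test matrix is arbitrary, and the replacement of zero diagonal elements by one, used below for indefinite matrices, is included for completeness).
\begin{lstlisting}[language=matlab,basicstyle=\footnotesize]
n = 5; R = randn(n); A = R'*R;    % hypothetical spd test matrix
dg = diag(A); dg(dg == 0) = 1;    % indefinite case: replace zero diagonal by one
d = 1./sqrt(abs(dg));
Aj = diag(d)*A*diag(d);           % Jacobi-scaled matrix with unit diagonal
\end{lstlisting}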
Numerical experiments show that the condition number of the equilibrated or Jacobi-scaled matrix is often considerably less than that of the original matrix. Figure \ref{fig:def} shows the results of a numerical experiment using 323 symmetric matrices from the University of Florida Sparse Matrix Collection \cite{davis_uf}; see Section \ref{sec:numexp} for further details on the data set. The matrices used in this experiment have sizes 10 to 36441, with a median size of 5000. Figure \ref{fig:def} shows the condition number of the scaled matrix as a function of that of the unscaled matrix. Two diagonal scaling methods are used: Jacobi (top) and binormalization (bottom). Matrices are divided into positive definite (left) and indefinite (right).
This experiment shows that if a matrix is spd, \begin{comment} then Jacobi scaling is always possible as every diagonal element is positive,
\end{comment} then equilibration and Jacobi scaling reduce the condition number by about the same amount; indeed, the two corresponding plots are almost identical. It also shows that when the two methods are applied to an indefinite matrix---in the case of Jacobi scaling, replacing a zero diagonal element with a one---the condition number of the Jacobi-scaled matrix is likely to be substantially greater than that of the equilibrated matrix. For these reasons, equilibration of symmetric indefinite matrices can be thought of as a generalization of Jacobi scaling of spd matrices, raising the question of the relationship between the two scaling methods when applied to spd matrices.
Let $A$ be an $n \times n$ spd matrix whose diagonal elements are all one. Let $\kappa(\cdot)$ denote the 2-norm condition number of a matrix. Van der Sluis showed that $\kappa(A) \le n \min_d \kappa(DAD)$ (Theorem 4.1 of \cite{vandersluis}) and that if $A$ has at most $q$ nonzero elements in any row, then $\kappa(A) \le q \min_d \kappa(DAD)$ (Theorem 4.3 of \cite{vandersluis}). A matrix $C$ has \emph{Young's property A} if there exists a permutation matrix $P$ such that \begin{equation*}
P C P^T = \pmat{D_1 & C_1 \\ C_2 & D_2} \end{equation*} and $D_1$ and $D_2$ are square diagonal matrices. Forsythe and Straus showed that if the matrix $A$ has in addition Young's property A, then $\kappa(A) = \min_d \kappa(DAD)$ (Theorem 4 of \cite{forsythe-straus}). In summary, these three theorems state that Jacobi scaling is within a factor of $n$, $q$, or 1 of optimal among all diagonal scaling matrices.
If $A$ is spd, then so is $B \equiv A \circ A$ by the Schur Product Theorem (see, for example, Theorem 7.5.3 of \cite{haj}). Suppose $A$ has unit diagonal elements. Then so does $B$. Moreover, $B_{ij} < 1$ for $i \ne j$. \begin{comment} For suppose $B_{ij} \ge 1$. Let $v$ be the vector such that $v_i = 1$, $v_j = -1$, and $v_k = 0$ for all other elements. Then $v^T B v = 2 - 2 B_{ij} \le 0$, which contradicts that $B$ is spd. \end{comment} Suppose Jacobi scaling---replacing a zero diagonal element with a one---has been applied to an $n \times n$ symmetric matrix $\bar A$ to yield the matrix $A$, and again let $B \equiv A \circ A$. Consider the vector of row sums $s \equiv B e$. If $\bar A$ is indefinite, $0 \le s_i < \infty$. If $\bar A$ is spd, as every diagonal element of $B$ is 1, $s_i \ge 1$; and as every off-diagonal element $B_{ij} < 1$, $s_i < n$.
Let $\mu(v)$ be the mean of the elements of an $n$-vector $v$ and $\text{var}(v)$ the variance: $\text{var}(v) \equiv n^{-1} \sum_i (v_i - \mu(v))^2$. If a matrix is binormalized, then the variance of the vector of its row 2-norms is 0. If $\bar A$ is indefinite, $\text{var}(s)$ can be arbitrarily large. But if $A$ is spd, then $\text{var}(s) < (n-1)^2$. For as each $1 \le s_i < n$, $(s_i - \mu(s))^2 < (n-1)^2$, and so $n^{-1} \sum_i (s_i - \mu(s))^2 < n^{-1} \sum_i (n-1)^2 = (n-1)^2$.
From the other direction, an immediate corollary of inequality 2 in \cite{livne04spd} is that if an spd matrix $\bar A$ is equilibrated in the 2-norm to form $\tilde A$, then $n^{-1/2} < \tilde A_{ii} \le 1$ (the upper bound follows immediately from equilibration to unit row and column 1-norms); if $\bar A$ is indefinite, then of course $-1 \le \tilde A_{ii} \le 1$.
In summary, if a matrix is spd, Jacobi scaling produces a matrix that is not arbitrarily far from being binormalized, and binormalization produces a matrix whose diagonal elements are bounded below and above by positive numbers. The bounds depend on the size of the matrix. If a matrix is symmetric indefinite, then neither statement holds.
\section{Algorithms} \label{sec:algs} Sink\-horn and Knopp analyzed the convergence properties of the iteration \eqref{eq:sk-nonsym}: \begin{equation} \label{eq:sk-nonsym}
r^{k+1} = (B c^k)^{-1}, \quad c^{k+1} = (B^T r^{k+1})^{-1}. \end{equation} The reciprocal is applied by element. $c^0$ is a vector whose elements are all positive. According to Knight \cite{knight}, the iteration was used as early as the 1930s.
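When $B$ is available explicitly, iteration \eqref{eq:sk-nonsym} takes only a few lines of code; the following sketch is ours and is given only for orientation (it assumes $B$ has total support, so that no division by zero occurs).
\begin{lstlisting}[language=matlab,basicstyle=\footnotesize]
function [r c] = sk(B,niter)
% Plain Sinkhorn-Knopp iteration for an explicitly available nonnegative B.
% After enough iterations, diag(r)*B*diag(c) is close to doubly stochastic.
c = ones(size(B,2),1);
for(k = 1:niter)
  r = 1./(B*c);
  c = 1./(B'*r);
end
\end{lstlisting}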
Parlett and Landis \cite{parlett-landis} generalized Sinkhorn and Knopp's convergence analysis and developed several new algorithms, one of which, EQ, substantially outperformed the Sinkhorn-Knopp iteration on a test set. Khachiyan and Kalantari \cite{khachiyan-kalantari} used Newton's method to scale positive semidefinite symmetric matrices. Livne and Golub \cite{livne04bin} developed algorithms for symmetric and nonsymmetric matrices based on the Gauss-Seidel-Newton method. Knight and Ruiz \cite{knight-ruiz} devised an algorithm based on an inexact Newton method that uses the conjugate gradients iteration.
Nonuniqueness of equilibration in the infinity norm motivates multiple algorithms that consider both efficiency and quality of the scaling under criteria other than the infinity norms of the rows and columns. A matrix can be scaled in the infinity norm if it has no zero rows or columns. The simplest nonsymmetric algorithm is first to scale the rows (columns), then to scale the columns (rows). After the first scaling, the largest number in the matrix is 1, and the second scaling cannot produce numbers that are larger than 1. Therefore, scaling is achieved after one iteration. Bunch \cite{bunch} developed an algorithm that equilibrates any symmetric matrix in the infinity norm. More recently, Ruiz \cite{ruiz} developed another iteration that compares favorably with Bunch's algorithm. He extended the method to 1- and 2-norms and showed convergence results for these algorithms as strong as, and based on, those by Parlett and Landis \cite{parlett-landis} for their algorithms.
Each of these algorithms is iterative and yields a sequence of matrices converging to a doubly stochastic matrix. A user can terminate the iteration early to yield an approximately equilibrated matrix; hence these algorithms may be viewed as approximate equilibration algorithms.
To date, it appears that all scaling algorithms for matrices having signed elements require access to the elements of the matrix. If $A$ is nonnegative, the situation is much different; for example, the Sinkhorn-Knopp algorithm requires only the matrix-vector products (mvp) $A x$ and $A^T x$. For general matrices, algorithms need at least mvp of the form $|A| x$ (1-norm), $(A \circ A) x$ (2-norm), or similar expressions, and their transposes. We introduce approximate scaling algorithms for equilibration in the 2-norm that require only the mvp $A x$ and $A^T x$, where $x$ is a random vector. Algorithms that compute the mvp with a random vector have been developed to solve other problems. For example, Bekas, Kokiopoulou, and Saad \cite{saad} developed a method to estimate the diagonal elements of a matrix; and Chen and Demmel \cite{chen-demmel}, to balance a matrix prior to computing its eigenvalues. Our algorithms also have a connection to the methods of \emph{stochastic approximation} \cite{book-hard-stoch-approx}. \begin{comment} In this section, \emph{stochastic} refers to the use of random vectors rather than, as earlier, to a particular numerical structure of a matrix. \end{comment}
We want to emphasize that because the algorithms we propose access a matrix having signed elements only through a sequence of mvp, we cannot expect them to be faster than, or even as fast as, algorithms that access the elements directly when applied to matrices for which direct access to the elements is possible and efficient. Our algorithms are useful only if a matrix has signed elements that are impossible or inefficient to access directly; it appears that no algorithms are already available to solve this problem. It is also desirable that only a small number, relative to the size of the matrix, of mvp are required.
\subsection{Existence and uniqueness} \label{sec:theory} A matrix has \emph{support} if a positive main diagonal exists under a column permutation; a matrix having this property is equivalently said to be \emph{structurally nonsingular} \cite{davis-book}. A square matrix has \emph{total support} if every nonzero element occurs in the positive main diagonal under a column permutation. A matrix has total support if and only if there exists a doubly stochastic matrix having the same zero pattern \cite{perfect-mirsky}. A matrix $A$ is \emph{partly decomposable} if there exist permutation matrices $P$ and $Q$ such that \begin{equation} \label{eq:pdecomp}
P A Q = \pmat{E & 0 \\ C & D}, \end{equation} where $E$ and $D$ are square matrices. A square matrix is \emph{fully
indecomposable} if it is not partly decomposable. A fully indecomposable matrix has total support \cite{brualdi-80}. A matrix $A$ is \emph{reducible} if there exists a permutation matrix $P$ such that $P A P^T$ has the matrix structure in \eqref{eq:pdecomp}; otherwise, $A$ is \emph{irreducible}. For convenience, a matrix is said to be \emph{scalable} if it can be equilibrated.
\begin{theorem}[Sinkhorn and Knopp \cite{sinkhorn-knopp}] \label{thm:sk}
Let $B$ be a nonnegative square matrix.
\begin{remunerate}
\item There exist positive diagonal matrices $R$ and $C$ such that $F \equiv R
B C$ is doubly stochastic---briefly, $B$ is scalable---if and only if $B$
has total support.
\item If $B$ is scalable, then $F$ is unique.
\item $R$ and $C$ are unique up to a scalar multiple if and only if $B$ is
fully indecomposable.
\item The Sinkhorn-Knopp iteration yields a sequence of matrices that
converges to a unique doubly stochastic matrix, for all initial $r,c > 0$,
if and only if $B$ has support. If $B$ has support that is not total, then
$R$ and $C$ have elements that diverge.
\end{remunerate} \end{theorem} Parts 1--3 were independently discovered in \cite{pbs-66}. \begin{comment} : the authors of each paper acknowledge the other accordingly. The necessity of
total support follows directly from Birkhoff's theorem: a doubly stochastic
matrix is a convex combination of permutation matrices (see, {\it e.g.},
Theorem 8.7.1 of \cite{haj}). \end{comment}
\begin{theorem}[Csima and Datta \cite{csima-datta}] \label{thm:csima-datta}
A symmetric matrix is symmetrically scalable if and only if it has total
support. \end{theorem}
The necessary and sufficient condition of total support in Theorem \ref{thm:csima-datta} is identical to that in part 1 of Theorem \ref{thm:sk}. The necessary part follows directly from part 1, but proving the sufficiency part requires several steps not needed in the nonsymmetric case.
Section 3 of \cite{knight} discusses the symmetric iteration \begin{equation} \label{eq:sk-sym}
x^{k+1} = (B x^k)^{-1} \end{equation} for symmetric $B$ and sketches a proof of convergence. Not directly addressed are the possibility that the iterates $x^k$ oscillate and the case of reducible $B$.
If $B$ is irreducible, this oscillation is straightforward and benign. The resulting scaled matrix is a scalar multiple of a doubly stochastic one. For example, suppose $\bar B = 1$ and $x^0 = 2$. Then for $k$ even, $x^k = 2$, and for $k$ odd, $x^k = 1/2$. In general, if symmetric $B$ is irreducible, $X^k B X^{k+1}$ converges to a doubly stochastic matrix, while $X^{2k} B X^{2k}$ and $X^{2k+1} B X^{2k+1}$ converge to scalar multiples of a doubly stochastic matrix, and these scalars are reciprocals of each other.
Somewhat more complicated is reducible $B$. For example, consider the matrix $\bar B = \text{diag}(1 \ 2)^T$. If $x^0 = e$, the even iterates remain $e$ while the odd iterates are $v \equiv (1 \ 1/2)^T$. $I \bar B V$ is doubly stochastic, but $v$ is not proportional to $e$. Moreover, $V \bar B V$ is not simply a scalar multiple of a doubly stochastic matrix. This nonconvergence is also benign. A reducible symmetric matrix $B$ can be symmetrically permuted to be block diagonal with each block irreducible. Hence the equilibration problem is decoupled into as many smaller problems. We can construct a symmetric equilibrating vector $x$ from the nonsymmetric equilibrating vectors $r$ and $c$ by setting $x = \sqrt{r c}$. For suppose $r$ and $c$ equilibrate $B$ by $R B C$. Let ${\cal I}$ be the indices corresponding to an irreducible block. Then $r({\cal I}) \propto c({\cal I})$ and the block $X({\cal I},{\cal I}) B({\cal
I},{\cal I}) X({\cal I},{\cal I})$ is doubly stochastic. For $\bar B$, the symmetric equilibration vector is $\sqrt{e v} = (1 \ 1/\sqrt{2})^T$.
These observations suggest that we should write the symmetric Sinkhorn-Knopp iteration as \begin{equation} \label{eq:sk-sym-revised}
y^{k+1} = (B y^k)^{-1}, \quad x^{k+1} = \sqrt{y^{k+1} y^k}. \end{equation} Since $x^k$ does not actually play a role in the iteration, in practice, the square root operation needs to be applied only after the final iteration to yield the scaling matrix.
\subsection{Stochastic equilibration} Our algorithms are based on the Sinkhorn-Knopp iteration. The Sinkhorn-Knopp iteration performs the mvp $B x$ and $B^T x$ for a nonnegative matrix $B$. If
$A$ is a matrix having signed elements, then $B_{ij} = |A_{ij}|^p$ for $p \ge 1$ for equilibration in the $p$-norm, and so $B$ is not available if one does not have access to the elements of $A$. The key idea, similar to that in \cite{chen-demmel}, in our algorithms is to compute $B x$ approximately by using an mvp with $A$ rather than $B$, where $B \equiv A \circ A$, and similarly for $B^T x$.
\begin{comment} \begin{lemma} \label{lem:u_iid}
Let $a \in \mathbb{R}^n$. If the elements of the random vector $u \in
\mathbb{R}^n$ have zero mean, positive and finite variance, and are iid, then
$\ensuremath{\text{E}\,} (a^T u)^2 = \eta \ensuremath{\text{E}\,} a^T a$
for finite $\eta > 0$. \end{lemma} \begin{proof}
Because $\ensuremath{\text{E}\,} u_i u_j = 0$ if $i \ne j$, $\ensuremath{\text{E}\,} \left( \sum_j a_j u_j \right)^2 =
\ensuremath{\text{E}\,} \sum_j a_j^2 u_j^2 = \eta \sum_j a_j^2$, where $\eta = \ensuremath{\text{E}\,} u_j^2 > 0$ is
finite. \end{proof}
If $a$ is complex valued, Lemma \ref{lem:u_iid} generalizes simply by considering $(\bar a^T u) (a^T u)$ rather than $(a^T u)^2$. We present our algorithms for real-valued matrices, but they immediately extend to complex-valued matrices. \end{comment}
Let $a \in \mathbb{R}^n$. If the elements of the random vector $u \in \mathbb{R}^n$ have zero mean, positive and finite variance, and are iid, then $\ensuremath{\text{E}\,} (a^T u)^2 = \eta \ensuremath{\text{E}\,} a^T a$ for finite $\eta > 0$, where E denotes expectation. For as $\ensuremath{\text{E}\,} u_i u_j = 0$ if $i \ne j$, $\ensuremath{\text{E}\,} ( \sum_j a_j u_j )^2 = \ensuremath{\text{E}\,} \sum_j a_j^2 u_j^2 = \eta \sum_j a_j^2$, where $\eta = \ensuremath{\text{E}\,} u_j^2 > 0$ is finite. See \cite{chen-demmel} for more on this and related expectations. We use this fact to approximate $B x$ by computing the mvp $A X^{1/2} u$: \begin{equation} \label{eq:AXu_stoch}
\ensuremath{\text{E}\,} (A X^{1/2} u)^2 = \eta ((A X^{1/2}) \circ (A X^{1/2})) e = \eta (A \circ A)
X e = \eta B x. \end{equation}
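As a small numerical sanity check (our own illustration, not part of the algorithms below), the identity \eqref{eq:AXu_stoch} can be confirmed by averaging over realizations of $u$; note that $\eta = 1$ when $u$ is standard normal.
\begin{lstlisting}[language=matlab,basicstyle=\footnotesize]
A = randn(6); x = rand(6,1);        % arbitrary small example
B = A.*A;                           % B = A o A
nsamp = 1e4; est = zeros(6,1);
for(k = 1:nsamp)
  u = randn(6,1);
  est = est + (A*(sqrt(x).*u)).^2;  % one realization of (A X^{1/2} u)^2
end
disp([est/nsamp B*x])               % the two columns should be close
\end{lstlisting}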
To increase the accuracy of the approximation to $Bx$, one could compute the mean of multiple mvp $A X^{1/2} u$. Then one could construct an approximate scaling algorithm by replacing the exact computation $B x$ with this estimate, and similarly for $B^T x$, in the Sinkhorn-Knopp algorithm. However, the method of \emph{stochastic approximation} \cite{book-hard-stoch-approx} suggests a better approach. In stochastic approximation, the exact iteration $x^{k+1} = x^k + \omega^k f(x^k)$ is replaced by the stochastic iteration $x^{k+1} = x^k + \omega^k \tilde f(x^k)$, where $\ensuremath{\text{E}\,} \tilde f(x^k) = f(x^k)$ and $x$ is sought such that $f(x) = 0$. Rather than explicitly averaging multiple realizations of $\tilde f(x^k)$ at each iteration, the stochastic approximation iteration controls the relative weight of $\tilde f$ through $\omega^k$ and alters the iterate $x^k$ at each evaluation of $\tilde f$. \begin{comment} If several conditions are satisfied---some of which are not straightforward to verify---then the second iteration converges with probability 1 to a limit point of the first. \end{comment}
Let $\rho \equiv r^{-1}$, $\gamma \equiv c^{-1}$, and $0 < \omega^k < 1$. Consider the iteration \begin{align}
\rho^{k+1} &= (1 - \omega^k) \frac{\rho^k}{\|\rho^k\|_1} +
\omega^k \frac{B c^k}{\|B c^k\|_1} \label{eq:de1a} \\
\gamma^{k+1} &= (1 - \omega^k) \frac{\gamma^k}{\|\gamma^k\|_1} +
\omega^k \frac{B^T r^{k+1}}{\|B^T r^{k+1}\|_1}. \notag \end{align} This iteration takes a convex combination of the reciprocal of an iterate and the Sinkhorn-Knopp update when each is normalized by its 1-norm. \begin{comment} If $r^k$ rather than $r^{k+1}$ is used in the second equation, then the iteration can be rearranged to produce one having the form of \eqref{eq:exact_it_form} up to a scalar. \end{comment} Let $u^k$ and $v^k$ be random vectors as before. For the vector $x$, $(x)^2$ is the element-wise square. Substituting \eqref{eq:AXu_stoch} into this iteration, we obtain the stochastic iteration \begin{align}
y^k &= (A (C^k)^{1/2} u^k)^2 \notag \\
\rho^{k+1} &= (1 - \omega^k) \frac{\rho^k}{\|\rho^k\|_1} +
\omega^k \frac{y^k}{\|y^k\|_1} \label{eq:se1a} \\
z^k &= (A^T (R^{k+1})^{1/2} v^k)^2 \notag \\
\gamma^{k+1} &= (1 - \omega^k) \frac{\gamma^k}{\|\gamma^k\|_1} +
\omega^k \frac{z^k}{\|z^k\|_1} \notag. \end{align} We implement this iteration in the {\sc Matlab} function {\tt snbin}. \begin{lstlisting}[language=matlab,basicstyle=\footnotesize]
function [r c] = snbin(A,nmv,m,n)
op = isa(A,'function_handle');
if(~op) [m n] = size(A); end
r = ones(m,1); c = ones(n,1);
for(k = 1:nmv)
alpha = (k - 1)/nmv;
omega = (1 - alpha)*1/2 + alpha*1/nmv;
s = randn(n,1)./sqrt(c);
if(op) y = A(s); else y = A*s; end
r = (1-omega)*r/sum(r) + omega*y.^2/sum(y.^2);
s = randn(m,1)./sqrt(r);
if(op) y = A(s,'trans'); else y = (s'*A)'; end
c = (1-omega)*c/sum(c) + omega*y.^2/sum(y.^2);
end
r = 1./sqrt(r); c = 1./sqrt(c); \end{lstlisting}
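As an illustration of how {\tt snbin} might be called (this example and its test matrix are ours), one can scale an explicitly available sparse matrix and inspect the row and column norms of the result directly:
\begin{lstlisting}[language=matlab,basicstyle=\footnotesize]
n = 1000; A = sprandn(n,n,5e-3) + speye(n);   % hypothetical test matrix
[r c] = snbin(A,100);                         % 100 iterations = 200 mvp
As = spdiags(r,0,n,n)*A*spdiags(c,0,n,n);
rn = sqrt(full(sum(As.^2,2))); cn = sqrt(full(sum(As.^2,1)))';
disp([max(rn)/min(rn) max(cn)/min(cn)])       % both ratios should be modest
\end{lstlisting}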
Our choice of the sequence $\omega^k$ is based on numerical experiments; the sequence encourages large changes in $d/\|d\|_1$ when $k$ is small and smaller changes when $k$ is large.
Iteration \eqref{eq:de1a} forms a linear combination of $\rho^k$ and $B c^k$. One might consider instead forming a linear combination of $r^k$ and $(B c^k)^{-1}$. In the iteration we use, a reciprocal is taken after forming a linear combination of the iterate and a random quantity; in contrast, in this alternative, it is taken before, and of the random quantity. Consequently, the stochastic iteration corresponding to this alternative iteration is less stable than \eqref{eq:se1a}.
A straightforward algorithm for the symmetric problem applies {\tt snbin} to the symmetric matrix $B$ and then returns $\sqrt{r c}$. But numerical experiments suggest we can do better. For irreducible matrices, the denominators $\|d^k\|_1$ and
$\|B x^k\|_1$ in the iteration \begin{equation*}
d^{k+1} = (1 - \omega^k) \frac{d^k}{\|d^k\|_1} + \omega^k \frac{B x^k}{\|B
x^k\|_1} \end{equation*} remove the benign oscillation we observed in Section \ref{sec:theory}; therefore, adjacent iterates, rather than every other one as in \eqref{eq:de1a}, can be combined in a convex sum. This second approach speeds convergence. But it is not sufficient when applied to reducible matrices. Numerical experiments support using the second approach for early iterations, when making progress quickly is important, and then switching to the first approach to refine the scaling matrix. We implement this strategy in {\tt ssbin}. \begin{lstlisting}[language=matlab,basicstyle=\footnotesize]
function x = ssbin(A,nmv,n)
op = isa(A,'function_handle');
if(~op) n = size(A,1); end
d = ones(n,1); dp = d;
for(k = 1:nmv)
u = randn(n,1);
s = u./sqrt(dp);
if(op) y = A(s); else y = A*s; end
alpha = (k - 1)/nmv;
omega = (1 - alpha)*1/2 + alpha*1/nmv;
d = (1-omega)*d/sum(d) + omega*y.^2/sum(y.^2);
if (k < min(32,floor(nmv/2)))
dp = d;
else
tmp = dp; dp = d; d = tmp;
end
end
x = 1./(d.*dp).^(1/4); \end{lstlisting} The final line implements the square root in \eqref{eq:sk-sym-revised}.
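Similarly (again our own example), {\tt ssbin} can be called with a function handle when only products with $A$ are available:
\begin{lstlisting}[language=matlab,basicstyle=\footnotesize]
n = 500; S = sprandn(n,n,1e-2); A = S + S' + speye(n); % hypothetical symmetric matrix
Afun = @(s) A*s;                                       % only mvp with A are exposed
x = ssbin(Afun,100,n);                                 % 100 iterations = 100 mvp
As = spdiags(x,0,n,n)*A*spdiags(x,0,n,n);
rn = sqrt(full(sum(As.^2,2)));
disp(max(rn)/min(rn))                                  % approximate equilibration quality
\end{lstlisting}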
In most iterative algorithms, a measure of the merit of an iterate that requires little work to evaluate relative to the work in an iteration influences the behavior of the algorithm. In our algorithms, any procedure to assess the merit of an iterate would require additional mvp, likely wasting work. Hence the parameter values in the loop of each algorithm are fixed independent of problem.
\section{Numerical experiments} \label{sec:numexp} \begin{figure}
\caption{Convergence histories for two symmetric problems having sizes 3564
(left) and 226340 (right). Ten solid lines show individual realizations of the
algorithm. The dashed line corresponds to {\tt snbin}. The dotted line
corresponds to not switching to {\tt snbin}-like behavior.}
\label{fig:conv}
\end{figure}
\begin{figure}
\caption{Ratio for the original and scaled nonsymmetric matrix vs.~matrix size
$N$, after the indicated number of iterations, for 741 matrices.}
\label{fig:exp-snbin-a}
\end{figure}
\begin{figure}
\caption{Condition number of the scaled nonsymmetric matrix vs.~condition number
of the original matrix for 519 matrices (matrices having $N \le 2 \times
10^4$).}
\label{fig:exp-snbin-b}
\end{figure}
\begin{figure}
\caption{Ratios for 466 symmetric matrices. Results for only $K = 128$ are
shown; trends in $K$ follow those for the nonsymmetric problems.}
\label{fig:exp-ssbin-a}
\end{figure}
\begin{figure}
\caption{Condition numbers for 221 symmetric matrices (matrices having $N \le 2
\times 10^4$).}
\label{fig:exp-ssbin-b}
\end{figure}
\begin{figure}
\caption{A closer look at the ratio as a function of $N$ for $K = 128$
iterations for nonsymmetric (left) and symmetric (right) matrices.}
\label{fig:exp-N}
\end{figure}
In our numerical experiments, two quantities of the scaled matrices are measured: condition number if the matrix is not too large; and the ratio of the largest to smallest row 2-norms (in the nonsymmetric case, row or column, depending on which gives a larger number), hereafter designated as the \emph{ratio}.
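When the scaled matrix is available explicitly, as in these experiments, the ratio is simple to compute; the following lines are our own sketch for the nonsymmetric case (the matrix {\tt As} is a placeholder for the scaled matrix).
\begin{lstlisting}[language=matlab,basicstyle=\footnotesize]
As = randn(8,6);                  % placeholder for an explicitly available scaled matrix
rn = sqrt(sum(As.^2,2));          % row 2-norms
cn = sqrt(sum(As.^2,1))';         % column 2-norms
ratio = max(max(rn)/min(rn), max(cn)/min(cn));
\end{lstlisting}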
We test {\tt snbin} and {\tt ssbin} in {\sc Matlab} on matrices in the University of Florida Sparse Matrix Collection \cite{davis_uf}; these are obtained by the following queries: \begin{lstlisting}[language=matlab,basicstyle=\footnotesize]
index = UFget('refresh');
sids = find(~index.isBinary & index.numerical_symmetry == 1 &...
index.sprank == index.nrows & index.isReal);
nids = find(~index.isBinary & index.numerical_symmetry < 1 &...
index.nrows == index.ncols &...
index.sprank == index.nrows & index.isReal); \end{lstlisting}
First we investigate the behavior of {\tt ssbin} on two problems. Figure \ref{fig:conv} shows the convergence history, starting with the unaltered matrix, for 12 runs of {\tt ssbin} on two symmetric problems. The smaller has size 3564; the larger has size 226340. $\log_{10}$ ratio is used to measure convergence. The ten closely clustered solid curves correspond to the nominal algorithm. The dashed curve indicates the slower convergence of simply applying {\tt snbin}. The dotted curve shows the problem with not eventually switching to {\tt snbin}-like behavior to address reducibility. The plateau in the solid curves ends at iteration 32, when the switch is made. The ten solid curves are closely clustered, indicating the variance in the algorithm's output is small for any number of requested mvp.
In the performance experiments, for each matrix, the algorithm is run five times for $K = 32$, $64$, and $128$ iterations. Results are shown in Figures \ref{fig:exp-snbin-a} and \ref{fig:exp-snbin-b} for nonsymmetric matrices and \ref{fig:exp-ssbin-a} and \ref{fig:exp-ssbin-b} for symmetric matrices. Figure \ref{fig:exp-snbin-a} shows that the ratio tends to decrease with $K$, as one expects. The ratio for the scaled problem, given fixed $K$, grows slowly with problem size $N$. Figure \ref{fig:exp-N} investigates this aspect more closely. It shows details for the case of $K = 128$ iterations for nonsymmetric (left) and symmetric (right) matrices. Over a range of matrix sizes of more than six orders of magnitude, the final ratio often ranges from between $1.5$ and $6$. Figures \ref{fig:exp-snbin-b} and \ref{fig:exp-ssbin-b} show that the condition number of the scaled matrix is almost always, and often substantially, smaller than that of the original matrix: any point that falls below the diagonal line corresponds to a reduction in condition number. The top-left plots of Figures \ref{fig:exp-snbin-b} and \ref{fig:exp-ssbin-b} show the condition numbers of the exactly scaled matrices; the ratios are 1, of course. In the plots corresponding to the stochastic algorithms, what appears to be a point is in fact a cluster of the five points resulting from the five separate runs. The tightness of these clusters again implies that the variance of the outputs of these algorithms is quite small.
These experiments suggest that {\tt ssbin} and {\tt snbin} are effective matrix-free approximate equilibration algorithms: a small number---relative to the size of the matrix---of matrix-vector products is sufficient to approximately equilibrate the matrix. One application is to scale a matrix whose elements require too much work to access directly prior to using a Krylov-subspace iteration to solve a linear system. We recommend performing approximately 100 iterations, which corresponds to 100 matrix-vector products in the symmetric case and 200 in the nonsymmetric.
\end{document}
\begin{document}
\title[A remark on the extension of $L^{2}$ holomorphic functions]
{A remark on the extension of $L^{2}$ holomorphic functions}
\author{Qi'an Guan} \address{Qi'an Guan: School of Mathematical Sciences, Peking University, Beijing 100871, China.} \email{[email protected]}
\thanks{The author was partially supported by NSFC.}
\subjclass[2010]{32D15, 32E10, 32L10, 32U05, 32W05}
\keywords{plurisubharmonic functions, holomorphic functions, $L^2$ extension}
\date{}
\dedicatory{}
\commby{}
\begin{abstract} In this note, we answer a question on the extension of $L^{2}$ holomorphic functions posed by Ohsawa. \end{abstract}
\maketitle
\section{An answer to a question posed by Ohsawa} In \cite{Ohsawa2017}, Ohsawa gave a survey on the recent ``remarkable'' progress (cf. \cite{B-L,Bl_inv,G-ZhouL2_CR,G-ZhouL2_ann,G-ZhouL2_Sci,G-Zhou-ZhuL2_CR}) around the famous Ohsawa-Takegoshi $L^{2}$ extension theorem \cite{O-T}. After that, Ohsawa recalled the following consequence of the main result in \cite{G-ZhouL2_ann} and presented a shorter proof based on a general criterion for extendibility in \cite{Ohsawa2017}.
\begin{Theorem} \label{coro:GZ-domain}(\cite{G-ZhouL2_ann}, see also Theorem 0.1 in \cite{Ohsawa2017}) Let $D\subset\mathbb{C}^n$ be a pseudoconvex domain, and let $\varphi$ be a plurisubharmonic function on $D$ and $H=\{z_n=0\}$. Then for any holomorphic function $f$ on $H$ satisfying
$$\int_{H}|f|^{2}e^{-\varphi}dV_{H}<\infty,$$ there exists a holomorphic function $F$ on $D$ satisfying $F = f$ on $H$ and \begin{eqnarray*}
\int_{D}|F|^{2}e^{-\varphi-(1+\varepsilon)\log(1+|z_{n}|^{2})}dV_{D}
\leq\frac{\pi}{\varepsilon}\int_{H}|f|^{2}e^{-\varphi}dV_{H}. \end{eqnarray*} \end{Theorem}
In \cite{Ohsawa2017},
considering a general plurisubharmonic function $\psi(z_{n})$ instead of $(1+\varepsilon)\log(1+|z_{n}|^{2})$ in Theorem \ref{coro:GZ-domain}, Ohsawa posed the following question on the extension of $L^{2}$ holomorphic functions.
\begin{Question} \label{Q:ohsawa2017} Given a subharmonic function $\psi$ on $\mathbb{C}$ such that $\int_{\mathbb{C}}e^{-\psi}<+\infty$, for any subharmonic function $\varphi$ on $\mathbb{C}$, can one find a holomorphic function $f$ on $\mathbb{C}$ satisfying $f(0)=1$, and
$$\int_{\mathbb{C}}|f|^{2}e^{-\varphi-\psi}\leq e^{-\varphi(0)}\int_{\mathbb{C}}e^{-\psi}?$$ \end{Question}
When $\psi$ does not depend on $\arg z$, Sha Yao gave a positive answer to Question \ref{Q:ohsawa2017} in her Ph.D. thesis by using the main result in \cite{G-ZhouL2_ann}.
In the present article, we give the following (negative) answer to Question \ref{Q:ohsawa2017}.
\begin{Theorem} \label{t:ohsawa2017} There exist subharmonic functions $\psi$ and $\varphi$ on $\mathbb{C}$ satisfying
$(1)$ $\int_{\mathbb{C}}e^{-\psi}<+\infty$;
$(2)$ $\varphi(0)\in(-\infty,+\infty)$;
$(3)$ for any holomorphic function $f$ on $\mathbb{C}$ satisfying $f(0)=1$,
$\int_{\mathbb{C}}|f|^{2}e^{-\varphi-\psi}=+\infty$ holds. \end{Theorem}
\section{Proof of Theorem \ref{t:ohsawa2017}}
Let $\psi=2\max\{c_{1}\log|z-1|,c_{2}\log|z-1|\}$ and $\varphi=2(1-c_{1})\log|z-1|$, where $c_{1}\in(\frac{1}{2},1)$ and $c_{2}\in(1,\frac{3}{2})$.
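For completeness, let us first check that $\psi$ and $\varphi$ satisfy conditions $(1)$ and $(2)$ of Theorem \ref{t:ohsawa2017}. Since $c_{1}<c_{2}$, we have $\psi=2c_{1}\log|z-1|$ on $\{|z-1|\leq1\}$ and $\psi=2c_{2}\log|z-1|$ on $\{|z-1|\geq1\}$, hence
$$\int_{\mathbb{C}}e^{-\psi}=\int_{\{|z-1|\leq1\}}|z-1|^{-2c_{1}}+\int_{\{|z-1|\geq1\}}|z-1|^{-2c_{2}}<+\infty,$$
because $2c_{1}<2$ and $2c_{2}>2$; moreover, $\varphi(0)=2(1-c_{1})\log|0-1|=0\in(-\infty,+\infty)$.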
We prove Theorem \ref{t:ohsawa2017} by contradiction: if not, then there exists a holomorphic function $f$ on $\mathbb{C}$ satisfying $f(0)=1$ and \begin{equation} \label{equ:ohsawa1}
	$\int_{\mathbb{C}}|f|^{2}e^{-\varphi-\psi}<+\infty.$ \end{equation}
\int_{\mathbb{C}}|f|^{2}e^{-\varphi-\psi}<+\infty. \end{equation}
Note that $(\psi+\varphi)|_{\{|z-1|<1\}}=2\log|z-1|$; hence inequality \eqref{equ:ohsawa1} implies that $f(1)=0$.
Note that $\psi+\varphi-2(1-c_{1}+c_{2})\log|z|$ is bounded near $\infty$; hence inequality \eqref{equ:ohsawa1} implies that $f$ is a polynomial. Furthermore, it follows from $1-c_{1}+c_{2}<2$ and inequality \eqref{equ:ohsawa1} that the degree of $f$ must be $0$, which contradicts $f(1)=0$. This proves the theorem.
{\em Acknowledgements}. The author would like to thank Professor Takeo Ohsawa for giving a series of talks at Peking University and for sharing his recent work.
\end{document}
\begin{document}
\title{Foliations on $\mathbb{CP}^3$}
\begin{abstract} In this work we classify foliations on $\mathbb{CP}^3$ of codimension 1 and degree $2$ that have a line as singular set. To achieve this, we give a complete description of the components. We prove that the boundary of the exceptional component contains only 3 foliations up to change of coordinates, and that this boundary is contained in a logarithmic component. Finally, we construct examples of foliations on $\mathbb{CP}^3$ of codimension 1 and degree $s \geq 3$ that have a line as singular set and such that either they form a family with a rational first integral of degree $s+1$, or they are logarithmic foliations, some of which have a minimal rational first integral of unbounded degree. \end{abstract}
\section{Introduction}
A holomorphic foliation $\mathcal{F}$ on the projective space $\mathbb{CP}^3$ of codimension one and degree $s$ is given by the projective class of a 1-form:
$$ \omega=A_1(z_1,z_2,z_3,z_4)dz_1+A_2(z_1,z_2,z_3,z_4)dz_2+A_3(z_1,z_2,z_3,z_4)dz_3+A_4(z_1,z_2,z_3,z_4)dz_4,$$
\noindent where $A_1, A_2, A_3, A_4 \in \mathbb{C}[z_1,z_2,z_3,z_4]$ are homogeneous of degree $s+1$, and they satisfy:
\begin{enumerate} \item $\sum_{i=1}^4 z_iA_i(z_1,z_2,z_3,z_4)=0$ \item The integrability condition: $\omega \wedge d\omega=0$. \end{enumerate}
\noindent We consider the subspace of classes of 1-forms that satisfy these conditions and whose singular set $Sing(\omega)$ has codimension 2; we denote this space by $\mathcal{F}(s,3)$. Then $\mathcal{F}(s,3)$ can be identified with a Zariski open set in the projective space $\mathbb{P}H^0(\mathbb{CP}^3, \Omega_{\mathbb{CP}^3}^1(s+2))$, which has dimension $4 \binom{s+4}{3} - \binom{s+5}{3}-1$. Throughout this article, closures are taken with respect to $\mathcal{F}(s,3)$, unless otherwise specified. \\
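\noindent For instance, for $s=2$ this dimension equals $4\binom{6}{3}-\binom{7}{3}-1=80-35-1=44$, so the foliations of degree $2$ considered below vary in a projective space of dimension $44$. \\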
\noindent The form $ \omega=A_1(z_1,z_2,z_3,z_4)dz_1+A_2(z_1,z_2,z_3,z_4)dz_2+A_3(z_1,z_2,z_3,z_4)dz_3+A_4(z_1,z_2,z_3,z_4)dz_4$ will be called a homogeneous expression of the foliation $\mathcal{F}$. The singular set of the foliation is the algebraic variety
$$Sing(\mathcal{F})=\mathbb{V}(A_1,A_2,A_3,A_4) \subset \mathbb{CP}^3,$$
\noindent which has dimension 1. Thus the simplest singular set that a foliation on $\mathbb{CP}^3$ can have is a line, which in this case is the intersection of two hyperplanes. Due to the simplicity of this singular set, more explicit calculations can be carried out, and more conjectures can be tested, in the subspace of these foliations. It is for this reason that it is interesting to classify this type of foliation. We note that the classification of these foliations is also the question proposed on page 57 of the book \cite{Deserti-Cerveau}. \\
The main objective of this work is thus to find all the foliations in $\mathcal{F}(2,3)$ that have a line as singular set. Of course the linear pull-backs of foliations on $\mathbb{CP}^2$ of degree $2$ with a unique singular point satisfy this property. We can see in the article \cite{Cerveau} that there exist just four foliations, up to change of coordinates, with one singular point; they are:
\begin{align*} \nu_1&=z_3^3dz_1-z_1z_2^2dz_2+(z_2^3-z_1z_3^2)dz_2\\ \nu_2&=-z_3^3dz_1+z_3(z_2^2+z_1z_3)dz_2+(z_1z_3^2-z_2^3-z_1z_2z_3)dz_3\\ \nu_3&=z_2^2z_3dz_1-z_3(z_2^2+z_3^2)dz_2+(z_2^3+z_2^2z_3-z_1z_2z_3)dz_3\\ \nu_4&=(z_1z_2z_3+z_3^3-z_2^3)dz_1+z_2(z_3^2-z_1z_2)dz_2-(z_2^2z_3+z_1^2z_2+z_1z_3^2)dz_3. \end{align*}
The first one has a rational first integral of degree $3$, and for the last one the singularity is a saddle-node and the foliation does not have algebraic invariant curves. With this in mind we can ask: is it the case that all the foliations on $\mathbb{CP}^3$ of degree $2$ that have a line as singular set are linear pull-backs of these $4$ foliations on $\mathbb{CP}^2$? The answer is no; however, all of them are in some way obtained from these $4$. \\
More specifically, in this set there are, up to change of coordinates: the linear pull-backs of the three foliations $\nu_2$, $\nu_3$, $\nu_4$ on $\mathbb{CP}^2$ of degree $2$ with a unique singular point and without rational first integral, and three foliations on $\mathbb{CP}^3$ with a rational first integral of degree $3$, one of which is the linear pull-back of the foliation $\nu_1$ on $\mathbb{CP}^2$ (see Theorem \ref{Classification}).
\\
For the proof of this result we study each irreducible component of the space $\mathcal{F}(2,3)$ of foliations on $\mathbb{CP}^3$ of degree $2$. We know by the article \cite{Cerveau-LinsNeto} that $\mathcal{F}(2,3)$ has $6$ irreducible components,
they are:
\begin{enumerate} \item $S(2,3)$: the foliations which are linear pull-back of foliations on $\mathbb{CP}^2$ of degree $2$ with isolated singularities. \item $\overline{R(2,2)}$: the closure of the space of foliations with a rational first integral $\frac{f(z_1,z_2,z_3,z_4)}{g(z_1,z_2,z_3,z_4)}$, where $f$ and $g$ in $\mathbb{C}[z_1,z_2,z_3,z_4]$ have degree 2 and $f$ defines a smooth hypersurface. \item $\overline{R(1,3)}$: the closure of the space of foliations with a rational first integral $\frac{f(z_1,z_2,z_3,z_4)}{L^3(z_1,z_2,z_3,z_4)}$, where $f$ has degree $3$ and $L$ has degree $1$. \item $\overline{L(1,1,1,1)}$: the closure of the space of logarithmic foliations given by the 1-forms:
$$\omega=\sum_{i=1}^4 \lambda_i L_1L_2L_3L_4 \frac{dL_i}{L_i},$$
\noindent where $L_1, L_2, L_3$ and $L_4$ in $\mathbb{C}[z_1,z_2,z_3,z_4]$ have degree $1$ and $\sum_{i=1}^4 \lambda_i=0$.
\item $\overline{L(1,1,2)}$: the closure of the space of logarithmic foliations given by the 1-forms:
$$\omega=\sum_{i=1}^3 \lambda_i f_1f_2f_3 \frac{df_i}{f_i},$$
\noindent where $f_1, f_2$ define different hyperplanes, $f_3$ defines an irreducible hypersurface of degree $2$ and $\lambda_1+\lambda_2+2\lambda_3=0$.
\item $\overline{E(3)}$: the exceptional component, which is the closure of the orbit of one foliation whose singular set contains the twisted cubic.
\end{enumerate}
The components (4) and (5) are called logarithmic components. \\
The most detailed and difficult analysis is the one concerning the boundaries of the components. In this direction we obtain an interesting result: in the boundary of the exceptional component $\overline{E(3)}$ there exist, up to change of coordinates, only three foliations, and they also lie in the logarithmic component $\overline{L(1,1,2)}$ (there are also three 1-forms whose singular set has dimension 2, and for the sake of completeness we describe them as well). The proof of this result uses Geometric Invariant Theory and the techniques developed by Kirwan in the book \cite{Kirwan}. For the study of the boundary of the logarithmic components we mainly use the results of \cite{Cerveau-Mattei}. \\
The structure of the paper is as follows. In Section 2 we describe all the foliations in the exceptional component, up to linear change of coordinates. Section 3 is devoted to the study of the logarithmic components. Finally, the last section contains the classification theorem for the foliations in $\mathcal{F}(2,3)$ that have a line as singular set. We finish with the construction of two examples of foliations in $\mathcal{F}(s,3)$, for $s \geq 3$, with a line as singular set and with one of the following properties:
\begin{enumerate} \item They form a family of foliations having a rational first integral of degree $s+1$. \item They form a family of logarithmic foliations, some of which have a minimal rational first integral whose degree is not bounded. \end{enumerate}
\section{The boundary of the exceptional component $\overline{E(3)}$}
We know that the exceptional component $\overline{E(3)}$ is the closure of the orbit of a foliation on $\mathbb{CP}^3$ whose singular set contains the twisted cubic (see example 6 of \cite{Cerveau-LinsNeto}); this orbit is considered with respect to the action of the automorphism group
of $\mathbb{CP}^3$. Then, to find the foliations that have a line as singular set, it is necessary to find the foliations in the boundary of this orbit.
For that we use Geometric Invariant Theory applied to the action by change of coordinates in this algebraic variety. We obtain the following.
\begin{teo} \label{excepcional} Let $\overline{E(3)}$ be the exceptional component of the space of foliations on $\mathbb{CP}^3$ of codimension $1$ and degree $2$. Then, up to change of coordinates, it contains only the foliations associated to the following 1-forms:
\begin{enumerate}
\item $\omega=(2z_2^2z_4-z_2z_3^2-z_1z_3z_4)dz_1+(2z_3^2z_1-3z_1z_2z_4)dz_2+(3z_1^2z_4-z_1z_2z_3)dz_3+(z_2^2z_1-2z_3z_1^2)dz_4$ \item $\omega_1+\omega_2$ \item $\omega_2+\omega_3$ \item $\omega_1+\omega_3$.
\end{enumerate}
\noindent where
\begin{align*} &\omega_1=z_1(-z_3z_4dz_1+3z_1z_4dz_3-2z_3z_1dz_4),\\ &\omega_2=z_2(2z_2z_4dz_1-3z_1z_4dz_2+z_2z_1dz_4)\\ &\omega_3=z_3(-z_2z_3dz_1+2z_3z_1dz_2-z_1z_2dz_3). \end{align*}
\end{teo}
\begin{proof} In the article \cite{Cerveau-LinsNeto} we can see that the exceptional component $\overline{E(3)}$ of the space of foliations on $\mathbb{CP}^3$ of codimension $1$ of degree $2$ is the closure $\overline{SL_4(\mathbb{C})\cdot \omega}$ in
$\mathcal{F}(2,3)$ of the orbit of the
foliation given by:
\begin{align*}
\omega&=(2z_2^2z_4-z_2z_3^2-z_1z_3z_4)dz_1+(2z_3^2z_1-3z_1z_2z_4)dz_2\\ &+ (3z_1^2z_4-z_1z_2z_3)dz_3+(z_2^2z_1-2z_3z_1^2)dz_4,
\end{align*}
\noindent with respect to the action by change of coordinates by the reductive algebraic group $SL_4(\mathbb{C})$ on $\overline{E(3)}$.
\\
We can do this because we know that $\overline{E(3)}$ is an irreducible algebraic variety of dimension $13$; therefore the linear action of $SL_4(\mathbb{C})$ on the projective space $\mathbb{P}H^0(\mathbb{CP}^3, \Omega_{\mathbb{CP}^3}^1(4))=\mathbb{CP}^{44}$ induces an action on this variety, which is
of course $SL_4(\mathbb{C})$-invariant:
\begin{align*}
SL_4(\mathbb{C}) \times \overline{E(3)} &\to \overline{E(3)}\\
(g,\nu) &\mapsto g \cdot \nu.
\end{align*}
\noindent We recall that a 1-parameter subgroup of an algebraic group $G$ is an algebraic morphism from $\mathbb{C}^*$ to $G$, and that a well-known result says that every 1-parameter subgroup of $SL_4(\mathbb{C})$ is diagonalizable. Let:
\begin{align*} \lambda_{(n_1,n_2,n_3)}: \mathbb{C}^* &\to SL_4(\mathbb{C})\\
t &\mapsto \left(\begin{array}{cccc} t^{n_1}&0&0&0 \\ 0&t^{n_2}&0&0\\ 0&0&t^{n_3}&0\\ 0&0&0&t^{n_4} \end{array}\right), \end{align*}
\noindent be a diagonal 1-parameter subgroup of $SL_4(\mathbb{C})$, where $n_1, n_2, n_3, n_4 \in \mathbb{Z}$ and $n_1+n_2+n_3+n_4=0$. The action of $\lambda_{(n_1,n_2,n_3)}$ on $\omega$ is:
\begin{align*} \lambda_{(n_1,n_2,n_3)}(t) \cdot \omega&=(2t^{n_2-n_3}z_2^2z_4-t^{n_3-n_4}z_2z_3^2-t^{n_1-n_2}z_1z_3z_4)dz_1\\ &+(2t^{n_3-n_4}z_3^2z_1-3t^{n_2-n_3}z_1z_2z_4)dz_2\\ &+(3t^{n_1-n_2}z_1^2z_4-t^{n_3-n_4}z_1z_2z_3)dz_3\\ &+(t^{n_2-n_3}z_2^2z_1-2t^{n_1-n_2}z_3z_1^2)dz_4. \end{align*}
Let:
\begin{align*} \omega_1&:=-z_1z_3z_4dz_1+3z_1^2z_4dz_3-2z_3z_1^2dz_4=z_1(-z_3z_4dz_1+3z_1z_4dz_3-2z_3z_1dz_4)\\ \omega_2&:=2z_2^2z_4dz_1-3z_1z_2z_4dz_2+z_2^2z_1dz_4=z_2(2z_2z_4dz_1-3z_1z_4dz_2+z_2z_1dz_4)\\ \omega_3&:=-z_2z_3^2dz_1+2z_3^2z_1dz_2-z_1z_2z_3dz_3=z_3(-z_2z_3dz_1+2z_3z_1dz_2-z_1z_2dz_3), \end{align*}
\noindent then $\omega=\omega_1+\omega_2+\omega_3$ and
\begin{align*} \lambda_{(n_1,n_2,n_3)}(t) \cdot \omega_1&=t^{n_1-n_2}\omega_1\\ \lambda_{(n_1,n_2,n_3)}(t) \cdot \omega_2&=t^{n_2-n_3}\omega_2\\ \lambda_{(n_1,n_2,n_3)}(t) \cdot \omega_3&=t^{n_3-n_4}\omega_3.\\ \end{align*}
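\noindent These identities are straightforward to verify by direct computation. The following SymPy sketch is purely illustrative (the script and all its identifiers are chosen for illustration only); it checks the decomposition $\omega=\omega_1+\omega_2+\omega_3$, the Euler condition, the integrability condition $\omega\wedge d\omega=0$, and the torus weights above for a sample choice of exponents:
\begin{verbatim}
import sympy as sp

z = sp.symbols('z1:5'); z1, z2, z3, z4 = z
t = sp.symbols('t', positive=True)

w1 = [-z1*z3*z4, 0, 3*z1**2*z4, -2*z3*z1**2]
w2 = [2*z2**2*z4, -3*z1*z2*z4, 0, z2**2*z1]
w3 = [-z2*z3**2, 2*z3**2*z1, -z1*z2*z3, 0]
w  = [sp.expand(a + b + c) for a, b, c in zip(w1, w2, w3)]
w_paper = [2*z2**2*z4 - z2*z3**2 - z1*z3*z4, 2*z3**2*z1 - 3*z1*z2*z4,
           3*z1**2*z4 - z1*z2*z3, z2**2*z1 - 2*z3*z1**2]
print(all(sp.expand(a - b) == 0 for a, b in zip(w, w_paper)))      # True

# Euler condition and integrability  w /\ dw = 0
print(sp.expand(sum(v*c for v, c in zip(z, w))) == 0)              # True
cij = lambda A, i, j: sp.diff(A[j], z[i]) - sp.diff(A[i], z[j])
wedge = [sp.expand(w[i]*cij(w, j, k) - w[j]*cij(w, i, k) + w[k]*cij(w, i, j))
         for i in range(4) for j in range(i+1, 4) for k in range(j+1, 4)]
print(all(c == 0 for c in wedge))                                  # True

# torus weights: z_i -> t^{n_i} z_i (and dz_i -> t^{n_i} dz_i) rescales
# w1, w2, w3 by t^{n1-n2}, t^{n2-n3}, t^{n3-n4} respectively
n = [3, 1, 0, -4]                       # sample exponents with sum zero
def act(A):
    sub = {v: t**m*v for v, m in zip(z, n)}
    return [sp.expand(t**m * sp.sympify(Ai).subs(sub)) for Ai, m in zip(A, n)]
for wk, e in zip([w1, w2, w3], [n[0]-n[1], n[1]-n[2], n[2]-n[3]]):
    print(all(sp.expand(a - t**e*b) == 0 for a, b in zip(act(wk), wk)))  # True
\end{verbatim}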
\noindent If we take
\begin{align*} \lambda_1(t)&=\lambda_{(3,-1,-1)}(t),\\ \lambda_2(t)&=\lambda_{(1,1,-1)}(t),\\ \lambda_3(t)&=\lambda_{(1,1,1)}(t), \end{align*}
\noindent then we have
\begin{equation*} \lim_{t \to \infty} \lambda_i(t) \cdot \omega=\omega_i \quad \textrm{and} \quad \lim_{t \to 0} \lambda_i(t) \cdot \omega=\omega_j+\omega_k \end{equation*}
\noindent where $i,j,k \in \{1,2,3\}$ are pairwise distinct. \\
Then the classes of $\omega_1+\omega_2$, $\omega_2+\omega_3$ and $\omega_1+\omega_3$ are in
$$\overline{SL_4(\mathbb{C})\cdot \omega}-SL_4(\mathbb{C}) \cdot \omega=\overline{E(3)}-SL_4(\mathbb{C})\cdot \omega,$$
\noindent here it is important to note that these are integrable 1-forms. We note also that the 1-forms $\omega_1$, $\omega_2$, $\omega_3$ have a hyperplane as singular set and they are in the closure of $SL_4(\mathbb{C})\cdot \omega$ but considered in $\mathbb{P}H^0(\mathbb{CP}^3, \Omega_{\mathbb{CP}^3}^1(4))$. \\
In an analogous way we can see that, for $i,j=1,2,3$, $i \neq j$, the classes of $\omega_i$ are in $\overline{SL_4(\mathbb{C}) \cdot (\omega_i+\omega_j)}^{\mathbb{P}H^0(\mathbb{CP}^3, \Omega_{\mathbb{CP}^3}^1(4))}$, and the orbit $SL_4(\mathbb{C})\cdot \omega_i$ is closed in $\mathbb{P}H^0(\mathbb{CP}^3, \Omega_{\mathbb{CP}^3}^1(4))$ because $\omega_i$ is the pull-back of a 1-form of degree $1$ on $\mathbb{CP}^2$ with three distinct eigenvalues and a line of singularities. Here we recall that the orbit of a matrix under conjugation is closed if the Jordan blocks have the minimum size, which is the case for three different eigenvalues. \\
\noindent On the other hand we have that $\lambda_{(3,1,-1)}(t) \cdot \omega=t^2\omega$, then
\begin{equation*} \lim_{t \to 0} \lambda_{(3,1,-1)}(t) \cdot \overline{\omega}=\lim_{t \to 0} t^2\overline{\omega}=0, \end{equation*}
\noindent where $\overline{\omega}$ is a nonzero point lying over $\omega$ in the affine cone of $\overline{E(3)}$. Then, by the Hilbert-Mumford criterion for 1-parameter subgroups (see \cite{Mumford}), the point $\omega$ is unstable for the action. We conclude that the set of semistable points is empty and the variety $\overline{E(3)}$ is the closed set of unstable points. Moreover, we have that $\{\lambda_{(3,1,-1)}(t): t \in \mathbb{C}^*\} \subset Aut(\omega)$; in fact we know that $\dim Aut(\omega)=2$. \\
Now we are going to prove that the above are all the foliations in $\overline{E(3)}$. For this we use theorem 12.26 that we can find in part II of the book \cite{Kirwan}. In order to simplify the arguments we are going to use the same notation as Kirwan in the mentioned book. \\
The theorem says that there exists a stratification of $\overline{E(3)}$ by disjoint, locally closed subvarieties $S_0,...,S_n$, invariant under the action, and that these subvarieties are parametrized by a finite set $\mathcal{B}=\{\beta_0, \beta_1,...,\beta_n\}$ of virtual 1-parameter subgroups of
$SL_4(\mathbb{C})$ and:
\begin{align*}
S_0&=\overline{E(3)}^{ss}=\emptyset, \quad \textrm{semistable points for the action}\\
\overline{E(3)}&=\overline{E(3)}^{un}=\bigcup_{i=1}^n S_{i} \quad \textrm{unstable points for the action}.
\end{align*}
\noindent Without giving too many details we are going to describe the construction of the stratum $S_i$. For that we remember that a diagonal 1-parameter subgroup of $SL_4(\mathbb{C})$ is identified with a point in $\mathbb{Z}^3$ and that the set of virtual 1-parameter subgroups of $SL_4(\mathbb{C})$ is identified with $\mathbb{Q}^3$. \\
\noindent As we can see in Definition $12.8$ of \cite{Kirwan}, the indexing set $\mathcal{B}$ is the set of minimal combinations of weights lying in some Weyl chamber of the representation. For $\beta_i \in \mathcal{B}$, we define:
\begin{align*}
Z_i&=\{\nu \in \overline{E(3)}: \beta_i(t) \cdot \nu=\nu\}\\
Y_i&=\{\mu \in \overline{E(3)}: p_i(\mu)=\lim_{t \to 0} \beta_i(t) \cdot \mu \in Z_i\}
\end{align*}
The function $p_i:Y_i \to Z_i$ is a locally trivial fibration with affine fiber and $p_i(\mu) \in \overline{SL_4(\mathbb{C}) \cdot \mu} \cap Z_i$ for all $\mu \in Y_i$. Finally $Y^{ss}_i=p_i^{-1}(Z_i^{ss})$, where $Z_i^{ss}$ is the set of semistable points with respect to a certain action. Then the stratum is $S_i=SL_4(\mathbb{C}) \cdot Y_i^{ss}$.
On the other hand, it is easy to prove that the unique diagonal one parameter subgroup, up to integer multiples, which leaves $\omega$ fixed is $\beta_1=\lambda_{(3,1,-1)}$. Then with $\beta_1$ we will construct the stratum $S_1$, and it satisfies:
$$SL_4(\mathbb{C}) \cdot \omega \subset S_1=SL_4(\mathbb{C}) \cdot Y^{ss}_1 \subset \overline{S_1} \subset \overline{E(3)},$$
\noindent then $\overline{S_1} = \overline{E(3)}$. In this case we have that the closed sets $Z_2,...,Z_n$ have intersection with $Z_1$, because if this doesn't happen then we would have two foliations $\nu_1$, $\nu_2 \in \overline{E(3)}$ such that
$\overline{SL_4(\mathbb{C}) \cdot \nu_1} \cap \overline{SL_4(\mathbb{C}) \cdot \nu_2} \neq \emptyset$. Therefore we conclude that $\overline{S_1} = SL_4(\mathbb{C}) \cdot Y_1$. This means that it is enough to study the subvariety $Y_1$, but we will see that in fact it is enough to find the foliations in $Z_1$ to have all the foliations in $\overline{E(3)}$.
\\
The stabilizer of $\beta_1$ with respect to the adjoint action of $SL_4(\mathbb{C})$ is the subgroup $D$ of diagonal matrices in $SL_4(\mathbb{C})$, and the parabolic subgroup $P_1$ associated to $\beta_1$ is the group of upper triangular matrices (see Definition 12.11 of \cite{Kirwan}). Following page 153 of \cite{Kirwan}, we have that $D$ acts on $Z_1:=\{\nu \in \overline{E(3)}: \beta_1(t) \cdot \nu=\nu \}$ and $P_1$ acts on $Y_1$; therefore $D\cdot \omega$ and $P_1 \cdot \omega$ are open and dense in $Z_1$ and in $Y_1$, respectively. \\
Since $Z_1$ is irreducible we have that $Z_1=\overline{D \cdot \omega}$, and it will be enough to find the orbits in this closed set. Let $a_1,a_2,a_3,a_4 \in \mathbb{C}^{*}$ such that $a_1a_2a_3a_4=1$, then \begin{align*}
\left(\begin{array}{cccc} a_1&0&0&0 \\ 0&a_2&0&0\\ 0&0&a_3&0\\ 0&0&0&a_4 \end{array}\right) \cdot \omega=a_1a_2^{-1}\omega_1+a_2a_3^{-1}\omega_2+a_3a_4^{-1}\omega_3. \end{align*}
\noindent Thus $D \cdot \omega=\{\alpha_1 \omega_1+ \alpha_2 \omega_2+ \alpha_3 \omega_3: \alpha_1, \alpha_2, \alpha_3 \in \mathbb{C}^*\}$; note also that $\omega_1$, $\omega_2$, $\omega_3$ are equivalence classes in the projectivization of different eigenspaces for the representation associated to the action, with respect to the maximal torus $D$. We conclude that $\{\alpha_1 \omega_1+ \alpha_2 \omega_2+ \alpha_3 \omega_3: \alpha_1, \alpha_2, \alpha_3 \in \mathbb{C}\} \cap \overline{E(3)}$ is closed, because it is the intersection of the variety with a projective space; since this set is contained in $Z_1$ and contains the dense subset $D\cdot\omega$, it must be equal to $Z_1$, and therefore:
$$Z_1=D\cdot\omega \cup \bigcup_{k\neq j}D\cdot (\omega_k+\omega_j).$$ \\
In this case the indexing set of virtual 1-parameter subgroups for constructing the stratification is
$$\mathcal{B}=\{\beta_1=(3,1,-1), \beta_{12}=\lambda_3=(1,1,1), \beta_{13}=\lambda_2=(1,1,-1), \beta_{23}=\lambda_1=(3,-1,-1)\}.$$
\noindent Since $Y_1=\overline{P_1 \cdot \omega}$ we have the following sets and locally trivial fibrations:
\begin{align*} &Y_1^{ss}=P_1 \cdot \omega, &Y_{jk}^{ss}=P_{jk} \cdot (\omega_j+\omega_k)\\ & \downarrow p_1 \quad \quad \quad \quad \quad & \downarrow p_{jk} \quad \quad \quad \quad \quad \quad \\ &Z_1^{ss}=D \cdot \omega, &Z_{jk}^{ss}=D_{jk}\cdot (\omega_j+\omega_k) \end{align*}
\noindent where $P_{jk}$ is the parabolic subgroup associated to $\beta_{jk}$ and $D_{jk}$ is the stabilizer of $\beta_{jk}$ with respect to the adjoint action (see page 154 of \cite{Kirwan}). Since $\overline{E(3)}=\overline{S_1}=SL_4(\mathbb{C})\cdot Y_1$, we have that the only orbits in the exceptional component, up to change of coordinates, are those of $\omega$, $\omega_1+\omega_2$, $\omega_2+\omega_3$ and $\omega_1+\omega_3$.
\end{proof}
We now describe the singular sets and the rational first integrals of the foliations in the exceptional component $\overline{E(3)}$, using the notation of the above theorem.
The singular set of $\omega$ is the union of the following three curves in $\mathbb{CP}^3$
\begin{enumerate}
\item The conic in a plane $\mathbb{V}(z_1,2z_2z_4-z_3^2)$
\item The line $\mathbb{V}(z_1,z_2)$
\item The twisted cubic $\mathbb{V}(2z_3^2-3z_2z_4,3z_1z_4-z_2z_3,z_2^2-2z_1z_3),$
\end{enumerate}
\noindent and its rational first integral is $\frac{(3z_4z_1^2-3z_1z_2z_3+z_2^3)^2}{(2z_1z_3-z_2^2)^3}$. For the other foliations we have:
\begin{align*}
\textrm{Foliation} \quad& \textrm{Singular Set} & \textrm{Rational First Integral}\\ \omega_1+\omega_2 \quad & \mathbb{V}(z_1,z_2) \cup \mathbb{V}(z_1,z_4) \cup \mathbb{V}(z_4,2z_1z_3-z_2^2)& \frac{z_1^4z_4^2}{(2z_1z_3-z_2^2)^3}\\ \omega_1+\omega_3 \quad &\mathbb{V}(z_1,z_2) \cup \mathbb{V}(z_1,z_3) \cup \mathbb{V}(z_3,z_4)& \frac{(z_1z_4-z_2z_3)^2}{z_1z_3^3}\\ \omega_2+\omega_3 \quad &\mathbb{V}(z_1,z_2) \cup \mathbb{V}(z_2,z_3) \cup \mathbb{V}(z_1,2z_2z_4-z_3^2)& \frac{z_1^2(2z_2z_4-z_3^2)}{z_2^4} \end{align*}
It is easy to see that, up to nonzero multiplicative constants:
\begin{align*} \omega_1+\omega_2&=z_1z_4(2z_1z_3-z_2^2) \Big(4\frac{dz_1}{z_1}+2\frac{dz_4}{z_4}-3\frac{d(2z_1z_3-z_2^2)}{2z_1z_3-z_2^2}\Big), \\ \omega_1+\omega_3&=z_1z_3(z_1z_4-z_2z_3)\Big(2\frac{d(z_1z_4-z_2z_3)}{z_1z_4-z_2z_3}-\frac{dz_1}{z_1}-3\frac{dz_3}{z_3}\Big),\\ \omega_2+\omega_3&=z_2z_1(2z_2z_4-z_3^2)\Big(4\frac{dz_2}{z_2}-2\frac{dz_1}{z_1}-\frac{d(2z_2z_4-z_3^2)}{2z_2z_4-z_3^2}\Big),\\ \end{align*}
\noindent this means that these foliations are in the logarithmic component $\overline{L(1,1,2)}$. With this we obtain the following corollaries.
\begin{cor} The exceptional component $\overline{E(3)}$ contains no foliation having a line as singular set.
\end{cor}
\begin{cor} The boundary of the exceptional component is contained in a logarithmic component, more precisely:
$$\overline{E(3)}-E(3) \subset L(1,1,2).$$
\end{cor}
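The logarithmic expressions displayed before these corollaries can also be checked mechanically. The following SymPy sketch is purely illustrative (all identifiers are chosen for illustration only); it verifies that $\omega_1+\omega_2$ is proportional to the corresponding logarithmic 1-form, the other two cases being entirely analogous:
\begin{verbatim}
import sympy as sp

z1, z2, z3, z4 = sp.symbols('z1:5')
zs = (z1, z2, z3, z4)

def log_form(fs, ls):
    # coefficients of sum_i l_i * (f1*f2*f3) * d f_i / f_i
    coeffs = [sp.Integer(0)]*4
    for l, f in zip(ls, fs):
        cof = sp.cancel(fs[0]*fs[1]*fs[2] / f)
        for k, v in enumerate(zs):
            coeffs[k] += l * cof * sp.diff(f, v)
    return [sp.expand(c) for c in coeffs]

w1  = [-z1*z3*z4, 0, 3*z1**2*z4, -2*z3*z1**2]
w2  = [2*z2**2*z4, -3*z1*z2*z4, 0, z2**2*z1]
w12 = [sp.expand(a + b) for a, b in zip(w1, w2)]

log12 = log_form([z1, z4, 2*z1*z3 - z2**2], [4, 2, -3])
print({sp.cancel(a/b) for a, b in zip(log12, w12)})   # {-2}: a single constant ratio
\end{verbatim}
\noindent The single constant ratio confirms that the two 1-forms define the same foliation.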
As we said before, the 1-forms $\omega_1$, $\omega_2$ and $\omega_3$ have a hyperplane as singular set, so they do not define foliations; but if we remove the singular hyperplane we obtain foliations which are linear pull-backs of foliations on $\mathbb{CP}^2$ of degree $1$ and are such that:
\begin{align*}
\textrm{1-form} \quad& \textrm{Singular Set} & \textrm{Rational First Integral}\\ \frac{\omega_1}{z_1} \quad & \mathbb{V}(z_1,z_3) \cup \mathbb{V}(z_1,z_4) \cup \mathbb{V}(z_3,z_4) &\frac{z_1z_4^2}{z_3^3}\\ \frac{\omega_2}{z_2} \quad & \mathbb{V}(z_1,z_2) \cup \mathbb{V}(z_1,z_4) \cup \mathbb{V}(z_2,z_4) &\frac{z_1^2z_4}{z_2^3}\\ \frac{\omega_3}{z_3} \quad & \mathbb{V}(z_1,z_2) \cup \mathbb{V}(z_1,z_3) \cup \mathbb{V}(z_2,z_3) &\frac{z_2^2}{z_1z_3}\\ \end{align*}
\section{The logarithmic components of $\mathcal{F}(2,3)$}
We know that there exist two components of $\mathcal{F}(2,3)$ whose generic elements are logarithmic foliations. We are going to describe the singular set of the foliations in each of these components. Recall that:
\begin{itemize} \item $\overline{L(1,1,1,1)}$ is the closure of the space of logarithmic foliations given by the 1-forms:
$$\omega=\sum_{i=1}^4 \lambda_i L_1L_2L_3L_4 \frac{dL_i}{L_i},$$
\noindent where $L_1, L_2, L_3$ and $L_4$ in $\mathbb{C}[z_1,z_2,z_3,z_4]$ have degree $1$ and $\sum_{i=1}^4 \lambda_i=0$.
\item $\overline{L(1,1,2)}$ is the closure of the space of logarithmic foliations given by the 1-forms:
$$\omega=\sum_{i=1}^3 \lambda_i f_1f_2f_3 \frac{df_i}{f_i},$$
\noindent where $f_1, f_2$ are different hyperplanes, $f_3$ is an irreducible hypersurface of degree $2$ and $\lambda_1+\lambda_2+2\lambda_3=0$. \end{itemize}
If $\omega= \sum_{i=1}^4 \lambda_i L_1L_2L_3L_4 \frac{dL_i}{L_i} \in L(1,1,1,1)$, then using part (c) of Proposition 2.1 on page 96 of \cite{Cerveau-Mattei} we have that:
$$Sing(\omega)=\bigcup_{i \neq j} \mathbb{V}(L_i,L_j),$$
\noindent and this is not a line because $L_1, L_2, L_3$ and $L_4$ are different hyperplanes. On the other hand, using Theorem 1.1 on page 91 of \cite{Cerveau-Mattei}, adapted to this case, we can prove that the foliations in $\overline{L(1,1,1,1)}-L(1,1,1,1)$ have one of the following forms:
\begin{enumerate} \item $\omega_1=L_1^2L_2L_3\big(\sum_{i=1}^3 \lambda_i \frac{dL_i}{L_i}+d(\frac{\alpha}{L_1})\big)$, where $\alpha$ is homogeneous of degree $1$. \item $\omega_2=L_1^3L_2\big( \lambda_1 \frac{dL_1}{L_1}+\lambda_2 \frac{dL_2}{L_2}+d(\frac{\alpha}{L_1^2})\big)$, where $\alpha$ is homogeneous of degree $2$. \item $\omega_3=L_1^4\big( \lambda_1 \frac{dL_1}{L_1}+d(\frac{\alpha}{L_1^3})\big)=\lambda_1L_1^3dL_1+L_1d\alpha-3\alpha dL_1$, where $\alpha$ is homogeneous of degree $3$. \end{enumerate}
\noindent By part (d) of Proposition 2.1 on page 96 of \cite{Cerveau-Mattei}, we obtain the singular set in each case:
\begin{enumerate} \item $Sing(\omega_1)= \mathbb{V}(L_1,L_2) \cup \mathbb{V}(L_1,L_3) \cup \mathbb{V}(L_2,L_3) \cup \mathbb{V}(L_1,\alpha)$ \item $Sing(\omega_2)= \mathbb{V}(L_1,L_2) \cup \mathbb{V}(L_1,\alpha)$ \item $Sing(\omega_3)= \mathbb{V}(L_1,\alpha)$ \end{enumerate}
If $\omega= \sum_{i=1}^3 \lambda_i f_1f_2f_3 \frac{df_i}{f_i} \in L(1,1,2)$, then using part (c) of proposition 2.1 in \cite{Cerveau-Mattei} we have that:
$$Sing(\omega)=\bigcup_{i \neq j} \mathbb{V}(f_i,f_j) \cup Sing(df_3),$$
\noindent which is not a line. Finally, we just have to note that the foliations on the boundary of this component have the same form as the foliations on the boundary of the previous logarithmic component. Hence in the logarithmic components $\overline{L(1,1,1,1)}$ and $\overline{L(1,1,2)}$ there are no foliations with a line as singular set.
\section{Foliations on $\mathbb{CP}^3$ of codimension 1 that have a line as singular set}
In this section we present the classification of codimension 1 foliations on $\mathbb{CP}^3$ of degree $2$ that have a line as singular set. \\
As we said in the introduction, the classification of this type of foliations is the question proposed on page 57 of the book \cite{Deserti-Cerveau}; in that direction, the authors find the unique $\mathcal{L}$-foliation on $\mathbb{CP}^3$ of degree $2$ with a line as singular set (see Theorem 4.5 of \cite{Deserti-Cerveau}). For this reason it is convenient to begin this section by saying something about $\mathcal{L}$-foliations.
\begin{defin} Let $\mathcal{F}$ be a foliation on $\mathbb{CP}^3$ of codimension $1$. We will say that $\mathcal{F}$ is an $\mathcal{L}$-foliation of codimension $1$ if there exists a Lie subalgebra $\mathfrak{g}$ of the Lie algebra of the group $Aut(\mathbb{CP}^3)$, such that for a generic point $z \in \mathbb{CP}^3$ we have the following property:
$$\textrm{$\mathfrak{g}(z)$ is the tangent space to the leaf of $\mathcal{F}$ at $z$}.$$
\noindent In particular, we have that $\dim \mathfrak{g}(z)=\dim \{X(z): X \in \mathfrak{g}\}=2$. \end{defin}
For example, from Theorem 4.5 of \cite{Deserti-Cerveau} we have that the foliations $\omega$ and $\omega_1+\omega_3$ in the exceptional component $\overline{E(3)}$ (see Theorem \ref{excepcional}) are $\mathcal{L}$-foliations. From the same result we also obtain the following:
\begin{cor} \label{L-foliation} The foliation on $\mathbb{CP}^3$ of degree $2$ given by the 1-form:
$$\omega=3(z_1z_3^2+z_2z_3z_4+z_2^3)dz_3-z_3d(z_1z_3^2+z_2z_3z_4+z_2^3),$$
\noindent is the unique $\mathcal{L}$-foliation, up to linear change of coordinates, that has a line as singular set.
\end{cor}
Now we present the classification theorem for this type of foliations.
\begin{teo} \label{Classification} The foliations on $\mathbb{CP}^3$ of codimension $1$ of degree $2$ that have a line as singular set are, up to change of coordinates:
\begin{enumerate}
\item The linear pull-back in $\mathbb{CP}^3$ of the foliations on $\mathbb{CP}^2$ of degree $2$ given by the 1-forms:
\begin{align*} \nu_2&=-z_3^3dz_1+z_3(z_2^2+z_1z_3)dz_2+(z_1z_3^2-z_2^3-z_1z_2z_3)dz_3\\ \nu_3&=z_2^2z_3dz_1-z_3(z_2^2+z_3^2)dz_2+(z_2^3+z_2^2z_3-z_1z_2z_3)dz_3\\ \nu_4&=(z_1z_2z_3+z_3^3-z_2^3)dz_1+z_2(z_3^2-z_1z_2)dz_2-(z_2^2z_3+z_1^2z_2+z_1z_3^2)dz_3. \end{align*}
\item Or the foliation given by the 1-form:
$$\omega_f=3fdz_3-z_3df,$$
\noindent where the polynomial $f(z_1,z_2,z_3,z_4)$ is one of the following:
\begin{align*} &z_1z_3^2+z_3z_4^2+z_2^3,\\ & z_1z_3^2+z_2z_3z_4+z_2^3, \\ & z_1z_3^2+z_2^3. \end{align*}
\noindent In these cases the associated foliation has the rational first integral $\frac{f}{z_3^3}$, and only the third one is a linear pull-back of a foliation on $\mathbb{CP}^2$.
\end{enumerate} \end{teo}
\begin{proof} For the proof we will study every component of the space of foliations on $\mathbb{CP}^3$ of codimension 1 and degree $2$. As we mentioned before, following the results of \cite{Cerveau-LinsNeto}, $\mathcal{F}(2,3)$ has 6 components, which we labelled 1,\dots,6 in the introduction. Components 4, 5 and 6 were analyzed in the preceding sections, where we saw that they contain no foliation having a line as singular set. \\
For the study of the component $\overline{R(2,2)}$ we proceed as follows: if the polynomials $f$ and $g$ define quadric surfaces in $\mathbb{CP}^3$, with $\mathbb{V}(f)$ smooth, then the polynomial $g$ is not a double line, because the degree of the foliation is $2$. Let $Q=\mathbb{V}(f)$; by the adjunction formula we have that $K_Q= \mathcal{O}_Q(2) \otimes K_{\mathbb{CP}^3}= \mathcal{O}_Q(-2)$, so $-K_Q=\mathcal{O}_Q(2)$, and this is the class in $Pic(Q)$ of the intersection with another reduced quadric. \\
On the other hand, we know that $Q$ is isomorphic to $\mathbb{P}^1 \times \mathbb{P}^1$, so $K_Q= p_1^*K_{\mathbb{P}^1} \otimes p_2^*K_{\mathbb{P}^1}$, and we conclude that $K_Q$ has class $(-2,-2)$. Therefore $\mathcal{O}_Q(2)$ has class $(2,2) \neq (0,4)$, which says that the intersection of $Q$ with another reduced quadric surface is not a line. On the boundary of this component we have foliations with a rational first integral where one of the quadrics is the product of two different hyperplanes, and then the associated foliation does not have a line as singular set either. \\
Then all the foliations of $\mathcal{F}(2,3)$ that have a line as singular set are in components 1 and 3. In component 1 we have the linear pull-backs of the foliations on $\mathbb{CP}^2$ of degree $2$ with a unique singularity. By \cite{Cerveau} we know that the three that do not have a rational first integral are $\nu_2$, $\nu_3$ and $\nu_4$. \\
It remains to study the component $\overline{R(1,3)}$. Let $\mathcal{F} \in \overline{R(1,3)}$ be such that it has a line as singular set. We can suppose that the rational first integral is:
$$\frac{f(z_1,z_2,z_3,z_4)}{z_3^3}$$
\noindent where $f(z_1,z_2,z_3,z_4)$ defines a reduced cubic hypersurface in $\mathbb{CP}^3$. The algebraic variety $\mathbb{V}(f,z_3)$ is contained in the singular set $Sing(\mathcal{F})$ of the foliation, so we can suppose that this variety is the line $\mathbb{V}(z_2,z_3)$; therefore
$$f(z_1,z_2,z_3,z_4)=z_3h(z_1,z_2,z_3)+z_3z_4L(z_1,z_2,z_3,z_4)+az_2^3,$$
\noindent where $h(z_1,z_2,z_3)$ has degree 2, $L=a_1z_1+a_2z_2+a_3z_3+a_4z_4$, for some complex numbers $a_1, a_2, a_3, a_4$, and $a \in \mathbb{C}^*$. \\
Note that if $L=0$, then the foliation depends only on the variables $z_1,z_2,z_3$, and it is the linear pull-back of a foliation on $\mathbb{CP}^2$ of degree $2$ with a rational first integral and a unique singularity at $(1:0:0)$. \\
Up to change of coordinates there is only one foliation with these properties, and it has rational first integral:
$$\frac{z_1z_3^2+z_2^3}{z_3^3},$$
\noindent then we can take $h=z_1z_3$ and $a=1$. We can see that if $a_1 \neq 0$, the cubic hypersurface defined by $z_1z_3^2+z_3z_4(\sum_{i=1}^4 a_iz_i)+z_2^3$ has a singularity outside the line $\mathbb{V}(z_2,z_3)$; since this is also a singularity of the foliation, we must require that $a_1=0$. Then, to finish the analysis, we consider the following cases for $a_2, a_3$ and $a_4$:
\begin{enumerate}
\item Suppose that $a_4 \neq 0$, and consider $f=z_1z_3^2+z_3z_4(\sum_{i=2}^4 a_iz_i)+z_2^3,$ we can see that the unique singular point for the cubic hypersurface defined by $f$ is $(1:0:0:0)$. We have that, up to change of coordinates (see case XX of the study of singular cubic hypersurfaces in section 9.2.3 of \cite{Dolgachev}), this hypersurface is defined by:
$$z_1z_3^2+z_3z_4^2+z_2^3.$$
\noindent This cubic hypersurface contains only the line $\mathbb{V}(z_2,z_3),$ which is the singular set of the foliation; in fact this is the unique cubic hypersurface containing just one line (see Table 9.1 of \cite{Dolgachev}). The foliation is not a linear pull-back of a foliation on $\mathbb{CP}^2$ because the hypersurface $\mathbb{V}(z_1z_3^2+z_3z_4^2+z_2^3)$ is not a cone over a cubic plane curve. Finally, by Theorem 4.5 of \cite{Deserti-Cerveau} we can conclude that this is not an $\mathcal{L}$-foliation either.
\item If $a_2 \neq 0$ and $a_4=0$, then the cubic hypersurface defined by
$$f=z_1z_3^2+z_3z_4(a_2z_2+a_3z_3)+z_2^3,$$
\noindent has as its singular set the line $\mathbb{V}(z_2,z_3)$, which coincides with the singular set of the foliation. Then, by the form of $f$ and Theorem 9.2.1 of \cite{Dolgachev}, this is, up to linear change of coordinates:
$$f=z_1z_3^2+z_2z_3z_4+z_2^3.$$
\noindent Hence the rational first integral of the foliation is $\frac{z_1z_3^2+z_2z_3z_4+z_2^3}{z_3^3}$, and this is the $\mathcal{L}$-foliation given in Corollary \ref{L-foliation}. Since this is an $\mathcal{L}$-foliation, using Proposition 3.7 in \cite{Deserti-Cerveau} we have that it is not a linear pull-back of a foliation on $\mathbb{CP}^2$.
\item By parts 1 and 2, if $a_2 \neq 0$ or $a_4 \neq 0$ the foliation given by the 1-form $\omega_f$ is not a linear pull-back. If $a_2=a_4=0$ then the rational first integral of the foliation is:
$$\frac{(z_1+z_4)z_3^2+z_2^3}{z_3^3},$$
\noindent and with a linear change of coordinates we obtain that the rational first integral is equivalent to
$$\frac{z_1z_3^2+z_2^3}{z_3^3},$$
\noindent and the foliation is given by $\nu_1=z_3^3dz_1-z_1z_2^2dz_2+(z_2^3-z_1z_3^2)dz_3$; this is the linear pull-back of the unique foliation on $\mathbb{CP}^2$ of degree $2$ with a unique singular point and with a rational first integral. This means that this foliation is the only one in the intersection of the components $\overline{R(1,3)}$ and $S(2,3)$ with a line as singular set. \end{enumerate}
\end{proof}
From the proof of the previous theorem we can conclude the following.
\begin{cor} There exists, up to linear change of coordinates, only one codimension 1 foliation on $\mathbb{CP}^3$ of degree 2 with a line as singular set which is neither an $\mathcal{L}$-foliation nor a linear pull-back of a foliation on $\mathbb{CP}^2$. \\
This foliation has the rational first integral $\frac{z_1z_3^2+z_3z_4^2+z_2^3}{z_3^3}$. \end{cor}
We finish with the following examples of foliations in $\mathcal{F}(s,3)$, where $s \geq 3$, that have a line as singular set.
\begin{ex} In order to give a family of foliations on $\mathbb{CP}^3$ of codimension 1 and arbitrary degree $s$ that have a line as singular set and a rational first integral of degree $s+1$, we generalize the construction used in the above theorem. \\
Let $s \in \mathbb{N}$, $P(z_2,z_3)=\sum_{i=0}^s a_i z_2^i z_3^{s-i} \in \mathbb{C}[z_2,z_3]$ with $a_s \neq 0$, and $a \in \mathbb{C}$. We consider:
$$\frac{z_1z_3^{s}+a z_3z_4^s+Q(z_2,z_3)}{z_3^{s+1}},$$
\noindent where $Q(z_2,z_3)=\sum_{i=0}^s \frac{a_i}{i+1}z_2^{i+1}z_3^{s-i}$. Then the 1-form:
$$\omega_{a}= z_3^{s+1}dz_1 +z_3P(z_2,z_3) dz_2-(z_1z_3^s+z_2P(z_2,z_3)+sa z_3z_4^s)dz_3+s a z_3^2 z_4^{s-1} dz_4,$$
\noindent defines a foliation on $\mathbb{CP}^3$ of degree $s$ and its singular set is the line $\mathbb{V}(z_2,z_3)$. This foliation is in the rational component $\overline{R(1,s+1)}$ of $\mathcal{F}(s,3)$. If $a=0$ then $\omega_0$ is the pull-back of the foliation:
$$\omega_{0,\mathbb{CP}^2}=z_3^{s+1}dz_1 +z_3P(z_2,z_3) dz_2-(z_1z_3^s+z_2P(z_2,z_3))dz_3$$
\noindent on $\mathbb{CP}^2$ with a unique singularity and with the rational first integral:
$$\frac{z_1z_3^s+Q(z_2,z_3)}{ z_3^{s+1}}.$$ \end{ex}
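The claims of this example are easy to check in any particular case. The following SymPy sketch is purely illustrative (the parameter values are arbitrary choices of ours); for $s=3$ it builds $\omega_a$ from the numerator $f$ of the first integral and verifies the Euler condition, the integrability condition, and the fact that the coefficients vanish simultaneously exactly on the line $\mathbb{V}(z_2,z_3)$:
\begin{verbatim}
import sympy as sp

z1, z2, z3, z4 = sp.symbols('z1:5')
zs = (z1, z2, z3, z4)

s, a, ai = 3, 5, [1, 2, 0, 1]           # sample data; ai = (a_0,...,a_s), a_s != 0
P = sum(c*z2**i*z3**(s - i) for i, c in enumerate(ai))
Q = sum(sp.Rational(c, i + 1)*z2**(i + 1)*z3**(s - i) for i, c in enumerate(ai))
f = z1*z3**s + a*z3*z4**s + Q           # numerator of the first integral f/z3^(s+1)

# omega_a = z3*df - (s+1)*f*dz3, i.e. (z3^(s+1) df - f d(z3^(s+1)))/z3^s
omega = [sp.expand(z3*sp.diff(f, v)) for v in zs]
omega[2] = sp.expand(omega[2] - (s + 1)*f)

print(sp.expand(sum(v*c for v, c in zip(zs, omega))) == 0)          # Euler: True
cij = lambda i, j: sp.diff(omega[j], zs[i]) - sp.diff(omega[i], zs[j])
wedge = [sp.expand(omega[i]*cij(j, k) - omega[j]*cij(i, k) + omega[k]*cij(i, j))
         for i in range(4) for j in range(i+1, 4) for k in range(j+1, 4)]
print(all(c == 0 for c in wedge))                                   # integrability: True

# the dz1-coefficient is z3^(s+1); on z3 = 0 only the dz3-coefficient survives
print(omega[0], [sp.expand(c.subs(z3, 0)) for c in omega])          # z3**4 [0, 0, -z2**4, 0]
\end{verbatim}
\noindent Since $a_s\neq 0$, the last output shows that on $z_3=0$ the coefficients vanish only when $z_2=0$, while for $z_3\neq 0$ the $dz_1$-coefficient $z_3^{s+1}$ is already nonzero; hence $Sing(\omega_a)=\mathbb{V}(z_2,z_3)$.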
With the following example we show that for degree greater than $2$ there exist logarithmic foliations with a line as singular set. In some cases they have a minimal rational first integral of unbounded degree.
\begin{ex} Let $s_1, s_2, s_3 \in \mathbb{N}$, $a \in \mathbb{C}$ and
\begin{align*} f_a(z_1,z_2,z_3,z_4)&=z_2^{s_2}z_3^{s_3}(z_1z_2^{s_1-1}+az_4^{s_1})+z_2^{s_1+s_2+s_3}+z_3^{s_1+s_2+s_3}.\\ \end{align*}
\noindent Take $\lambda_1, \lambda_2, \lambda_3 \in \mathbb{C}^*$ such that $\lambda_1(s_1+s_2+s_3)+\lambda_2+\lambda_3=0$, then the 1-form
\begin{align*} \omega_a=f_az_2z_3 \Big(&\lambda_1 \frac{df_a}{f_a}+ \lambda_2 \frac{dz_2}{z_2}+ \lambda_3 \frac{dz_3}{z_3}\Big), \end{align*}
\noindent defines a logarithmic foliation on $\mathbb{CP}^3$ of degree $s_1+s_2+s_3$ whose singular set is the line $\mathbb{V}(z_2,z_3)$. These foliations are in the irreducible logarithmic component $\overline{L(1,1,s_1+s_2+s_3)}$ of $\mathcal{F}(s_1+s_2+s_3,3)$. If $\lambda_2, \lambda_3 \in \mathbb{N}$, then the foliation has the minimal rational first integral:
$$\frac{f_a^{-\lambda_1}}{z_2^{\lambda_2} z_3^{\lambda_3}},$$
\noindent which has degree $\lambda_2+\lambda_3$; this means that the degree of this minimal rational first integral is not bounded. If $a=0$ the foliation is a linear pull-back of a foliation on $\mathbb{CP}^2$ of degree $s_1+s_2+s_3$. \end{ex}
\end{document} |
\begin{document}
\title{Kraus representation of quantum evolution and fidelity \\ as manifestations of Markovian and non-Markovian avataras} \author{A. K. Rajagopal} \affiliation{Inspire Institute Inc., Alexandria, Virginia, 22303, USA.} \author{A. R. Usha Devi} \email{[email protected]} \affiliation{Department of Physics, Bangalore University, Bangalore-560 056, India} \affiliation{Inspire Institute Inc., Alexandria, Virginia, 22303, USA.} \author{R. W. Rendell} \affiliation{Inspire Institute Inc., Alexandria, Virginia, 22303, USA.} \date{\today}
\begin{abstract} It is shown that the fidelity of the dynamically evolved system with its earlier time density matrix provides a signature of non-Markovian dynamics. Also, the fidelity associated with the initial state and the dynamically evolved state is shown to be larger in the non-Markovian evolution compared to that in the corresponding Markovian case. Starting from the Kraus representation of quantum evolution, the Markovian and non-Markovian features are discerned in its short time structure. These two features are in concordance with each other and they are illustrated with the help of four models of interaction of the system with its environment. \end{abstract} \pacs{03.65.Yz, 03.65.Ta, 42.50.Lc} \maketitle \section{Introduction} The central theme of open quantum systems and their dynamical properties is to develop a description of the interaction of a quantum system with its environment~\cite{Breuer}. The significance of this area of research has been known for a long time and need not be emphasized, as indicated by a large body of literature on the subject. Most recently this has been a subject of intense study. A general form of the local time master equation describing this is given by Chruscinski and Kossakowski~\cite{CK}, which also gives references to the literature. They define clearly the meaning of the terms "Markovian" and "non-Markovian" incarnations of evolution as the absence and presence respectively of the initial time in the local generator of the master equation. Several manifestations of non-Markovianity have been proposed recently~\cite{Cirac,B2,Angel}, where non-Markovian reflections are recognized based on the departure of the evolution from strict Markovianity. While an abstract framework to identify whether a given quantum dynamical channel is Markovian or not has been put forth in Ref.~\cite{Cirac}, recently Breuer et. al.~\cite{B2} proposed a quantification based on the maximum increase of the distinguishability of two different initial quantum states over the entire dynamical evolution. Evaluation of this measure, however, requires optimization of the total increase of the trace distance over all pairs of initial states. More recently~\cite{Angel}, deviations from Markovianity, in terms of the specific dynamical behavior of quantum correlations -- when part of an entangled system evolves under a trace preserving completely positive quantum channel -- has been explored. When complete tomographic information about the dynamical map is available a necessary and sufficient condition of non-Markovianity is also formulated~\cite{Angel}. All the above manifestations of non-Markovianity are built mainly by identifying deviations from the characteristic property of a Markovian channel -- being an element of one-parameter continuous, memoryless, completely positive semigroup. In this paper, we propose {\em fidelity difference} as a non-Markovian incarnation -- which is yet another significant feature capturing the departure from the Markovian semigroup property of evolution.
We begin with the well-known Kraus representation~\cite{Kraus} of the reduced density matrix of the system interacting with an environment. In Sec.~II we begin by recalling the known result~\cite{preskill,RS} that if the Kraus operators exhibit a small-time dependence of a particular form, the Markovian master equation -- known as the Lindblad-Gorini-Kossakowski-Sudarshan (LGKS)~\cite{Lindblad,GKS} master equation -- is recovered. This observation points towards the generality of the Kraus representation of the quantum evolution in subsuming both the Markov and Non-Markov versions depending on the structure of interaction between the system and its environment.
In Sec.~III, we propose to use fidelity~\cite{Fidelity} $F[\rho(t), \rho(t+\tau)]$ as a measure to examine the nature of propensity of the evolved density matrix $\rho(t+\tau)$ with the earlier time density matrix $\rho(t)$. This offers a direct approach to the conventional view of Markovianity, namely that the fidelity $F[\rho(t), \rho(t+\tau)]$ would increase from its initial value $F[\rho(0), \rho(\tau)]$ and approach unity asymptotically. This thus provides a test, since any deviation from this behavior would reflect a non-Markovian incarnation. We also bring out the significance of our fidelity test in comparison with the recently proposed trace distance based quantification of non-Markovianity~\cite{B2}.
In Sec.~IV, we illustrate our results through some examples. Here, we have considered the exactly known models of Kraus representation given by Yu and Eberly, who investigated the issue of sudden death of entanglement in the two-qubit system, evolving under Markovian~\cite{YE1} and non-Markovian~\cite{YE2} environments. The small time nature of these model Kraus representations illustrates the Markovian and non-Markovian natures in these examples. The fidelity $F[\rho(t),\rho(t+\tau)]$ in the Markovian case is shown to increase with time $t$, as expected. On the other hand, in the non-Markovian limit, we show that the fidelity difference $F[\rho(t),\rho(t+\tau)]-F[\rho(0),\rho(\tau)]$ fluctuates between positive and negative values -- bringing out the essence of non-Markovianity. Further, we observe that the fidelity $F[\rho(0), \rho(t)]$ -- which corresponds to the memory of the initial state carried by the dynamically evolving state -- is larger in the non-Markovian limit, when compared with that in the Markovian case in this model.
We also investigate another exactly known Kraus representation~\cite{AKRR, AKRU} of the Jaynes-Cummings model of interaction of a qubit with the radiation field. Unlike the other dynamical models discussed here, this example is exactly solvable and starts with the full Hamiltonian for which the unitary evolution operator can be constructed and as such, we examine here the corresponding dynamical equations associated with both the atom and photon systems. We explore the small time behavior of the Kraus operators in this model to recognize Markovianity; secondly, we identify that the fidelity difference $F[\rho(t), \rho(t+\tau)]-F[\rho(0),\rho(\tau)]$ of the atom, initially in an excited state, fluctuates between positive and negative values during evolution -- which is a clear signature of non-Markovianity.
Recently, Chruscinski and Kossakowski (CK)~\cite{CK2} considered an interesting illustration of non-Markovian dynamics of a single qubit, either through a non-local master equation with a memory kernel or equivalently via a seemingly simpler local in time equation. Both the descriptions are complementary to each other: while the non-local equation involves a time-independent memory kernel, the corresponding local approach is governed by a highly singular generator. In other words, it has been illustrated that non-Markovianity manifests differently in local and non-local approaches. Here we show that in this example too the fidelity difference function captures the essence of non-Markovianity.
Our analysis of these examples brings out typical characteristics of non-Markovian dynamics. Sec.~V is devoted to concluding remarks.
\section{Kraus representation of quantum dynamics} For simplicity of presentation, we drop the system and environment Hamiltonians and consider only their mutual interaction. The dynamics of a system density matrix interacting with an environment is given in terms of Kraus representation as \begin{equation} \label{kraus} \rho(t)=\sum_i K_i(t)\rho(0) K_i^\dag(t), \end{equation} with the unit trace condition ${\rm Tr}[\rho(t)]=1$ leading to \begin{equation} \label{sum} \sum_i\, K_i^\dag K_i=I, \end{equation} $I$ denoting the identity matrix.
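As a simple numerical illustration of Eqs.~(\ref{kraus}) and (\ref{sum}), the following sketch (an illustrative snippet of ours, not taken from the references; the amplitude-damping Kraus operators used are the standard textbook ones and the parameter value is arbitrary) applies a Kraus map and checks the completeness relation:
\begin{verbatim}
import numpy as np

def apply_kraus(rho, kraus):
    # rho(t) = sum_i K_i rho(0) K_i^dagger
    return sum(K @ rho @ K.conj().T for K in kraus)

p  = 0.3                                          # arbitrary damping probability
K0 = np.array([[1.0, 0.0], [0.0, np.sqrt(1 - p)]])
K1 = np.array([[0.0, np.sqrt(p)], [0.0, 0.0]])

print(np.allclose(K0.conj().T @ K0 + K1.conj().T @ K1, np.eye(2)))  # True: completeness
rho0 = np.array([[0.5, 0.5], [0.5, 0.5]])                           # |+><+|
print(np.isclose(np.trace(apply_kraus(rho0, [K0, K1])).real, 1.0))  # trace preserved
\end{verbatim}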
We first recall~\cite{preskill, RS} how the well-known LGKS master equation describing Markovian dynamics is obtained from Eq.~(\ref{kraus}). Following Preskill~\cite{preskill}, we separate one term, say $K_0(t)$ in the sum over $i$ in Eq.~(\ref{kraus}), and choose the rest of the terms $K_i(t), \ i\neq 0$ to have the following forms for small time $t$: \begin{equation} \label{smtime} K_i(t)\approx \sqrt{t}\, L_i, \ \ i\neq 0, \end{equation} in which case Eq.~(\ref{sum}) reduces to \begin{equation} \label{k0} K_0(t)\approx I-\frac{t}{2}\, \sum_{i\neq 0} L_i^\dag L_i. \end{equation} Expressing the Kraus operators in the short time limit in terms of the new $L$-operators, (\ref{kraus}) takes the standard LGKS form, termed the Markovian master equation: \begin{eqnarray}
\label{Lindblad}
\rho(t)&-&\rho(0)\approx t\, {\cal L}_M\, \rho(0), \nonumber \\
{\rm i.e.,}\ \ \frac{d\rho}{dt}&=&{\cal L}_M\rho=\sum_{i\neq 0} \left[L_i\rho L^\dag_i-\frac{1}{2}\, (L_i^\dag L_i\rho+\rho\, L_i^\dag L_i)\right]. \nonumber \\ \end{eqnarray} (Derivation of the master equation (\ref{Lindblad}) from the Kraus representation (\ref{kraus}) in similar lines as above is also outlined in Ref.~\cite{RS}).
It may be pointed out that in Ref.~\cite{CK2}, a complete phenomenological treatment of local time evolution of open quantum systems, based on a generalization of the LGKS representation of Markovian dynamics, is discussed. This basically entails a local time-dependent prefactor in the RHS of Eq.~(\ref{Lindblad}). A generalized non-Markovian master equation, which is local in time, has also been derived in Ref.~\cite{RS}, and the short-time memory effects, retained from the environment, are shown to lead to dissipations deviating from typical Markovian features~\cite{RSnote}. In the subsequent discussions in Sec.~IV, we explore whether or not the Kraus operators exhibit the desired small time behavior leading to the LGKS master equation in four different examples.
\section{Fidelity and its implication for Markovianity}
Following Jozsa~\cite{Fidelity}, we define the fidelity $F[\rho(t), \rho(t+\tau)]$ as the propensity of finding the state $\rho(t)$ in the later time state $\rho(t+\tau),\ \tau>0$: \begin{equation} \label{fidelity} F[\rho(t),\rho(t+\tau)]=\left\{{\rm Tr}\left[\sqrt{\sqrt{\rho(t)}\rho(t+\tau)\sqrt{\rho(t)}}\right]\right\}^2, \end{equation} which is bounded by $0\leq F[\rho(t),\rho(t+\tau)]\leq 1$ and satisfies the symmetry property, $F[\rho(t),\rho(t+\tau)]=F[\rho(t+\tau),\rho(t)]$.
Fidelity obeys another significant property i.e., monotonicity~\cite{NC}: \begin{equation} F(\Lambda\,\rho_1,\Lambda\,\rho_2)\geq F(\rho_1,\rho_2) \end{equation} where $\Lambda$ denotes a completely positive map -- which serves as a characteristic feature of Markovian dynamics as indicated below.
Recall that the Markovian evolution guarantees a completely positive, trace preserving dynamical map $\Lambda(t)$, \begin{equation} \rho(0) \rightarrow \rho(t)=\Lambda(t)\rho(0), \end{equation} which also forms a one parameter semigroup obeying the composition law~\cite{CK, Cirac, B2, Angel, RS, CK2} \begin{equation} \label{comp} \Lambda(t_1)\Lambda(t_2)=\Lambda(t_1+t_2), \ t_1,t_2\geq 0, \end{equation} a characteristic feature of Markovian dynamics. Therefore, it is clear that the fidelity function $F[\rho(t),\rho(t+\tau)]$ involving the system density matrix evolving under Markovian dynamics satisfies the inequality~\cite{note}, \begin{eqnarray} \label{inequality} F[\rho(t),\rho(t+\tau)]\equiv F[\Lambda(t)\rho(0),\Lambda(t)\rho(\tau)] \nonumber \\ \Rightarrow F[\rho(t),\rho(t+\tau)]\geq F[\rho(0),\rho(\tau)]. \end{eqnarray} Any violation of this inequality is a clear signature of non-Markovian dynamics -- indicating that the associated dynamical map {\em does not} obey the composition law (\ref{comp}) -- and hence, the dynamics has inbuilt memory effects. Deviation from the trend (\ref{inequality}) is, however, a sufficient -- though not necessary -- reflection of non-Markovianity. We propose to examine non-Markovianity in terms of the {\em fidelity difference} function \begin{equation} \label{fd} G(t, \tau)= \frac{F[\rho(t),\rho(t+\tau)] - F[\rho(0),\rho(\tau)]}{F[\rho(0),\rho(\tau)]}, \end{equation} negative values of which necessarily imply non-Markovianity. We identify the non-Markovian signature in terms of the fidelity difference function in some dynamical models in Sec.~IV.
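For later reference, the fidelity of Eq.~(\ref{fidelity}) and the fidelity difference of Eq.~(\ref{fd}) are also easy to evaluate numerically. A minimal sketch (ours; the helper names are chosen for illustration only) is:
\begin{verbatim}
import numpy as np
from scipy.linalg import sqrtm

def fidelity(rho1, rho2):
    # F[rho1, rho2] = ( Tr sqrt( sqrt(rho1) rho2 sqrt(rho1) ) )^2
    s1 = sqrtm(rho1)
    return np.real(np.trace(sqrtm(s1 @ rho2 @ s1)))**2

def fidelity_difference(rho_t, rho_t_tau, rho_0, rho_tau):
    # G(t,tau); negative values necessarily imply non-Markovianity
    F0 = fidelity(rho_0, rho_tau)
    return (fidelity(rho_t, rho_t_tau) - F0) / F0

rho_a = np.diag([0.9, 0.1])
rho_b = np.eye(2) / 2
print(fidelity(rho_a, rho_b))          # ~0.8
\end{verbatim}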
It is worth pointing out the distinction between the quantification proposed by Breuer et. al.~\cite{B2} and our fidelity test of non-Markovianity proposed here. It is well-known that the distinguishability of two states $\rho_{1},\ \rho_2$, measured in terms of the trace-distance $D(\rho_1,\rho_2)=\frac{1}{2}\vert\vert \rho_1-\rho_2\vert\vert$ never increases~\cite{NC} under all completely positive, trace preserving maps i.e., $D(\Lambda\,\rho_1,\Lambda\,\rho_2)\leq D(\rho_1,\rho_2)$. If the pair of states $\rho_{1,2}$ are evolving under the influence of a dynamical Markovian map i.e., $\rho_{1,2}(t)\equiv \Lambda(t)\rho_{1,2}(0)$, the semi-group composition law (\ref{comp}) imposes that~\cite{B2, note2} $D[\rho_1(t+\tau),\rho_2(t+\tau)]\equiv D[\Lambda(\tau)\rho_1(t),\Lambda(\tau)\rho_2(t)]
\leq D[\rho_1(t),\rho_2(t)],$ for all $t,\tau\geq 0$. This decisive property of Markovian processes, viz., {\em the trace distance of any fixed pair of quantum states never increases}, has been employed in Ref.~\cite{B2} to uncover the non-Markovian feature in open system dynamics, in terms of the following quantity \begin{eqnarray*} {\cal N}&=& {{\rm max}\atop {\rho_{1,2}(0)}} \int_{\sigma>0}\, dt\, \sigma[t,\rho_{1,2}(0)],\\ \sigma[t,\rho_{1,2}(0)]&=&\frac{d}{dt}\,D[\rho_1(t),\rho_2(t)], \end{eqnarray*} which measures the total increase of the trace-distance between any optimal pair of states $\rho_{1,2}$ during the entire time evolution. Evidently, this quantification requires optimization over the set of all initial {\em pairs} of states $\rho_{(1,2)}(0)$. Here, we have exploited the divisibility property (\ref{comp}) of the Markovian map differently, as illustrated in (\ref{inequality}), to obtain an inequality for the overlap $F[\rho(t),\rho(t+\tau)]$, involving a dynamically evolving quantum state $\rho(t+\tau)$ and {\em its earlier time version} $\rho(t)$ -- in contrast to that for the trace distance of {\em pairs} of states $\rho_1, \rho_2$ under time evolution as in Ref.~\cite{B2}.
\section{Dynamical models}
We now proceed to explicitly investigate the small time behavior of Kraus representations and the nature of the fidelity functions in some simple models of open system dynamics.
\noindent (a) Yu and Eberly~\cite{YE1} considered the following model Kraus operators, together with an initial density matrix, for a simplified dynamical system of two qubits interacting with an environment. For this model we can verify both the small time limit, to see whether the LGKS master equation is obtained, and the fidelity, to confirm the Markovian (or otherwise) character of the evolution in our view. The initial state of the two-qubit system is chosen to be of the simple form
\begin{equation} \rho_{AB}=\frac{1}{9}\, \left(\begin{array}{cccc} 1 & 0 & 0 &0 \\ 0 & 4 & \lambda & 0 \\ 0 & \lambda & 4 & 0 \\ 0 & 0 & 0 & 0 \end{array} \right) \end{equation} where $0\leq \lambda\leq 4$. The Kraus operators corresponding to the dynamical evolution of the qubits are given by, \begin{eqnarray} \label{kr0} K_0(t)&=&\left(\begin{array}{cccc} \gamma^2(t) & 0 & 0 &0 \\ 0 & \gamma(t) & 0 & 0 \\ 0 & 0 & \gamma(t) & 0 \\ 0 & 0 & 0 & 1 \end{array} \right),\nonumber \\ K_1(t)&=&\left(\begin{array}{cccc} \gamma(t)\omega(t) & 0 & 0 &0 \\ 0 & 0 & 0 & 0 \\ 0 & 0 & \omega(t) & 0 \\ 0 & 0 & 0 & 0 \end{array} \right), \nonumber \\ K_2(t)&=&\left(\begin{array}{cccc} \gamma(t)\omega(t) & 0 & 0 &0 \\ 0 & \omega(t) & 0 & 0 \\ 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \end{array} \right), \nonumber \\
K_3(t)&=&\left(\begin{array}{cccc} \omega^2(t) & 0 & 0 &0 \\ 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \end{array} \right), \end{eqnarray} where $\gamma(t)={\rm exp}[-\Gamma t/2],\ \omega(t)=\sqrt{[1-\gamma^2(t)]};$ $\Gamma$ represents the strength of the environmental transverse noise. The dynamically evolved two qubit density matrix is given by, \begin{eqnarray} \label{ye1rho} \rho_{AB}(t)&=&\sum_{i=0}^{3}K_i(t) \rho(0) K_i^\dag(t) \nonumber \\ &=&\frac{1}{9}\, \left(\begin{array}{cccc} 1 & 0 & 0 &0 \\ 0 & 4 & \lambda\, \gamma^2(t) & 0 \\ 0 & \lambda\,\gamma^2(t) & 4 & 0 \\ 0 & 0 & 0 & 0 \end{array} \right). \end{eqnarray} \begin{figure}
\caption{(Color online). Fidelity $F[\rho(t),\rho(t+\tau)]$ of the dynamical state $\rho(t+\tau)$ with its earlier time density matrix $\rho(t)$, as a function of dimensionless scaled time $\Gamma t$. Here, we have chosen $\Gamma\tau=1,\ {\rm and}\ \lambda=0.5$. The fidelity increases from its initial value $F[\rho(0),\rho(\tau)]$ and approaches 1 in the limit $\Gamma t\rightarrow \infty$ -- as anticipated in Markovian dynamics. All quantities are dimensionless.}
\end{figure}
In the small time limit i.e., $\Gamma t<<1$, we have, $\omega(t)\approx\sqrt{\Gamma\, t},\ \gamma(t)\approx (1-\Gamma\, t/2)$. Expressing the Kraus operators in this limit as, \begin{eqnarray*} \label{akr} K_0(t)&\approx& I-\frac{\Gamma\, t}{2}\, L_0,\ \ L_0=\left(\begin{array}{cccc} 2 & 0 & 0 &0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 0 \end{array} \right), \nonumber \\ K_1(t)&\approx& \sqrt{\Gamma\, t}\, L_1, \ \ L_1=\left(\begin{array}{cccc} 1 & 0 & 0 &0 \\ 0 & 0 & 0 & 0 \\ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 0 \end{array} \right), \nonumber \\ K_2(t)&\approx& \sqrt{\Gamma\, t}\, L_2,\ \ L_2=\left(\begin{array}{cccc} 1 & 0 & 0 &0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \end{array} \right), \nonumber \\
K_3(t)&\approx& \Gamma\, t\, L_3, \ L_3=\left(\begin{array}{cccc} 1 & 0 & 0 &0 \\ 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \end{array} \right), \end{eqnarray*} We thus obtain the LGKS master equation describing the dynamics as in (\ref{Lindblad}): \begin{eqnarray} \label{aLm} \frac{d \rho_{AB}}{d\, t}&=& \Gamma \, {\cal L}_M \, \rho_{AB},\nonumber \\ &=& \Gamma (L_1\, \rho_{AB}\, L^\dag_1 + L_2\, \rho_{AB}\, L^\dag_2)-\frac{\Gamma}{2}\, (L_0\, \rho_{AB}+ \rho_{AB}\, L_0),\nonumber \\
& & \ \ L_0=L_1^\dag L_1+L_2^\dag\, L_2. \end{eqnarray}
The fidelity function $F[\rho_{AB}(t),\rho_{AB}(t+\tau)]$ may be readily evaluated for the two qubit state (\ref{ye1rho}): \begin{widetext} \begin{equation} F[\rho_{AB}(t),\rho_{AB}(t+\tau)]=\frac{1}{81}\, \left\{1+\sqrt{[4+\lambda\,\gamma^2(t)][4+\lambda\,\gamma^2(t+\tau)]} +\sqrt{[4-\lambda\,\gamma^2(t)][4-\lambda\,\gamma^2(t+\tau)]}\right\}^2. \end{equation} \end{widetext} The variation of fidelity as a function of dimensionless scaled time $\Gamma\, t$ is shown in Fig.~1. It may be seen that $F[\rho_{AB}(t),\rho_{AB}(t+\tau)]$ increases from its initial value $F[\rho_{AB}(0),\rho_{AB}(\tau)]$ and approaches unity asymptotically -- which is a typical Markovian behavior.
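This Markovian behavior can also be confirmed directly from the closed-form expression above. The following numerical sketch (ours, using the same parameter values as in Fig.~1) indicates that $F[\rho_{AB}(t),\rho_{AB}(t+\tau)]$ is non-decreasing in $t$ and tends to unity:
\begin{verbatim}
import numpy as np

lam, Gtau = 0.5, 1.0                    # lambda and Gamma*tau, as in Fig. 1
g2 = lambda Gt: np.exp(-Gt)             # gamma(t)^2 with Gt = Gamma*t

def F(Gt):
    x, y = g2(Gt), g2(Gt + Gtau)
    return (1 + np.sqrt((4 + lam*x)*(4 + lam*y))
              + np.sqrt((4 - lam*x)*(4 - lam*y)))**2 / 81.0

vals = np.array([F(Gt) for Gt in np.linspace(0.0, 10.0, 400)])
print(np.all(np.diff(vals) > -1e-12))   # True: F never decreases (up to rounding)
print(vals[0] < vals[-1] <= 1.0)        # True: F approaches unity from below
\end{verbatim}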
\noindent (b) Recently Yu and Eberly~\cite{YE2} considered a slight variation of the above model leading to non-Markovian noise. The Kraus operators associated with the noisy evolution of the two-qubit system are obtained by replacing $\gamma(t) \rightarrow p(t)={\rm exp}[-f(t)], \ f(t)=\frac{\Gamma}{2}[t+\frac{1}{\gamma}(e^{-\gamma t}-1)],$ $\omega(t)\rightarrow q(t)=\sqrt{1-p^2(t)}.$ In this non-Markovian model, $\gamma$ denotes the environmental noise bandwidth and $\Gamma$ is the noise property associated with the qubit. In the limit $\gamma\rightarrow \infty,$ we get $f(t)\rightarrow \frac{\Gamma}{2}t$, and hence the Markovian dynamics is recovered.
In the short time limit viz., $\gamma\, t<<1,$ we have $p(t)\approx 1-\frac{\Gamma\gamma\, t^2}{4}$ and $q(t)\approx t\,\sqrt{\frac{\Gamma\gamma}{2}}$; clearly, this structure does not lead to the standard LGKS master equation and hence brings out the non-Markovian nature of the model. In the short time limit, the same operators as in (\ref{aLm}) appear -- but in the following form, \begin{eqnarray} \rho_{AB}(t)-\rho_{AB}(0)&\approx& \frac{\Gamma\gamma\, t^2}{2}\, (L_1\, \rho_{AB}\, L^\dag_1 + L_2\, \rho_{AB}\, L^\dag_2)\nonumber \\ &&-\frac{\Gamma\gamma\, t^2}{4}\, (L_0\, \rho_{AB}+ \rho_{AB}\, L_0),\nonumber \\ {\rm or}\ \ \ \ \frac{d\rho_{AB}}{dt}&=&\gamma\,\Gamma\, t\, {\cal L}_M\, \rho(0). \end{eqnarray}
(Here, ${\cal L}_M$ is the same as in (\ref{aLm})). In other words, one may recast the dynamical equation in this model as a non-Markovian master equation, with a linear time pre-factor in the LGKS master equation (\ref{Lindblad}).
\begin{figure}
\caption{(Color online). Fidelity difference $G(t,\tau)$ as a function of the dimensionless scaled time $\Gamma t$, in the non-Markovian limit $\gamma=10^{-4}.$ (Here, we have chosen the parameters $\Gamma=1$ and $\tau=1$). Negative values of this function imply violation of the inequality (\ref{inequality}) and hence indicate non-Markovianity. All quantities are dimensionless.}
\end{figure}
Choosing a simple initial two-qubit state in the $X$ form~\cite{YE2} \begin{eqnarray} \label{ye1rho2} \rho_{AB}(0)=\frac{1}{3}\, \left(\begin{array}{cccc} \alpha & 0 & 0 &0 \\ 0 & 1 & 1 & 0 \\ 0 & 1 & 1 & 0 \\ 0 & 0 & 0 & 1-\alpha \end{array} \right), \end{eqnarray} (where $\alpha$ denotes a real, positive parameter) the dynamics does preserve the $X$ structure, with the diagonal elements of the density matrix remaining unaltered and the off-diagonal elements acquiring a time dependence $[\rho_{AB}(t)]_{kl}=[\rho_{AB}(0)]_{kl}\, p^2(t)$. The fidelity associated with $\rho_{AB}(t)$ may be readily evaluated to be,
\begin{widetext}
\begin{equation} F[\rho_{AB}(t),\rho_{AB}(t+\tau)]=\frac{1}{9}\left\{1+\sqrt{[1+p^2(t)][1+p^2(t+\tau)]}+q(t)q(t+\tau)\right\}^2.
\end{equation} \end{widetext}
In Fig.~2 we have plotted the fidelity difference $G(t,\tau)$ (see Eq.~(\ref{fd})) as a function of the dimensionless scaled time $\Gamma t$, in the non-Markovian limit $\gamma <<1$. Negative values of the fidelity difference reveal the non-Markovian feature.
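The behavior shown in Fig.~2 can be reproduced directly from the closed-form fidelity above. The following numerical sketch (ours, with the parameter values of Fig.~2) confirms that $G(t,\tau)$ takes both positive and negative values:
\begin{verbatim}
import numpy as np

Gamma, gam, tau = 1.0, 1.0e-4, 1.0         # non-Markovian regime: gamma << 1
p = lambda t: np.exp(-0.5*Gamma*(t + np.expm1(-gam*t)/gam))
q = lambda t: np.sqrt(1.0 - p(t)**2)

def F(t1, t2):
    return (1 + np.sqrt((1 + p(t1)**2)*(1 + p(t2)**2)) + q(t1)*q(t2))**2 / 9.0

F0 = F(0.0, tau)
G  = np.array([(F(t, t + tau) - F0)/F0 for t in np.linspace(0.0, 30.0, 600)])
print(G.max() > 0.0 > G.min())             # True: G fluctuates in sign
\end{verbatim}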
\begin{figure}
\caption{(Color online). Fidelity $F[\rho(0),\rho(t)]$ of the initial state $\rho(0)$ with the dynamically evolved density matrix $\rho(t)$, as a function of dimensionless scaled time $\Gamma t$, in both Markovian (dashed curve; $\gamma=10$) and non-Markovian (solid curve; $\gamma=0.01$) limits; we have also chosen the parameter $\Gamma=1.$ It may be seen that the fidelity is larger in the non-Markovian case, when compared to that in the corresponding Markovian case. All quantities are dimensionless.}
\end{figure}
We also find that the fidelity of the initial state with the dynamically evolved density matrix, viz., $F[\rho(0),\rho(t)]$, has a larger value when $\gamma <<1$ (non-Markovian limit) compared to that in the limit $\gamma>>1$ (Markovian case), highlighting that the memory of the initial state is larger during non-Markovian dynamics. This is depicted in Fig.~3, where we have plotted $F[\rho_{AB}(0),\rho_{AB}(t)]$ as a function of $\Gamma\,t$ both in the Markovian and non-Markovian limits.
(c) The Jaynes-Cummings model (JCM)~\cite{AKRR,AKRU} describes a two-level atom (qubit) interacting with a radiation field. This example, unlike the other models discussed in this work, starts with the Hamiltonian for the system, for which the evolution operator can be constructed, and we will examine here the master equations for both the atom and photon subsystems. Incidentally, in this model there is sudden death of entanglement of the qubit with the radiation field~\cite{AKRR} -- a characteristic of non-Markovian evolution.
For simplicity of presentation, we consider here the resonant case where the qubit energy is equal to that of the radiation and the initial state of the atom is taken to be its excited state, $\rho_{A}(t=0)=\vert\uparrow\rangle\langle \uparrow\vert$. The initial state of the radiation is taken to be a coherent state $\rho_{R}(t=0)=\vert\alpha\rangle\langle \alpha\vert,$ $\vert\alpha\rangle=e^{-\frac{\vert\alpha\vert^2}{2}}\, \sum_{n=0}^{\infty}\frac{\alpha^n}{\sqrt{n!}}\vert n\rangle;$ $\vert\alpha\vert$ denoting the intensity of the radiation. The Kraus representations for both the qubit and the radiation subsystems are explored in Ref.~\cite{AKRU}.
The dynamically evolved qubit density matrix is given by the mixed state~\cite{AKRR, AKRU}: \begin{eqnarray} \label{atom} \rho_{A}(t)&=&\sum_{N=0}^{\infty}\, K_N(t)\rho_A(0)\,K^{\dag}_N(t),\nonumber \\ \label{atomKraus} K_N(t)&=&W_{N\uparrow}(t)\, \vert\uparrow\rangle\langle\uparrow\vert\, +W_{N\downarrow}(t)\, \vert\downarrow\rangle\langle\uparrow\vert \\ \label{atomKrausele} W_{N\uparrow}(t)&=&\cos(gt\sqrt{N+1})\, \langle N\vert\alpha\rangle,\nonumber \\
W_{N\downarrow}(t)&=&-i\, \sin(gt\sqrt{N})\, \langle N-1\vert\alpha\rangle \\
\label{rhot} {\rm or} \ \ \rho_{A}(t)&=& \vert a\vert^2\, \rho(0)+\vert b\vert^2\, \sigma_-\rho(0)\, \sigma_{+}\nonumber \\ && + i\, ab^*\, \rho(0)\, \sigma_{+}-ia^*b\,
\sigma_-\rho(0), \end{eqnarray} Here, $g$ denotes the interaction strength of the radiation with the qubit and \begin{eqnarray} \label{par} \vert a\vert^2&=&\sum_{N=0}^{\infty}\vert\langle N\vert\alpha\rangle\vert^2\, \cos^2(gt\sqrt{N+1}), \nonumber \\ \vert b\vert^2&=&\sum_{N=0}^{\infty}\vert\langle N-1\vert\alpha\rangle\vert^2\, \sin^2(gt\sqrt{N}), \nonumber \\ ab^*&=&\sum_{N=0}^{\infty}\, \cos(gt\sqrt{N+1})\sin(gt\sqrt{N}).
\end{eqnarray}
In the small-time limit, the contributions of the associated Kraus operators $K_N(t)$ of (\ref{atomKraus}) to the density matrix go as the square of time and, as such, we obtain a LGKS-type master equation with a linear time prefactor -- as above in model (b) -- indicating a non-Markovian feature.
We now focus on a much simpler situation, where the initial radiation state is chosen to be the vacuum state $\rho_R(0)=\vert 0\rangle\langle 0\vert$, to illustrate the non-Markovian behavior from the fidelity consideration. The dynamically evolved qubit state then assumes the following simple form (obtained by substituting $\alpha=0$ in (\ref{rhot}), (\ref{par})) \begin{equation} \label{simple}
\rho^{\uparrow}_A(t)=\cos^2(gt)\, \vert\uparrow\rangle\langle\uparrow\vert + \sin^2(gt)\, \vert\downarrow\rangle\langle\downarrow\vert \end{equation} in which situation the fidelity $F[\rho^{\uparrow}_A(t), \rho^{\uparrow}_A(t+\tau)]$ is readily found to be, \begin{eqnarray} \label{fidatom}
F[\rho^{\uparrow}_A(t), \rho^{\uparrow}_A(t+\tau)]&=&\left\{\vert \cos(gt)\cos[g(t+\tau)]\vert \right. \nonumber \\
&& \left. +\vert \sin(gt)\sin[g(t+\tau)]\vert\right\}^2. \end{eqnarray} The fidelity difference $G(t, \tau)$ (see Eq.~(\ref{fd})), plotted in Fig.~4, reveals negative fluctuations and hence provides a clear manifestation of non-Markovian evolution. \begin{figure}
\caption{(Color online). Fidelity difference $G(t, \tau)$ in JCM with an initially excited atomic system, as a function of dimensionless scaled time $gt$. We have chosen the parameter $g\tau=10$. Negative fluctuations of the function $G(t, \tau)$ reveal non-Markovian behavior. All quantities are dimensionless.}
\end{figure}
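As a quick cross-check of the closed form in Eq.~(\ref{fidatom}), the following short numerical sketch (ours, not part of the original analysis; the parameter values $g=1$ and $g\tau=10$ are purely illustrative) compares it with the general Uhlmann fidelity evaluated directly on the diagonal states of Eq.~(\ref{simple}).
\begin{verbatim}
import numpy as np
from scipy.linalg import sqrtm

def rho_up(g, t):
    # evolved qubit state of Eq. (simple) for an initially excited atom
    return np.diag([np.cos(g * t) ** 2, np.sin(g * t) ** 2])

def uhlmann_fidelity(r1, r2):
    # general Uhlmann fidelity F = (tr sqrt(sqrt(r1) r2 sqrt(r1)))^2
    s = sqrtm(r1)
    return np.real(np.trace(sqrtm(s @ r2 @ s))) ** 2

g, tau = 1.0, 10.0   # illustrative values only
for t in np.linspace(0.1, 5.0, 11):
    closed_form = (abs(np.cos(g * t) * np.cos(g * (t + tau)))
                   + abs(np.sin(g * t) * np.sin(g * (t + tau)))) ** 2
    assert np.isclose(uhlmann_fidelity(rho_up(g, t), rho_up(g, t + tau)),
                      closed_form)
\end{verbatim}
Because the two states commute, the Uhlmann fidelity reduces to the classical fidelity of the populations, which is what the closed form expresses.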
It would be interesting to explore the evolution of the radiation subsystem as well in this model. For simplicity, we choose the same initial states of the atom (excited state) and the radiation field (coherent state) to obtain the dynamical state of the photon as, \begin{equation} \label{photon} \rho_R(t)=V_{\uparrow\uparrow}(t)\rho_R(0)V^\dag_{\uparrow\uparrow}(t)+V_{\downarrow\uparrow}(t)\rho_R(0)V^\dag_{\downarrow\uparrow}(t), \end{equation} where the corresponding Kraus operators are given by, \begin{eqnarray} \label{phkr}
V_{\uparrow\uparrow}(t)&=&\sum_{n=0}^{\infty} \cos(gt\sqrt{n+1})\, \vert n\rangle\langle n\vert\nonumber \\
V_{\downarrow\uparrow}(t)&=&-i\sum_{n=0}^{\infty} \sin(gt\sqrt{n+1})\, \vert n+1\rangle\langle n\vert. \nonumber \\ \end{eqnarray} The small time behavior of the photon Kraus operators could be readily recognized as, \begin{eqnarray}
V_{\uparrow\uparrow}(t)&\approx& I_R-\frac{1}{2} (gt)^2\, L_0 \nonumber \\
V_{\downarrow\uparrow}(t)&\approx& -i\, gt \, L_1, \nonumber \\ \end{eqnarray} where the Lindblad operators $L_0, L_1$ are related to the photon creation and annihilation operators $a^\dag, \ a$ as follows: \begin{eqnarray*}
L_0&=& a\, a^\dag =\sum_{n=0}^{\infty}\, (n+1) \vert n\rangle\langle n\vert \\
L_1&=&a^\dag=\sum_{n=0}^{\infty}\, \sqrt{n+1}\, \vert n+1\rangle\langle n\vert. \\ \end{eqnarray*} Just as in the case of qubits, we get the LGKS type master equation with a linear time prefactor -- indicating a non-Markovian behavior.
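The following minimal numerical sketch (ours, not part of the original text; the truncation dimension and the values $g=1$, $t=10^{-3}$ are illustrative) builds the photon Kraus operators of Eq.~(\ref{phkr}) in a truncated Fock space and confirms the small-time expansions quoted above.
\begin{verbatim}
import numpy as np

N = 30                       # Fock-space truncation (illustrative)
g, t = 1.0, 1e-3             # illustrative coupling and small time

n = np.arange(N)
V_uu = np.diag(np.cos(g * t * np.sqrt(n + 1.0)))                      # V_{uu}(t)
V_du = -1j * np.diag(np.sin(g * t * np.sqrt(np.arange(1.0, N))), -1)  # V_{du}(t)

L0 = np.diag(n + 1.0)                            # a a^dag (truncated)
L1 = np.diag(np.sqrt(np.arange(1.0, N)), -1)     # a^dag   (truncated)

# leading-order comparisons: both residuals are of higher order in g*t
err_uu = np.max(np.abs(V_uu - (np.eye(N) - 0.5 * (g * t) ** 2 * L0)))
err_du = np.max(np.abs(V_du - (-1j * g * t * L1)))
print(err_uu, err_du)        # both negligibly small for g*t = 1e-3
\end{verbatim}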
\noindent (d) Chruscinski and Kossakowski~\cite{CK2} presented an interesting model to elucidate non-Markovian quantum dynamics, described either by a non-local master equation or by a time-local formulation. Here, the master equation governing the dynamics of a single-qubit system, \begin{equation} \label{master} \frac{d\rho}{dt}=\int_{t_0}^{t}{\cal K}(t-u)\rho(u)du, \end{equation} consists of a {\it time-independent} memory kernel ${\cal K}(t)= \frac{1}{2}{\cal L}_0$, where ${\cal L}_0$ is a pure dephasing generator, \begin{equation} {\cal L}_0\rho=\sigma_z\rho\sigma_z-\rho. \end{equation} (Here $\sigma_z$ denotes the $z$ component of the Pauli spin operator of the qubit.)
In an equivalent approach, the completely positive, trace preserving map $\Lambda(t,t_0)$ characterizing the dynamics $\rho(t)=\Lambda(t,t_0)\rho(t_0)$ satisfies a local in time equation, \begin{equation} \label{local} \frac{d\Lambda(t,t_0)}{dt}={\cal L}(t-t_0)\Lambda(t,t_0), \end{equation} in terms of a highly singular generator \begin{equation} {\cal L}(t-t_0)=\tanh(t-t_0)\, {\cal L}_0. \end{equation} Despite the fact that the local in time dynamics involves a singular generator, the dynamical map has a regular solution given by~\cite{CK2}, \begin{equation} \Lambda(t,t_0)=\frac{1}{2}[1+\cos(t-t_0)]\, I + \frac{1}{2}[1-\cos(t-t_0)]\, ({\cal L}_0+I) \end{equation} and the evolved qubit density matrix is therefore obtained as, \begin{equation} \rho(t)=\Lambda(t,0)\rho(0)=\left(\begin{array}{cc} \rho_{11}(0) & \rho_{12}(0)\cos t \\ \rho_{12}^*(0)\cos t & \rho_{22}(0) \end{array} \right), \end{equation} exhibiting oscillations in qubit coherence.
The above dynamics may also be characterized in terms of a two element Kraus operator set \begin{eqnarray} K_0(t)=\cos(t/2)\, I= \left(\begin{array}{cc} \cos(t/2) & 0 \\ 0 & \cos(t/2) \end{array} \right)\nonumber \\ K_1(t)=\sin(t/2)\, \sigma_z= \left(\begin{array}{cc} \sin(t/2) & 0 \\ 0 & -\sin(t/2) \end{array} \right) \end{eqnarray} leading to the dynamical evolution $\rho(t)=\sum_{i=0,1}K_i(t)\rho(0)K_i^\dag(t)$. Evidently, the small time form of the Kraus operators ($K_0(t)\approx I(1-t^2/8)$ and $K_1(t)\approx \frac{t}{2}\, \sigma_z$) leads to a master equation of the LGKS form -- with a linear time pre-factor.
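As a simple consistency check (ours; the initial state below is an arbitrary illustrative choice), the two-element Kraus set indeed reproduces the closed-form solution with coherences oscillating as $\cos t$:
\begin{verbatim}
import numpy as np

sz = np.diag([1.0, -1.0])
rho0 = np.array([[0.6, 0.3 - 0.1j],
                 [0.3 + 0.1j, 0.4]])     # arbitrary illustrative qubit state

def rho_kraus(t):
    K0 = np.cos(t / 2) * np.eye(2)
    K1 = np.sin(t / 2) * sz
    return K0 @ rho0 @ K0.conj().T + K1 @ rho0 @ K1.conj().T

def rho_closed(t):
    out = rho0.copy()
    out[0, 1] *= np.cos(t)               # coherences oscillate as cos(t)
    out[1, 0] *= np.cos(t)
    return out

for t in np.linspace(0.0, 10.0, 25):
    assert np.allclose(rho_kraus(t), rho_closed(t))
\end{verbatim}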
\begin{figure}
\caption{(Color online). Fidelity difference $G(t, \tau)$ corresponding to the dynamical state (\ref{qu}), as a function of (dimensionless) time $t$; here, $\tau=\pi/6$. Negative values of $G(t,\tau)$ point towards non-Markovian behavior. All quantities are dimensionless.}
\end{figure}
Further, considering the initial qubit state to be a pure state with $\rho_{11}(0)=\rho_{22}(0)=\rho_{12}(0)=\frac{1}{2}$, we obtain the evolved state as \begin{equation} \label{qu} \rho(t)=\frac{1}{2}\left(\begin{array}{cc} 1 & \cos(t) \\ \cos(t) & 1 \end{array} \right). \end{equation} The fidelity $F[\rho(t),\rho(t+\tau)]$ then reads \begin{eqnarray} F[\rho(t),\rho(t+\tau)]&=&\frac{1}{2}\left(1+\cos t \cos(t+\tau)\right. \nonumber \\
&& \ \ \left. +\vert \sin t \sin(t+\tau) \vert \right). \end{eqnarray} We have plotted the fidelity difference $G(t,\tau)$ in Fig.~5. The negative values assumed by the fidelity difference $G(t,\tau)$ (see Fig.~5) point towards the violation of the inequality (\ref{inequality}) -- which highlights the non-Markovian incarnation in this model.
\section{Conclusions} From the Kraus representation of the dynamical evolution, and using fidelity as a measure of the persistence of the initial state within the time-evolved state, we have elucidated the manifestation of Markovian or non-Markovian behavior. We have also proposed the fidelity difference as a tool to capture the essence of non-Markovianity. With the help of some examples, we have explored the small time behavior of the dynamical Kraus form of quantum dynamics, which covers both Markovian and non-Markovian processes (in the conventional sense) depending on the form of interaction of the system with its environment as well as on its initial state. We have shown that in the density matrix evolution governed by non-Markovian dynamics, the fidelity difference fluctuates between positive and negative values -- a clear signature of non-Markovianity. Moreover, the memory of the initial state carried by the dynamically evolving state -- characterized in terms of the fidelity -- is shown to be larger in the non-Markovian limit than in the Markovian case. These two features, viz., the small time behavior of the Kraus operators and the fidelity, together confirm the Markovian or non-Markovian behavior in a consistent way.
\end{document}
\begin{document}
\begin{Titul} {\large \bf DUNKL HARMONIC ANALYSIS\\ AND FUNDAMENTAL SETS OF FUNCTIONS\\[0.2em] ON THE UNIT SPHERE }\\[3ex] {{\bf Roman~A.~Veprintsev} \\[5ex]} \end{Titul}
\begin{Anot} {\bf Abstract.} Using Dunkl theory, we introduce some weighted $L_p$-spaces on $[-1,1]$ and on the unit Euclidean sphere $\mathbb{S}^{d-1}$, $d\geq 2$. Then we define a family of bounded linear operators $\{V_\kappa^p(x)\colon x\in\mathbb{S}^{d-1}\}$ acting from the $L_p$-space on $[-1,1]$ to the $L_p$-space on $\mathbb{S}^{d-1}$, $1\leq p<\infty$. We establish a necessary and sufficient condition on a function $g$ in the $L_p$-space on $[-1,1]$ for the family of functions $\{V_\kappa^p(x;g)\colon x\in\mathbb{S}^{d-1}\}$ to be fundamental in the $L_p$-space on $\mathbb{S}^{d-1}$.
{\bf Key words and phrases:} fundamental set, unit sphere, Dunkl theory, Dunkl intertwining operator, Funk\,--\,Hecke formula for $\kappa$-spherical harmonics
{\bf MSC 2010:} 42B35, 42C05, 42C10 \end{Anot}
\section{Introduction and preliminaries}
In this section we introduce some basic definitions and notions of general Dunkl theory (see, e.g., \cite{dai_xu_book_approximation_2013,dunkl_article_reflection_1988,dunkl_article_operators_1989,dunkl_article_integral_kernels_1991,dunkl_xu_book_orthogonal_polynomials_2014}); for a background on reflection groups and root systems the reader is referred to \cite{humphreys_book_reflection_groups_1990,dunkl_xu_book_orthogonal_polynomials_2014}.
Let $\mathbb{N}_0$ be the set of nonnegative integers, let $\mathbb{R}^d$ be the $d$-dimensional real Euclidean space of all $d$-tuples of real numbers. For $x\in\mathbb{R}^d$, we write $x=(x_1,\dots,x_d)$. The inner product of $x,\,y\in\mathbb{R}^d$ is denoted by $\langle x,y\rangle=\sum\limits_{i=1}^d x_iy_i$, and the norm of $x$ is denoted by $\|x\|=\sqrt{\langle x,x\rangle}$. Let $\mathbb{S}^{d-1}=\{x\colon\, \|x\|=1\}$ be the unit sphere in $\mathbb{R}^d$, $d\geq 2$. Denote by $d\omega$ the usual Lebesgue measure on $\mathbb{S}^{d-1}$.
For a nonzero vector $v\in\mathbb{R}^d$, define the reflection $s_v$ by \begin{equation*}
s_v(x)=x-2\frac{\langle x,v\rangle}{\|v\|^2}\,v,\quad x\in\mathbb{R}^d. \end{equation*} Each reflection $s_v$ is contained in the orthogonal group $O(\mathbb{R}^d)$.
Recall that a finite set $R\subset\mathbb{R}^d\setminus\{0\}$ is called a root system if the following conditions are satisfied:
(1) $R\cap\mathbb{R}v=\{\pm v\}$\, for all $v\in R$;\qquad (2) $s_v(R)=R$\, for all $v\in R$.
\noindent The subgroup $G=G(R)\subset O(\mathbb{R}^d)$ which is generated by the reflections $\{s_v\colon\, v\in R\}$ is called the reflection group associated with $R$. It is known that the reflection group $G$ is finite and the set of reflections contained in $G$ is exactly $\{s_v\colon\, v\in R\}$.
Each root system $R$ can be written as a disjoint union $R=R_+\cup (-R_+)$, where $R_+$ and $-R_+$ are separated by a hyperplane through the origin. Such a set $R_+$ is called a positive subsystem. Its choice is not unique.
A nonnegative function $\kappa$ on a root system $R$ is called a multiplicity function if it is $G$-invariant, i.e. $\kappa(v)=\kappa(g(v))$ for all $v\in R$, $g\in G$.
Note that definitions given below do not depend on the special choice of $R_+$, thanks to the $G$-invariance of $\kappa$.
The Dunkl operators are defined by \begin{equation*} \mathcal{D}_i f(x)=\frac{\partial f(x)}{\partial x_i}+\sum\limits_{v\in R_+} \kappa(v)\frac{f(x)-f(s_v(x))}{\langle v,x\rangle}\, v_i,\quad 1\le i\le d. \end{equation*} In case $\kappa=0$, the Dunkl operators reduce to the corresponding partial derivatives. These operators were introduced and first studied by C.\,F.~Dunkl.
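For the reader's convenience we include a tiny symbolic illustration (ours, not part of the standard exposition) of the difference-differential structure of these operators in the simplest rank-one situation, the root system $R=\{\pm 1\}$ on the real line with reflection group $\mathbb{Z}_2$, where the single Dunkl operator reads $\mathcal{D}f(x)=f'(x)+\kappa\,\bigl(f(x)-f(-x)\bigr)/x$; the higher-rank operators of the present paper act analogously coordinate by coordinate.
\begin{verbatim}
import sympy as sp

x, kappa = sp.symbols('x kappa')

def dunkl(f):
    # rank-one Dunkl operator attached to the root system {+1, -1}
    return sp.simplify(sp.diff(f, x) + kappa * (f - f.subs(x, -x)) / x)

print(dunkl(x**4))   # 4*x**3: even degrees feel only the derivative part
print(dunkl(x**5))   # x**4*(2*kappa + 5): odd degrees pick up the 2*kappa term
\end{verbatim}
In particular, the operator lowers the polynomial degree by one, mirroring the behaviour of the partial derivatives recovered at $\kappa=0$.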
Let \begin{equation*}\label{special_indeces} \lambda_\kappa=\gamma_\kappa+\frac{d-2}{2},\qquad\gamma_\kappa=\sum\limits_{v\in R_+} \kappa(v), \end{equation*} let $w_\kappa$ denote the weight function on $\mathbb{S}^{d-1}$ defined by \begin{equation*}\label{Dunkl_weight_function}
w_\kappa(x)=\prod\limits_{v\in R_+} |\langle v,x\rangle|^{2\kappa(v)},\quad x\in\mathbb{S}^{d-1}. \end{equation*} The weight function $w_\kappa$ is a positively homogeneous $G$-invariant function of degree $2\gamma_\kappa$. In case $\kappa=0$, $w_\kappa$ is identically equal to $1$.
Suppose $\Pi^d$ is the space of all polynomials in $d$ variables with complex coefficients, $\mathcal{P}_n^d$ is the subspace of homogeneous polynomials of degree $n\in\mathbb{N}_0$ in $d$ variables.
C.\,F.~Dunkl has proved in \cite{dunkl_article_integral_kernels_1991} that there exists a unique linear isomorphism $V_\kappa$ of $\Pi^d$ such that \begin{equation*} V_\kappa(\mathcal{P}_n^d)=\mathcal{P}_n^d,\,\,\, n\in\mathbb{N}_0,\qquad V_\kappa 1=1,\qquad \mathcal{D}_i V_\kappa=V_\kappa\frac{\partial}{\partial x_i},\,\,\, 1\leq i\leq d. \end{equation*} This operator is called the Dunkl intertwining operator. The operator $V_\kappa$ was studied by many mathematicians (for example, C.\,F.~Dunkl, M.~R\"{o}sler, K.~Trim\`{e}che, Y.~Xu). If $\kappa=0$, then $V_\kappa$ is the identity operator.
Throughout this paper, we assume that $p\in[1,\infty)$ and $\lambda_\kappa>0$. In particular, it follows that $\gamma_\kappa>0$ if $d=2$.
To explain our main result of the present paper, we need to introduce some weighted $L_p$-spaces and one family of linear operators.
Denote by $L_{\kappa,p}(\mathbb{S}^{d-1})$ the space of complex-valued Lebesgue measurable functions $f$ on $\mathbb{S}^{d-1}$ with finite norm \begin{equation*}
\|f\|_{\kappa,p,\mathbb{S}^{d-1}}=\Bigl(\int\nolimits_{\mathbb{S}^{d-1}} |f(x)|^p \,d\sigma_\kappa(x)\Bigr)^{1/p},\qquad d\sigma_\kappa(x)=a_\kappa\,w_\kappa(x)\,d\omega(x), \end{equation*} where the normalizing constant $a_\kappa$ satisfies $a_\kappa\int\nolimits_{\mathbb{S}^{d-1}} w_\kappa\,d\omega=1$. The space $L_{\kappa,2}(\mathbb{S}^{d-1})$ is a complex Hilbert space with the inner product \begin{equation*}\label{inner_product_for_space_on_sphere} \langle f,h\rangle_{\kappa,\mathbb{S}^{d-1}}=\int\nolimits_{\mathbb{S}^{d-1}} f(x)\overline{h(x)} \,d\sigma_\kappa(x). \end{equation*}
We also introduce the space $L_{\kappa,\infty}(\mathbb{S}^{d-1})$ composed of all complex-valued Lebesgue measurable functions defined on $\mathbb{S}^{d-1}$ which are $\sigma_\kappa$-measurable and $\sigma_\kappa$-essentially bounded. Because $w_\kappa$ is $\omega$-a.e. nonzero on $\mathbb{S}^{d-1}$, the above notions coincide with those of $w_\kappa$-measurable and $w_\kappa$-essentially bounded functions, respectively.
Let $\lambda>0$. Suppose $L_{p,\lambda}[-1,1]$ is the space of complex-valued Lebesgue measurable functions $g$ on the segment $[-1,1]$ with finite norm \begin{equation*}
\|g\|_{\lambda,p,[-1,1]}=\Bigl(c_\lambda\int\nolimits_{-1}^1 |g(t)|^p\, (1-t^2)^{\lambda-1/2}\,dt\Bigr)^{1/p},\qquad c_\lambda=\Bigl(\int\nolimits_{-1}^1 (1-t^2)^{\lambda-1/2}\,dt\Bigr)^{-1}. \end{equation*}
The Gegenbauer polynomials $C_n^\lambda$ (see, e.g., \cite[p.~302]{andrews_askey_roy_book_special_functions_1999}) are orthogonal with respect to the weight function $(1-t^2)^{\lambda-1/2}$. For a function $g\in L_{p,\lambda}[-1,1]$, its Gegenbauer expansion takes the form \begin{equation}\label{Gegenbauer_expansion} g(t)\sim \sum\limits_{n=0}^\infty \Lambda_{n,\lambda}(g) \frac{n+\lambda}{\lambda} C_n^\lambda(t)\quad\text{with}\quad \Lambda_{n,\lambda}(g)=\frac{c_\lambda}{C_n^\lambda(1)}\int\nolimits_{-1}^1 g(t) C_n^\lambda(t) (1-t^2)^{\lambda-1/2}\,dt, \end{equation}
since $\|C_n^\lambda\|_{\lambda,2,[-1,1]}^2=C_n^\lambda(1)\lambda/(n+\lambda)$.
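The coefficients $\Lambda_{n,\lambda}(g)$ are easy to evaluate numerically. The following short sketch (ours, with the purely illustrative choices $\lambda=3/2$ and $g(t)=e^t$) computes them by quadrature directly from \eqref{Gegenbauer_expansion}; for this particular $g$ all computed coefficients are nonzero, which is precisely the condition entering the fundamentality criterion established below.
\begin{verbatim}
import numpy as np
from scipy.integrate import quad
from scipy.special import eval_gegenbauer

lam = 1.5                                  # illustrative value of lambda
g = np.exp                                 # illustrative function g(t) = e^t

c_lam = 1.0 / quad(lambda t: (1 - t**2) ** (lam - 0.5), -1, 1)[0]

def Lambda(n):
    # Lambda_{n,lambda}(g) as defined in the Gegenbauer expansion
    integrand = lambda t: (g(t) * eval_gegenbauer(n, lam, t)
                           * (1 - t**2) ** (lam - 0.5))
    return c_lam / eval_gegenbauer(n, lam, 1.0) * quad(integrand, -1, 1)[0]

print([round(Lambda(n), 6) for n in range(6)])   # all nonzero in this example
\end{verbatim}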
Theorem~13.17 in \cite{hewitt_stromberg_book_real_analysis_1965} says that $L_{\kappa,q}(\mathbb{S}^{d-1})\subset L_{\kappa,p}(\mathbb{S}^{d-1})$ and $L_{q,\lambda}[-1,1]\subset L_{p,\lambda}[-1,1]$ for $1\leq p<q<\infty$.
Let us now consider the operators \begin{equation*} V_\kappa^p(x)\colon L_{p,\lambda_\kappa}[-1,1]\to L_{\kappa,p}(\mathbb{S}^{d-1}) \end{equation*} depending on $x\in\mathbb{S}^{d-1}$ which are defined by the following rule: \begin{equation*} V_\kappa^p(x;g,y)=V_\kappa\bigl[g(\langle x,\cdot\rangle)\bigr](y),\quad y\in\mathbb{S}^{d-1}\quad \bigl(g\in L_{p,\lambda_\kappa}[-1,1]\bigr). \end{equation*} They are linear bounded operators with operator norm $1$. It follows from the properties of $V_\kappa$ known from the general Dunkl theory (see especially the proof of Theorem~7.4.2 in \cite{dai_xu_book_approximation_2013}) that these operators are well defined.
The main result of this paper is a generalization of Theorem~2.4 in \cite{menegatto_article_fundamental_sets_1998}. More precisely, we establish a necessary and sufficient condition for a function $g\in L_{p,\lambda_\kappa}[-1,1]$ such that the family of functions \begin{equation}\label{fundamental_set}\mathcal{M}_\kappa^p(g)=\{V_\kappa^p(x;g)\colon x\in\mathbb{S}^{d-1}\}\end{equation} is fundamental in the space $L_{\kappa,p}(\mathbb{S}^{d-1})$. This fact is formulated and proved in section~\ref{formulation_and_proof_of_the_main_result}.
Recall that a set $\mathcal{F}$ in a Banach space $\mathcal{E}$ is said to be fundamental if the linear span of $\mathcal{F}$ is dense in $\mathcal{E}$. To prove the main result, we use a consequence of the Hahn--Banach theorem related to fundamentality of sets in normed linear spaces. We include it as a separate lemma for convenience.
\begin{lemen}\label{consequence_of_the_Hahn--Banach_theorem} Let $\mathcal{F}$ be a subset of a Banach space $\mathcal{E}$. In order that $\mathcal{F}$ be fundamental in $\mathcal{E}$, it is necessary and sufficient that $\mathcal{F}$ not be annihilated by a nonzero bounded linear functional on $\mathcal{E}$. \end{lemen}
\section{Some facts of Dunkl harmonic analysis on the unit sphere}
The Dunkl Laplacian $\Delta_\kappa$ is defined by \begin{equation*} \Delta_\kappa=\sum\limits_{i=1}^d\mathcal{D}_i^2 \end{equation*} and it plays the role similar to that of the ordinary Laplacian. It reduces to the ordinary Laplacian provided that $\kappa=0$.
A $\kappa$-harmonic polynomial $P$ of degree $n\in\mathbb{N}_0$ in $d$ variables is a homogeneous polynomial $P\in\mathcal{P}_n^d$ such that $\Delta_\kappa P=0$. Its restriction to the unit sphere is called the $\kappa$-spherical harmonic of degree $n$ in $d$ variables. Denote by $\mathcal{A}_n^d(\kappa)$ the space of $\kappa$-spherical harmonics of degree $n$ in $d$ variables. The $\kappa$-spherical harmonics of different degrees turn out to be orthogonal with respect to the weighted inner product $\langle\cdot,\cdot\rangle_{\kappa,\mathbb{S}^{d-1}}$ \cite[Theorem~1.6]{dunkl_article_reflection_1988}.
Let $C(\mathbb{S}^{d-1})$ be the space of complex-valued continuous functions on $\mathbb{S}^{d-1}$.
\begin{lemen}\label{property_of_fundamentality} The set $\bigcup\limits_{n=0}^\infty\mathcal{A}_n^d(\kappa)$ is fundamental in $C(\mathbb{S}^{d-1})$ and in $L_{\kappa,p}(\mathbb{S}^{d-1})$, $1\leq p<\infty$. \end{lemen}
\proofen Theorem~3.14 in \cite{rudin_book_analysis_1987} states that the space $C(\mathbb{S}^{d-1})$ is dense in $L_{\kappa,p}(\mathbb{S}^{d-1})$ for $1\leq p<\infty$. So it is sufficient to show that $\bigcup\limits_{n=0}^\infty\mathcal{A}_n^d(\kappa)$ is fundamental in $C(\mathbb{S}^{d-1})$.
By the Weierstrass approximation theorem, if $f$ is continuous on $\mathbb{S}^{d-1}$, then it can be uniformly approximated by polynomials restricted to $\mathbb{S}^{d-1}$. According to \cite[Theorem~1.7]{dunkl_article_reflection_1988}, these restrictions belong to the linear span of $\bigcup\limits_{n=0}^\infty\mathcal{A}_n^d(\kappa)$. Thus, $\bigcup\limits_{n=0}^\infty\mathcal{A}_n^d(\kappa)$ is fundamental in $C(\mathbb{S}^{d-1})$.
$\square$
The above proof is analogous to that of Corollary~2.3 in \cite{stein_weiss_book_Fourier_analysis_1971}.
\begin{lemen} Let $g\in L_{p,\lambda_\kappa}[-1,1]$, $1\leq p<\infty$. Then for every $Y_n^\kappa\in\mathcal{A}_n^d(\kappa)$, \begin{equation}\label{Funk-Hecke_formula} \int\nolimits_{\mathbb{S}^{d-1}} V_\kappa^p(x;g,y)\,Y_n^\kappa(y) \,d\sigma_\kappa(y)=\Lambda_{n,\lambda_\kappa}(g)\,Y_n^\kappa(x),\quad x\in\mathbb{S}^{d-1}, \end{equation} where the constant $\Lambda_{n,\lambda_\kappa}(g)$ is defined from \eqref{Gegenbauer_expansion}. \end{lemen}
Equality~\eqref{Funk-Hecke_formula} is the Funk\,--\,Hecke formula for $\kappa$-spherical harmonics written in our setting and designations (cf. \cite[Theorem~7.2.7]{dai_xu_book_approximation_2013}, \cite[Theorem~2.1]{xu_article_Funk--Hecke_formula_2000}).
\section{Main result: proof and its consequence}\label{formulation_and_proof_of_the_main_result}
We can now state and prove the main theorem.
\begin{teoen}\label{main_result} Let $d\geq 2$, $1\leq p<\infty$. Fix a root system $R$ in $\mathbb{R}^d$ and a multiplicity function $\kappa$ on $R$. Let $g\in L_{p,\lambda_\kappa}[-1,1]$. In order that the set $\mathcal{M}_\kappa^p(g)$ \eqref{fundamental_set} be fundamental in $L_{\kappa,p}(\mathbb{S}^{d-1})$, it is necessary and sufficient that $\Lambda_{n,\lambda_\kappa}(g)\not=0$ \eqref{Gegenbauer_expansion} for every $n\in\mathbb{N}_0$. \end{teoen}
\proofen We first prove that the condition is sufficient. Let $\Phi$ be a bounded linear functional on $L_{\kappa,p}(\mathbb{S}^{d-1})$ which annihilates $\mathcal{M}_\kappa^p(g)$. According to the Riesz representation theorem \cite[Theorem~6.16]{rudin_book_analysis_1987}, $\Phi$ can be written as follows: $\Phi(\,\cdot\,)=\langle \,\cdot\,,h\rangle_{\kappa,\mathbb{S}^{d-1}}$, where $h\hm\in L_{\kappa,q}(\mathbb{S}^{d-1})$ and $q$ is the exponent conjugate to $p$ ($p^{-1}+q^{-1}=1$; $q=\infty$ whenever $p=1$). Then the annihilating property of $\Phi$ reduces to \begin{equation*} \int\nolimits_{\mathbb{S}^{d-1}} V_\kappa^p(x;g,y)\, \overline{h(y)} \, d\sigma_\kappa(y)=0,\quad x\in\mathbb{S}^{d-1}. \end{equation*} Next, we multiply both sides of the previous equality by $Y_n^\kappa(x)\in\mathcal{A}_n^d(\kappa)$, $n\in\mathbb{N}_0$, and integrate the resulting expression with respect to the measure $d\sigma_\kappa$. H\"{o}lder's inequality implies that $V_\kappa^p(x;g,y) \, \overline{h(y)} \, Y_n^\kappa(x)$ is $\sigma_\kappa\times\sigma_\kappa$-integrable over $\mathbb{S}^{d-1}\times\mathbb{S}^{d-1}$, and hence, using the Fubini theorem to interchange the order of integration, we get \begin{equation*} \int\nolimits_{\mathbb{S}^{d-1}} \overline{h(y)}\, \biggl(\int\nolimits_{\mathbb{S}^{d-1}} V_\kappa^p(x;g,y)\,Y_n^\kappa(x) \,d\sigma_\kappa(x)\biggr) \,d\sigma_\kappa(y)=0. \end{equation*} Using the symmetric relation \cite[formula~(7)]{li_song_article_inversion_formulas_2009} \begin{equation*} V_\kappa\bigl[g(\langle x,\cdot\rangle)\bigr](y)=V_\kappa\bigl[g(\langle y,\cdot \rangle)\bigr](x) \quad\text{$\sigma_\kappa\times\sigma_\kappa$-a.e. on\, $\mathbb{S}^{d-1}\!\times\!\mathbb{S}^{d-1}$} \end{equation*} and the Funk\,--\,Hecke formula~\eqref{Funk-Hecke_formula}, we obtain \begin{equation*} \Lambda_{n,\lambda_\kappa}(g)\,\langle Y_n^\kappa,h\rangle_{\kappa,\mathbb{S}^{d-1}}=0,\qquad Y_n^\kappa\in\mathcal{A}_n^d(\kappa),\quad n\in\mathbb{N}_0. \end{equation*} It follows from the condition that \begin{equation*} \langle Y_n^\kappa,h\rangle_{\kappa,\mathbb{S}^{d-1}}=0,\qquad Y_n^\kappa\in\mathcal{A}_n^d(\kappa),\quad n\in\mathbb{N}_0. \end{equation*} Thus, $\Phi$ annihilates $\bigcup\limits_{n=0}^\infty \mathcal{A}_n^d(\kappa)$. By continuity of $\Phi$ and Lemma~\ref{property_of_fundamentality}, $\Phi=0$ on $L_{\kappa,p}(\mathbb{S}^{d-1})$. Therefore, the set $\mathcal{M}_\kappa^p(g)$ is fundamental in $L_{\kappa,p}(\mathbb{S}^{d-1})$ by Lemma~\ref{consequence_of_the_Hahn--Banach_theorem}.
Let us now prove that the condition described in the theorem is necessary. Assume, to reach a contradiction, that there exists an index $m\in\mathbb{N}_0$ such that $\Lambda_{m,\lambda_\kappa}(g)=0$. Select any nontrivial $\kappa$-spherical harmonic $Y_m^\kappa\in\mathcal{A}_m^d(\kappa)$ and consider a measure $\mu$ defined on the Lebesgue subsets $\mathcal{L}$ of $\mathbb{S}^{d-1}$ by the rule \begin{equation*} \mu(B)=\int\nolimits_{B} Y_m^\kappa(x)\,d\sigma_\kappa(x),\quad B\in\mathcal{L}. \end{equation*} This measure is nontrivial by its definition.
Using the Funk\,--\,Hecke formula~\eqref{Funk-Hecke_formula}, we obtain \begin{equation*} \begin{split} \int\nolimits_{\mathbb{S}^{d-1}} V_\kappa^p(x;g,y)\,d\mu(y)&=\int\nolimits_{\mathbb{S}^{d-1}} V_\kappa^p(x;g,y) \, Y_m^\kappa(y)\,d\sigma_\kappa(y)\\ &=\Lambda_{m,\lambda_\kappa}(g)\,Y_m^\kappa(x)=0,\quad x\in\mathbb{S}^{d-1}. \end{split} \end{equation*} Thus, the nontrivial bounded linear functional $\Phi_1$ on $L_{\kappa,p}(\mathbb{S}^{d-1})$ given by $\Phi_1(f)=\int\nolimits_{\mathbb{S}^{d-1}} f\,d\mu$ annihilates $\mathcal{M}_\kappa^p(g)$. By Lemma~\ref{consequence_of_the_Hahn--Banach_theorem}, $\mathcal{M}_\kappa^p(g)$ is not fundamental in $L_{\kappa,p}(\mathbb{S}^{d-1})$. This contradicts the assumption that $\mathcal{M}_\kappa^p(g)$ is fundamental.
$\square$
The above proof is exactly like that of~Theorem~2.4 in \cite{menegatto_article_fundamental_sets_1998}. Using the scheme of the proof of the theorem, one can prove the following result.
\begin{coren}
Let $d\geq 2$, $s\geq 1$, $1\leq p<\infty$. Fix a root system $R$ in $\mathbb{R}^d$ and a multiplicity function $\kappa$ on $R$. Let $g_1,\dots,g_s\in L_{p,\lambda_\kappa}[-1,1]$. In order that the set $\bigcup\limits_{i=1}^s\mathcal{M}_\kappa^p(g_i)$ be fundamental in $L_{\kappa,p}(\mathbb{S}^{d-1})$, it is necessary and sufficient that $\sum\limits_{i=1}^s|\Lambda_{n,\lambda_\kappa}(g_i)|\not=0$ for every $n\in\mathbb{N}_0$. \end{coren}
\begin{Biblioen} \bibitem{andrews_askey_roy_book_special_functions_1999}Andrews, G.E., Askey, R. and Roy, R., {\em Special functions}. Cambridge University Press, 1999.
\bibitem{dai_xu_book_approximation_2013}Dai, F. and Xu, Y., {\em Approximation theory and harmonic analysis on spheres and balls}. Springer, Berlin--New York, 2013.
\bibitem{dunkl_article_reflection_1988}Dunkl, C.F., Reflection groups and orthogonal polynomials on the sphere. {\em Math. Z.} 197: 33--60, 1988.
\bibitem{dunkl_article_operators_1989}Dunkl, C.F., Differential-difference operators associated to reflection groups. {\em Trans. Amer. Math. Soc.} 311(1): 167--183, 1989.
\bibitem{dunkl_article_integral_kernels_1991}Dunkl, C.F., Integral kernels with reflection group invariance. {\em Can. J. Math.} 43(6): 1213--1227, 1991.
\bibitem{dunkl_xu_book_orthogonal_polynomials_2014}Dunkl, C.F. and Xu, Y., {\em Orthogonal polynomials of several variables}. 2nd ed., Cambridge University Press, 2014.
\bibitem{hewitt_stromberg_book_real_analysis_1965}Hewitt, E. and Stromberg, K., {\em Real and abstract analysis}. Springer-Verlag, New York, 1965.
\bibitem{humphreys_book_reflection_groups_1990}Humphreys, J.E., {\em Reflection groups and Coxeter groups}. Cambridge University Press, 1990.
\bibitem{li_song_article_inversion_formulas_2009}Li, Zh., Song, F., Inversion formulas for the spherical Radon--Dunkl transform. {\em SIGMA.} 5: 025, 15 pages, 2009.
\bibitem{menegatto_article_fundamental_sets_1998}Menegatto, V.A., Fundamental sets of functions on spheres. {\em Methods Appl. Anal.} 5(4): 387--398, 1998.
\bibitem{rudin_book_analysis_1987}Rudin, W., {\em Real and complex analysis}. 3rd ed., McGraw-Hill, New York, 1987.
\bibitem{stein_weiss_book_Fourier_analysis_1971}Stein, E.M. and Weiss, G., {\em Introduction to Fourier analysis on Euclidean spaces}. Princeton University Press, Princeton, 1971.
\bibitem{xu_article_Funk--Hecke_formula_2000}Xu, Y., Funk\,--\,Hecke formula for orthogonal polynomials on spheres and on balls. {\em Bull. London Math. Soc.} 32: 447--457, 2000. \end{Biblioen}
\noindent \textsc{Independent researcher, Uzlovaya, Russia}
\noindent \textit{E-mail address}: \textbf{[email protected]}
\end{document}
\begin{document}
\title[Information geometric approach to mixed state quantum estimation]{Information geometric approach to mixed state quantum estimation}
\author{Gabriel F. Magno$^1$, Carlos H. Grossi$^2$, Gerardo Adesso$^3$ and Diogo O. Soares-Pinto$^1$} \address{$^1$ Instituto de F\'{i}sica de S\~{a}o Carlos, Universidade de S\~{a}o Paulo, CP 369, 13560-970, S\~{a}o Carlos, SP, Brazil} \address{$^2$ Departamento de Matem\'{a}tica, ICMC, Universidade de S\~{a}o Paulo, Caixa Postal 668, 13560-970, S\~{a}o Carlos, SP, Brazil} \address{$^3$ School of Mathematical Sciences and Centre for the Mathematics and Theoretical Physics of Quantum Non-Equilibrium Systems, University of Nottingham, University Park, Nottingham NG7 2RD, United Kingdom} \ead{[email protected], [email protected], [email protected], [email protected]}
\begin{indented} \item[] \end{indented}
\begin{abstract} Information geometry promotes an investigation of the geometric structure of statistical manifolds, providing a series of elucidations in various areas of scientific knowledge. In the physical sciences, especially in quantum theory, this geometric method has a remarkable parallel with the distinguishability of states, a capability of great value for determining the effectiveness of implementations of physical processes. This gives us the context for this work. Here we approach a problem of uniparametric statistical inference from an information-geometric perspective. We obtain the generalized Bhattacharyya higher-order corrections for the Cram\'{e}r-Rao bound, where the statistical model is given by a mixed quantum state. Using an unbiased estimator $T$, canonically conjugated to the Hamiltonian $H$ that generates the dynamics, we find corrections that are independent of the specific choice of estimator. This procedure is performed using information-geometric techniques, establishing connections with the corrections for the pure state case. \end{abstract}
\section{Introduction} \label{sec:introducao}
Quantum technologies utilise quantum states as the fundamental agents responsible for information processing. Knowing the quantum operations that can act on these states, their proper control allows an optimization in coding/decoding, manipulation and transmission of the information content on the state of the system. The access to this encoded information, after the operation is applied, demands some way to distinguish the initial and final states of the system \cite{ikemike}. Therefore, distinguishing states in quantum models is a typical task at the core of information theory. Several measures of distance in state spaces are known in the literature that are used as quantifiers of distinguishability, for example, the trace distance, Bures distance, Hellinger distance, Hilbert-Schmidt distance, relative entropies, among others \cite{fuchs96, watrous2018}. As the concept of distance is closely linked to some type of geometric structure of the space involved, there is a natural connection between methods of geometry and information theory.
In this geometric formulation of information theory, the quantum state space -- a set of density operators that act on the Hilbert space of a quantum system -- is treated as a differentiable manifold equipped with metric tensors that will define the notion of distance between the elements of the manifold. Such an approach allows the development of interesting physical results exploring the metric properties of space \cite{braunstein-caves, wootters81, amari1993infogeo, anderssonheydari1, anderssonheydari2, heydari}. In this way, it is natural to interpret the distinguishability measures as metrics defined in quantum statistical manifolds. This culminates in {\em information geometry}, an area that applies differential geometry methods to the solution and formalization of information science problems, obtaining robust and elegant results with broad applicability \cite{petz1996, gibiliscoisola, petzhasegawa, petz2002, hiaipetz, anderssonheydari3, jarzyna, benyosborne}. Such a framework was used successfully in problems of statistical inference, and it is specifically the one-parameter instance of these problems that will be the focus of our discussion \cite{brody1996prl, brody1996royalsoc}.
The uniparametric quantum statistical inference problem can be described as follows \cite{Helstrom, paris-ijqi, GLM}: Consider a physical system characterized by the pure quantum state $\rho(t)$, which depends on an unknown parameter $t$. We want to estimate the value of this parameter using an unbiased estimator $T$, i.e., $\mbox{E}_{\rho}[T] = t$. Thus, we can ask: how accurate is this estimation procedure? To answer this question, note that the variance of the estimator can be interpreted as the error associated with the estimation \cite{Helstrom, paris-ijqi, GLM}. Therefore, a good estimation should have a small error, which means that the variance of the estimator should be as small as possible. In the literature, a lower bound for the variance of the estimator is established, known as the Cram\'{e}r-Rao inequality \cite{Helstrom, paris-ijqi, GLM}, \begin{equation} \label{desigualdade_cr} \mbox{Var}_{\rho}[T]\ge \frac{1}{\mathcal{G}}, \end{equation} where $\mbox{Var}_{\rho}[T]\equiv\Delta T^2$ is the variance of the estimator $T$ and $\mathcal{G}$ is the quantum Fisher information associated with the parameter of interest. From this relation we can see that the more information the state provides about the parameter, the smaller the achievable $\mbox{Var}_{\rho}[T]$ and the better the estimation \cite{Helstrom, paris-ijqi, GLM, fabricio}.
In this work, we extend the analysis of the Cram\'{e}r-Rao bound by obtaining its higher-order corrections using the geometry of the quantum state space for {\it mixed} states instead of {\it pure} states. We obtain the following generalized form of the bound in the mixed quantum state scenario: \begin{prop} Let $\rho(t)$ be a one-parameter mixed state under a von Neumann dynamics generated by the Hamiltonian $H$, canonically conjugated to the unbiased estimator $T$ of the parameter $t$. The generalized Cram\'{e}r-Rao bound is then given by \begin{equation} \label{qntcrlb_classys_correcao_misto} (\Delta T^2 + \delta T^2)(\Delta H^2 - \delta H^2) \geq \frac{1}{4}\left[1+ \frac{(\mu_4-3\mu_2^2)^2}{\mu_6\,\mu_2-\mu_4^2} \right], \end{equation} where $\delta X^2 = \tr(X\sqrt{\rho}X\sqrt{\rho})-[\tr(X\rho)]^2$, and $\mu_{2n}$ are the squared norms of the $n$-th derivatives of the square root of the state with respect to the parameter. \end{prop}
The paper is organized as follows. In Section II we introduce the notation used throughout the manuscript and briefly discuss the current literature on quantum statistical estimation. Section III is devoted to obtaining the extension of quantum estimation theory to the mixed state scenario, analyzing the consequences for the Cram\'{e}r-Rao bound. In Section IV we present the higher order corrections for the variance implied by this mixed state scenario. In Section V we present an algorithmic approach to obtain all possible corrections to the bound, and in Section VI we present our conclusions and discussions.
\section{Notation} \label{sec:adapt_notacao}
Throughout the paper we use the following notation. Let $\mathcal{B}_{HS}$ be a Hilbert space contained in the set of square matrices with complex entries, equipped with the Hilbert-Schmidt inner product. For a given matrix $A\in \mathcal{B}_{HS}$ we have \begin{equation*} \langle A,A\rangle_{HS}=\tr (A^\dagger A)<\infty, \end{equation*} where $\langle\bullet,\bullet\rangle_{HS}$ denotes the Hilbert-Schmidt product and $\tr(\bullet)$ stands for the matrix trace. Therefore, $\mathcal{B}_{HS}$ is the set of all matrices with finite Hilbert-Schmidt norm. Thus, let us index the Hilbert-Schmidt product as $g_{ab}$. This definition allows us to establish that $\zeta^ag_{ab}\zeta^b \equiv \tr (\zeta^\dagger\zeta)$, where $\zeta^a \in \mathcal{B}_{HS}$ and $\zeta$ is the matrix associated to $\zeta^a$.
On the other hand, the set of density operators representing mixed states in quantum theory, $\mathcal{S}=\{ \rho \mid \rho=\rho^\dagger > 0, \tr(\rho)= 1, \tr(\rho^{2}) < 1 \}$, denoting a space of positive definite density operators, is a differentiable manifold in $\mathcal{B}_{HS}$ \cite{amari1993infogeo, livro_geo_info}. As the operators in $\mathcal{S}$ are positive definite, we have the embedding $\rho \rightarrow \sqrt{\rho}$ which maps an operator into its own square-root. Thus, identifying $\sqrt{\rho}\equiv\xi$, we find that $g_{ab}\xi^a\xi^b=\tr (\xi\xi)=1$.
A random variable in $\mathcal{S}$ is a form $X_{ab}$ whose average in terms of the state $\xi$ is given by \begin{equation*}
\mbox{E}_{\xi}[X]=\xi^aX_{ab}\xi^b=\tr [\xi X\xi]. \end{equation*} Also, the variance of $X_{ab}$ is given by \begin{equation*}
\mbox{Var}_{\xi}[X]=\Delta X^2=\xi^a\tilde{X}_{ac}\tensor[ ]{\tilde{X}}{^c_b}\xi^b, \end{equation*} where $\tilde{X}_{ab} \equiv X_{ab}-g_{ab}(\xi^cX_{cd}\xi^d)$. It is important to note that when calculating these quantities, in general, we have $X_{ac}\xi^c\xi^b\tensor{Y}{_b^a}\neq\xi^cX_{ca}\xi^b\tensor{Y}{_b^a}$ since $\tr [X\xi\xi Y]\neq\tr [\xi X \xi Y]$. Since $\xi^a$ is a matrix, the way the contraction is taken is quite important, even for symmetric forms, because the matrix algebra is non-commutative.
Now consider that $\mathcal{S}$ is parametrically given by a set of local coordinates $\{ \theta = [\theta^{i}] \in \Re^{r}; i=1,\dots,r\}$, where $\xi(\theta)\in C^\infty$. Defining $\partial_i\equiv\partial/\partial\theta^i$ we have, in terms of local coordinates in $\mathcal{S}$, the Riemannian metric \begin{equation} \label{fisher_metrica_misto} G_{ij}=2g_{ab}\partial_i\xi^a\partial_j\xi^b=2\tr [\partial_i\xi\partial_j\xi] \end{equation} induced by the Hilbert-Schmidt metric $g_{ab}$ of $\mathcal{B}_{HS}(\mathcal{H})$. The proof of Eq.(\ref{fisher_metrica_misto}) follows the same steps as the proof of proposition 1 in Ref.~\cite{brody1996royalsoc}. The difference is that there, a factor 4 is arbitrarily considered so that one has an identification with the results of the Cram\'{e}r-Rao bound, while here we have a factor 2. The motivation for this difference will become clear in the next section.
\section{Quantum statistical estimation} \label{sec:est_qnt_misto}
In this section, inspired by Ref.\cite{brody2011fasttrack}, we present a generalisation to the mixed state scenario. Thus, consider that, in $\mathcal{S}$, we have the result of a measurement of an unbiased estimator $T_{ab}$ such that $\xi^aT_{ab}\xi^b=t$, and that a one-parameter family $\xi(t)$ characterizes the probability distribution of all the possible results of the measurement. We can refer to $t$ as the time that has passed since the preparation of the initially known normalized state $\xi_0 = \xi(0)$. Our goal is to estimate $t$.
The system is prepared in the normalized state $\xi_0$ and evolves under the Hamiltonian $H_{ab}$ following the equation of motion \begin{equation*} \xi_t = e^{-iHt}\,\xi_0\,e^{iHt}. \end{equation*} The derivative of this equation in time gives \begin{equation*} \dot{\xi}=-iH\xi+i\xi H = -i[H,\xi] \end{equation*} that is the von Neumann dynamics for the state $\xi_t$. From such evolution we find that \begin{equation*}
\tr (\dot{\xi}\dot{\xi})=2[\tr (H^2\xi\xi)-\tr (H\xi H\xi)]. \end{equation*}
Given the mapping $\xi\rightarrow\sqrt{\rho}$, we find that the right side of the previous expression is twice the Wigner-Yanase skew information (WYSI) \cite{WY} of an arbitrary observable $H$ and a quantum state $\rho$ \begin{equation} \label{wysi} I_{\rho}(H)=\tr (H^2\rho)-\tr (H\sqrt{\rho}H\sqrt{\rho})=-\frac{1}{2}\tr ([\sqrt{\rho},H]^2). \end{equation} Defining $\delta X^2 \equiv \tr (X \xi X \xi)-[\tr (X \xi \xi)]^2$ we can rewrite Eq.(\ref{wysi}) as \begin{equation*} I_{\rho}(H)=(\Delta H^2-\delta H^2). \end{equation*} From the embedding $\rho\rightarrow \sqrt{\rho}$, we find that the natural metric, induced by the notion of distance in the ambient manifold, is going to be the WYSI in the mixed state space which is the only metric that has a Riemannian $\alpha-$connection \cite{amari1993infogeo}.
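The equivalent expressions above are straightforward to verify numerically. The following brief sketch (ours, not part of the original text; a randomly generated full-rank qubit state and observable, purely for illustration) checks that the two forms of $I_{\rho}(H)$ coincide and equal $\Delta H^2-\delta H^2$.
\begin{verbatim}
import numpy as np
from scipy.linalg import sqrtm

rng = np.random.default_rng(0)

A = rng.normal(size=(2, 2)) + 1j * rng.normal(size=(2, 2))
rho = A @ A.conj().T
rho /= np.trace(rho).real                 # random full-rank qubit state
B = rng.normal(size=(2, 2)) + 1j * rng.normal(size=(2, 2))
H = (B + B.conj().T) / 2                  # random Hermitian observable

xi = sqrtm(rho)                           # xi = sqrt(rho)
wysi_a = np.trace(H @ H @ rho) - np.trace(H @ xi @ H @ xi)
wysi_b = -0.5 * np.trace((xi @ H - H @ xi) @ (xi @ H - H @ xi))
DeltaH2 = np.trace(H @ H @ rho) - np.trace(H @ rho) ** 2
deltaH2 = np.trace(H @ xi @ H @ xi) - np.trace(H @ rho) ** 2

assert np.allclose(wysi_a, wysi_b) and np.allclose(wysi_a, DeltaH2 - deltaH2)
\end{verbatim}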
Relaxing the unitarity of the trace, we can define a symmetric function of $\xi$ in $\mathcal{B}_{HS}$ as \begin{equation*}
t(\xi)=\frac{\xi^aT_{ab}\xi^b}{\xi^cg_{cd}\xi^d}=\frac{\tr (\xi T\xi)}{\tr (\xi\xi)}. \end{equation*} Again, being the matrix algebra non-commutative, we must be careful when doing the contractions. For example, considering $\triangledown_c=\partial/\partial\xi^c$, we obtain \begin{eqnarray*} \triangledown_c\xi^aT_{ab}\xi^b &=& \triangledown_c\xi^aT_{ab}\xi^b + \xi^aT_{ab}\triangledown_c\xi^b \nonumber \\ &=& T_{cb}\xi^b + \xi^aT_{ac} \\ &=& (T_{ac}+T_{ca})\xi^a=(T\xi+\xi T). \end{eqnarray*} Following this calculation, we can find that the gradient of a function $t$ in $\mathcal{B}_{HS}$ is given by \begin{equation*} \triangledown_ct=\frac{(T_{ac}+T_{ca})\xi^a-2\,t\,g_{ac}\xi^a}{\xi^b\xi_b}. \end{equation*} Now, imposing the unitarity of the trace, $\tr(\xi\xi)=1$, we get \begin{eqnarray*}
\triangledown_ct &=& (\tilde{T}_{ac}+\tilde{T}_{ca})\xi^a \\ \triangledown^ct &=& (\tensor{T}{^c_a}+\tensor{T}{_a^c})\xi^a -2t\xi^c. \end{eqnarray*} The gradient norm is \begin{eqnarray*} \label{norm_gradt_misto} \norm{\triangledown^ct}^2&=&(T_{ac}+T_{ca})(\tensor{T}{^c_b}+\tensor{T}{_b^c})\xi^a\xi^b \\ &-&2\,t\,(T_{ac}+T_{ca})\xi^a\xi^c -2\,t\,(T_{ab}+T_{ba})\xi^a\xi^b + 4\,t^2\,\xi^a\xi_a \\ &=&T_{ca}\xi^a\tensor{T}{^c_b}\xi^b+T_{ca}\xi^a\xi^b\tensor{T}{_b^c} \\ &+&\xi^aT_{ac}\tensor{T}{^c_b}\xi^b + \xi^aT_{ac}\xi^b\tensor{T}{_b^c}-4t^2 \\ &=&2\{ [\tr (T^2\xi\xi)-t^2] + [\tr (T\xi T\xi)-t^2] \} \\ &=&2(\Delta T^2 + \delta T^2) \end{eqnarray*} The von Neumann dynamics can be written as \begin{equation*} \dot{\xi}^a=-i\tensor{H}{^a_b}\xi^b+i\xi^b\tensor{H}{_b^a}. \end{equation*} Being $T$ canonically conjugated to $H$, i.e., $i[H,T]=1$, we obtain the projection of $\triangledown^ct$ into the direction $\dot{\xi}^a$ \begin{eqnarray*}
\triangledown_at\,\dot{\xi}^a&=&[(T_{ac}+T_{ca})\xi^c-2\,t\,\xi_a][-i\tensor{H}{^a_b}\xi^b+i\xi^b\tensor{H}{_b^a}] \nonumber \\ &=&\tr (-iT\xi H\xi+i\xi T\xi H+iT\xi\xi H-i\xi TH\xi) \nonumber \\ &=&i\tr ([H,T]\xi\xi)=1. \end{eqnarray*}
Using the Cauchy-Schwarz inequality for a pair of Hermitian operators $X$ and $Y$, \begin{equation*}
[\tr (XY)]^2\leq\tr(X^2)\tr(Y^2), \end{equation*} we deduce the quantum Cram\'{e}r-Rao inequality for the space of density operators \begin{equation} \label{qntcrlb_misto2} (\Delta T^2+\delta T^2)(\Delta H^2-\delta H^2)\geq\frac{1}{4}, \end{equation} where $\Delta X^2$ denotes the variance of the corresponding observable $X$ and $(\Delta X^2 + \delta X^2)$ is the skew information of the second kind \cite{brody2011fasttrack}. Note that the relation above extends the usual notion of the Cram\'{e}r-Rao bound, incorporating corrections to the uncertainty relation.
If we impose that $\xi=\xi^2$ to recover the pure state case, it follows that $\tr (H\sqrt{\rho}H\sqrt{\rho}) = [\tr (H\rho)]^2$, consequently $\delta H^2 = 0$, and thus $\tr (\dot{\xi}\dot{\xi})=2\Delta H^2$. Similarly, we can find that $\delta T^2 = 0$. After these considerations, the inequality reduces to the Cram\'{e}r-Rao bound for pure states found in the literature \cite{brody1996prl, brody1996royalsoc}\footnote{In the quantum scenario, the metric when $\alpha=0$ corresponds exactly to the WYSI in the uniparametric case, where the factor 4 can be found \cite{amari1993infogeo}. However, the embedding considered in this case is $\rho\rightarrow 2\sqrt{\rho}$, which is different in the present work. The factor 2 allows us to establish an equality with the relation in Eq.(\ref{qntcrlb_misto2}), in the sense of the Cram\'{e}r-Rao $1/\mathcal{G}$, and recover the expression for the pure state case. Notice that if we consider the embedding $\rho\rightarrow 2\sqrt{\rho}$, the metric will have the factor 4 and, when taking Eq.(\ref{qntcrlb_misto2}) in the pure state case, we would find $\Delta T^2\Delta H^2\ge 1/8$ \cite{ref20_brody_fasttrack}.}.
We can go a step further and rewrite the inequality given in Eq.(\ref{qntcrlb_misto2}), after a simple manipulation, in another form \begin{equation*} \Delta T^2\Delta H^2 \geq \frac{1}{4} + \delta T^2\delta H^2. \end{equation*} However, although more symmetric, this inequality is less tight than the previous one.
\section{Higher-order corrections for the variance bound in the mixed state case} \label{sec:ordem_sup_var_qnt}
The dynamics of a mixed state will not lead to an exponential family of states that saturates the Cauchy-Schwarz inequality, i.e., the bound in Eq.(\ref{qntcrlb_misto2}) is not attainable when states evolve under a von Neumann dynamics. The calculation of higher-order corrections then becomes relevant to determine how the estimation is affected in such circumstances.
In order to establish the higher-order corrections to the bound in the mixed state scenario, we take inspiration from the works of Bhattacharyya \cite{bhatt1, bhatt2, bhatt3}, follow the programme outlined in Ref.\cite{brody2011fasttrack}, and find in this new context what is called the generalized Bhattacharyya bound \cite{brody1996prl, brody1996royalsoc}.
\begin{prop}\label{prop:bhattacharrya_misto} Let \begin{equation*}
\hat{\xi}^{(n)a}=\xi^{(n)a}-\frac{\xi^{(n-1)b}\xi^{(n)}_b}{\xi^{(n-1)c}\xi^{(n-1)}_c}\,\xi^{(n-1)a} - \dots - (\xi^b\xi^{(n)}_b)\,\xi^a \quad (n=0,1,2,\dots), \end{equation*} be an orthogonal system of vectors, where $\xi^{(n)a}=d^n\xi^a/dt^n$ and \begin{eqnarray*}
\hat{\xi}^{(r)}_a\xi^a&=&0 \nonumber \\ \hat{\xi}^{(r)}_a\xi^{(s)a} &=& 0; r\neq s \nonumber \\ \hat{\xi}^{(r)}_a\hat{\xi}^{(s)a} &=& 0; r\neq s, \end{eqnarray*} are defined for mixed states. The generalized Bhattacharyya lower bounds for an unbiased estimator $T_{ab}$ of a function $t$ can be expressed in the form \begin{equation} \label{bhattacharrya_misto} \Delta T^2 + \delta T^2 \geq \frac{1}{2}\sum_n\frac{(\triangledown_at\,\hat{\xi}^{(n)a})^2}{\hat{\xi}^{(n)a}\hat{\xi}^{(n)}_a} \end{equation} \end{prop} \begin{proof}[Proof] Let $\hat{T}_{ab}\equiv (T_{ab}+T_{ba})-2\,t\,g_{ab}=(\tilde{T}_{ab}+\tilde{T}_{ba})$. Defining the tensor $R_{ab}\equiv \hat{T}_{ab}+\sum_n\lambda_n\xi_{(a)}\hat{\xi}_{(b)}$, we obtain the variance for $R$ \begin{equation*}
\mbox{Var}_{\xi}[R] = \mbox{Var}_{\xi}[\hat{T}]+\sum_n\lambda_n(\xi^a\hat{T}_{ac}\hat{\xi}^{(n)c}+\hat{\xi}^{(n)c}\hat{T}_{ca}\xi^a) + \sum_n\lambda_n^2\hat{\xi}^{(n)b}\hat{\xi}^{(n)}_b. \end{equation*} To obtain the values $\lambda_n$ that minimize $\mbox{Var}_{\xi}[R]$, we consider the variance as a function of the variables $\{\lambda_n\}_{n\in\mathbb{N}}$ and find the extreme points at which the gradient of this function vanishes. There is only one extreme point, a minimum, attained at $\lambda_n^{min}=-(\xi^a\hat{T}_{ac}\hat{\xi}^{(n)c}+\hat{\xi}^{(n)c}\hat{T}_{ca}\xi^a)/2\hat{\xi}^{(n)}_b\hat{\xi}^{(n)b}$ for all $n$. Replacing this in the above expression, we find \begin{equation*}
\min_{\{\lambda_n\}_{n\in\mathbb{N}}}\left(\mbox{Var}_{\xi}[R]\right) = \mbox{Var}_{\xi}[\hat{T}]-\sum_n\frac{(\xi^a\hat{T}_{ac}\hat{\xi}^{(n)c}+\hat{\xi}^{(n)c}\hat{T}_{ca}\xi^a)^2}{4\hat{\xi}^{(n)b}\hat{\xi}^{(n)}_b}. \end{equation*} Since $\mbox{Var}_{\xi}[R]\geq 0$, we obtain an expression for the generalized bound on the variance of $\hat{T}_{ab}$, \begin{equation*}
\mbox{Var}_{\xi}[\hat{T}]\geq \sum_n\frac{(\xi^a\hat{T}_{ac}\hat{\xi}^{(n)c}+\hat{\xi}^{(n)c}\hat{T}_{ca}\xi^a)^2}{4\hat{\xi}^{(n)b}\hat{\xi}^{(n)}_b}. \end{equation*} We need to obtain \begin{equation*}
(\xi^a\hat{T}_{ac}\hat{\xi}^{(n)c}+\hat{\xi}^{(n)c}\hat{T}_{ca}\xi^a)^2=4(\xi^aT_{ac}\hat{\xi}^{(n)c}+\hat{\xi}^{(n)c}T_{ca}\xi^a)^2=4(\triangledown_at\,\hat{\xi}^{(n)a})^2 \end{equation*} and \begin{equation*}
\mbox{Var}_{\xi}[\hat{T}] = 2(\Delta T^2+\delta T^2). \end{equation*} Combining all these results we arrive at the desired expression in Eq.(\ref{bhattacharrya_misto}). Naturally, for the case $n=1$, we recover the inequality given in Eq.(\ref{qntcrlb_misto2}). \end{proof}
A simple interpretation of this proposition is that, given the gradient vector $\triangledown^at$, its squared modulus is always greater than or equal to the sum of the squares of its orthogonal components with respect to a given basis. Applying the Cauchy-Schwarz inequality is the same as using the order-$1$ Bhattacharyya inequality. Note that the generalized Bhattacharyya bounds are not necessarily independent of the specific choice of the estimator $T_{ab}$. This is evidence that they are not fully equivalent to the original Bhattacharyya bounds, not even at the classical level, since the original bounds are independent of the specific choice of estimator. Therefore, we want to obtain corrections which are independent of that choice, so that the bound will not depend on the way we perform the estimation. This will again demand $T$ and $H$ to be canonically conjugated.
Before focusing on higher order corrections, let us present some useful results. These results are adaptations of statements from \cite{brody1996royalsoc} to the context of mixed states.
\begin{prop} \label{lem:norm_xin_indep_t_misto} Given $\xi^a(t)$ satisfying the von Neumann dynamics, the squared norm of $\xi^{(n)a}$, $g_{ab}\xi^{(n)a}\xi^{(n)b}$, is independent of the parameter $t$, where $\xi^{(n)a}=d^n\xi^a/dt^n$. In particular, $g_{ab}\xi^{(n)a}\xi^{(n+1)b}=0$. \end{prop} \begin{proof}[Proof] If the von Neumann dynamics holds, then $\dot{\xi}^{(n)a}=-i\tensor{H}{^a_b}\xi^{(n)b}+i\xi^{(n)b}\tensor{H}{_b^a}$. The time derivative of the squared norm of $\xi^{(n)a}$ gives \begin{eqnarray*} \frac{d}{dt}\left[g_{ab}\xi^{(n)a}\xi^{(n)b}\right]&=&2g_{ab}\xi^{(n)a}\dot{\xi}^{(n)b}=2g_{ab}\xi^{(n)a}\xi^{(n+1)b} \nonumber \\ &=&2\xi^{(n)}_a(-i\tensor{H}{^a_b}\xi^{(n)b}+i\xi^{(n)b}\tensor{H}{_b^a}) \\ &=&2\tr (-iH\xi^{(n)}\xi^{(n)}+i\xi^{(n)}\xi^{(n)}H)=0, \end{eqnarray*} completing the proof. \end{proof}
It is important to note that since the von Neumann dynamics is given by $\dot{\xi}=-i[\tilde{H},\xi]$, where $\tilde{H}_{ab}=H_{ab}-g_{ab}[\tr (H\xi\xi)]$, we can generalize this relation to \begin{equation} \label{vonneumann_nderiv} \xi^{(n)}=\mbox{mod}_{-i}[n]\cdot\mbox{Ad}^n_{\tilde{H}}[\xi], \end{equation} where $\mbox{mod}_{-i}[n]=(-i)^n$ depends only on $n$ modulo $4$, \begin{eqnarray*} \mbox{mod}_{-i}[n] \Longrightarrow && n=\bar{1}\rightarrow-i \\ && n=\bar{2}\rightarrow -1 \\ && n=\bar{3}\rightarrow i \\ && n=\bar{4}\rightarrow +1 \end{eqnarray*} and $$\mbox{Ad}^n_{\tilde{H}}[\bullet]=[\dots[\tilde{H},[\tilde{H},[\tilde{H},[\tilde{H},\bullet]]]]\dots].$$ Let us also denote the squared norm of the $n$-th derivative of $\xi$ by \begin{equation*} g_{ab}\xi^{(n)a}\xi^{(n)b}\equiv \mu_{2n}. \end{equation*}
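The relation \eqref{vonneumann_nderiv} and the constancy of the norms from Prop.~\ref{lem:norm_xin_indep_t_misto} can be illustrated numerically. The sketch below (ours, not part of the original derivation; a random qutrit state and Hamiltonian, purely illustrative) computes $\xi^{(n)}(t)$ as nested commutators and checks that $\mu_{2n}$ does not depend on $t$; since the identity part of $\tilde{H}$ drops out of every commutator, $H$ may be used in place of $\tilde{H}$.
\begin{verbatim}
import numpy as np
from scipy.linalg import sqrtm, expm

rng = np.random.default_rng(1)
d = 3
A = rng.normal(size=(d, d)) + 1j * rng.normal(size=(d, d))
rho0 = A @ A.conj().T
rho0 /= np.trace(rho0).real               # random qutrit state
B = rng.normal(size=(d, d)) + 1j * rng.normal(size=(d, d))
H = (B + B.conj().T) / 2                  # random Hamiltonian
xi0 = sqrtm(rho0)

def xi_deriv(t, n):
    # xi^{(n)}(t) = (-i)^n ad_H^n [xi(t)] for the von Neumann evolution
    U = expm(-1j * H * t)
    X = U @ xi0 @ U.conj().T
    for _ in range(n):
        X = -1j * (H @ X - X @ H)
    return X

def mu(t, n):
    X = xi_deriv(t, n)
    return np.trace(X @ X).real           # mu_{2n} = tr(xi^{(n)} xi^{(n)})

for n in (1, 2, 3):
    values = [mu(t, n) for t in (0.0, 0.7, 2.3)]
    assert np.allclose(values, values[0])  # mu_{2n} is a constant of motion
\end{verbatim}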
\begin{prop} \label{prop:conseq_TconjugH_misto} Let $T_{ab}$ be canonically conjugate to $H_{ab}$ and an unbiased estimator of the parameter $t$. Then \begin{equation} \label{conseq_TconjugH_misto} T_{ab}\xi^{(n)a}\xi^{(n)b}=t\,g_{ab}\xi^{(n)a}\xi^{(n)b}+\kappa, \end{equation} where $\kappa$ is a constant. Therefore, for all $n$, $\tilde{T}_{ab}\xi^{(n)a}\xi^{(n)b}=\kappa$ is a constant of motion along the path $\xi(t)$, following a von Neumann dynamics. \end{prop} \begin{proof}[Proof] If $T$ is canonically conjugated to $H$, then $ig_{ab}=(T_{ac}\tensor{H}{^c_b}-\tensor{H}{_a^c}T_{cb})$. It follows that \begin{equation*} g_{ab}\xi^{(n)a}\xi^{(n)b}=(-i\xi^{(n)a}T_{ac}\tensor{H}{^c_b}\xi^{(n)b}+i\xi^{(n)a}\tensor{H}{_a^c}T_{cb}\xi^{(n)b}). \end{equation*} The derivative of the average of $T$ in the state $\xi^{(n)}$ gives \begin{eqnarray*} \frac{d}{dt}[T_{ab}\xi^{(n)a}\xi^{(n)b}]&=&T_{ab}(\dot{\xi}^{(n)a}\xi^{(n)b}+\xi^{(n)a}\dot{\xi}^{(n)b}) \\ &=&(-i\xi^{(n)a}T_{ac}\tensor{H}{^c_b}\xi^{(n)b}+i\xi^{(n)a}\tensor{H}{_a^c}T_{cb}\xi^{(n)b}) \\ &=&g_{ab}\xi^{(n)a}\xi^{(n)b}. \end{eqnarray*} Integrating the above expression, we obtain \begin{eqnarray*} T_{ab}\xi^{(n)a}\xi^{(n)b}&=&t\,g_{ab}\xi^{(n)a}\xi^{(n)b}+\kappa \\ \tilde{T}_{ab}\xi^{(n)a}\xi^{(n)b}&=&\kappa, \end{eqnarray*} where $\kappa$ is a constant independent of $t$ and $\tilde{T}_{ab}\xi^{(n)a}\xi^{(n)b}$ is a constant of motion. \end{proof}
\begin{prop} \label{lem:2Txi(n)xi_misto} Let $T_{ab}$ be canonically conjugate to $H_{ab}$ and an unbiased estimator of the parameter $t$. Then, for odd integers $n$, with $m=(n-1)/2$, we have \begin{equation} \label{2Txi(n)xi_misto} (T_{ab}+T_{ba})\xi^{(n)a}\xi^b=(-1)^mng_{ab}\xi^{(m)a}\xi^{(m)b} \end{equation} \end{prop} \begin{proof}[Proof] Given Proposition \ref{prop:conseq_TconjugH_misto}, the proof follows the one presented in Lemma 6 of Ref.~\cite{brody1996royalsoc}. \end{proof}
In view of these results, we can deduce some higher order corrections, independent of the specific choice of $T$, for canonically conjugated observables in the mixed state case.
\begin{proof}[Proof of Proposition 1] Let us consider corrections up to the third order for the bound in the estimation of mixed states that emerge when we expand $\triangledown_at$ over the orthogonal vector system $\dot{\xi}^a$, $\hat{\xi}^{(2)a}$ and $\hat{\xi}^{(3)a}$. From Eq.(\ref{bhattacharrya_misto}), the generalized bound is \begin{equation} \label{cota_classysvar_ordem3_misto} \Delta T^2+\delta T^2\geq \frac{(\dot{\xi}^a\triangledown_at)^2}{2\dot{\xi}^b\dot{\xi}_b}+\frac{(\hat{\xi}^{(2)a}\triangledown_at)^2}{2\hat{\xi}^{(2)b}\hat{\xi}^{(2)}_b}+\frac{(\hat{\xi}^{(3)a}\triangledown_at)^2}{2\hat{\xi}^{(3)b}\hat{\xi}^{(3)}_b}, \end{equation} where $\hat{\xi}^{(2)a}$ and $\hat{\xi}^{(3)a}$ are given by
\hat{\xi}^{(2)a}=\ddot{\xi}^a-\frac{(\ddot{\xi}^b\dot{\xi}_b)}{(\dot{\xi}^c\dot{\xi}_c)}\dot{\xi}^a-(\ddot{\xi}^b\xi_b)\xi^a \end{equation*} and \begin{equation*}
\hat{\xi}^{(3)a}=\dddot{\xi}^a-\frac{(\dddot{\xi}^b\ddot{\xi}_b)}{(\ddot{\xi}^c\ddot{\xi}_c)}\ddot{\xi}^a-\frac{(\dddot{\xi}^b\dot{\xi}_b)}{(\dot{\xi}^c\dot{\xi}_c)}\dot{\xi}^a-(\dddot{\xi}^b\xi_b)\xi^a \end{equation*}
The term corresponding to $n=1$ in Eq.(\ref{cota_classysvar_ordem3_misto}) has already been calculated in the derivation of Eq.(\ref{qntcrlb_misto2}). Thus, let us proceed with the second order term. The expression in Eq.(\ref{vonneumann_nderiv}) and Prop.~\ref{lem:norm_xin_indep_t_misto} give us that $\ddot{\xi}=-[\tilde{H},[\tilde{H},\xi]]$, $\ddot{\xi}^a\dot{\xi}_a=0$, $\ddot{\xi}^a\xi_a=-2I_{\rho}(\tilde{H})$. Therefore, $\hat{\xi}^{(2)a}=2I_{\rho}(\tilde{H})\xi-[\tilde{H},[\tilde{H},\xi]]$. Hence, the numerator of the second order term is $(\hat{\xi}^{(2)a}\triangledown_at)^2=\big\{\tr \big[(T\xi-\xi T)(2I_{\rho}(\tilde{H})\xi-[\tilde{H},[\tilde{H},\xi]])\big]\big\}^2$, which explicitly depends on the choice of the estimator $T$. Due to this dependence, we will discard this term, as we want only correction terms that do not depend on the choice of the estimator.
Now, dealing with the term that involves $\hat{\xi}^{(3)a}$, we have $\dddot{\xi}^a\ddot{\xi}_a=\ddot{\xi}^a\dot{\xi}_a=0$ from Prop.~\ref{lem:norm_xin_indep_t_misto}. It follows that $\ddot{\xi}^a\xi_a=-\dot{\xi}^a\dot{\xi}_a$, thus $\dddot{\xi}^a\xi_a=0$. From the expression in Eq.(\ref{vonneumann_nderiv}), $\dddot{\xi}^a=i[\tilde{H},[\tilde{H},[\tilde{H},\xi]]]=i([\tilde{H}^3,\xi]+3[\tilde{H}\xi\tilde{H},\tilde{H}])$. After some manipulation, we find $\dddot{\xi}^a\dot{\xi}_a=-2\tr (\tilde{H}^4\xi\xi-4\tilde{H}^3\xi\tilde{H}\xi+3\tilde{H}^2\xi\tilde{H}^2\xi)$. Consequently, \begin{equation*} \hat{\xi}^{(3)}=i\left\{ \big([\tilde{H}^3,\xi]+3[\tilde{H}\xi\tilde{H},\tilde{H}]\big) - \frac{\mu_4}{\mu_2}[\tilde{H},\xi] \right\} \end{equation*} and \begin{equation} \label{norm_xihat3_misto} \hat{\xi}^{(3)a}\hat{\xi}^{(3)}_a=\mu_6-\frac{\mu_4^2}{\mu_2}, \end{equation} where we explicitly have \begin{eqnarray*} \mu_2&: =&2I_{\rho}(\tilde{H})=2\tr (\tilde{H}^2\xi\xi-\tilde{H}\xi\tilde{H}\xi)=2(\Delta H^2 - \delta H^2)=\tr(\dot{\xi}\dot{\xi}) \\ \mu_4&: =& 2\tr (\tilde{H}^4\xi\xi-4\tilde{H}^3\xi\tilde{H}\xi+3\tilde{H}^2\xi\tilde{H}^2\xi) =\tr(\ddot{\xi}\ddot{\xi})\\ \mu_6&: =& 2\tr (\tilde{H}^6\xi\xi-6\tilde{H}^5\xi\tilde{H}\xi+15\tilde{H}^4\xi\tilde{H}^2\xi-10\tilde{H}^3\xi\tilde{H}^3\xi)=\tr(\xi^{(3)}\xi^{(3)}). \end{eqnarray*}
Regarding the numerator of the third order term \begin{equation*} \hat{\xi}^{(3)a}\triangledown_at=i\tr \left\{ [T,\tilde{H}^3]\xi\xi+3(\xi[\tilde{H}^2,T]\xi\tilde{H}+\xi[T,\tilde{H}]\xi\tilde{H}^2) -\xi[T,\tilde{H}]\xi\frac{\mu_4}{\mu_2} \right\}, \end{equation*} we see that it involves commutators between $T$ and $H^k$, $k\in\mathbb{N}$. By finite induction one shows that $[\tilde{H}^k,T]=-ki\tilde{H}^{k-1}, k\in\mathbb{N}$, and we have \begin{equation} \label{gradt_xihat3_misto} \hat{\xi}^{(3)a}\triangledown_at=\frac{\mu_4^2}{\mu_2}-3\mu_2. \end{equation} By Prop.~\ref{lem:2Txi(n)xi_misto}, which assumes the canonical conjugation between $T$ and $H$, even terms in general depend explicitly on the arbitrary choice of the estimator $T$, whereas odd terms involve only commutators between $T$ and $H^k$, $k \in \mathbb{N}$; therefore the odd terms do not depend on the choice of $T$. Thus, putting the results in Eqs.(\ref{qntcrlb_misto2}), (\ref{norm_xihat3_misto}), and (\ref{gradt_xihat3_misto}) together, we obtain the following Heisenberg-like correction for the Cram\'{e}r-Rao bound in the mixed state case, which only depends on the dynamics of $\xi(t)$ generated by $H$, i.e., the statement of Proposition 1.
Imposing $\xi=\xi^2$ to recover the pure state case, we have $\mu_4 \neq \langle\tilde{H}^4\rangle$, where $\langle\tilde{H}^n\rangle=\tr(\tilde{H}^n\xi\xi)$ denotes the $n$-th moment of the Hamiltonian in the corresponding state $\xi$. The only circumstance where the equality holds is when $\mu_2=\langle\tilde{H}^2\rangle$. Thus, merely imposing that the density operator characterizes a pure state does not make Eq.(\ref{qntcrlb_classys_correcao_misto}) recover the results in Refs.~\cite{brody1996prl, brody1996royalsoc}, contrary to what was expected \cite{brody2011fasttrack}. \end{proof}
\section{Method to obtain higher-order corrections} \label{sec:algorit_ordem_impar}
In the previous section, we found that \textit{even} higher order corrections explicitly depend on the choice of the estimator $T$, while by Prop.~\ref{lem:2Txi(n)xi_misto} \textit{odd} higher order corrections are independent of the specific choice of $T$. We now present an algorithmic way of calculating the odd higher-order correction terms, following the steps of Ref.~\cite{algorit_ordem_impar}, for a one-parameter family of states $\xi(t)$ evolving under von Neumann dynamics.
Let the series of orthogonal vectors be \begin{equation*} \left\{ \xi^a, \dot{\xi}^a, \ddot{\xi}^a - (\ddot{\xi}^b\xi_b)\xi^a, \dddot{\xi}^a-\frac{\dddot{\xi}^b\dot{\xi}_b}{\dot{\xi}^c\dot{\xi}_c}\dot{\xi}^a, \dots \right\}, \end{equation*} these are essentially the $\hat{\xi}^{(n)a}$ given in Prop.~\ref{prop:bhattacharrya_misto}, with the vanishing terms omitted (e.g. $\dddot{\xi}^a\ddot{\xi}_a = \ddot{\xi}^a\dot{\xi}_a = \dddot{\xi}^a\xi_a=0$). Let us denote this series of vectors by $\{\uppsi^a_n\}$; thus $\uppsi^a_0=\xi^a$, $\uppsi^a_1=\dot{\xi}^a$ and so on.
For each \textit{odd integer value\/} $n$, the basis vector $\uppsi^a_n$ is obtained by subtracting from $\xi^{(n)a}$ its components along the $\uppsi^a_k$ with $k<n$ \begin{eqnarray*} \uppsi^a_1 &=& \dot{\xi}^a \\ \uppsi^a_3 &=&\xi^{(3)a}-\frac{\xi^{(3)b}\uppsi_{1b}}{\uppsi^c_1\uppsi_{1c}}\uppsi^a_1 \\ \uppsi^a_5 &=& \xi^{(5)a}-\frac{\xi^{(5)b}\uppsi_{3b}}{\uppsi^c_3\uppsi_{3c}}\uppsi^a_3 - \frac{\xi^{(5)b}\uppsi_{1b}}{\uppsi^c_1\uppsi_{1c}}\uppsi^a_1. \end{eqnarray*} Note that $\triangledown_at=(\tilde{T}_{ab}+\tilde{T}_{ba})\xi^b$, so we can rewrite the generalized bounds in Eq.(\ref{bhattacharrya_misto}) as \begin{equation} \label{bhattacharrya_misto_uppsi} \Delta T^2 + \delta T^2 \geq \frac{1}{2}\sum_n\frac{[\uppsi^a_n(\tilde{T}_{ab}+\tilde{T}_{ba})\xi^b]^2}{g_{cd}\uppsi^c_n\uppsi^d_n}. \end{equation}
Let $N_n \equiv g_{ab}\uppsi^a_n\uppsi^b_n$ stand for the denominator of the correction terms in Eq.(\ref{bhattacharrya_misto_uppsi}). We have \begin{equation*} N_n=\frac{D_{2n}}{D_{2n-4}} , \quad n>2, \end{equation*} where $D_{2n}$ is defined by the determinant \begin{equation*} D_{2n} =
\left| \matrix{ \mu_{2n}& \mu_{2n-2}& \cdots& \mu_{n+1} \cr \mu_{2n-2}& \mu_{2n-4}& \cdots& \mu_{n-1} \cr \vdots & \vdots & \ddots & \vdots \cr
\mu_{n+1} & \mu_{n-1} & \cdots & \mu_{2} \cr} \right|.
\end{equation*} As examples, we have $D_{2}=\mu_2$ and $D_6=\mu_6\mu_2-\mu_4^2$. Note that $N_1=\mu_2=2I_{\rho}(\tilde{H})$. The statistical identities \cite{stuart_kendall} guarantee that $D_{2n}\ge 0$.
Before moving on to the numerator, we define \begin{equation*} F_{n,k}\equiv \frac{\xi^{(n)a}\uppsi_{ka}}{\uppsi^b_k\uppsi_{kb}}, \end{equation*} which has an expression in terms of determinants given by \begin{equation*} F_{n,k}=\frac{(-1)^{\frac{1}{2}(n+k)-1}}{D_{2k}}
\left| \matrix{\mu_{n+k} & \mu_{n+k-2} & \dots & \mu_{n+1}\cr \mu_{2k-2} & \mu_{2k-4} & \dots & \mu_{k-1}\cr \vdots & & \ddots & \vdots \cr
\mu_{k+1} & \mu_{k-1} & \dots & \mu_{2} \cr} \right| .
\end{equation*} For example, we have for $k=1,3$ \begin{eqnarray*} F_{n,3} &=& (-1)^{m+1}\frac{1}{D_6}
\left| \matrix{\mu_{n+3} & \mu_{n+1} \cr
\mu_4 & \mu_2 \cr} \right| \\
F_{n,1} &=& (-1)^m\frac{1}{D_2}\mu_{n+1}, \end{eqnarray*} where $m=1/2(n-1)$.
We now have all the identities necessary to find a recursive relationship for the odd higher order corrections. Going back to the numerator, we define $U_n\equiv \uppsi^a_n(\tilde{T}_{ab}+\tilde{T}_{ba})\xi^b$. In terms of $F_{n,k}$, the vector $\uppsi^a_n$ reads \begin{equation*} \uppsi^a_n=\xi^{(n)a}-\sum\limits^{n-2}_{k=1,3,5,\dots}F_{n,k}\uppsi^a_k; \end{equation*} from this relation and Prop.~\ref{lem:2Txi(n)xi_misto} follows a recursive formula for $U_n$, \begin{equation} \label{recursiva_Un} U_n=(-1)^m\,n\,\mu_{n-1}-\sum\limits^{n-2}_{k=1,3,5,\dots}F_{n,k}U_k, \end{equation} where $U_1=1$ and $m=(n-1)/2$. Thus, the uncertainty relation in Eq.(\ref{bhattacharrya_misto_uppsi}) can be rewritten as \begin{equation} \label{correcao_superior_impares} (\Delta T^2+\delta T^2)(\Delta H^2-\delta H^2)\ge \frac{1}{4}\sum\limits^{n-2}_{k=1,3,5,\dots}\frac{\mu_2U^2_k}{N_k}. \end{equation}
Using the relations in Eqs.(\ref{recursiva_Un}) and (\ref{correcao_superior_impares}), after some algebraic manipulation, any higher-order correction can be obtained purely in terms of the moments $\mu_{2k}$, regardless of the choice of estimator.
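To illustrate the procedure, the following is a minimal numerical sketch of the recursion in Eqs.(\ref{recursiva_Un}) and (\ref{correcao_superior_impares}). It assumes the even moments $\mu_{2k}$ have already been computed for the state of interest and are supplied as plain numbers; the truncation order and the illustrative moment values at the end are arbitrary choices for demonstration rather than values from any physical model.

\begin{verbatim}
import numpy as np

def D(mu, n):
    # determinant D_{2n}, built from the even moments mu[2], ..., mu[2n]
    m = (n - 1) // 2
    M = [[mu[2*n - 2*(i + j)] for j in range(m + 1)] for i in range(m + 1)]
    return float(np.linalg.det(np.array(M)))

def F(mu, n, k):
    # coefficient F_{n,k} (odd n > k >= 1) via its determinant expression
    size = (k + 1) // 2
    rows = [[mu[n + k - 2*j] for j in range(size)]]
    rows += [[mu[2*k - 2*(i + j)] for j in range(size)] for i in range(1, size)]
    sign = (-1) ** ((n + k) // 2 - 1)
    return sign * float(np.linalg.det(np.array(rows))) / D(mu, k)

def odd_order_bound(mu, n_max):
    # right-hand side of Eq. (correcao_superior_impares), truncated at odd
    # order n_max; requires the even moments mu[2], ..., mu[2*n_max]
    U, total = {1: 1.0}, 0.0
    for n in range(3, n_max + 1, 2):
        m = (n - 1) // 2
        U[n] = (-1)**m * n * mu[n - 1] - sum(F(mu, n, k) * U[k]
                                             for k in range(1, n, 2))
    for k in range(1, n_max + 1, 2):
        N_k = mu[2] if k == 1 else D(mu, k) / D(mu, k - 2)
        total += mu[2] * U[k]**2 / N_k
    return total / 4.0

mu = {2: 1.0, 4: 2.0, 6: 10.0}   # illustrative values satisfying D_{2n} >= 0
print(odd_order_bound(mu, 1))    # 0.25, the uncorrected bound
print(odd_order_bound(mu, 3))    # ~0.29, with the third-order correction
\end{verbatim}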
\section{Conclusion} \label{sec:conclusao}
In this work we approached the quantum statistical estimation problem using mixed states, illustrated by time-energy uncertainty relations, from a geometric perspective. This geometrical view of the space of states allows us to make a clear distinction from the pure state scenario previously reported in the literature. We obtained the Cram\'{e}r-Rao bound independent of the choice of the estimator and analyzed the corrections that emerge naturally. A methodology to obtain higher-order corrections is also presented, thereby indicating a path towards extensions of the main result. Contrary to what was expected, when we imposed $\rho = \rho^2$, the bound did not reduce to the known result for the pure state case.
It is important to note that the square root embedding used in the present work, $\rho\rightarrow \sqrt{\rho}$, is related to the WYSI metric which, in turn, has its Riemannian connection identified with the single $\alpha$-connection of the same type. This fact makes the WYSI metric a good choice for working with a geometric structure on the state space, since it presents this privileged Riemannian structure from the point of view of $\alpha$-connections. The choice of this embedding was also motivated by the possibility of recovering the $1/4$ factor in the Cram\'{e}r-Rao bound.
To conclude our discussion in the context of single parameter estimation, it is important to remember that the saturation of the Cram\'{e}r-Rao bound can only be achieved under two important conditions: $(i)$ the asymptotic limit of a large number of probes and $(ii)$ an optimal measurement given by the eigenbasis of the symmetric logarithmic derivative. In a laboratory, where the experimentalist typically has access to a limited number of probes, corrections to the bound gain importance, providing tighter estimates of the attainable estimation precision. Here we investigated these corrections from an information-geometric point of view and found that, in the context of mixed state quantum estimation, the Wigner-Yanase skew information constitutes a natural metric on the space of states. The higher-order corrections to the Cram\'{e}r-Rao inequality obtained from this metric then determine the tightness of the bound for practical purposes. Our work provides advances towards understanding the mixed state estimation paradigm and its practical realization in quantum sensing and metrology.
\section{Note added} During the preparation of this manuscript, we became aware of related work by A.~J.~Belfield and D.~C.~Brody \cite{brody2020} where higher-order corrections to quantum estimation bounds based on the Wigner-Yanase skew information metric are also discussed.
\ack We thank Dorje C. Brody for careful reading of the manuscript and for fruitful discussions. The project was funded by Brazilian funding agencies CNPq (Grant No. 307028/2019-4), FAPESP (Grant No. 2017/03727-0), Coordena\c{c}\~{a}o de Aperfei\c{c}oamento de Pessoal de N\'{i}vel Superior - Brasil (CAPES) (Finance Code 001), by the Brazilian National Institute of Science and Technology of Quantum Information (INCT/IQ), and by the European Research Council (ERC StG GQCOP Grant No.~637352).
\section*{References}
\end{document} |
\begin{document}
\title{Feature Encodings for Gradient Boosting with Automunge}
\begin{abstract} Automunge is a tabular preprocessing library that encodes dataframes for supervised learning. When selecting a default feature encoding strategy for gradient boosted learning, one may consider metrics of training duration and achieved predictive performance associated with the feature representations. Automunge offers a default of binarization for categoric features and z-score normalization for numeric. The presented study sought to validate those defaults by benchmarking encoding variations with tuned gradient boosted learning across a series of diverse data sets. We found that on average our chosen defaults were top performers from both a tuning duration and a model performance standpoint. Another key finding was that one hot encoding underperformed categoric binarization, calling into question its suitability to serve as a categoric default. We present here these and further benchmarks. \end{abstract}
\section{Introduction}
The usefulness of feature engineering for applications of deep learning has long been considered a settled question in the negative, as neural networks are on their own universal function approximators \citep{GoodBengCour16}. However, even in the context of deep learning, tabular features are often treated with some form of encoding for preprocessing. Automunge \citep{anonymous_github} is a platform for encoding dataframes developed by the authors. This python library was originally built for a simple use case of basic encoding conventions for numeric and categoric features, like z-score normalization and one-hot encodings. Along the iterative development journey we began to flesh out a full library of encoding options, including a series of options for numeric and categoric features that now include scenarios for normalization, binarization, hashing, and missing data infill under automation. Although it was expected that this range of encoding options would be superfluous for deep learning, that does not rule out their utility in other paradigms, which could range from simple regression to support vector machines, decision trees, or, as will be the focus of this paper, gradient boosting.
The purpose of this work is to present the results of a benchmarking study between alternate encoding strategies for numeric and categoric features for gradient boosted tabular learning. We were particularly interested in validating the library's default encoding strategies, and found that in both primary performance metrics of tuning duration and model performance, the current defaults under automation of categoric binarization and numeric z-score normalization demonstrated merit to serve as default encodings for the Automunge library. We also found that in addition to our default binarization, even a frequency sorted variant of ordinal encoding on average outperformed one hot encoding.
\section{Automunge}
Automunge \citep{anonymous_github} is an open source python library, available now for pip install, built on top of Pandas \citep{McKinney:10}, Numpy \citep{2020NumPy-Array}, SciKit-learn \citep{Pedregosa:11}, and Scipy \citep{SciPy2020}. It takes as input tabular data received in a tidy form, meaning one column per feature and one row per sample, and returns numerically encoded sets with infill to missing points, thus providing a push-button means to feed raw tabular data directly to machine learning. The extent of derivations may be minimal, such as numeric normalizations and categoric binarizations under automation, or may include more elaborate univariate transformations, including aggregated sets thereof. Generally speaking, the transformations are performed based on a fit to properties of features in a designated training set, and then that same basis may be used to consistently and efficiently prepare subsequent test data, as may be intended for use in inference or for additional training data preparation.
The interface is channeled through two master functions, automunge(.) and postmunge(.). The automunge(.) function receives a training set and if available also a consistently formatted test set, and returns a collection of dataframes intended for training, validation, and inference, with each of these aggregations further segregated into subsets of features, index, and label sets. A validation set, if designated by ratio of partitioned data from the training set, is segregated from the training data prior to transformations and then consistently prepared on the train set basis to avoid data leakage between training and validation. The function also returns a populated python dictionary, which we call the postprocess\_dict, recording steps and parameters of transformations. This dictionary may then be passed along with subsequent test data to the postmunge(.) function for consistent preparations on the train set basis, as for instance may be applied sequentially to streams of data. Because it makes use of train set properties evaluated during a corresponding automunge(.) call instead of directly evaluating properties of the test data, preparing data in the postmunge(.) function can be very efficient.
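A minimal sketch of this two-call workflow is shown below. It is simplified for illustration: the full set of returned dataframes is unpacked here only by position, and the arguments shown are indicative rather than a complete specification of the interface; consult the library documentation for the actual signatures.

\begin{verbatim}
# schematic sketch only; see the library documentation for the full
# automunge(.) / postmunge(.) signatures and returned sets
import pandas as pd
from Automunge import *

df_train = pd.DataFrame({'feature': [1.0, 2.0, 3.0, 4.0],
                         'label':   [0, 1, 0, 1]})
df_stream = pd.DataFrame({'feature': [5.0, 6.0]})

am = AutoMunge()

# fit encodings on the training dataframe; the final returned object is the
# postprocess_dict recording the steps and parameters of transformations
returned_sets = am.automunge(df_train, labels_column='label')
postprocess_dict = returned_sets[-1]

# later, prepare additional data consistently on the train set basis
prepared_stream = am.postmunge(postprocess_dict, df_stream)
\end{verbatim}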
There is an extensive built-in library of feature encodings to choose from. Numeric features may be assigned to any range of transformations, normalizations, and bin aggregations. Sequential numeric features may be supplemented by proxies for derivatives \citep{anonymous_Numbers}. Categoric features may be encoded as ordinal, one hot, binarization, hashing, or even parsed categoric encoding \citep{anonymous_Strings} with an increased information retention in comparison to one hot encoding by a vectorization as a function of grammatical structure shared between entries. Categoric sets may be collectively aggregated into a single common binarization. Categoric labels may have label smoothing applied \citep{7780677}, or fitted smoothing where null values are fit to class distributions. Sets of transformations to be directed at targeted features can be assembled which include generations and branches of derivations by making use of our family tree primitives \citep{anonymous_github}, as can be used to redundantly encode a feature in multiple configurations of varying information content. Such transformation sets may be accessed from those predefined in an internal library for simple assignment or alternatively may be custom configured. Even the transformation functions themselves may be custom defined from a very simple template. Through application, statistics of the features are recorded to facilitate detection of distribution drift. Inversion is available to recover the original form of data found preceding transformations, as may be used to recover the original form of labels after inference. Missing data is imputed by auto ML models trained on surrounding features \citep{anonymous_MissingData}. Noise may be channeled into feature encodings for non-deterministic inference, as may include stochastic perturbations sampled from quantum circuits \citep{https://doi.org/10.48550/arxiv.2202.09248}.
\section{Gradient Boosting}
Gradient boosting \citep{friedman2000greedy} refers to a paradigm of decision tree learning \citep{quinlan:induction} similar to random forests \citep{10.1023/A:1010933404324} but in which the optimization is boosted by recursively training an iteration's model objective to correct the performance of the preceding iteration's model. It is commonly implemented in practice by the XGBoost library \citep{Chen_2016} for GPU acceleration, although there are architecture variations available for different fortes, like LightGBM \citep{NIPS2017_6449f44a} which may train faster on CPUs than XGBoost (with a possible performance tradeoff).
Gradient boosting has traditionally been found as a winning solution for tabular modality competitions on the Kaggle platform, and its competitive efficacy has even been demonstrated for more sophisticated applications like time series sequential learning when used for window based regression \citep{https://doi.org/10.48550/arxiv.2101.02118}. Recent tabular benchmarking papers have found that gradient boosting may still mostly outperform sophisticated neural architectures like transformers \citep{gorishniy2021revisiting}, although even a vanilla multi layer perceptron neural network could have capacity to outperform gradient boosting with comprehensively tuned regularizers \citep{kadra2021welltuned}. Gradient boosting can also be expected to have higher latency inference than neural networks \citep{https://doi.org/10.48550/arxiv.2110.01889}.
Conventional wisdom is that one can expect gradient boosting models to have capacity for better performance than random forests for tabular applications but with a tradeoff of increased probability of overfitting without hyperparameter tuning \citep{Howard:20}. With both more sensitivity to tuning parameters and a much higher number of parameters in play than random forests, gradient boosting usually requires more sophistication than a simple grid or random search for tuning. One available compromise method is a sequential grid search through different subsets of parameters \citep{procedural_tuning}, although more automated and even parallelized methods are available by way of black box optimization libraries like Optuna \citep{https://doi.org/10.48550/arxiv.1907.10902}. There will likely be more improvements to come both in libraries and tuning conventions; this is an active channel of industry research.
\section{Feature Encodings}
Feature encoding refers to feature set transformations that serve to prepare the data for machine learning. Common forms of feature encoding preparations include normalizations for numeric sets and one hot encodings for categoric, although some learning libraries may accept categoric features in string representations for internal encodings. Before the advent of deep learning, it was common to supplement features with alternate representations of extracted information or to combine features in some fashion. Such practices of feature engineering are sometimes still applied in gradient boosted learning, and it was one of the purposes of these benchmarks to evaluate benefits of the practice in comparison to directly training on the data.
An important distinction among feature encodings is between those that can be applied independently of an esoteric domain profile and those that rely on external structure. An example could be the difference between supplementing a feature with bins derived based on the distribution of populated numeric values versus extracting bins based on an external database lookup. In the case of Automunge, the internal library of encodings follows almost exclusively the former; that is, most encodings are based on inherent numeric or string properties and do not consider adjacent properties that could be inferred based on relevant application domains. (An exception is made for date-time formatted features, which under automation automatically extract bins for weekdays, business hours, and holidays, and redundantly encode entries based on cyclic periods of different time scales \citep{time_encoding}.) The library includes a simple template for integrating custom univariate transformations \citep{anonymous_customtransforms} if a user would like to integrate alternate conventions into a pipeline.
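The date-time treatment noted above can be illustrated generically; the following is a sketch of a sin/cos cyclic encoding of an hour-of-day feature together with a simple weekday bin, offered only as an illustration of the idea rather than the library's internal implementation.

\begin{verbatim}
import numpy as np
import pandas as pd

times = pd.to_datetime(["2021-01-01 06:00", "2021-01-01 18:00",
                        "2021-01-04 12:00"])
hour = times.hour.to_numpy()

# cyclic encoding: 23:00 and midnight land close together in (sin, cos) space
hour_sin = np.sin(2 * np.pi * hour / 24)
hour_cos = np.cos(2 * np.pi * hour / 24)

weekday = times.weekday.to_numpy()   # one of the simple bins mentioned above
\end{verbatim}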
\subsection{Numeric}
Numeric normalizations [Appendix \ref{Numeric}] in practice are most commonly applied similar to our default of z-score `nmbr' (subtract mean and divide by standard deviation) or min-max scaling `mnmx' (converting to the range between 0 and 1). Other variations that may be found in practice include mean scaling `mean' (subtract mean and divide by min max delta), and max scaling `mxab' (divide by feature set absolute max). More sophisticated conventions may convert a distribution shape in addition to the scale, such as the box-cox power law transformation `bxcx' \citep{https://doi.org/10.1111/j.2517-6161.1964.tb00553.x} or Scikit-Learn's \citep{JMLR:v12:pedregosa11a} quantile transformer `qttf', which both may serve the purpose of converting a feature set to more closely resemble a Gaussian distribution. In general, numeric normalizations are more commonly applied for learning paradigms other than those based on decision trees, where for example in neural networks they serve the purpose of normalizing gradient updates across features. We did find that the type of normalizations applied to numeric features appeared to impact performance, and we will present these findings below.
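For concreteness, a generic sketch of the two scalings named first above, fit on a training column and reapplied on the train set basis, is given below; this is an illustration of the conventions rather than the library's `nmbr'/`mnmx' implementations.

\begin{verbatim}
import numpy as np

def zscore_fit(train_col):
    mean, std = np.mean(train_col), np.std(train_col)
    return lambda col: (np.asarray(col) - mean) / std

def minmax_fit(train_col):
    lo, hi = np.min(train_col), np.max(train_col)
    return lambda col: (np.asarray(col) - lo) / (hi - lo)

train = np.array([3.0, 8.0, 5.0, 10.0])
test  = np.array([4.0, 12.0])
print(zscore_fit(train)(test))   # scaled with the train set mean and std
print(minmax_fit(train)(test))   # note test entries may fall outside 0-1
\end{verbatim}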
\subsection{Categoric}
Categoric encodings [Appendix \ref{Categoric}] are most commonly derived in practice as a one hot encoding, where each unique entry in a received feature is translated to boolean integer activations in a dedicated column among a returned set thereof. The practice of one hot encoding has shortcomings in the high cardinality case (where a categoric feature has an excessive number of unique entries), which in the context of gradient boosting may be particularly impactful as an inflated column count impairs latency performance of a training operation, or, when the feature is targeted as a classification label, may even cause training to exceed memory overhead constraints. The Automunge library attempts to circumvent this high cardinality edge case in two fashions: first by defaulting to a binarization encoding instead of one hot, and second by distinguishing highest cardinality sets for a hashed encoding \citep{NIPS1988_82161242} \citep{10.1145/1553374.1553516} \citep{anonymous_hashed} which may stochastically consolidate multiple unique entries into a shared ordinal representation for a reduced number of unique entries.
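The consolidation underlying a hashed encoding can be sketched generically as below; this is an illustration of the hashing trick rather than the library's `hsh2' implementation, and the bucket count is an arbitrary choice for demonstration.

\begin{verbatim}
import hashlib

def hash_bucket(entry, n_buckets=32):
    # deterministic hash of the string form of an entry into one of n_buckets
    digest = hashlib.md5(str(entry).encode()).hexdigest()
    return int(digest, 16) % n_buckets

# unrelated rare entries may end up sharing a bucket, reducing the
# number of unique representations passed to training
print(hash_bucket("user_000017"), hash_bucket("user_981245"))
\end{verbatim}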
The library default of categoric binarization `1010' refers to translating each unique entry in a received feature to a unique set of zero, one, or more boolean integer activations in a returned set of boolean integer columns. Where one hot encoding may return a set of n columns for n unique entries, binarization will instead return a smaller count of log2(n) rounded up to the nearest integer. We have previously seen the practice discussed in the blogging literature, such as \citep{onehot_blog}, although without validation as offered herein.
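A minimal sketch of the column count argument follows (an illustration of the idea, not the library's `1010' implementation): n unique entries map to ceil(log2(n)) boolean columns rather than the n columns of one hot encoding.

\begin{verbatim}
import math
import pandas as pd

def binarize(column):
    categories = sorted(pd.Series(column).unique())
    width = max(1, math.ceil(math.log2(len(categories))))
    codes = {cat: [int(b) for b in format(i, '0{}b'.format(width))]
             for i, cat in enumerate(categories)}
    return pd.DataFrame([codes[v] for v in column],
                        columns=['bin_{}'.format(j) for j in range(width)])

print(binarize(['red', 'green', 'blue', 'green', 'red']))
# 3 unique entries -> 2 returned columns, versus 3 columns under one hot
\end{verbatim}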
A third common variation on categoric representations is ordinal encoding, which simply refers to returning a single column encoding of a feature with a distinct integer representation for each unique entry. Variations on ordinal encodings in the library may sort the integer representations by frequency of the unique entry `ord3' or based on alphabetic sorting `ordl'.
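The distinction between the two ordinal variants can be sketched as follows (an illustration only, not the library's code): integers assigned by descending category frequency in the style of `ord3' versus alphabetically in the style of `ordl'.

\begin{verbatim}
import pandas as pd

col = pd.Series(['cat', 'dog', 'dog', 'bird', 'dog', 'cat'])

by_freq  = {c: i for i, c in enumerate(col.value_counts().index)}   # 'ord3' style
by_alpha = {c: i for i, c in enumerate(sorted(col.unique()))}       # 'ordl' style

print(col.map(by_freq).tolist())    # [1, 0, 0, 2, 0, 1]
print(col.map(by_alpha).tolist())   # [1, 2, 2, 0, 2, 1]
\end{verbatim}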
Another convention for categoric sets unique to the Automunge library we refer to as parsed categoric encodings `or19' \citep{anonymous_Strings}. Parsed encodings search through tiers of string character subsets of unique entries to identify shared grammatical structure for supplementing encodings with structure derived from a training set basis. Parsed encodings are supplemented with extracted numeric portions of unique entries for additional information retention in the form received by training.
\section{Benchmarking}
The benchmarking sought to evaluate a range of numeric and categoric encoding scenarios by way of two key performance metrics, training time and model performance. Training was performed over the course of $\sim$1.5 weeks on a Lambda workstation with AMD 3970X processor, 128Gb RAM, and two Nvidia 3080 GPUs. Training was performed by way of XGBoost tuned by Optuna with 5-fold fast cross-validation \citep{NIPS2013_f33ba15e} and an early stopping criterion of 50 tuning iterations without improvement. Performance was evaluated against a partitioned 25\% validation set based on an f1 score performance metric, which we understand is a good default for balanced evaluation of bias and variance performance of classification tasks \citep{stevens2020learning}. This loop was repeated and averaged across 5 iterations and then repeated and averaged across 31 tabular classification data sets sourced from the OpenML benchmarking repository \citep{Vanschoren_2014}. Rephrasing for clarity, the reported metrics are averages of 5 repetitions of 31 data sets for each encoding type as applied to all numeric or categoric features for training. The distribution bands shown in the figures are across the five repetitions. The data sets were selected for diverse tabular classification applications with in-memory scale training data and tractable label cardinality.
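A simplified sketch of this tuning loop is given below. It is a reconstruction for illustration rather than the benchmarking harness itself: the hyperparameter search space, the weighted f1 averaging, and the callback used as a stand-in for the 50-trials-without-improvement stopping rule are all assumptions made for the example.

\begin{verbatim}
import optuna
import xgboost as xgb
from sklearn.metrics import f1_score
from sklearn.model_selection import cross_val_score, train_test_split

class StopWhenStalled:
    # stop the study after `patience` completed trials without improvement
    def __init__(self, patience=50):
        self.patience, self.stall, self.best = patience, 0, float('-inf')
    def __call__(self, study, trial):
        if study.best_value > self.best:
            self.best, self.stall = study.best_value, 0
        else:
            self.stall += 1
            if self.stall >= self.patience:
                study.stop()

def tune_and_score(X, y):
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.25,
                                                      random_state=0)
    def objective(trial):
        params = {
            'max_depth': trial.suggest_int('max_depth', 2, 10),
            'learning_rate': trial.suggest_float('learning_rate', 1e-3, 0.3,
                                                 log=True),
            'n_estimators': trial.suggest_int('n_estimators', 50, 500),
            'subsample': trial.suggest_float('subsample', 0.5, 1.0),
        }
        model = xgb.XGBClassifier(**params)
        # 5-fold cross-validated f1 on the training partition
        return cross_val_score(model, X_train, y_train, cv=5,
                               scoring='f1_weighted').mean()

    study = optuna.create_study(direction='maximize')
    study.optimize(objective, n_trials=500, callbacks=[StopWhenStalled(50)])

    best = xgb.XGBClassifier(**study.best_params).fit(X_train, y_train)
    return f1_score(y_val, best.predict(X_val), average='weighted')
\end{verbatim}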
We found that these benchmarks gave us comfort in the Automunge library's defaults of numeric z-score normalization and categoric binarization. An interesting result was the outperformance of categoric binarization in comparison to one-hot encoding, as the latter is commonly used in mainstream practice as a default. Further discussion of the interpretation of the results presented in Figures \ref{fig1:test} and \ref{fig2:test} is provided in [Appendix \ref{Numeric}, \ref{Categoric}].
\begin{figure}
\caption{Numeric tuning time comparison}
\label{fig1:sub1}
\caption{Numeric model performance comparison}
\label{fig1:sub2}
\caption{Numeric Results}
\label{fig1:test}
\end{figure}
\begin{figure}
\caption{Categoric tuning time comparison}
\label{fig2:sub1}
\caption{Categoric model performance comparison}
\label{fig2:sub2}
\caption{Categoric Results}
\label{fig2:test}
\end{figure}
\section{Conclusion}
We hope that these benchmarks may have provided some level of user comfort by validating the default encodings applied under automation by the Automunge library of z-score normalization and categoric binarization, both from a training time and model performance standpoint. If you would like to try out the library we recommend the tutorials folder found on GitHub \citep{anonymous_github} as a starting point.
\appendix
\section{Numeric Encodings} \label{Numeric}
\begin{figure}
\caption{Numeric Encodings}
\label{capped quantiles}
\label{binstransform}
\end{figure}
\subsection{default}
\begin{itemize} \item defaults for Automunge under automation as z-score normalization (`nmbr' code in the library) \item The default encoding was validated both from a tuning duration and a model performance standpoint as top performing scenario on average. \end{itemize}
\subsection{qttf}
\begin{itemize} \item Scikit-Learn QuantileTransformer with a normal output distribution \item The quantile distribution conversion did not perform as well on average as simple z-score normalization, although it remained a top performer. \end{itemize}
\subsection{powertransform}
\begin{itemize} \item the Automunge option to conditionally encode between `bxcx', `mmmx', or `MAD3' based on distribution properties (via libraryβs powertransform=True setting) \item This was the worst performing encoding scenario, which at a minimum demonstrates that the heuristics and statistical measures currently applied by the library to conditionally select types of encodings could use some refinement. \end{itemize}
\subsection{mnmx}
\begin{itemize} \item min max scaling `mnmx' which shifts a feature distribution into the range 0--1 \item This scenario performed considerably worse than z-score normalization, which we expect was due to cases where outlier values may have caused the predominantly populated region to get ``squished together'' in the encoding space. \end{itemize}
\subsection{capped quantiles}
\begin{itemize} \item min max scaling with capped outliers at 0.99 and 0.01 quantiles (`mnm3' code in library) \item This scenario is best compared directly to min-max scaling, and demonstrates that defaulting to capping outliers did not benefit performance on average. \end{itemize}
\subsection{binstransform}
\begin{itemize} \item z-score normalization supplemented by 5 one hot encoded standard deviation bins (via libraryβs binstransform=True setting) \item In addition to a widened range of tuning durations, the supplemental bins did not appear to be beneficial to model performance for gradient boosting. \end{itemize}
\section{Categoric Encodings} \label{Categoric}
\begin{figure}
\caption{Categoric Encodings}
\label{hsh2}
\label{or19}
\end{figure}
\subsection{default}
\begin{itemize} \item defaults for Automunge under automation for categoric binarization (`1010' code in the library) \item The default encoding was validated as top performing both from a tuning duration and a model performance standpoint. \end{itemize}
\subsection{onht}
\begin{itemize} \item one hot encoding \item The model performance impact was surprisingly negative compared to the default, considering this is often used as a default in mainstream practice. Based on this benchmark we recommend discontinuing use of one-hot encoding outside of special use cases (e.g. for purposes of feature importance analysis). \end{itemize}
\subsection{ord3}
\begin{itemize} \item ordinal encoding with integers sorted by category frequency `ord3' \item Sorting ordinal integers by category frequency instead of alphabetic significantly benefited model performance, in most cases lifting ordinal above one hot encoding although still not in the range of the default binarization. \end{itemize}
\subsection{ordl}
\begin{itemize} \item ordinal encoding with integers sorted alphabetically by category `ordl' \item Alphabetic sorted ordinal encodings (as is the default for Scikit-Learn's OrdinalEncoder) did not perform as well; we recommend defaulting to frequency sorted integers when applying ordinal encodings. \end{itemize}
\subsection{hsh2}
\begin{itemize} \item hashed ordinal encoding (library default for high cardinality categoric `hsh2') \item This benchmark was primarily included for reference; it was expected that, since some categories may be consolidated, there would be a performance impact for low cardinality sets. The benefit of hashing is for high cardinality sets, which may otherwise impact gradient boosting memory overhead. \end{itemize}
\subsection{or19}
\begin{itemize} \item multi-tier string parsing `or19' \citep{anonymous_Strings} \item It appears that our recent invention of multi-tier string parsing succeeded in outperforming one-hot encoding and was the second top performer, but did not perform well enough to recommend it as a default over vanilla binarization. We recommend reserving string parsing for cases where the application may have some extended structure associated with grammatical content, as was validated as outperforming binarization for an example in the citation. \end{itemize}
\section{Data Sets}
The benchmarking included the following tabular data sets, shown here with their OpenML ID number. A thank you to \citep{Vanschoren_2014} for providing the data sets and \citep{kadra2021welltuned} for inspiring the composition.
\begin{itemize}
\item Click prediction / 233146 \item C.C.FraudD. / 233143 \item sylvine / 233135 \item jasmine / 233134 \item fabert / 233133 \item APSFailure / 233130 \item MiniBooNE / 233126 \item volkert / 233124 \item jannis / 233123 \item numerai28.6 / 233120 \item Jungle-Chess-2pcs / 233119 \item segment / 233117 \item car / 233116 \item Australian / 233115 \item higgs / 233114 \item shuttle / 233113 \item connect-4 / 233112 \item bank-marketing / 233110 \item blood-transfusion / 233109 \item nomao / 233107 \item ldpa / 233106 \item skin-segmentation / 233104 \item phoneme / 233103 \item walking-activity / 233102 \item adult / 233099 \item kc1 / 233096 \item vehicle / 233094 \item credit-g / 233088 \item mfeat-factors / 233093 \item arrhythmia / 233092 \item kr-vs-kp / 233091 \end{itemize}
\section{Checklist}
\begin{enumerate}
\item For all authors... \begin{enumerate}
\item Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope?
\answerYes{}
\item Did you describe the limitations of your work?
\answerYes{}
\item Did you discuss any potential negative societal impacts of your work?
\answerNA{}
\item Have you read the ethics review guidelines and ensured that your paper conforms to them?
\answerYes{} \end{enumerate}
\item If you are including theoretical results... \begin{enumerate}
\item Did you state the full set of assumptions of all theoretical results?
\answerNA{}
\item Did you include complete proofs of all theoretical results?
\answerNA{} \end{enumerate}
\item If you ran experiments... \begin{enumerate}
\item Did you include the code, data, and instructions needed to reproduce the main experimental results (either in the supplemental material or as a URL)?
\answerYes{}
\item Did you specify all the training details (e.g., data splits, hyperparameters, how they were chosen)?
\answerNo{} (see supplemental material notebooks)
\item Did you report error bars (e.g., with respect to the random seed after running experiments multiple times)?
\answerYes{}
\item Did you include the total amount of compute and the type of resources used (e.g., type of GPUs, internal cluster, or cloud provider)?
\answerYes{} \end{enumerate}
\item If you are using existing assets (e.g., code, data, models) or curating/releasing new assets... \begin{enumerate}
\item If your work uses existing assets, did you cite the creators?
\answerYes{}
\item Did you mention the license of the assets?
\answerNo{}
\item Did you include any new assets either in the supplemental material or as a URL?
\answerYes{}
\item Did you discuss whether and how consent was obtained from people whose data you're using/curating?
\answerNA{}
\item Did you discuss whether the data you are using/curating contains personally identifiable information or offensive content?
\answerNo{} \end{enumerate}
\item If you used crowdsourcing or conducted research with human subjects... \begin{enumerate}
\item Did you include the full text of instructions given to participants and screenshots, if applicable?
\answerNA{}
\item Did you describe any potential participant risks, with links to Institutional Review Board (IRB) approvals, if applicable?
\answerNA{}
\item Did you include the estimated hourly wage paid to participants and the total amount spent on participant compensation?
\answerNA{} \end{enumerate}
\end{enumerate}
\end{document} |
\begin{document}
\title{Godement-Jacquet L-functions and full theta lifts}
\author[Y. Fang]{Yingjue Fang}
\address{College of Mathematics and Statistics, Shenzhen University, Shenzhen, 518060, China} \email{[email protected]}
\author[B. Sun]{Binyong Sun} \address{Academy of Mathematics and Systems Science, Chinese Academy of Sciences \& University of Chinese Academy of Sciences, Beijing, 100190, China} \email{[email protected]}
\author [H.Xue] {Huajian Xue} \address{Beijing International Center for Mathematical Research\\ Peking University\\ Beijing, 100871, China} \email{[email protected]}
\subjclass[2000]{22E50} \keywords{Godement-Jacquet L-function, theta lift}
\begin{abstract} We relate poles of local Godement-Jacquet L-functions to distributions on matrix spaces with singular supports. As an application, we show the irreducibility of the full theta lifts to ${\mathrm{GL}}_n({\mathrm {F}})$ of generic irreducible representations of ${\mathrm{GL}}_n({\mathrm {F}})$, where ${\mathrm {F}}$ is an arbitrary local field.
\end{abstract}
\maketitle
\section{Introduction}
Let ${\mathrm {F}}$ be a local field and let ${\mathrm {D}}$ be a central division algebra over ${\mathrm {F}}$ of finite dimension $d^2$ ($d\geq 1$). Fix an integer $n\geq 1$. As usual, let $\operatorname{M}_{n}({\mathrm {D}})$ denote the space of $n\times n$ matrices with coefficients in ${\mathrm {D}}$. Put \[
G:={\mathrm{GL}}_n({\mathrm {D}})\subset \operatorname{M}_{n}({\mathrm {D}}). \] Write $\mathcal S$ for the space of Schwartz or Bruhat-Schwartz functions on $\operatorname{M}_{ n}({\mathrm {D}})$, when ${\mathrm {F}}$ is respectively archimedean or non-archimedean. View it as a representation of $G\times G$ by the action \begin {equation}\label{actgg}
((g,h). \phi)(x):=\abs{\det(g^{-1}h)}_{\mathrm {F}}^{\frac{dn}{2}}\phi(g^{-1} xh),\quad g,h\in G,\, \phi\in {\mathcal {S}},\, x\in \operatorname{M}_{n}({\mathrm {D}}). \end {equation} Here ``$\det$" stands for the reduced norm on $\operatorname{M}_{n}({\mathrm {D}})$, and ``$\abs{\,\cdot\, }_{\mathrm {F}}$" stands for the normalized absolute value on ${\mathrm {F}}$. Write $G_1$ for the subgroup $G\times\{1\}$ of $G\times G$, and likewise write $G_2$ for the subgroup $\{1\}\times G$ of $G\times G$. When no confusion is possible, we will identify these two groups with $G$.
Let $\sigma$ be an irreducible admissible smooth representation of $G$. By an ``admissible smooth representation", we mean a Casselman-Wallach representation when ${\mathrm {F}}$ is archimedean, and
a smooth representation of finite length when ${\mathrm {F}}$ is non-archimedean. The reader may consult \cite{Ca}, \cite[Chapter 11]{Wa2} or \cite{BK} for details about Casselman-Wallach representations.
Define the full theta lift of $\sigma$ by
\begin {equation}\label{theta1}
\Theta_1(\sigma):= ({\mathcal {S}}\widehat \otimes \sigma^\vee)_{G_1},
\end {equation}
which is a representation of $G_2$ and is also viewed as a representation of $G$ via the identification $G\cong G_2$.
Here ``$\widehat \otimes$" denotes the completed projective tensor product in the archimedean case, and the algebraic tensor product in the non-archimedean case;
a superscript ``$\,^\vee$" indicates the contragredient representation; $\sigma^\vee$ is viewed as a representation of $G_1$ via the identification $G_1\cong G$;
and a
subscript group indicates the maximal (Hausdorff in the archimedean case) quotient on which the group acts trivially.
Similar to \eqref{theta1}, view $\sigma$ as a representation of $G_2$ and define
\begin {equation}\label{theta2}
\Theta_2(\sigma):= ({\mathcal {S}}\widehat \otimes \sigma^\vee)_{G_2},
\end {equation}
which is a representation of $G$. The following proposition is well known. See \cite{howe}, \cite{Ku} and \cite{MVW}, for examples.
\begin{prpt}\label{ftfl}
Both $\Theta_1(\sigma)$ and $\Theta_2(\sigma)$ are admissible smooth representations of $G$.
\end{prpt}
It is also well known that $\Theta_1(\sigma)$ has a unique irreducible quotient, which is isomorphic to $\sigma^\vee$, and likewise $\Theta_2(\sigma)$ has a unique irreducible quotient, which is also isomorphic to $\sigma^\vee$ (\emph{cf.}~ \cite[Th\'eor\`eme 1]{mi}). This assertion is equivalently formulated as in the following theorem.
\begin{thm}\label{howe} Let $\sigma, \sigma'$ be irreducible admissible smooth representations of $G$. Then \[
\dim {\mathrm{Hom}}_{G\times G}({\mathcal {S}}, \sigma\widehat \otimes \sigma')=\left \{
\begin{array}{ll}
1,\quad &\textrm{if $\sigma'\cong \sigma^\vee$;}\\
0,\quad &\textrm{otherwise.}
\end{array}
\right. \] \end{thm}
For applications to representation theory and automorphic forms, it is desirable to know whether or not the full theta lift itself is irreducible. This is known affirmatively for supercuspidal representations in the non-archimedean case, in a general setting of dual pair correspondences (see \cite{Ku}). However, not much is known beyond the supercuspidal case.
Write $\mathcal S^\circ$ for the space of Schwartz or Bruhat-Schwartz functions on $G$ when ${\mathrm {F}}$ is respectively archimedean or non-archimedean. By extension by zero, we view it as a subrepresentation of ${\mathcal {S}}$. The following is the key result of this note. \begin{thm}\label{pole1} The following assertions are equivalent. \begin{itemize} \item[(a).] The Godement-Jacquet L-function $\operatorname{L}(s,\sigma)$ has no pole at $s=1/2$. \item[(b).] ${\mathrm{Hom}}_{G_1}({\mathcal {S}}/{\mathcal {S}}^\circ, \sigma)= 0$. \item[(c).] ${\mathrm{Hom}}_{G_2}({\mathcal {S}}/{\mathcal {S}}^\circ, \sigma^\vee)= 0$. \item[(d).] ${\mathrm{Hom}}_{G\times G}({\mathcal {S}}/{\mathcal {S}}^\circ, \sigma\widehat \otimes \sigma^\vee)= 0$. \end{itemize} If one of the above conditions is satisfied, then both $\Theta_1(\sigma)$ and $\Theta_2(\sigma^\vee)$ are irreducible. \end{thm}
The following result will be proved in Section \ref{secf} by using the Fourier transform.
\begin{prpt}\label{theta12} As representations of $G$, $\Theta_1(\sigma)$ and $\Theta_2(\sigma)$ are isomorphic to each other. \end{prpt}
Theorem \ref{pole1} and Proposition \ref{theta12} have the following obvious consequence. \begin{cort}\label{nopole} Assume that $\operatorname{L}(s,\sigma)$ has no pole at $s=1/2$, or $\operatorname{L}(s,\sigma^\vee)$ has no pole at $s=1/2$. Then as representations of $G$, $\Theta_1(\sigma)\cong \sigma^\vee \cong \Theta_2(\sigma)$. \end{cort}
\begin{example} Assume that ${\mathrm {F}}$ is non-archimedean and $G={\mathrm{GL}}_2({\mathrm {F}})$. If $\sigma$ is not the trivial representation, then $\operatorname{L}(s,\sigma)$ has no pole at $s=1/2$, or $\operatorname{L}(s,\sigma^\vee)$ has no pole at $s=1/2$. Thus by Corollary \ref{nopole}, $\Theta_1(\sigma)$ and $\Theta_2(\sigma)$ are irreducible. On the other hand, it is shown in \cite{Xue} that $\Theta_1(\sigma)$ and $\Theta_2(\sigma)$ are reducible when $\sigma$ is the trivial representation of ${\mathrm{GL}}_2({\mathrm {F}})$.
\end{example}
We are particularly interested in generic representations of ${\mathrm{GL}}_n({\mathrm {F}})$ since they appear as local components of cuspidal automorphic representations. The following proposition asserts that the assumption in Corollary \ref{nopole} does hold for generic representations of ${\mathrm{GL}}_n({\mathrm {F}})$.
\begin{prpt}\label{generic} Assume that ${\mathrm {D}}={\mathrm {F}}$ and $\sigma$ is generic. Then $\operatorname{L}(s,\sigma)$ has no pole at $s=1/2$, or $\operatorname{L}(s,\sigma^\vee)$ has no pole at $s=1/2$. \end{prpt}
By Corollary \ref{nopole} and Proposition \ref{generic}, we get the following result. \begin{thm} Assume that ${\mathrm {D}}={\mathrm {F}}$ and $\sigma$ is generic. Then as representations of $G$, $\Theta_1(\sigma)\cong \sigma^\vee \cong \Theta_2(\sigma)$. \end{thm}
As one step towards the proof of Proposition \ref{generic}, in Section \ref{secl} we will prove the following result which is interesting in itself.
\begin{prpt}\label{pat1} Let $\sigma_1, \sigma_2$ be irreducible admissible smooth representations of ${\mathrm{GL}}_{n_1}({\mathrm {F}})$ and ${\mathrm{GL}}_{n_2}({\mathrm {F}})$ ($n_1,n_2\geq 1$), respectively. Assume that both $\operatorname{L}(s,\sigma_1)$ and $\operatorname{L}(s,\sigma_2)$ have a pole at $s=1/2$. Then the Rankin-Selberg L-function $\operatorname{L}(s,\sigma_1\times \sigma_2)$ has a pole at $s=1$. \end{prpt}
\begin{rremark} By using the local Langlands correspondence for both ${\mathrm{GL}}_n({\mathrm {F}})$ and ${\mathrm{GL}}_n({\mathrm {D}})$, Proposition \ref{pat1} implies the analogous result with ${\mathrm {F}}$ replaced by ${\mathrm {D}}$ (the Rankin-Selberg L-function for ${\mathrm{GL}}_{n_1}({\mathrm {D}})\times {\mathrm{GL}}_{n_2}({\mathrm {D}})$ is defined via the Jacquet-Langlands correspondence).
\end{rremark}
\section{A proof of Theorem \ref{pole1}}
We continue with the notation of the Introduction. The local Godement-Jacquet zeta integral attached to $\sigma$ is defined by \[
\operatorname{Z}(\phi, \lambda, v;s):=\int_G \phi(g) \langle g.v, \lambda\rangle \abs{\det(g)}_{\mathrm {F}}^{s+\frac{dn-1}{2}}\,\operatorname{d}\! g, \quad \phi\in {\mathcal {S}},\, \lambda\in \sigma^\vee,\, v\in \sigma,\, s\in \mathbb{C}, \] where $\operatorname{d}\!g$ is a fixed Haar measure on $G$. It is clear that if $\phi\in {\mathcal {S}}^\circ$, then the integral is absolutely convergent and is holomorphic in the variable $s\in \mathbb{C}$.
We summarize the basic results of local Godement-Jacquet zeta integrals as in the following theorem (\emph{cf.}~ \cite[Theorems 3.3 and 8.7]{GJ}). \begin{thm}\label{gjzeta} When the real part of $s$ is sufficiently large, the integral $\operatorname{Z}(\phi, \lambda,v;s)$ is absolutely convergent for all $\phi$, $\lambda$ and $v$. Moreover, there exists a (continuous in the archimedean case) map \[
\operatorname{Z}^\circ: {\mathcal {S}}\times \sigma^\vee\times \sigma\times \mathbb{C}\rightarrow \mathbb{C} \] which is linear in the first three variables and holomorphic in the last variable such that \begin{itemize}
\item
when the real part of $s$ is sufficiently large,
\[
\operatorname{Z}^\circ(\phi, \lambda,v ;s)=\frac{\operatorname{Z}(\phi, \lambda,v;s)}{\operatorname{L}(s,\sigma)},\quad \textrm{for all }\phi, v,\lambda; \quad\textrm{and}
\]
\item
for each $s\in \mathbb{C}$, the trilinear form $\operatorname{Z}^\circ(\cdot, \cdot, \cdot; s)$ yields a generator of the one dimensional vector space
\[
{\mathrm{Hom}}_{G\times G}({\mathcal {S}}\widehat \otimes \sigma^\vee\widehat \otimes \sigma, \abs{\det}^{s-\frac{1}{2}}_{\mathrm {F}}\otimes \abs{\det}^{\frac{1}{2}-s}_{\mathrm {F}} ).
\] \end{itemize} \end{thm}
Let $\operatorname{Z}^\circ$ be as in Theorem \ref{gjzeta}. Write $\operatorname{Z}^{\frac{1}{2}}$ for the generator of the one dimensional space \[
{\mathrm{Hom}}_{G\times G}({\mathcal {S}}, \sigma \widehat \otimes \sigma^\vee)
\] produced by the trilinear form $\operatorname{Z}^\circ(\cdot, \cdot, \cdot; \frac{1}{2})$.
\begin{lemt}\label{gjzetap} The Godement-Jacquet L-function $\operatorname{L}(s,\sigma)$ has a pole at $s=\frac{1}{2}$ if and only if \[
\operatorname{Z}^\circ|_{{\mathcal {S}}^\circ \times \sigma^\vee\times \sigma\times \{\frac{1}{2}\}}=0,\quad \textrm{or equivalently,}\quad \operatorname{Z}^{\frac{1}{2}}|_{{\mathcal {S}}^\circ}=0. \] \end{lemt} \begin{proof} Denote by $c_r(s-\frac{1}{2})^{-r}$ the leading term of the Laurent expansion of $\operatorname{L}(s,\sigma)$ around $s=\frac{1}{2}$. Then $r\geq 0$, since local L-functions have no zeros. Now we have that
\[
\operatorname{Z}^\circ(\phi, v, \lambda;\frac{1}{2})=\lim_{s\rightarrow \frac{1}{2}} \left(s-\frac{1}{2}\right)^{r} \, \cdot \, c_r^{-1}\cdot \int_G \phi(g) \langle g.v, \lambda\rangle \abs{\det(g)}_{\mathrm {F}}^{\frac{dn}{2}}\,\operatorname{d}\! g
\]
for all $\phi\in {\mathcal {S}}^\circ,\, \lambda\in \sigma^\vee,\, v\in \sigma$. This is identically zero if and only if $r>0$. Thus the lemma follows. \end{proof}
\begin{lemt}\label{l1} If \begin {equation}\label{vant} {\mathrm{Hom}}_{G_1}({\mathcal {S}}/{\mathcal {S}}^\circ, \sigma)\neq 0 \quad\textrm{or }\quad
{\mathrm{Hom}}_{G_2}({\mathcal {S}}/{\mathcal {S}}^\circ, \sigma^\vee)\neq 0,
\end {equation}
then
$\operatorname{Z}^{\frac{1}{2}}|_{{\mathcal {S}}^\circ}= 0$. \end{lemt} \begin{proof} First assume that ${\mathrm{Hom}}_{G_1}({\mathcal {S}}/{\mathcal {S}}^\circ, \sigma)\neq 0$. By Proposition \ref{ftfl}, we know that there is an irreducible admissible smooth representation $\sigma'$ of $G$ such that \[
{\mathrm{Hom}}_{G\times G}({\mathcal {S}}/{\mathcal {S}}^\circ, \sigma\widehat \otimes \sigma')\neq 0. \] Then Theorem \ref{howe} implies that $\sigma'\cong \sigma^\vee$. Therefore, there is a nonzero element of ${\mathrm{Hom}}_{G\times G}({\mathcal {S}}, \sigma\widehat \otimes \sigma^\vee)$ which vanishes on ${\mathcal {S}}^\circ$. Since
$\dim {\mathrm{Hom}}_{G\times G}({\mathcal {S}}, \sigma\widehat \otimes \sigma^\vee)=1$, this implies that $\operatorname{Z}^{\frac{1}{2}}|_{{\mathcal {S}}^\circ}=0$. If ${\mathrm{Hom}}_{G_2}({\mathcal {S}}/{\mathcal {S}}^\circ, \sigma^\vee)\neq 0$, a similar proof shows that $\operatorname{Z}^{\frac{1}{2}}|_{{\mathcal {S}}^\circ}=0$. \end{proof}
\begin{lemt}\label{l2} Parts (a), (b), (c) and (d) of Theorem \ref{pole1} are equivalent to each other. \end{lemt} \begin{proof}
If $\operatorname{Z}^{\frac{1}{2}}|_{{\mathcal {S}}^\circ}= 0$, then $\operatorname{Z}^{\frac{1}{2}}$ descends to a nonzero element of ${\mathrm{Hom}}_{G\times G}({\mathcal {S}}/{\mathcal {S}}^\circ, \sigma\widehat \otimes \sigma^\vee)$. Therefore \[
\operatorname{Z}^{\frac{1}{2}}|_{{\mathcal {S}}^\circ}= 0\quad \Longrightarrow\quad {\mathrm{Hom}}_{G\times G}({\mathcal {S}}/{\mathcal {S}}^\circ, \sigma\widehat \otimes \sigma^\vee)\neq 0. \] It is obvious that \[
{\mathrm{Hom}}_{G\times G}({\mathcal {S}}/{\mathcal {S}}^\circ, \sigma\widehat \otimes \sigma^\vee)\neq 0 \quad \Longrightarrow\quad
\left\{ \begin{array}{l}
{\mathrm{Hom}}_{G_1}({\mathcal {S}}/{\mathcal {S}}^\circ, \sigma)\neq 0, \ \textrm{ and }\\
{\mathrm{Hom}}_{G_2}({\mathcal {S}}/{\mathcal {S}}^\circ, \sigma^\vee)\neq 0.
\end{array}
\right. \] Together with Lemma \ref{gjzetap} and Lemma \ref{l1}, this proves the lemma. \end{proof}
\begin{lemt}\label{frob} Let $\sigma_0$ be a smooth representation of $G$ when ${\mathrm {F}}$ is non-archimedean, and a smooth Fr\'echet representation of $G$ of moderate growth when ${\mathrm {F}}$ is archimedean. Then \begin {equation}\label{isof1}
({\mathcal {S}}^\circ\widehat \otimes \sigma_0)_{G_1} \cong \sigma_0 \end {equation} as representations of $G$. \end{lemt} \begin{proof} We prove the lemma in the archimedean case by assuming that ${\mathrm {F}}$ is archimedean. The non-archimedean case is similar but less involved, and we omit its proof. Write $\mathcal D^\circ:={\mathcal {S}}^\circ \operatorname{d}\! g$, which is a topological vector space of measures on $G$. It is a representation of $G\times G$ such that $(g,h)\in G\times G$ acts on it by the push-forward of measures through the translation map \[
G\rightarrow G,\quad x\mapsto gxh^{-1}. \] Using the topological linear isomorphism \[
{\mathcal {S}}^\circ\rightarrow \mathcal D^\circ, \quad \phi\mapsto \check \phi \cdot \abs{\det}_{\mathrm {F}}^{-\frac{dn}{2}}\cdot \operatorname{d}\! g, \qquad (\check \phi(g):=\phi(g^{-1})) \] we know that \eqref{isof1} is equivalent to \begin {equation}\label{isof2}
(\mathcal D^\circ\widehat \otimes \sigma_0)_{G_2} \cong \sigma_0. \end {equation}
The bilinear map \begin {equation}\label{actsigma0}
\mathcal D^\circ\times \sigma_0\rightarrow \sigma_0, \quad (\phi \operatorname{d}\! g, v)\mapsto (\phi \operatorname{d}\! g).v:= \int_G \phi(g) g.v \operatorname{d} \! g \end {equation} is continuous and yields a $G$-homomorphism \begin {equation}\label{isof3}
(\mathcal D^\circ\widehat \otimes \sigma_0)_{G_2} \rightarrow \sigma_0. \end {equation} The theorem of Dixmier-Malliavin \cite[Theorem 3.3]{DM} implies that the map \eqref{isof3} is surjective. It is thus an open map by the Open Mapping Theorem. In order to show that the map \eqref{isof3} is an isomorphism, it suffices to show that its transpose is a linear isomorphism. This transpose map is the composition of \begin{eqnarray*}
\sigma_0^*&\rightarrow &{\mathrm{Hom}}_{G_2}(\sigma_0, (\mathcal D^\circ)^*)\\
&\cong& {\mathrm{Hom}}_{G_2}(\mathcal D^\circ\widehat \otimes \sigma_0, \mathbb{C}) \qquad \quad \textrm{\cite[Formula (50.16)]{Tr}}\\
&\cong & ((\mathcal D^\circ\widehat \otimes \sigma_0)_{G_2})^*, \end{eqnarray*} where the first homomorphism is given by \begin {equation}\label{firstmap}
\lambda\mapsto (v\mapsto(\eta\mapsto \lambda(\eta. v))). \end {equation}
By definition, $(\mathcal D^\circ)^*$ is the space of tempered generalized functions on $G$. Let $\nu\in {\mathrm{Hom}}_{G_2}(\sigma_0, (\mathcal D^\circ)^*)$. Since the convolution of a tempered generalized function on $G$ with an element of $\mathcal D^\circ$ is a smooth function, using the theorem of Dixmier-Malliavin, we know that $\nu(v)$ is a smooth function on $G$ for each $v\in \sigma_0$. Let $\lambda_\nu(v)\in \mathbb{C}$ be its evaluation at $1\in G$. Then $\lambda_\nu$ is a linear functional on $\sigma_0$. It is easy to check that the diagram \begin {equation}\label{cd1}
\begin{CD}
\mathcal D^\circ\times \sigma_0@>\textrm{the map \eqref{actsigma0}}>> \sigma_0\\
@V\textrm{(identity map)}\times \nu VV @VV\lambda_\nu V\\
\mathcal D^\circ\times (\mathcal D^\circ)^* @ >\textrm{the natural paring}>> \mathbb{C}\\
\end{CD} \end {equation} commutes. Note that the bottom horizontal arrow is separately continuous. Thus the composition of \[
\mathcal D^\circ\times \sigma_0\xrightarrow{\textrm{the map \eqref{actsigma0}}} \sigma_0\xrightarrow{\lambda_\nu} \mathbb{C} \] is separately continuous, which is automatically continuous by \cite[Corollary of Theorem 34.1]{Tr}. This implies that $\lambda_\nu$ is continuous. Using the commutative diagram \eqref{cd1}, it is routine to check that the map \[
{\mathrm{Hom}}_{G_2}(\sigma_0, (\mathcal D^\circ)^*)\rightarrow \sigma_0^*,\qquad \nu\mapsto \lambda_\nu \] is inverse to the map \eqref{firstmap}. Therefore the map \eqref{firstmap} is bijective. This finishes the proof of the lemma. \end{proof}
\begin{rremark} The proof of the above lemma shows that the isomorphism \eqref{isof2} holds when $G$ is replaced by an arbitrary totally disconnected locally compact Hausdorff topological group, or an arbitrary almost linear Nash group. See \cite{Sun} for the notion of almost linear Nash groups, and \cite[Sections 2.2, 2.3]{Sun2} for the notion of smooth representations of moderate growth for almost linear Nash groups.
\end{rremark}
\begin{lemt}\label{irrf} If ${\mathrm{Hom}}_{G_1}({\mathcal {S}}/{\mathcal {S}}^\circ, \sigma)= 0$, then the representation $\Theta_1(\sigma)$ of $G$ is irreducible. \end{lemt} \begin{proof} The exact sequence \[ 0\rightarrow {\mathcal {S}}^\circ \rightarrow {\mathcal {S}} \rightarrow {\mathcal {S}}/{\mathcal {S}}^\circ\rightarrow 0 \] yields an exact sequence \[
({\mathcal {S}}^\circ\widehat \otimes \sigma^\vee)_{G_1} \rightarrow ({\mathcal {S}}\widehat \otimes \sigma^\vee)_{G_1} \rightarrow (({\mathcal {S}}/{\mathcal {S}}^\circ)\widehat \otimes \sigma^\vee)_{G_1} \rightarrow 0. \] The assumption of the lemma implies that $ (({\mathcal {S}}/{\mathcal {S}}^\circ)\widehat \otimes \sigma^\vee)_{G_1}=0$. Thus we have a surjective homomorphism \[
({\mathcal {S}}^\circ\widehat \otimes \sigma^\vee)_{G_1} \rightarrow \Theta_1(\sigma)
\] of representations of $G$. By Lemma \ref{frob}, \[
({\mathcal {S}}^\circ\widehat \otimes \sigma^\vee)_{G_1} \cong \sigma^\vee. \] Since $\Theta_1(\sigma)$ is nonzero, we conclude that $\Theta_1(\sigma)\cong \sigma^\vee$ is irreducible. \end{proof}
A similar argument as in the proof of Lemma \ref{irrf} shows the following lemma. \begin{lemt}\label{irrf2} If ${\mathrm{Hom}}_{G_2}({\mathcal {S}}/{\mathcal {S}}^\circ, \sigma^\vee)= 0$, then the representation $\Theta_2(\sigma^\vee)$ of $G$ is irreducible. \end{lemt}
Combining Lemmas \ref{l2}, \ref{irrf} and \ref{irrf2}, we finish the proof of Theorem \ref{pole1}.
\section{A proof of Proposition \ref{theta12}}\label{secf}
\begin{lemt}\label{fourier} There is a (topological in the archimedean case) linear automorphism ${\mathcal {F}}: {\mathcal {S}}\rightarrow {\mathcal {S}}$ such that \[
{\mathcal {F}}((g,h).\phi)=(h,g).({\mathcal {F}}(\phi)) \quad \textrm{for all } g,h\in G, \phi\in {\mathcal {S}}. \] \end{lemt} \begin{proof} Define a symmetric bilinear form \[
\langle\,,\,\rangle: \operatorname{M}_{n}({\mathrm {D}})\times \operatorname{M}_{n}({\mathrm {D}})\rightarrow {\mathrm {F}}, \quad (x,y)\mapsto \textrm{the reduced trace of $xy$}. \] Fix a non-trivial unitary character $\psi$ on ${\mathrm {F}}$. Define the Fourier transform ${\mathcal {F}}: {\mathcal {S}}\rightarrow {\mathcal {S}}$ by \[
({\mathcal {F}}(\phi))(x):=\int_{ \operatorname{M}_{n}({\mathrm {D}})} \phi(y)\psi(\langle x,y\rangle) \operatorname{d}\! y,\quad \phi\in {\mathcal {S}}, x\in \operatorname{M}_{n}({\mathrm {D}}), \] where $\operatorname{d}\! y$ is a Haar measure on $\operatorname{M}_{n}({\mathrm {D}})$. It is routine to check that ${\mathcal {F}}$ fulfills the requirement of the lemma. \end{proof}
Lemma \ref{fourier} clearly implies Proposition \ref{theta12}, namely \[
\Theta_1(\sigma)\cong\Theta_2(\sigma). \]
\section{A proof of Proposition \ref{generic}}\label{sec18}
We first treat the case of essentially square integrable representations. Recall that an irreducible admissible smooth representation of ${\mathrm{GL}}_n({\mathrm {F}})$ is said to be essentially square integrable if all its matrix coefficients are square integrable when restricted to ${\mathrm{SL}}_n({\mathrm {F}})$. Note that essentially square integrable representations of ${\mathrm{GL}}_n(\mathbb{C})$ exist only when $n=1$, and essentially square integrable representations of ${\mathrm{GL}}_n(\mathbb R)$ exist only when $n\leq 2$.
\begin{lemt}\label{gl1r} Proposition \ref{generic} holds when $G={\mathrm{GL}}_1(\mathbb R)$. \end{lemt} \begin{proof} The representation $\sigma$ corresponds to a character of $\mathbb R^\times$ of the form \begin {equation}\label{cgl1r}
x\mapsto \chi_{m, r}(x):=\left(\frac{x}{\abs{x}}\right)^m \abs{x}^{r} , \end {equation} where $m\in\{0,1\}$ and $r\in \mathbb{C}$. Then (\emph{cf.}~ \cite[Section 16]{Ja}) \[
\operatorname{L}(s,\sigma)=\pi^{\frac{-(s+m+r)}{2}}\Gamma(\frac{s+m+r}{2}), \] and \[
\operatorname{L}(s,\sigma^\vee)=\pi^{\frac{-(s+m-r)}{2}}\Gamma(\frac{s+m-r}{2}). \] Recall that the poles of the gamma function are $0,-1,-2, -3, \cdots$. Thus, if both $\operatorname{L}(s,\sigma)$ and $\operatorname{L}(s,\sigma^\vee)$ have a pole at $\frac{1}{2}$, then \[
\frac{1}{2}+m+r, \frac{1}{2}+m-r\in\{0,-2,-4,-6,\cdots\}. \] Adding these two conditions gives $1+2m\leq 0$, so $m<0$, which contradicts the fact that $m\in \{0,1\}$. \end{proof}
\begin{lemt}\label{comlex} Proposition \ref{generic} holds when $G={\mathrm{GL}}_1(\mathbb{C})$. \end{lemt} \begin{proof} The representation $\sigma$ corresponds to a character of $\mathbb{C}^\times$ of the form \begin {equation}\label{cmr}
z\mapsto \chi_{m, r}(z):=z^m(z\bar z)^{r-\frac{m}{2}} , \end {equation} where $m\in \mathbb{Z}$ and $r\in \mathbb{C}$. Then (\emph{cf.}~ \cite[Section 16]{Ja}) \[
\operatorname{L}(s,\sigma)=2(2\pi)^{-s-r-\frac{\abs{m}}{2}}\Gamma(s+r+\frac{\abs{m}}{2}), \] and \[
\operatorname{L}(s,\sigma^\vee)=2(2\pi)^{-s+r-\frac{\abs{m}}{2}}\Gamma(s-r+\frac{\abs{m}}{2}). \] The lemma then follows as in the proof of Lemma \ref{gl1r}. \end{proof}
\begin{lemt}\label{gl2} Proposition \ref{generic} holds when $G={\mathrm{GL}}_2(\mathbb R)$ and $\sigma$ is essentially square integrable. \end{lemt}
\begin{proof} Under the local Langlands correspondence, the representation $\sigma$ corresponds to a representation of the Weil group $W_\mathbb R$ of $\mathbb R$ of the form ${\mathrm{Ind}}_{\mathbb{C}^\times}^{W_\mathbb R} \chi_{m,r}$, where $\chi_{m,r}$ is as in \eqref{cmr} with $m\neq 0$. Then (\emph{cf.}~ \cite[Section 16]{Ja}) \[
\operatorname{L}(s, \sigma)=\operatorname{L}(s, \chi_{m,r})\quad \textrm{and}\quad \operatorname{L}(s, \sigma^\vee)=\operatorname{L}(s,\chi_{m,r}^{-1}), \]
and the lemma follows by Lemma \ref{comlex}.
\end{proof}
Given an admissible smooth representation $\sigma_i$ of ${\mathrm{GL}}_{n_i}({\mathrm {F}})$ for each $i=1,2,\cdots, \ell$ ($\ell\geq 1$, $n_i\geq 1$), let $\sigma_1\dot\times \sigma_2\dot\times \cdots \dot \times \sigma_\ell$ denote the normalized smooth induction \[
{\mathrm{Ind}}_{{\mathrm {P}}_{n_1, n_2, \cdots, n_\ell}({\mathrm {F}})}^{{\mathrm{GL}}_{n_1+n_2+\cdots+n_\ell}({\mathrm {F}})} (\sigma_1\widehat\otimes \sigma_2\widehat\otimes \cdots \widehat\otimes \sigma_\ell), \] where ${\mathrm {P}}_{n_1, n_2, \cdots, n_\ell}({\mathrm {F}})$ denotes the block-wise upper triangular parabolic subgroup of ${\mathrm{GL}}_{n_1+n_2+\cdots+n_\ell}({\mathrm {F}})$ which has ${\mathrm{GL}}_{n_1}({\mathrm {F}})\times {\mathrm{GL}}_{n_2}({\mathrm {F}})\times\cdots \times {\mathrm{GL}}_{n_\ell}({\mathrm {F}})$ as a Levi factor, and $\sigma_1\widehat\otimes \sigma_2\widehat\otimes \cdots \widehat\otimes \sigma_\ell$ is viewed as a representation of ${\mathrm {P}}_{n_1, n_2, \cdots, n_\ell}({\mathrm {F}})$ as usual.
Assume that ${\mathrm {F}}$ is non-archimedean for the moment. Let $\tau$ be a supercuspidal irreducible admissible smooth representation of ${\mathrm{GL}}_m({\mathrm {F}})$, where $m$ is a positive divisor of $n$. Suppose $n=\ell m$. Then the representation \[ (\tau\cdot \abs{\det}_{\mathrm {F}}^{1-\ell})\dot \times (\tau\cdot \abs{\det}_{\mathrm {F}}^{2-\ell})\dot \times\dots \dot\times \tau \]
has a unique irreducible quotient representation, which we denote by $\sigma_{n,\tau}$. It is an essentially square integrable irreducible admissible smooth representation of ${\mathrm{GL}}_n({\mathrm {F}})$. Conversely, every such representation is uniquely of the form $\sigma_{n,\tau}$. See \cite{BZ, Ze} for more details.
\begin{lemt}\label{sqip} Assume that ${\mathrm {F}}$ is non-archimedean, and $\sigma=\sigma_{n,\tau}$ is essentially square integrable as above. If $\operatorname{L}(s, \sigma)$ has a pole at $\frac{1}{2}$, then $m=1$ and $\tau$ is the character $\abs{\,\cdot\, }^{-\frac{1}{2}}$ of ${\mathrm{GL}}_1({\mathrm {F}})$. \end{lemt} \begin{proof} The lemma follows by noting that (\emph{cf.}~ \cite[Theorem 8.2]{JPSS}) \[
\operatorname{L}(s, \sigma_{n,\tau})=\operatorname{L}(s, \tau). \]
\end{proof}
By Lemma \ref{sqip}, $\sigma_{n, \abs{\,\cdot\, }^{-\frac{1}{2}}}$ is the only essentially square integrable irreducible admissible smooth representation of ${\mathrm{GL}}_n({\mathrm {F}})$ whose L-function has a pole at $\frac{1}{2}$. Since the representation $\sigma_{n, \abs{\,\cdot\, }^{-\frac{1}{2}}}$ is not self-dual, we conclude that Proposition \ref{generic} holds when ${\mathrm {F}}$ is non-archimedean and $\sigma$ is essentially square integrable. Together with Lemmas \ref{gl1r}, \ref{comlex} and \ref{gl2}, this implies the following lemma.
\begin{lemt}\label{nal2} Proposition \ref{generic} holds when
$\sigma$ is essentially square integrable. \end{lemt}
From now on, ${\mathrm {F}}$ may be either archimedean or non-archimedean, as in Lemma \ref{nal2}. Recall that a unitary representation of ${\mathrm{GL}}_n({\mathrm {F}})$ is said to be tempered if it is weakly contained in the regular representation (see \cite{CHH}), and an irreducible admissible smooth representation $\rho$ of ${\mathrm{GL}}_n({\mathrm {F}})$ is said to be essentially tempered if there is a real number $e(\rho)$ such that $\rho\cdot \abs{\det}_{\mathrm {F}}^{-e(\rho)}$ is unitarizable and tempered. Note that the real number $e(\rho)$ is uniquely determined by $\rho$. It is evident that all essentially square integrable irreducible admissible smooth representations of ${\mathrm{GL}}_n({\mathrm {F}})$ are essentially tempered. The following lemma is well-known and easy to check. See \cite[Theorem 1.1]{HO} for a more general statement.
\begin{lemt}\label{rs} Let $\sigma_i$ be an irreducible admissible smooth representation of ${\mathrm{GL}}_{n_i}({\mathrm {F}})$ which is unitarizable and tempered ($i=1,2$, $n_i\geq 1$).
Then the Rankin-Selberg L-function $\operatorname{L}(s,\sigma_1\times \sigma_2)$ has no pole in the domain where the real part of $s$ is positive. \end{lemt}
To prove Proposition \ref{generic} in the general case, we need the following result.
\begin{prpt}\label{shahidi} Let $\sigma_1, \sigma_2$ be essentially tempered irreducible admissible smooth representations of ${\mathrm{GL}}_{n_1}({\mathrm {F}})$ and ${\mathrm{GL}}_{n_2}({\mathrm {F}})$ ($n_1,n_2\geq 1$), respectively. Then the Rankin-Selberg L-function $\operatorname{L}(s,\sigma_1^\vee\times \sigma_2)$ has a pole at $s=1$ if and only if $e(\sigma_1)\geq e(\sigma_2)$ and $\sigma_1\dot \times \sigma_2$ is reducible. \end{prpt}
\begin{proof} Lemma \ref{rs} implies that if $e(\sigma_1)<e(\sigma_2)$ then $\operatorname{L}(s,\sigma_1^\vee\times \sigma_2)$ has no pole at $s=1$. Thus we may assume that $e(\sigma_1)\geq e(\sigma_2)$, and then the proposition is an instance of \cite[Proposition 5.3]{CS}. \end{proof}
Now we come to the proof of Proposition \ref{generic}. As in Proposition \ref{generic}, let $\sigma$ be a generic irreducible admissible smooth representation of ${\mathrm{GL}}_n({\mathrm {F}})$. Write \[ \sigma\cong \sigma_1\dot\times \sigma_2\dot\times \cdots \dot \times\sigma_\ell\quad(\ell\geq 1), \] where $\sigma_i$ ($i=1,2, \cdots, \ell$) is an essentially square integrable irreducible admissible smooth representation of ${\mathrm{GL}}_{n_i}({\mathrm {F}})$ ($n_i\geq 1$), with $n_1+n_2+\cdots+n_\ell=n$. Then \[
\operatorname{L}(s, \sigma)=\prod_{j=1}^\ell \operatorname{L}(s, \sigma_j)\quad\textrm{and}\quad \operatorname{L}(s, \sigma^\vee)=\prod_{j=1}^\ell \operatorname{L}(s, \sigma_j^\vee). \]
Assume by contradiction that both $\operatorname{L}(s, \sigma)$ and $\operatorname{L}(s, \sigma^\vee)$ have a pole at $s=\frac{1}{2}$. Using Lemma \ref{nal2}, we know that both $\operatorname{L}(s, \sigma_i)$ and $\operatorname{L}(s, \sigma_j^\vee)$ have a pole at $s=\frac{1}{2}$, for some $i\neq j$.
Proposition \ref{pat1} (which will be proved in Section \ref{secl}) then implies that $\operatorname{L}(s, \sigma_j^\vee\times \sigma_i)$ has a pole at $s=1$. Hence by Proposition \ref{shahidi}, $\sigma_j\dot\times \sigma_i$ is reducible, which contradicts the fact that $\sigma$ is irreducible. This proves Proposition \ref{generic}.
\section{A proof of Proposition \ref{pat1}}\label{secl} Let $\sigma_1, \sigma_2$ be as in Proposition \ref{pat1} so that both $\operatorname{L}(s,\sigma_1)$ and $\operatorname{L}(s,\sigma_2)$ have a pole at $s=\frac{1}{2}$. We aim to show that $\operatorname{L}(s,\sigma_1\times \sigma_2)$ has a pole at $s=1$. Using the Langlands classification for general linear groups, we assume without loss of generality that both $\sigma_1$ and $\sigma_2$ are essentially square integrable. We further assume without loss of generality that $n_1\geq n_2$.
\begin{lemt}\label{l51} Assume that ${\mathrm {F}}$ is non-archimedean. Then $\operatorname{L}(s,\sigma_1\times \sigma_2)$ has a pole at $s=1$. \end{lemt} \begin{proof} By Lemma \ref{sqip}, \[
\sigma_1\cong \sigma_{n_1, \abs{\,\cdot\, }^{-\frac{1}{2}}} \quad \textrm{and}\quad \sigma_2\cong\sigma_{n_2, \abs{\,\cdot\, }^{-\frac{1}{2}}}. \] Thus by \cite[Theorem 8.2]{JPSS} (see also \cite[Theorem 2.3]{CPS}), \[
\operatorname{L}(s,\sigma_1\times \sigma_2)=\prod_{j=0}^{n_2-1} \operatorname{L}(s+j, \abs{\,\cdot\,}^{-n_2}). \] Hence $s=1$ is a pole of $\operatorname{L}(s,\sigma_1\times \sigma_2)$.
\end{proof}
\begin{lemt}\label{l52} Assume that ${\mathrm {F}}$ is archimedean and $n_1=n_2=1$. Then $\operatorname{L}(s,\sigma_1\times \sigma_2)$ has a pole at $s=1$. \end{lemt} \begin{proof} First assume that ${\mathrm {F}}=\mathbb{C}$. Write $\sigma_1\cong\chi_{m_1,r_1}$ and $\sigma_2\cong\chi_{m_2,r_2}$ as in \eqref{cmr}. Then \[ \frac{\abs{m_1}+1}{2}+r_1,\, \frac{\abs{m_2}+1}{2}+r_2\in \{0,-1,-2,\cdots\}. \] This implies that \[
\frac{\abs{m_1+m_2}}{2}+1+r_1+r_2\in \{0,-1,-2,\cdots\}, \] since summing the two memberships gives $\frac{\abs{m_1}+\abs{m_2}}{2}+1+r_1+r_2\in \{0,-1,-2,\cdots\}$, and $\abs{m_1}+\abs{m_2}-\abs{m_1+m_2}$ is a non-negative even integer. Thus \[ \operatorname{L}(s,\sigma_1\times \sigma_2)=2(2\pi)^{-s-(r_1+r_2)-\frac{\abs{m_1+m_2}}{2}}\Gamma(s+r_1+r_2+\frac{\abs{m_1+m_2}}{2}) \] has a pole at $s=1$.
When ${\mathrm {F}}=\mathbb R$, the same proof shows that $\operatorname{L}(s,\sigma_1\times \sigma_2)$ has a pole at $s=1$. \end{proof}
\begin{lemt}\label{l53} Assume that ${\mathrm {F}}=\mathbb R$ and $(n_1,n_2)=(2,1)$. Then $\operatorname{L}(s,\sigma_1\times \sigma_2)$ has a pole at $s=1$. \end{lemt} \begin{proof} Under the local Langlands correspondence, the representation $\sigma_1$ corresponds to a representation of the Weil group $W_\mathbb R$ of $\mathbb R$ of the form ${\mathrm{Ind}}_{\mathbb{C}^\times}^{W_\mathbb R} \chi_{m_1,r_1}$, where $\chi_{m_1,r_1}$ is as in \eqref{cmr}, with $m_1\neq 0$. Write $\sigma_2\cong\chi_{m_2,r_2}$ as in \eqref{cgl1r}. Then \[ \frac{\abs{m_1}+1}{2}+r_1\in \{0,-1,-2,\cdots\} \] and \[ \frac{1}{2}+m_2+r_2\in \{0,-2,-4,\cdots\}. \] This implies that \[ \frac{\abs{m_1}}{2}+1+r_1+r_2\in \{0,-1,-2,\cdots\} \] Thus \[ \operatorname{L}(s,\sigma_1\times \sigma_2)=2(2\pi)^{-s-(r_1+r_2)-\frac{\abs{m_1}}{2}}\Gamma(s+r_1+r_2+\frac{\abs{m_1}}{2}) \] has a pole at $s=1$. \end{proof}
\begin{lemt}\label{l54} Assume that ${\mathrm {F}}=\mathbb R$ and $(n_1,n_2)=(2,2)$. Then $\operatorname{L}(s,\sigma_1\times \sigma_2)$ has a pole at $s=1$. \end{lemt} \begin{proof} Under the local Langlands correspondence, the representation $\sigma_i$ ($i=1,2$) corresponds to a representation of the Weil group $W_\mathbb R$ of $\mathbb R$ of the form ${\mathrm{Ind}}_{\mathbb{C}^\times}^{W_\mathbb R} \chi_{m_i,r_i}$, where $\chi_{m_i,r_i}$ is as in \eqref{cmr}, with $m_i\neq 0$. Then \[ \frac{\abs{m_i}+1}{2}+r_i\in \{0,-1,-2,\cdots\}\quad (i=1,2), \] which implies that \begin {equation}\label{m1m2} \frac{\abs{m_1+m_2}}{2}+1+r_1+r_2\in \{0,-1,-2,\cdots\}. \end {equation} We have that \begin{eqnarray*}
&&{\mathrm{Ind}}_{\mathbb{C}^\times}^{W_\mathbb R} \chi_{m_1,r_1}\otimes {\mathrm{Ind}}_{\mathbb{C}^\times}^{W_\mathbb R} \chi_{m_2,r_2}\\
&\cong & {\mathrm{Ind}}_{\mathbb{C}^\times}^{W_\mathbb R} \left(\chi_{m_1,r_1}\otimes ({\mathrm{Ind}}_{\mathbb{C}^\times}^{W_\mathbb R} \chi_{m_2,r_2})|_{\mathbb{C}^\times} \right)\\
&\cong & {\mathrm{Ind}}_{\mathbb{C}^\times}^{W_\mathbb R} \left(\chi_{m_1,r_1}\otimes (\chi_{m_2,r_2}\oplus \chi_{-m_2,r_2})\right)\\
&\cong& {\mathrm{Ind}}_{\mathbb{C}^\times}^{W_\mathbb R} \chi_{m_1+m_2,r_1+r_2}\oplus {\mathrm{Ind}}_{\mathbb{C}^\times}^{W_\mathbb R} \chi_{m_1-m_2,r_1+r_2}. \end{eqnarray*} Thus \[ \operatorname{L}(s,\sigma_1\times \sigma_2)=\operatorname{L}(s, \chi_{m_1+m_2,r_1+r_2})\cdot \operatorname{L}(s, \chi_{m_1-m_2,r_1+r_2}). \] It has a pole at $s=1$ by \eqref{m1m2}. \end{proof}
Proposition \ref{pat1} now follows by combining Lemmas \ref{l51}, \ref{l52}, \ref{l53} and \ref{l54}.
\end{document} |
\begin{document}
\begin{frontmatter}
\title{Robust and Parallel Bayesian Model Selection}
\author[ut]{Michael Minyi Zhang\corref{mycorrespondingauthor}}
\ead{michael\[email protected]}
\author[columbia]{Henry Lam}
\ead{[email protected]}
\author[ndu]{Lizhen Lin}
\ead{[email protected]}
\cortext[mycorrespondingauthor]{Corresponding author. Address: Department of Statistics and Data Sciences. The University of Texas at Austin. 1 University Station G2550. Austin, TX 78712. Tel: (512) 232-0693. Fax: (512) 475-8297.}
\address[ut]{The University of Texas at Austin, Austin, TX 78712, USA.}
\address[columbia]{Columbia University, New York, NY 10027, USA.}
\address[ndu]{University of Notre Dame, Notre Dame, IN 46556, USA.}
\begin{abstract}
Effective and accurate model selection is an important problem in modern data analysis. One of the major challenges is the computational burden required to handle large data sets that cannot be stored or processed on one machine. Another challenge one may encounter is the presence of outliers and contaminations that damage the quality of inference. The parallel ``divide and conquer'' model selection strategy divides the observations of the full data set into roughly equal subsets and performs inference and model selection independently on each subset. After local subset inference, this method aggregates the posterior model probabilities or other model/variable selection criteria to obtain a final model by using the notion of geometric median. This approach leads to improved concentration in finding the ``correct'' model and model parameters and is also provably robust to outliers and data contamination.
\end{abstract}
\begin{keyword}
Machine learning \sep Bayesian statistics \sep model selection \sep scalable inference.
\end{keyword}
\end{frontmatter}
\linenumbers
\section{INTRODUCTION} \label{sec:intro} In many data modeling scenarios, several plausible models are available to fit the data, each of which may result in drastically different predictions and conclusions. Being able to select the right model for inference is a crucial task. As our main example, we consider model selection for a normal linear model: \begin{align} Y &= X\beta + \epsilon, \hspace{.5em}\epsilon \sim N(0, \sigma^2 I)\label{linmod}, \end{align} where $Y$ is an $N$ dimensional response vector, $X$ is an $N \times D$ dimensional design matrix and $\beta$ is a $D$ dimensional vector of regression parameters. Here the candidate models to be selected could refer to the sets of significant variables. In a Bayesian setting, we have a natural probabilistic evaluation of models through posterior model probabilities. Depending on the objectives of the data analysis, we may be interested in assessing the belief about which model is ``best'' or obtaining predictions with minimum error.
Existing procedures to accomplish the aforementioned goals, however, will perform poorly under the presence of outliers and contaminations. In addition, Markov chain Monte Carlo (MCMC) algorithms for these methods do not scale to big data situations. The goal of this paper is to investigate a ``divide-and-conquer'' method that integrates with existing Bayesian model selection techniques, in a way that is robust to outliers and, moreover, allows us to perform Bayesian model selection in parallel.
Our ``divide-and-conquer'' strategy is based on ideas for robust inference using the notion of the geometric median \cite{minsker2015geometric}, especially the median posterior in the Bayesian context \cite{wang2014median,minsker2014scalable}. Previous work in this area has focused on performance in parametric inference.
Our contribution in this paper is to demonstrate the effectiveness of these ideas in selecting the correct class of models on top of the parameters. In particular, we show that the model aggregated across different subsets (the ``divide'') exhibits improved concentration to the true model class compared to the one obtained from the full data set. This concentration is measured in terms of the distance between the posterior model probabilities and the point mass assigned to the true model. The result also holds jointly with the concentration of the parameter estimates, and it continues to hold in the presence of outliers, which demonstrates robustness. We carry out extensive numerical studies on simulation data and a real data example to demonstrate the performance of our proposed approach.
\section{BAYESIAN MODEL SELECTION} \label{sec:model_selection}
In Bayesian model selection, we define the prior model probability $Pr(M_k)$ for each of the models $M_k$ ($k = 1, \ldots, K$) under consideration. For model $M_k$, we additionally have parameters $(\beta_k, \sigma_k^2)$ with prior $Pr(\beta_k,\sigma_k^2|M_k)$, which leads to a likelihood $Pr(Y|\beta_k,\sigma_k^2, M_k)$. Thus, the posterior model probability for model $M_k$, $ Pr(M_k |-) $, is proportional to \begin{align*}
Pr(M_k) \int \! Pr(Y| \beta_k,\sigma_k^2, M_k)Pr(\beta_k,\sigma_k^2|M_k) \, \mathrm{d}\beta_k\mathrm{d}\sigma_k^2. \end{align*} However, as noted in \cite{barbieri2004optimal}, choosing the model with the highest posterior model probability is not always the best option nor should one neglect the risk of model uncertainty. Instead of resorting to a single model for predicted values $\tilde{Y}$ (or some quantity of interest in general), \cite{hoeting1999bayesian} proposes to average over the model uncertainty with Bayesian model averaging (BMA) to obtain a posterior mean and variance of $\tilde{Y}$ at a covariate level $\tilde{X}$: \begin{align*}
E[\tilde{Y} | \tilde{X}, Y] = \sum_{k=1}^{K}&E[\tilde{Y} | \tilde{X}, Y, M_k]Pr(M_k|\tilde{X}, Y),\\ \nonumber Var(\tilde{Y} | \tilde{X}, Y) = \sum_{k=1}^{K}&Pr(M_k|X, Y)\left( Var(\tilde{Y} | \tilde{X}, Y, M_k) + \right.\\
&\left. E[\tilde{Y} | \tilde{X}, Y, M_k]^2 \right) - E[\tilde{Y} |X, Y]^2. \end{align*} We will focus on BMA in our theoretical developments in this paper. Our numerical experiments, however, will show that our divide-and-conquer strategy is also effective in applying on other model selection methods.
The first alternative to BMA is the median probability model, which can be shown to be optimal if we must choose one model for prediction \cite{barbieri2004optimal}. In this approach, we define the posterior inclusion probability of each predictor $x_d$ ($d=1,\ldots, D$) as the sum of posterior model probabilities of the models that include predictor $x_d$, namely $p_d = \sum_{k : x_d \in M_k} Pr(M_k |X, Y)$. The median probability model is the model that includes exactly those predictors $x_d$ with $p_d \geq 1/2$.
Second, using the maximum value of the likelihood for each model $Pr(Y | \hat\beta_k,\hat\sigma_k^2, M_k)$, where $(\hat\beta_k,\hat\sigma_k^2)$ is the maximum likelihood estimate of $(\beta_k,\sigma_k^2)$, we can perform penalized model selection through the Akaike information criterion (AIC) \cite{akaike1974new} or the Bayesian information criterion (BIC) \cite{schwarz1978estimating} by selecting the model with the lowest information criterion:
\begin{align*}
\text{AIC} = -2 \log Pr(Y | \hat\beta_k,\hat\sigma_k^2, M_k) + 2(D+1),\\
\text{BIC} = -2 \log Pr(Y | \hat\beta_k,\hat\sigma_k^2, M_k) + (D+1)\log N. \end{align*}
The final model selection technique we will consider is stochastic variable selection through the spike and slab model \cite{george1993variable}, which allows for variable shrinkage under high-dimensional models.
For the purposes of this paper, we will use the rescaled spike and slab model \cite{ishwaran2005spike}. To perform posterior inference in this model, we first define $Y^{\prime} = \sqrt{\frac{N}{\hat{\sigma}^2}}Y $ where $\hat{\sigma}^2$ is the unbiased estimate of $\sigma^2$ under the full model and let $\nu_0 > 0$ be some small number. The model is defined to be the following mixture model: \begin{align*} \begin{split} Y^{\prime} &\sim N(X\beta, N\sigma^2I), \; \beta_{SS_d} \sim N(0, J_d\tau_d^2),\\ \sigma^{-2}_{ss} &\sim \text{Gamma}(a,b),\; J_d \sim (1-w)\delta_{J_d}(\nu_0) + w\delta_{J_d}(1),\\ \tau_d^{-2} &\sim \text{Gamma}(a_{\tau}, b_{\tau}),\;w \sim \text{Uniform}(0,1). \end{split} \end{align*}
\section{DIVIDE-AND-CONQUER AND ROBUST BAYESIAN MODEL SELECTION} \label{sec:method}
In our robust model selection strategy, we divide the $N$ observations into $R$ subsets of roughly equal sample size. Then inference, model selection and prediction are performed for the linear model $Y_{(j)} = X_{(j)}\beta + \epsilon_{(j)}$ independently across the $j=1,\ldots , R$ subsets using existing Bayesian model selection procedures, and the results are then combined to form a final model or a combined prediction value.
Given the linear model \eqref{linmod}, we first place the following priors on the parameters of the normal likelihood with response variable $Y$ and $D$-dimensional predictor $X$. The $N$ observations are divided into $R$ subsets with $s$ observations in each subset: \begin{align*} Pr\left( \sigma^{-2}_{(j)} \right) &= \text{Gamma}(a,b),\\
Pr(\beta_{(j)} | \sigma^2_{(j)}) &= N(\beta_0, \sigma^2_{(j)} \Sigma_0). \end{align*}
To compensate for the data division, we raise the likelihood of the divided data $Pr(Y_{(j)}| X_{(j)}, \beta, \sigma^{2} )$ to the $R$-th power and adjust the normalizing constant accordingly, so that the likelihood for $Y_{(j)}$ is: \begin{align*} \left( \frac{R}{2\pi \sigma^2_{(j)}} \right)^{N/2} \exp\left\{ \frac{-R}{2\sigma^2_{(j)}} \left(Y_{(j)} - X_{(j)}\beta_{(j)} \right)^T \left(Y_{(j)} - X_{(j)}\beta_{(j)} \right) \right\}. \end{align*} The intuition and motivation for raising the subset likelihood to the $R$-th power is to adjust the potentially inflated variance of the subset posterior distribution. Exploiting conjugacy, we obtain the full conditionals for data subset $ j = 1, \ldots , R $: \begin{align*}
Pr(\beta_{(j)} | - ) &\nonumber = N\left( \mu_\beta, \sigma^2_{(j)} \Sigma_\beta \right),\\ \mu_\beta &= \Sigma_\beta\left( \Sigma_0^{-1}\beta_0 + R X_{(j)}^T Y_{(j)} \right),\\
\Sigma_\beta &= \left( \Sigma_0^{-1} + R X_{(j)}^{T} X_{(j)} \right)^{-1},\\ \nonumber Pr\left( \sigma^{-2}_{(j)} | - \right) & = \text{Gamma}\left( a^{\prime}, b^{\prime}\right),\\ \nonumber a^{\prime} &= a + \frac{N+D}{2},\\ b^{\prime} &= b + \frac{R}{2} \epsilon^T \epsilon + \frac{1}{2}\left( \beta_{(j)} - \beta_0 \right)^T \Sigma_0^{-1}\left( \beta_{(j)} - \beta_0 \right), \\ \epsilon &= \left(Y_{(j)} - X_{(j)}\beta_{(j)}\right). \end{align*}
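To make the sampling scheme concrete, the following is a minimal sketch (in Python with NumPy) of a Gibbs sampler that alternates between the two full conditionals above on a single subset. The function name and its arguments are ours for illustration, and we read the $N$ appearing in the conditionals as the subset sample size when working on subset $j$.
\begin{verbatim}
import numpy as np

def gibbs_subset(X_j, y_j, R, beta0, Sigma0, a, b, n_iter=1000, seed=0):
    # One-subset Gibbs sampler for the R-th-power-adjusted likelihood.
    # Illustrative sketch only.
    rng = np.random.default_rng(seed)
    n, D = X_j.shape
    Sigma0_inv = np.linalg.inv(Sigma0)
    sigma2 = 1.0
    betas, sigma2s = [], []
    for _ in range(n_iter):
        # beta_(j) | sigma^2_(j), Y_(j) ~ N(mu_beta, sigma^2 * Sigma_beta)
        Sigma_beta = np.linalg.inv(Sigma0_inv + R * X_j.T @ X_j)
        mu_beta = Sigma_beta @ (Sigma0_inv @ beta0 + R * X_j.T @ y_j)
        beta = rng.multivariate_normal(mu_beta, sigma2 * Sigma_beta)
        # sigma^{-2}_(j) | beta_(j), Y_(j) ~ Gamma(a', b')
        resid = y_j - X_j @ beta
        a_post = a + (n + D) / 2.0
        b_post = (b + 0.5 * R * resid @ resid
                  + 0.5 * (beta - beta0) @ Sigma0_inv @ (beta - beta0))
        sigma2 = 1.0 / rng.gamma(a_post, 1.0 / b_post)
        betas.append(beta)
        sigma2s.append(sigma2)
    return np.array(betas), np.array(sigma2s)
\end{verbatim}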
Letting $\Sigma_X = I + RX_{(j)}\Sigma_0 X_{(j)}^T$, integrating out the parameters gives the following marginal distribution $Pr(Y_{(j)}|X_{(j)})$:
\begin{align*}
\frac{ \left( \frac{R}{2\pi} \right)^{ \frac{N}{2} } b^a \Gamma(a+ \frac{N}{2})\left|\Sigma_X \right|^{-\frac{1}{2}} /\, \Gamma(a) }{\left(b+ \frac{R}{2}\left(Y_{(j)} - X_{(j)}\beta_0 \right)^T \Sigma_X^{-1} \left(Y_{(j)} - X_{(j)}\beta_0 \right) \right)^{a + \frac{N}{2} }}. \end{align*}
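For illustration, the displayed marginal likelihood can be evaluated on the log scale as in the sketch below. This is not the authors' code, and it reads the exponents in terms of the subset sample size (an assumption on our part).
\begin{verbatim}
import numpy as np
from scipy.special import gammaln

def log_marginal_likelihood(X_j, y_j, R, beta0, Sigma0, a, b):
    # Log of the subset marginal likelihood Pr(Y_(j) | X_(j)) displayed
    # above, with the subset sample size playing the role of N.
    n = X_j.shape[0]
    Sigma_X = np.eye(n) + R * X_j @ Sigma0 @ X_j.T
    resid = y_j - X_j @ beta0
    _, logdet = np.linalg.slogdet(Sigma_X)
    quad = resid @ np.linalg.solve(Sigma_X, resid)
    return (0.5 * n * np.log(R / (2.0 * np.pi))
            + a * np.log(b) + gammaln(a + 0.5 * n) - gammaln(a)
            - 0.5 * logdet
            - (a + 0.5 * n) * np.log(b + 0.5 * R * quad))
\end{verbatim}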
For distributed AIC and BIC model evaluation, we raise the likelihood term of the AIC and BIC formula to the power of $R$: \begin{align*}
\text{AIC}_R = -2R \log Pr(Y_{(j)} | \hat\beta_k,\hat\sigma_k^2, M_k) + 2(D+1), \\
\text{BIC}_R = -2R \log Pr(Y_{(j)} | \hat\beta_k,\hat\sigma_k^2, M_k) + (D+1)\log N. \end{align*}
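As a hedged sketch of the formulas above, the following hypothetical helper computes $\text{AIC}_R$ and $\text{BIC}_R$ for one subset from the ordinary least squares (maximum likelihood) fit under the Gaussian linear model.
\begin{verbatim}
import numpy as np

def aic_bic_R(X_j, y_j, R, N):
    # AIC_R and BIC_R for one subset, using the Gaussian MLE on that subset.
    n, D = X_j.shape
    beta_hat, *_ = np.linalg.lstsq(X_j, y_j, rcond=None)
    resid = y_j - X_j @ beta_hat
    sigma2_hat = resid @ resid / n           # MLE of the error variance
    loglik = -0.5 * n * (np.log(2.0 * np.pi * sigma2_hat) + 1.0)
    aic_R = -2.0 * R * loglik + 2 * (D + 1)
    bic_R = -2.0 * R * loglik + (D + 1) * np.log(N)
    return aic_R, bic_R
\end{verbatim}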
In applying our procedure with the spike and slab prior, we derive the corresponding full Gibbs sampler. For posterior inference in the spike and slab model, letting $\Delta = \text{diag}\left\{ J_1\tau_{1}^{2}, \ldots , J_D\tau_{D}^{2} \right\}$, we can perform Gibbs sampling by drawing from the following full conditionals: \begin{align*}
Pr(\beta_{SS(j)} | - ) & = N(\mu_{\beta_{SS}}, \Sigma_{\beta_{SS}}),\\ \Sigma_{\beta_{SS}} &= \left( \Delta^{-1} + \frac{R}{N\sigma^{2}_{SS(j)}} X_{(j)}^TX_{(j)} \right)^{-1},\\ \mu_{\beta_{SS}} &= \Sigma_{\beta_{SS}} \left( \frac{R}{N\sigma^{2}_{SS(j)}} X_{(j)}^TY_{(j)} \right),\\
Pr\left( \sigma^{-2}_{SS(j)} | - \right) & = \text{Gamma}\left( a^{\prime}_{SS}, b^{\prime}_{SS} \right),\\ a^{\prime}_{SS} &= a + \frac{N}{2},\\ b^{\prime}_{SS} &= b + \frac{R}{2N}\left(Y_{(j)} - X_{(j)}\beta_{SS(j)} \right)^T \left(Y_{(j)} - X_{(j)}\beta_{SS(j)} \right),\\ \begin{split}
Pr\left( J_d | - \right) &\propto w_{d1} \delta_{J_d}(\nu_0) + w_{d2}\delta_{J_d}(1),\\ w_{d1} &= (1-w)\nu_0^{-1/2}\exp\left\{ -\frac{\beta_{SS(j)d}^2}{2\nu_0\tau^{2}_{d}} \right\},\\ w_{d2} &= w \exp\left\{ -\frac{\beta^{2}_{SS(j)d}}{2\tau^{2}_{d}} \right\}, \end{split}\\
Pr\left( \tau^{-2}_{d} | - \right) & = \text{Gamma}\left( a_{\tau} + \frac{1}{2}, b_{\tau} + \frac{\beta^{2}_{SS(j)d}}{2J_{d}} \right),\\
Pr(w | -) & = \text{Beta}\left( 1+ \left| \left\{ d : J_d = 1 \right\} \right| , 1+ \left| \left\{ d : J_d = \nu_0 \right\} \right| \right). \end{align*}
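A single Gibbs scan for these full conditionals can be sketched as below (Python/NumPy). This is an illustrative transcription under stated assumptions: the function and variable names are ours, $N$ is used as displayed (in practice one may substitute the subset sample size), and the rescaled response $Y^{\prime}_{(j)}$ is passed in directly.
\begin{verbatim}
import numpy as np

def spike_slab_gibbs_step(X_j, y_prime_j, R, N, state, nu0, a, b,
                          a_tau, b_tau, rng):
    # state is a dict with keys 'beta', 'sigma2', 'J', 'tau2', 'w'.
    D = X_j.shape[1]
    sigma2, J, tau2, w = (state['sigma2'], state['J'],
                          state['tau2'], state['w'])
    # beta_SS | -
    Delta_inv = np.diag(1.0 / (J * tau2))
    prec = Delta_inv + (R / (N * sigma2)) * X_j.T @ X_j
    Sigma = np.linalg.inv(prec)
    mu = Sigma @ ((R / (N * sigma2)) * X_j.T @ y_prime_j)
    beta = rng.multivariate_normal(mu, Sigma)
    # sigma_SS^{-2} | -
    resid = y_prime_j - X_j @ beta
    sigma2 = 1.0 / rng.gamma(a + N / 2.0,
                             1.0 / (b + R / (2.0 * N) * resid @ resid))
    # J_d | -  (two-point mixture on {nu0, 1})
    w1 = (1.0 - w) * nu0 ** -0.5 * np.exp(-beta ** 2 / (2.0 * nu0 * tau2))
    w2 = w * np.exp(-beta ** 2 / (2.0 * tau2))
    J = np.where(rng.uniform(size=D) < w2 / (w1 + w2), 1.0, nu0)
    # tau_d^{-2} | -
    tau2 = 1.0 / rng.gamma(a_tau + 0.5,
                           1.0 / (b_tau + beta ** 2 / (2.0 * J)))
    # w | -
    w = rng.beta(1 + np.sum(J == 1.0), 1 + np.sum(J == nu0))
    return dict(beta=beta, sigma2=sigma2, J=J, tau2=tau2, w=w)
\end{verbatim}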
Once inference has been performed on each subset, the key step is to aggregate the subset models (or estimates) into a final model (or estimate). To aggregate our results, we collect the $R$ subset models or estimates and find the geometric median of these $R$ elements. The geometric median of a set of elements $\{x_1,\ldots,x_R\}$ valued in a Hilbert space $\mathbb H$ is defined as \begin{align} \begin{split}
x_*&=\text{med}_g(x_1,\ldots,x_R)=\text{argmin}_{y\in\mathbb H}\sum_{j=1}^R\|y-x_j\|,\label{geometric median definition} \end{split} \end{align}
where $\|\cdot\|$ is the norm associated with the inner product in $\mathbb H$ \cite{minsker2014scalable}. The solution can generally be effectively approximated using the Weiszfeld algorithm \cite{wes37}.
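As an illustration of how the aggregation step can be implemented, the following is a standard sketch of the Weiszfeld iteration for the Euclidean geometric median (not the authors' implementation); each row of the input array is one of the $R$ elements to be aggregated.
\begin{verbatim}
import numpy as np

def geometric_median(points, max_iter=200, tol=1e-8):
    # Weiszfeld iterations for the Euclidean geometric median of the rows.
    x = points.mean(axis=0)
    for _ in range(max_iter):
        d = np.linalg.norm(points - x, axis=1)
        if np.any(d < tol):          # guard against division by zero
            return points[np.argmin(d)]
        w = 1.0 / d
        x_new = (w[:, None] * points).sum(axis=0) / w.sum()
        if np.linalg.norm(x_new - x) < tol:
            return x_new
        x = x_new
    return x
\end{verbatim}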
For instance, in the case of aggregating the posterior model probabilities across $R$ subsets of data, the geometric median operates on the space of posterior distributions and the geometric median posterior model probability, $ Pr_*(M_k|X, Y) $, is defined as: \begin{align}
\argmin_{P \in \Pi_K} \sum_{j=1}^{R} \left|\left| P - Pr(M_k|X_{(j)}, Y_{(j)}) \right|\right|\label{model aggregation}, \end{align}
where $Pr(M_k|X_{(j)}, Y_{(j)})$ is the posterior model probabilities for subset $j$, and $\Pi_K$ denotes the space of distributions on $K$ support points. The metric $\|\cdot\|$ here can be taken as the Euclidean metric, or an integral probability metric (IPM) defined as $||P - Q|| = \sup_{f \in \mathcal{F}} \left| \int \! f(x) \, \mathrm{d}(P-Q)(x) \right|$ for some class of functions $\mathcal F$ \cite{sriperumbudur2010hilbert,sriperumbudur2012empirical}.
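With the Euclidean metric, the aggregation in \eqref{model aggregation} amounts to applying the routine sketched above to the $R$ subset probability vectors; since the geometric median lies in the convex hull of its inputs, the result is again a probability vector. The numbers below are hypothetical and serve only to illustrate the robustness to a contaminated subset.
\begin{verbatim}
import numpy as np

# R x K array: row j holds Pr(M_k | X_(j), Y_(j)) for subset j
# (hypothetical values; the third subset is contaminated).
subset_probs = np.array([[0.70, 0.20, 0.10],
                         [0.65, 0.25, 0.10],
                         [0.05, 0.90, 0.05],
                         [0.72, 0.18, 0.10],
                         [0.68, 0.22, 0.10]])
aggregated = geometric_median(subset_probs)
print(aggregated)   # stays close to the uncontaminated rows
\end{verbatim}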
For the model selection techniques discussed earlier (AIC, BIC, and the median model selection), we can choose a final model in two ways: One, we can select the best model locally on each subset, use it for prediction, and then aggregate the results (estimate combination). Or two, we can take the median of the model selection criteria and choose that particular model on each subset and then aggregate the results to get a final model (model combination).
However, in Bayesian model averaging and spike and slab modeling we do not choose a final model. We can still perform model or estimate combination by aggregating the posterior model probabilities. We consider both model and estimate combinations in our experiments and show that they yield similar results in our experimental settings.
\begin{algorithm} \caption{Algorithm for robust model selection in the case of BMA.} \label{alg:model_select} \For{$j \in \left\{ 1,\ldots , R \right\} $}{
Raise likelihood to $R$-th power\\
Compute inference for $P(\theta | M_k, X_{(j)},Y_{(j)})$ for $k=1,\ldots,K$\\
Draw predictive values from predictive posterior $P(\tilde{Y} | M_k, X_{(j)},Y_{(j)})$ for $k=1,\ldots,K$\\
Calculate posterior model probabilities $\{P(M_k | X_{(j)},Y_{(j)})\}_{k=1,\ldots,K}$\\
}
Calculate geometric median of posterior model probabilities over the subsets using \eqref{model aggregation}.\\ Approximate geometric medians of posterior parameter probabilities or predictive values given individual models over the subsets using \eqref{geometric median definition}.\\
Obtain BMA estimate: $E[\tilde Y | Y, X] = \sum_{k=1}^{K}E_*[\tilde Y | X, Y, M_k]Pr_*(M_k|X, Y)$
\end{algorithm}
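The following sketch puts the pieces together along the lines of Algorithm \ref{alg:model_select}, restricted to computing and aggregating the posterior model probabilities (the BMA prediction step is omitted). It reuses the hypothetical routines \verb|log_marginal_likelihood| and \verb|geometric_median| sketched earlier and assumes equal prior model probabilities.
\begin{verbatim}
import numpy as np

def aggregated_model_probabilities(X, y, models, R, beta0, Sigma0, a, b,
                                   seed=0):
    # models: list of column-index arrays, one per candidate model M_k.
    rng = np.random.default_rng(seed)
    N = X.shape[0]
    subsets = np.array_split(rng.permutation(N), R)
    subset_probs = []
    for idx in subsets:
        X_j, y_j = X[idx], y[idx]
        logml = np.array([
            log_marginal_likelihood(X_j[:, cols], y_j, R, beta0[cols],
                                    Sigma0[np.ix_(cols, cols)], a, b)
            for cols in models])
        w = np.exp(logml - logml.max())   # equal prior model probabilities
        subset_probs.append(w / w.sum())
    return geometric_median(np.vstack(subset_probs))
\end{verbatim}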
\section{IMPROVED CONCENTRATION AND ROBUSTNESS} \label{sec:theory}
In this section we provide theoretical justification for the robustness of the divide-and-conquer strategy. In particular, we focus on BMA. Additionally, we show that the aggregated model class from our strategy concentrates faster, in terms of posterior model probabilities, on the correct class compared to using the whole data set at once. This concentration result can be stated jointly with parameter estimation, and it also applies in a way that exhibits robustness against outliers. Note that we do not raise the subset likelihood to the $R$-th power in our current theoretical analysis, but the results can be generalized by imposing slightly stronger entropy conditions on the model.
Let $\mathcal S$ be the domain of $\theta=(M_k,\beta,\sigma^2)$, our set of model indices and parameters. Let $\theta_0$ be the true data generating parameter, and let $(X_1,Y_1)$ be a generic data point. Let $p_0(y|x):=p(y|x,\theta_0)$ be the true conditional density of $Y_1$ given $X_1$, and $p_0(x)$ be the true density of the covariates $X_1$. We denote $p_\theta(y|x):=p(y|x,\theta)$. Let $P_\theta$ be the distribution defined by $p_0(x)\times p_\theta(y|x)$ and let $P_0$ be the true distribution $p_0(x)\times p_0(y|x)$. For convenience, we denote $P_0f=P_0f(X_1,Y_1)=E_{p_0}[f(X_1,Y_1)]$ where $E_{p_0}[\cdot]$ is the expectation under $p_0(y|x)\times p_0(x)$. We denote $P_0^N$ as the true probability measure taken on the data $(X,Y)$ of size $N$ and $P_0^Nf=E_{P_0^N}[f(X,Y)]$. Lastly, we denote $\mathcal D(\epsilon,\mathcal P,d)$ as the $\epsilon$-packing number of a set of probability measures $\mathcal P$ under the metric $d$, which is the maximal number of points in $\mathcal P$ such that the distance between any pair is at least $\epsilon$. We implicitly assume here that $\mathcal P$ is separable. The following Theorem \ref{thm plain} follows from a modification of Theorem 2.1 in \cite{ghosal2000}: \begin{thm} Assume that there is a sequence $\varepsilon_N$ such that $\varepsilon_N\to0$ and $N\varepsilon_N^2\to\infty$ as $N\to\infty$, a constant $C$, and a set $\mathcal S_N\subseteq\mathcal S$ so that \begin{enumerate}[leftmargin=*] \item $\log\mathcal D(\varepsilon_N/2,\mathcal P_{\mathcal S_N},d_H)\leq N\varepsilon_N^2.$\label{assumption1} \item $Pr(\mathcal S\setminus\mathcal S_N)\leq e^{-N\varepsilon_N^2(C+4)}.$\label{assumption2}
\item $Pr\left(\theta:-P_0\log\frac{p_\theta(Y_1|X_1)}{p_0(Y_1|X_1)}\leq\varepsilon_N^2,\ \right. \\
\left. \qquad P_0\left(\log\frac{p_\theta(Y_1|X_1)}{p_0(Y_1|X_1)}\right)^2\leq\varepsilon_N^2\right)\geq e^{-N\varepsilon_N^2C}.$\label{assumption3} \end{enumerate}
where $\mathcal P_{\mathcal S_N}=\{p_0(x)\times p_\theta(y|x):\theta\in\mathcal S_N\}$ and $d_H$ is the Hellinger distance. Then we have \begin{align} \begin{split}
P_0^N&\left(Pr(\theta:d_H(P_\theta,P_0)>T\varepsilon_N^2|X,Y)>\delta\right)\leq \\ &\frac{1}{C^2N\varepsilon_N^2\delta}+\frac{2e^{-LN\varepsilon_N^2}}{\delta}+\frac{2e^{-2N\varepsilon_N^2}}{\delta},\label{main plain} \end{split} \end{align} for any $0<\delta<1$ and sufficiently large $T>0$ such that $LT^2\geq C+4$ and $LT^2-1>L$, where $L$ is a universal constant.\label{thm plain} \end{thm}
The proof of Theorem \ref{thm plain} is in the Appendix. As noted by \cite{ghosal2000}, the important assumptions are Assumptions \ref{assumption1} and \ref{assumption3}. Essentially, Assumption \ref{assumption1} constrains the size of the parameter domain $\mathcal S$ to be not too big, whereas Assumption \ref{assumption3} ensures sufficient mass of the prior on a neighborhood of the true parameter. The concentration result \eqref{main plain} states that the posterior distribution of $\theta$ is close to the true $\theta_0$ with high probability, where the closeness is measured in terms of the Hellinger distance between the likelihoods. Note that the RHS of \eqref{main plain} consists of three terms. The dominant term is the power-law decay in $N\varepsilon_N^2$. The other two exponential decay terms result from technical arguments in the existence of tests that sufficiently distinguish between distributions \cite{birge1983approximation,le2012asymptotic}.
Next we describe the concentration behavior of BMA. We focus on the situations where all the candidate models are non-nested, i.e. only one model contains distributions that are arbitrarily close to the truth. Without loss of generality, we let $M_1$ be the true model.
\begin{thm}[BMA of Non-Nested Models] Suppose the assumptions in Theorem \ref{thm plain} hold. Also assume that, for sufficiently small $\epsilon>0$, $d(P_\theta,P_0)>\epsilon$ for any $\theta\in\mathcal S_{-1}:=\{(M_k,\beta,\sigma^2):k\neq1\}$. Let $L$ be the same universal constant arising in Theorem \ref{thm plain}. We have \begin{enumerate}[leftmargin=*] \item For any given $0<\delta<1$, \begin{align} \begin{split}
P_0^N&\left(Pr(M_1|X,Y)<1-\delta\right)\leq \\ &\frac{1}{C^2N\varepsilon_N^2\delta}+\frac{2e^{-LN\varepsilon_N^2}}{\delta}+\frac{2e^{-2N\varepsilon_N^2}}{\delta}, \end{split} \label{interim9} \end{align}\label{plain} for sufficiently large $N$. \item For any given $0<\delta<1$, \begin{align} \begin{split}
P_0^N&\left(d_E(Pr(M_k|X,Y),\mathbf e_1)>\delta\right)\leq\\ &\frac{\sqrt 2}{C^2N\varepsilon_N^2\delta}+\frac{2\sqrt 2e^{-LN\varepsilon_N^2}}{\delta}+\frac{2\sqrt 2e^{-2N\varepsilon_N^2}}{\delta}, \end{split} \label{Euclidean bound} \end{align} for sufficiently large $N$, where $d_E$ is the Euclidean distance, and $\mathbf e_1$ is the point mass on $M_1$.\label{Euclidean} \item For any $0<\delta<\sqrt{(\sqrt 2-1)^2+1}/2$, \begin{align} \begin{split}
P_0^N&\left(d_H(Pr(M_k|X,Y),\mathbf e_1)>\delta\right)\leq \\ &\frac{(\sqrt 2-1)^2+1}{\sqrt 2C^2N\varepsilon_N^2\delta^2}+\frac{((\sqrt 2-1)^2+1)e^{-LN\varepsilon_N^2}}{\delta^2}+\\ &\frac{((\sqrt 2-1)^2+1)e^{-2N\varepsilon_N^2}}{\delta^2}, \end{split} \label{Hellinger bound} \end{align} for sufficiently large $N$. \label{Hellinger} \end{enumerate} \label{thm model selection} \end{thm}
\noindent\textbf{Proof of Theorem \ref{thm model selection}.}
\emph{Proof of \ref{plain}.} Consider large enough $N$ and fix a sufficiently large $T>0$. We have \begin{eqnarray}
&&Pr(\theta:d(P_\theta,P_0)\leq T\varepsilon_N^2|X,Y)\notag\\
&=&E_{Pr}\left[Pr(\theta:d(P_\theta,P_0)\leq T\varepsilon_N^2|M_k,X,Y)|X,Y\right]{},\\
&&{}\text{\ \ where $E_{Pr}[\cdot|X,Y]$ denotes the posterior expectation}{}\notag\\
&&{}\text{\ \ and $Pr(\cdot|M_k,X,Y)$ denotes the posterior distribution given model $M_k$}\notag\\
&=&Pr(M_1|X,Y)Pr(\theta:d(P_\theta,P_0)\leq T\varepsilon_N^2|M_1,X,Y),\label{interim14} \end{eqnarray} by the condition that $d(P_\theta,P_0)>T\varepsilon_N^2$ for any $\theta\in\mathcal S_{-1}$ and any $T>0$ eventually. Hence \begin{equation}
Pr(\theta:d(P_\theta,P_0)\leq T\varepsilon_N^2|X,Y)\geq1-\delta,\label{interim7} \end{equation} implies \begin{equation}
Pr(M_1|X,Y)\geq1-\delta\label{interim8}. \end{equation} The result then follows from Theorem \ref{thm plain}, which implies that \eqref{interim7} occurs with probability at least $$1-\left(\frac{1}{C^2N\varepsilon_N^2\delta}+\frac{2e^{-LN\varepsilon_N^2}}{\delta}+\frac{2e^{-2N\varepsilon_N^2}}{\delta}\right).$$
\emph{Proof of \ref{Euclidean}.} Note that \eqref{interim8} implies \begin{equation}
d_E(Pr(M_k|X,Y),\mathbf e_1)=\sqrt{(1-Pr(M_1|X,Y))^2+\sum_{k\neq1}Pr(M_k|X,Y)^2}\leq\sqrt2\delta,\label{interim10} \end{equation}
since $(1-Pr(M_1|X,Y))^2\leq\delta^2$ and $(\delta,0,\ldots,0)$ is an optimizer of the optimization $$\max\sum_{i=2}^Kx_i^2\text{\ \ subject to\ \ }\sum_{i=2}^Kx_i\leq\delta.$$ Hence \eqref{interim9} and \eqref{interim10} together imply
$$P_0^N\left(d_E(Pr(M_k|X,Y),\mathbf e_1)\geq\sqrt2\delta\right)\leq\frac{1}{C^2N\varepsilon_N^2\delta}+\frac{2e^{-LN\varepsilon_N^2}}{\delta}+\frac{2e^{-2N\varepsilon_N^2}}{\delta}.$$ By redefining $\tilde\delta=\sqrt2\delta$, we get \eqref{Euclidean bound}.
\emph{Proof of \ref{Hellinger}.} Note that \eqref{interim8} implies \begin{align}
d_H(Pr(M_k|X,Y),\mathbf e_1)&=\sqrt{\frac{1}{2}\left((1-\sqrt{Pr(M_1|X,Y)})^2+\sum_{k\neq1}Pr(M_k|X,Y)\right)}\notag\\ &\leq\sqrt{\frac{1}{2}\left((1-\sqrt{1-\delta})^2+\delta\right)},\label{interim11} \end{align} since $(1-\sqrt{Pr(M_1|X,Y)})^2\leq(1-\sqrt{1-\delta})^2$ and $\sum_{k\neq1}Pr(M_k|X,Y)=1-Pr(M_1|X,Y)\leq\delta$. Hence \eqref{interim9} and \eqref{interim11} together imply \begin{align} \begin{split}
P_0^N\left(d_H(Pr(M_k|X,Y),\mathbf e_1)>\sqrt{\frac{1}{2}\left((1-\sqrt{1-\delta})^2+\delta\right)}\right)\leq\\ \frac{1}{C^2N\varepsilon_N^2\delta}+\frac{2e^{-LN\varepsilon_N^2}}{\delta}+\frac{2e^{-2N\varepsilon_N^2}}{\delta}\label{interim12}. \end{split} \end{align} Note that $(1-\sqrt{1-\delta})^2$ is a convex function in $\delta$ for $0<\delta<1$ and is equal to 0 at $\delta=0$. Thus $(1-\sqrt{1-\delta})^2\leq(\sqrt 2-1)^2\delta$ for $0<\delta<1/2$, where $(\sqrt 2-1)^2$ is the slope of the line between $(0,0)$ and $(1/2,(1-\sqrt{1-1/2})^2)$. Hence, for $0<\delta<1/2$, we have $$\sqrt{\frac{1}{2}\left((1-\sqrt{1-\delta})^2+\delta\right)}\leq\sqrt{((\sqrt 2-1)^2+1)\frac{\delta}{2}}.$$ Combining with \eqref{interim12}, we have \begin{align} \begin{split}
P_0^N\left(d_H(Pr(M_k|X,Y),\mathbf e_1)>\sqrt{((\sqrt 2-1)^2+1)\frac{\delta}{2}}\right)\leq\\ \frac{1}{C^2N\varepsilon_N^2\delta}+\frac{2e^{-LN\varepsilon_N^2}}{\delta}+\frac{2e^{-2N\varepsilon_N^2}}{\delta}. \end{split} \end{align} By redefining $\tilde\delta=\sqrt{((\sqrt 2-1)^2+1)\delta/2}$, we get \eqref{Hellinger bound}.
\qed
Note that the assumption $d(P_\theta,P_0)>\epsilon$ for any $\theta\in\mathcal S_{-1}$ and sufficiently small $\epsilon$ is a manifestation of the non-nested model situation, asserting that only one model is ``correct''. Result \ref{plain} states that the posterior probability of picking the correct model concentrates near 1.
Result \ref{Euclidean} translates this in terms of the Euclidean distance between the model posterior probability and the point mass on the correct model. Result \ref{Hellinger} is an alternative using the Hellinger distance.
Note that the concentration bound for Hellinger distance \eqref{Hellinger bound} is inferior to that for Euclidean distance \eqref{Euclidean bound} for small $\delta$ since $\delta^2$ instead of $\delta$ shows up in the RHS of \eqref{Hellinger bound}. This is because in our proof, the function $\sqrt{(1-\sqrt{1-\delta})^2+\delta}$ that appears in \eqref{interim12} has derivative $1/(2\sqrt{(1-\delta)((1-\sqrt{1-\delta})^2+\delta)})$ which is $\infty$ at $\delta=0$, and thus no linearization is available when $\delta$ is close to 0.
Theorem \ref{thm model selection} can be modified to handle the case where multiple models contain the truth. In particular, the expression inside the probability in \eqref{interim9} becomes
$$\sum_{r\in\mathcal M}Pr(M_r|X,Y)<1-\delta,$$ where $\mathcal M$ is the collection of all $r$ such that $M_r$ contains the true model. In \eqref{Euclidean bound} and \eqref{Hellinger bound}, the use of $\mathbf e_1$ is replaced by the existence of some probability vector (dependent on $N$) supported on the indices in $\mathcal M$. In other words, one now allows comparing with an arbitrary allocation of probability masses to all true models in the concentration bound. These modifications can be seen by following the arguments in the proof of Theorem \ref{thm model selection}. Specifically, \eqref{interim14} would be modified as
$$\sum_{r\in\mathcal M}Pr(M_r|X,Y)Pr(\theta:d(P_\theta,P_0)\leq T\varepsilon_N^2|M_r,X,Y).$$ Then \eqref{interim7} would imply a modified version of \eqref{interim8}, namely
$$\sum_{r\in\mathcal M}Pr(M_r|X,Y)\geq1-\delta,$$
giving the claimed modification for \eqref{interim9}. Then, following \eqref{interim10}, we could find a probability vector to make all $(1-Pr(M_r|X,Y))^2$ terms vanish except one, which is in turn bounded by $\delta^2$. This gives the claimed modifications for \eqref{Euclidean bound} and \eqref{Hellinger bound}.
The following result states how a divide-and-conquer strategy can improve the concentration rate of the posterior model probabilities towards the correct model: \begin{thm}[Concentration Improvement]
Suppose the assumptions in Theorem \ref{thm model selection} hold. Let $s=N/R$, and
$q=\frac{\sqrt 2}{C^2s\varepsilon_s^2\delta}+\frac{2\sqrt 2e^{-Ls\varepsilon_s^2}}{\delta}+\frac{2\sqrt 2e^{-2s\varepsilon_s^2}}{\delta}$.
For sufficiently large $s$, letting $\alpha,\nu$ be constants such that $0<q<\alpha<1/2$ and $0\leq\nu<(\alpha-q)/(1-q)$, we have:
\begin{enumerate}[leftmargin=*]
\item $Pr_*(M_k|X,Y)$, the geometric median under $d_E$ of $\{Pr(M_k|(X_{(j)},Y_{(j)}))\}_{j=1,\ldots,R}$, satisfies
\begin{align}
\begin{split}
P_0^N&\left(d_E(Pr_*(M_k|X,Y),\mathbf e_1)>C_\alpha\delta\right)\leq\\
&\left(e^{(1-\nu)\psi(\frac{\alpha-\nu}{1-\nu},q)}\right)^{-R},
\end{split}\label{geometric median Euclidean bound eqn}
\end{align}\label{geometric median Euclidean bound}
where $C_\alpha=(1-\alpha)\sqrt{1/(1-2\alpha)}$, and $\psi(\alpha,q)=(1-\alpha)\log\frac{1-\alpha}{1-q}+\alpha\log\frac{\alpha}{q}$.
\label{geometric median Euclidean}
\item Let $K$ be the number of model classes, then:
\begin{align}
\begin{split}
P_0^N&\left(Pr_*(M_1|X,Y)<1-C_\alpha\delta\sqrt{\frac{K-1}{K}}\right)\leq \\
&\left(e^{(1-\nu)\psi(\frac{\alpha-\nu}{1-\nu},q)}\right)^{-R}.
\end{split}\label{geometric median plain eqn}
\end{align}\label{geometric median plain}
\item Suppose in addition that, for any $P_{\theta^1},P_{\theta^2}$ such that $\theta^i=(M_1,\beta^i,(\sigma^2)^i)$ for $i=1,2$, we have
\begin{equation}
d_H(P_{\theta^1},P_{\theta^2})\geq\tilde C\rho_k(\theta^1,\theta^2)^\gamma,\label{IPM}
\end{equation}
where $\rho_k(\theta^1,\theta^2)=\|k(\cdot,\theta^1)-k(\cdot,\theta^2)\|_{\mathbb H}$, with $k$ being a characteristic kernel defined on the space $\{\theta=(M_1,\cdot,\cdot)\}$ and $\mathbb H$ is the corresponding reproducing kernel Hilbert space (RKHS), and $\tilde C>0$ and $\gamma>0$ are constants. Moreover, assume that there is a universal constant $\tilde K$ such that $e^{-\tilde Ks\varepsilon_s^2/2}\leq\varepsilon_s$ for all $s$, and we choose $\varepsilon_s$ such that $\tilde q=\frac{1}{Cs\varepsilon_s^2}+4e^{-\tilde Ks\varepsilon_s^2/2}<1/2$. Then
\begin{align*}
\begin{split}
P_0^N\left(Pr_*(M_1|X,Y)>1-C_\alpha\delta\sqrt{\frac{K-1}{K}}, \right. \\
\left. \|Pr_*(\theta|M_1,X,Y)-\delta_0\|_{\mathcal F_k}\leq C_\alpha\tilde T\epsilon_s^{1/\gamma}\right)\\ \geq1-\left(e^{(1-\nu)\psi\left(\frac{\alpha-\nu}{1-\nu},q\right)}\right)^{-R}-\left(e^{\psi\left(\alpha,q\right)}\right)^{-R},
\end{split}
\end{align*}
where $\|\cdot\|_{\mathcal F_k}$ is defined as $\|P-Q\|_{\mathcal F_k}=\|\int k(x,\cdot)d(P-Q)(x)\|_{\mathbb H}$, $\tilde T>0$ is a sufficiently large constant, $Pr_*(\theta|M_1,X,Y)$ is the geometric median of $\{Pr(\theta|M_1,(X_{(j)},Y_{(j)}))\}_{j=1,\ldots,R}$ under the $\|\cdot\|_{\mathcal F_k}$-norm, and $\delta_0$ is the delta measure at the true parameter.\label{geometric median joint}
\end{enumerate}\label{thm main} \end{thm}
The significance of Theorem \ref{thm main} is the improvement of the concentration from power-law decay in Theorem \ref{thm model selection} to exponential decay, as the number of subsets grows. Results of this type are known in the case of parameter estimation (e.g., \cite{wang2014median,minsker2014scalable}); Theorem \ref{thm main} generalizes them to the case of model selection.
Results \ref{geometric median Euclidean} and \ref{geometric median plain} describe the exponential concentration for the model posteriors to the correct model, while Result \ref{geometric median joint} states the joint concentration in both the model posterior and the parameter posterior given the correct model, when one adopts a second layer of divide-and-conquer on the parameter posterior conditional on each individual candidate model. Result \ref{geometric median joint} in particular combines with the parameter concentration result in \cite{minsker2014scalable}.
Note that we have taken a hybrid viewpoint here, in that we assume a ``correct'' model and parameters in a frequentist sense. Under this view, a posterior probability more concentrated towards the truth is more desirable. This constitutes our main claim that the divide-and-conquer strategy is attractive. This view has been used in existing work such as \cite{wang2014median,minsker2014scalable}.
Finally, the following theorem highlights that the concentration improvement still holds even if the data are contaminated to a certain extent: \begin{thm}[Robustness to Outliers] Using the notation in Theorem \ref{thm main}, but assume instead that, for $j$ where $1\leq j\leq\lfloor(1-\nu)R\rfloor+1$, \begin{align*} \begin{split}
P_0^s&\left(d_E(Pr(M_k|X_{(j)},Y_{(j)}),\mathbf e_1)>\delta\right)\leq\\ &\frac{\sqrt 2}{C^2s\varepsilon_s^2\delta}+\frac{2\sqrt 2e^{-Ls\varepsilon_s^2}}{\delta}+\frac{2\sqrt 2e^{-2s\varepsilon_s^2}}{\delta}, \end{split} \end{align*} the conclusion of Theorem \ref{thm main} still holds.\label{thm extension} \end{thm} Theorem \ref{thm extension} stipulates that when a small number of subsets are contaminated by arbitrary nature, the geometric median approach still retains the same exponential concentration.
\noindent\textbf{Proofs of Theorems \ref{thm main} and \ref{thm extension}.}
The proofs of both theorems rely on a key theorem on geometric median in \cite{minsker2015geometric}, restated in the Appendix. We focus on Theorem \ref{thm main}, as the proof for Theorem \ref{thm extension} is a straightforward modification in light of Theorem \ref{thm geometric median}.
\emph{Proof of \ref{geometric median Euclidean}.} Immediate by noting that
$$P_0^s\left(d_E(Pr(M_k|X_{(j)},Y_{(j)}),\mathbf e_1)>\delta\right)\leq q,$$ for all $j=1,\ldots,R$, and applying Theorem \ref{thm geometric median}.
\emph{Proof of \ref{geometric median plain}.} Note that \begin{equation}
d_E(Pr_*(M_k|X,Y),\mathbf e_1)\geq(1-Pr_*(M_1|X,Y))\sqrt{\frac{K}{K-1}}\label{interim13}. \end{equation}
To see this, let $a=Pr_*(M_1|X,Y)$. We have $$d_E(Pr_*(M_k|X,Y),\mathbf e_1)=\sqrt{(1-a)^2+\sum_{i=2}^Kx_i^2},$$ where $x_i$'s satisfy $\sum_{i=2}^Kx_i=1-a$. Since $(1-a)/(K-1)$ is the optimizer of the optimization $$\min\sum_{i=2}^Kx_i^2\text{\ \ subject to\ \ }\sum_{i=2}^Kx_i=1-a,$$ we get $\sqrt{(1-a)^2+\sum_{i=2}^Kx_i^2}\geq(1-a)\sqrt{K/(K-1)}$.
Hence \eqref{geometric median Euclidean bound eqn} and \eqref{interim13} together give
$$P_0^N\left(Pr_*(M_1|X,Y)<1-C_\alpha\delta\sqrt{\frac{K-1}{K}}\right)\leq\left(e^{(1-\nu)\psi(\frac{\alpha-\nu}{1-\nu},q)}\right)^{-R}.$$
\emph{Proof of \ref{geometric median joint}.} Under the additional assumptions, we can invoke Corollary 3.5 in \cite{minsker2014scalable} to obtain that
$$P_0^N\left(\|Pr_*(\theta|M_1,X,Y)-\delta_0\|_{\mathcal F_k}>C_\alpha\tilde T\epsilon_s^{1/\gamma}\right)\leq\left(e^{\psi\left(\alpha,q\right)}\right)^{-R}.$$ The result follows from applying a union bound together with \eqref{geometric median plain eqn}.
\qed
\section{SIMULATIONS AND DATA ANALYSIS}\label{sec:sim} For the BMA, AIC, BIC and median probability model tests, we generate data from a model $Y=X\beta + \epsilon$, where $X$ is a $5000 \times 10$ matrix and $\beta$ is a $10$ dimensional vector with $3$ true predictors. We assess the aforementioned model selection techniques with four tests: the contamination and magnitude tests use $10$ trials and the coverage test uses $20$ trials; the magnitude and coverage tests are run on $1$ and $10$ subsets, while the contamination tests are run on $1$ and $50$ subsets. Each MCMC chain is run for 1,000 iterations with a burn-in period of the initial $500$ iterations.
The first test is the contamination test, which examines the root mean square error (RMSE) of held-out test data $\tilde{Y}$ of size $50$ against the number of outliers present (as many as $50$ in our experiments) in the training data, $Y$. We generate outliers by taking the maximum of the absolute value of the data and adding a given magnitude value. Each outlier has a relative magnitude of 10,000, meaning that we find the largest output $ Y_{i^{\ast}}$ with $ i^{\ast} = \argmax_{i} \left\{ |Y_i|: i = 1, \ldots , N \right\} $, so that the value of the outlier is $Y_{i^{\ast}} + \left( \mbox{sgn}(Y_{i^{\ast}}) \times 10000 \right)$. For the contamination test, we expect to see superior performance with regard to RMSE for the $50$ subset median posterior as long as the number of outliers per subset does not exceed $1$. Figure~\ref{fig:contamination} demonstrates the robustness of our technique to the number of outliers when we divide the data into subsets. We can see that the empirical $95\%$ distribution of the RMSE over $ 10 $ trials for $50$ subsets (green dashed line) falls dramatically below the RMSE distribution for $1$ subset for each model selection technique when outliers are present, except for Bayesian model averaging when $ 50 $ outliers are present, which approaches the point where the theoretical guarantees of our method are violated.
\begin{figure}
\caption{Contamination test.}
\label{fig:contamination}
\end{figure}
The second test assesses the RMSE of the held-out test data of size $50$ against the \emph{increasing relative magnitude of one outlier} present in the training data. We expect to see nearly constant RMSE on the $10$ subset run as the relative magnitude of a single outlier increases, indicating that the procedure is robust. We can see in Figure~\ref{fig:magnitude} that the RMSE of the distributed variants of the model selection techniques is lower than that of the single processor variants as the magnitude of the outlier increases. In the magnitude test, we can clearly observe that the $10$ subset RMSE is invariant to the relative magnitude of one outlier present in the data, whereas the RMSE grows rapidly on one subset.
\begin{figure}
\caption{Magnitude of outlier test.}
\label{fig:magnitude}
\end{figure}
The next test assesses the $95\%$ frequentist posterior coverage of the true held-out predictive value of size $1$, $\tilde{Y}$, against the \emph{increasing relative magnitude} of one outlier in the training data. To calculate coverage we generate $50$ independent MCMC chains at each level of outlier magnitude and calculate the proportion of chains which include the true predictive value within the $2.5\%$ and $97.5\%$ percentiles of the posterior predictive draws. For the coverage test we see that the empirical coverage of a single predictive value for the distributed subsets is, on average, $95\%$ regardless of the magnitude of the outlier, in contrast to the empirical coverage for the single subset. In the $1$ subset case, we can see that the empirical coverage degrades almost to zero as the magnitude of the outlier grows (see Figure~\ref{fig:coverage}).
\begin{figure}
\caption{Testing empirical coverage of predictive value.}
\label{fig:coverage}
\end{figure}
Our last evaluation is the coverage of the regression coefficients and the ability of our model selection techniques to \emph{choose the correct model} under the distributed setting with a single outlier of magnitude 10,000. We compare the posterior credible intervals of the regression coefficients for $ 1 $ and $ 10 $ subsets. Note that we do not include nested models in our evaluations or models larger than the true model (i.e.\ models with more than $ 3 $ covariates included). Furthermore, we perform this evaluation under two settings: one where we combine the optimal local model selected on each subset (``Model Combination''), and one where we combine the subposterior estimates and select the optimal model globally (``Estimate Combination''). As seen in Figure~\ref{fig:reg_coverage}, the parallel technique is able to select the correct model, whereas in the $ 1 $ subset test the outlier leads to the incorrect model being selected. Additionally, Figure~\ref{fig:reg_coverage_model} demonstrates that model and estimate combination yield similar results in the regression coefficient coverage test.
\begin{figure}
\caption{Posterior regression parameter coverage test results, estimate combination.}
\label{fig:reg_coverage}
\end{figure}
\begin{figure}
\caption{Posterior regression parameter coverage test results, model combination.}
\label{fig:reg_coverage_model}
\end{figure}
Also, we would like to check whether model and estimate combination continue to yield similar results in the other simulation studies performed. Figs.~\ref{fig:contam_compare}, \ref{fig:coverage_compare}, and \ref{fig:magnitude_compare} show that there is little difference between the two ways of combining the information for model selection in each of the tests evaluated. \begin{figure}
\caption{Contamination test}
\label{fig:contam_compare}
\end{figure}
\begin{figure}
\caption{Coverage test.}
\label{fig:coverage_compare}
\end{figure}
\begin{figure}
\caption{Magnitude test.}
\label{fig:magnitude_compare}
\end{figure}
Furthermore, we wish to evaluate our method on a large synthetic dataset with the same generating process as above, but with \emph{one million observations} divided over 50 processors. Here, we examine the behavior of our method when we increase the magnitude of one outlier in the dataset and when we increase the number of outliers with fixed magnitude. In Figure~\ref{fig:big_data}, we can see that our performance is robust when the number of outliers per subset satisfies the conditions of Theorem 4. When the number of outliers reaches 40 and 50, we start to see a noticeable degradation of our method's predictive ability. However, this degradation is still small relative to what we might observe in the case where we do not divide the data into subsets.
Additionally, we would like to quantify the computational gain of dividing the data in this situation in terms of CPU time for running the model selection and inference procedure. For one subset the average computation time is 91,829.15 seconds with a standard error of $ 190.80 $ seconds. For ten subsets, the average computation time is 10,301.60 seconds with a standard error of $ 81.28 $ seconds. And for fifty subsets, the average computation time is 2,949.74 seconds with a standard error of $ 16.61 $ seconds, which shows that we obtain substantial computational gains by dividing our method across multiple processors.
\begin{figure}
\caption{Synthetic big data results.}
\label{fig:big_data}
\end{figure}
Lastly, we evaluate our parallel model selection method on the diabetes data set used in \cite{efron2004least}. The diabetes data consist of a $442 \times 10$ design matrix, whose columns are scaled to have zero mean and unit norm, and a single response vector. We hold out $45$ observations for test evaluation and plot the posterior $95\%$ credible intervals for the predictive values, centered at zero after subtracting the true predictive value. We can see in Fig.~\ref{fig:diabetes} that, after dividing the data across 5 subsets, we attain a tighter credible interval around the true value for each model selection technique.
\begin{figure}
\caption{Diabetes test data results.}
\label{fig:diabetes}
\end{figure}
\section{CONCLUSION}\label{sec:conclusion}
While a substantial body of work exists on fast and scalable Bayesian inference methods, little research is available on robust and scalable model selection.
In this paper we have studied a divide-and-conquer strategy that contributes to filling this gap. The strategy operates by taking the geometric median of posterior model probabilities or other selection criteria, extending previous results that focus on parametric inference. We show theoretically how the strategy, particularly in the setting of BMA, can be robust to outliers and, moreover, exhibits faster concentration to the true model in terms of posterior model probabilities. The concentration result also applies to the joint setting of model selection and parameter estimation. We illustrate with both simulated data and a real data example how variants of our strategy lead to more robust inference compared to the standard approach that does not divide the data into subsets. The strategy we present is simple to execute and we expect it to have good practical value.
\section{APPENDIX}\label{sec:appendix} \noindent\textbf{Proof of Theorem \ref{thm plain}.}
The proof is a modification of that for Theorem 2.1 in \cite{ghosal2000}. For any $\epsilon>2\varepsilon_N$ we have, by Assumption \ref{assumption1}, $$\log\mathcal D\left(\frac{\epsilon}{2},\mathcal P_{\mathcal S_N},d\right)\leq\log\mathcal D\left(\varepsilon_N,\mathcal P_{\mathcal S_N},d\right)\leq N\varepsilon_N^2.$$ Then, by Theorem 7.1 in \cite{ghosal2000}, there exist tests $\phi_N$ and a large enough constant $T$ (chosen later) such that \begin{equation} P_0^N\phi_N\leq e^{N\varepsilon_N^2}e^{-LNT^2\varepsilon_N^2}\frac{1}{1-e^{-LNT^2\varepsilon_N^2}},\label{test1} \end{equation} and \begin{equation} \sup_{\theta\in\mathcal S_N:d(P_\theta,P_0)>T\varepsilon_N}P_\theta^N(1-\phi_N)\leq e^{-LNT^2\varepsilon_N^2},\label{test2} \end{equation}
for a universal constant $L>0$ and any $N>0$, where $P_\theta^N$ denotes the probability measure on $(X,Y)$ under $(X_1,Y_1)\sim p_0(x)\times p_\theta(y|x)$.
By \eqref{test1}, we have \begin{equation}
P_0^NPr(\theta:d(P_\theta,P_0)>T\varepsilon_N|X,Y)\phi_N\leq P_0^N\phi_N\leq2e^{-LN\varepsilon_N^2},\label{interim5} \end{equation} for sufficiently large $N$, if we choose $LT^2-1>L$. Now, since
$$P_0\frac{p_\theta(Y_1|X_1)}{p_0(Y_1|X_1)}=\int\frac{p_\theta(y|x)}{p_0(y|x)}p_0(dy|x)p_0(dx)=\int p_\theta(dy|x)p_0(dx)=1,$$ by Fubini's theorem, we have
$$P_0^N\int_{\mathcal S\setminus\mathcal S_N}\prod_{i=1}^N\frac{p_\theta(Y_i|X_i)}{p_0(Y_i|X_i)}Pr(\mathrm{d}\theta)\leq Pr(\mathcal S\setminus\mathcal S_N).$$ Hence, by Fubini's theorem again, \begin{eqnarray}
&&P_0^N\int_{\theta\in\mathcal S:d(P_\theta,P_0)>T\varepsilon_N}\prod_{i=1}^N\frac{p_\theta(Y_i|X_i)}{p_0(Y_i|X_i)}Pr(\mathrm{d}\theta)(1-\phi_N)\notag\\ &\leq&Pr(\mathcal S\setminus\mathcal S_N)+\int_{\theta\in\mathcal S_N:d(P_\theta,P_0)>T\varepsilon_N}P_\theta^N(1-\phi_N)Pr(\mathrm{d}\theta)\notag\\ &\leq&Pr(\mathcal S\setminus\mathcal S_N)+e^{-LNT^2\varepsilon_N^2}\text{\ \ by \eqref{test2}}\notag\\ &\leq&2e^{-N\varepsilon_N^2(C+4)},\label{interim4} \end{eqnarray} if $KM^2\geq C+4$ and $T$ is chosen large enough that $LT^2\geq C+4$, by Assumption \ref{assumption2}.
By Lemma \ref{lemma bound} (stated below) and Assumption \ref{assumption3}, with probability at least $1-1/(C^2N\varepsilon_N^2)$, we have \begin{equation}
\int\prod_{i=1}^N\frac{p_\theta(Y_i|X_i)}{p_0(Y_i|X_i)}Pr(\mathrm{d}\theta)\geq e^{-2N\varepsilon_N^2}Pr(B_n)\geq e^{-N\varepsilon_N^2(2+C)},\label{interim3} \end{equation} where
$$B_n=\left\{\theta:-P_0\log\frac{p_\theta(Y_1|X_1)}{p_0(Y_1|X_1)}\leq\varepsilon_N^2,\ P_0\left(\log\frac{p_\theta(Y_1|X_1)}{p_0(Y_1|X_1)}\right)^2\leq\varepsilon_N^2\right\}.$$ Let $A_N$ be the event that \eqref{interim3} holds. We have \begin{eqnarray*}
&&P_0^NPr(\theta:d(P_\theta,P_0)>T\varepsilon_N|X,Y)(1-\phi_N)\mathbf 1_{A_N}\\
&=&P_0^N\frac{\int_{\theta:d(P_\theta,P_0)>T\varepsilon_N}\prod_{i=1}^N\frac{p_\theta(Y_i|X_i)}{p_0(Y_i|X_i)}Pr(\mathrm{d}\theta)}{\int\prod_{i=1}^N\frac{p_\theta(Y_i|X_i)}{p_0(Y_i|X_i)}Pr(\mathrm{d}\theta)}(1-\phi_N)\mathbf 1_{A_N}\\
&\leq&e^{N\varepsilon_N^2(2+C)}2e^{-N\varepsilon_N^2(C+4)}\text{\ \ by \eqref{interim4} and \eqref{interim3}}\\
&=&2e^{-2N\varepsilon_N^2}.\label{interim6} \end{eqnarray*}
Therefore, \begin{eqnarray*}
&&P_0^NPr(\theta:d(P_\theta,P_0)>T\varepsilon_N|X,Y)\\
&=&P_0^NPr(\theta:d(P_\theta,P_0)>T\varepsilon_N|X,Y)\phi_N+P_0^NPr(\theta:d(P_\theta,P_0){}\\
&&{}>T\varepsilon_N|X,Y)(1-\phi_N)\mathbf 1_{A_N}+P_0^NPr(\theta:d(P_\theta,P_0){}\\
&&>{}T\varepsilon_N|X,Y)(1-\phi_N)(1-\mathbf 1_{A_N})\\
&\leq&P_0^NPr(\theta:d(P_\theta,P_0)>T\varepsilon_N|X,Y)\phi_N+P_0^NPr(\theta:d(P_\theta,P_0){}\\
&&{}>T\varepsilon_N|X,Y)(1-\phi_N)\mathbf 1_{A_N}+P_0^N(A_N^c)\text{\ \ for sufficiently large $T$}\\
&\leq&2e^{-LN\varepsilon_N^2}+2e^{-2N\varepsilon_N^2}+\frac{1}{C^2N\varepsilon_N^2}, \end{eqnarray*} by \eqref{interim5}, \eqref{interim6} and the property of $A_N$. By Chebyshev's inequality, we have
$$P_0^N\left(Pr(\theta:d(P_\theta,P_0)>T\varepsilon_N|X,Y)>\delta\right)\leq\frac{1}{C^2N\varepsilon_N^2\delta}+\frac{2e^{-LN\varepsilon_N^2}}{\delta}+\frac{2e^{-2N\varepsilon_N^2}}{\delta},$$ which completes the proof of the theorem.
\qed
\begin{lemma}
For any $\epsilon>0$ and probability distribution $\Pi$ defined on the set
\begin{equation}
\left\{\theta:-P_0\log\frac{p_\theta(Y_1|X_1)}{p_0(Y_1|X_1)}\leq\epsilon^2,\ P_0\left(\log\frac{p_\theta(Y_1|X_1)}{p_0(Y_1|X_1)}\right)^2\leq\epsilon^2\right\},\label{set}
\end{equation}
we have, for every $C>0$,
\begin{equation}
P_0^N\left(\int\prod_{i=1}^N\frac{p_\theta(Y_i|X_i)}{p_0(Y_i|X_i)}\Pi(\mathrm d\theta)\leq e^{-(1+C)N\epsilon^2}\right)\leq\frac{1}{C^2N\epsilon^2}.\label{bound}
\end{equation}\label{lemma bound} \end{lemma}
\begin{thm}[Adapted from \cite{minsker2015geometric}]
Consider a Hilbert space $(\mathbb H,\langle\cdot,\cdot\rangle)$ and $\xi_0\in\mathbb H$. Let $\hat\xi_1,\ldots,\hat\xi_R\in\mathbb H$ be a collection of independent random $\mathbb H$-valued elements. Let $\alpha,q,\nu$ be constants such that $0<q<\alpha<1/2$ and $0\leq\nu<(\alpha-q)/(1-q)$. Suppose that there exists $\epsilon>0$ such that for all $j$, where $1\leq j\leq\lfloor(1-\nu)R\rfloor+1$,
$$P(\|\hat\xi_j-\xi_0\|>\epsilon)\leq q.$$
Let $\hat\xi_*=\text{med}_g(\hat\xi_1,\ldots,\hat\xi_R)$ be the geometric median of $\{\hat\xi_1,\ldots,\hat\xi_R\}$. Then
$$P(\|\hat\xi_*-\xi_0\|>C_\alpha\epsilon)\leq\left(e^{(1-\nu)\psi(\frac{\alpha-\nu}{1-\nu},q)}\right)^{-R},$$
where $C_\alpha=(1-\alpha)\sqrt{1/(1-2\alpha)}$, and
$$\psi(\alpha,q)=(1-\alpha)\log\frac{1-\alpha}{1-q}+\alpha\log\frac{\alpha}{q}.$$\label{thm geometric median} \end{thm}
\section*{ACKNOWLEDGMENTS} The contribution of Lizhen Lin was funded by NSF grants IIS 1663870, CAREER DMS 1654579. The contribution of Henry Lam was funded by NSF grants CMMI-1542020, CMMI-1523453 and CAREER CMMI-1653339. The contribution of Michael Zhang was funded by NSF grant 1447721.
\section*{REFERENCES}
\end{document} |
\begin{document}
\pagestyle{plain}
\title{Descent polynomials } \author{ Alexander Diaz-Lopez\\[-5pt] \small Department of Mathematics and Statistics, Villanova University,\\[-5pt] \small 800 Lancaster Avenue, Villanova, PA 19085, USA, {\tt [email protected]}\\ Pamela E. Harris\\[-5pt] \small Mathematics and Statistics Department, Williams College,\\[-5pt] \small 18 Hoxsey Street, Williamstown, MA 01267, USA, {\tt [email protected]}\\ Erik Insko\\[-5pt] \small Department of Mathematics, Florida Gulf Coast University,\\[-5pt] \small 10501 FGCU Blvd. South, Fort Myers, FL 33965-6565, USA, {\tt [email protected]}\\ Mohamed Omar\\[-5pt] \small Department of Mathematics, Harvey Mudd College,\\[-5pt] \small 301 Platt Boulevard, Claremont, CA 91711-5901, USA, {\tt [email protected]}\\ Bruce E. Sagan\\[-5pt] \small Department of Mathematics, Michigan State University,\\[-5pt] \small East Lansing, MI 48824-1027, USA, {\tt [email protected]} }
\date{\today\\[10pt]
\begin{flushleft}
\small Key Words: coefficients, consecutive pattern avoidance, Coxeter group, descent polynomial, descent set, peak polynomial, peak set, roots\\[5pt]
\small AMS subject classification (2010): 05A05 (Primary) 05E15, 20F55 (Secondary)
\end{flushleft}}
\maketitle
\begin{abstract}
Let $n$ be a nonnegative integer and $I$ be a finite set of positive integers. In 1915, MacMahon proved that the number of permutations in the symmetric group ${\mathfrak S}_n$ with descent set $I$ is a polynomial in $n$. We call this the descent polynomial. However, basic properties of these polynomials such as a description of their coefficients and roots do not seem to have been studied in the literature. Much more recently, in 2013, Billey, Burdzy, and Sagan showed that the number of elements of ${\mathfrak S}_n$ with peak set $I$ is a polynomial in $n$ times a certain power of two. Since then, there has been a flurry of papers investigating properties of this peak polynomial. The purpose of the present paper is to study the descent polynomial. We will see that it displays some interesting parallels with its peak relative. Conjectures and questions for future research are scattered throughout.
\end{abstract}
\section{Introduction} \label{sec:int}
For the rest of this paper, we let $n$ be a nonnegative integer and $I$ be a finite set of positive integers. (In Section~\ref{sec:ocg} we will permit $I$ to contain $0$.) We will also use the notation \begin{equation} \label{eq:m} m=\max(I\cup\{0\}), \end{equation} where the presence of zero ensures that $m$ is well defined even when $I$ is empty. We also use the standard notation $[n]=\{1,2,\dots,n\}$. More generally, given integers $\ell,n$ we set $$[\ell,n]=\{\ell,\ell+1,\dots,n\},$$ and similarly for other interval notations.
Denote by ${\mathfrak S}_n$ the symmetric group of permutations $\pi=\pi_1\pi_2\dots\pi_n$ of $[n]$ written in one-line notation. Note that we will sometimes insert commas into such sequences for clarity in distinguishing adjacent elements. The {\em descent set} of $\pi$ is $$ \Des\pi=\{i \mid \pi_i>\pi_{i+1}\}\subseteq [n-1]. $$ Note that a similar definition can be given for any sequence $\pi$ of integers and we will have occasion to use that level of generality. Given $I$ and $n>m$, where $m$ is defined by~\ree{eq:m}, we wish to study the set $$ D(I;n) = \{\pi\in{\mathfrak S}_n \mid \Des\pi = I\}, $$ and its cardinality $$ d(I;n)=\# D(I;n). $$ As an example, if $I=\{1,2\}$ then \begin{equation} \label{eq:D12} D(\{1,2\};n) =\{\pi\in{\mathfrak S}_n \mid \pi_1>\pi_2>\pi_3<\pi_4<\dots<\pi_n\}. \end{equation} It follows that $\pi_3=1$. Furthermore, one can pick any two integers from $[2,n]$ to be to the left of $\pi_3$. Placing the integers to the left of $\pi_3$ in decreasing order and the remaining ones to the right of $\pi_3$ in increasing order completely determines $\pi$. Thus \begin{equation} \label{eq:d12} d(\{1,2\};n)=\binom{n-1}{2} = \frac{(n-1)(n-2)}{2}, \end{equation} which is a polynomial in $n$. Using the Principle of Inclusion and Exclusion, MacMahon~\cite[Art.\ 157]{mac:ca} proved that this is always the case. \begin{thm}[\cite{mac:ca}] \label{th:mac} For any $I$ and all $n>m$ we have that $d(I;n)$ is a polynomial~in~$n$.
\qed \end{thm} We call $d(I;n)$ the {\em descent polynomial} of $I$. Although this result was proved in 1915, very little work has been done in the intervening years to study these polynomials in more detail. The purpose of this work is to rectify this oversight. We also note that since $d(I;n)$ is a polynomial, we can extend its domain of definition to all complex $n$, which will be a useful viewpoint in the sequel.
Another well-studied statistic on permutations is the {\em peak set} defined by $$ \Peak\pi=\{i \mid \pi_{i-1}<\pi_i>\pi_{i+1}\} \subseteq[2,n-1]. $$ It is not true that any set of integers $I\subseteq[2,\infty)$ is the peak set of some permutation. For example, clearly $I$ can not contain two consecutive indices. Say that $I$ is {\em admissible} if there is some permutation $\pi$ with $\Peak\pi=I$. For $I$ admissible and $n>m$, consider the set $$ P(I;n) = \{\pi\in{\mathfrak S}_n \mid \Peak\pi = I\}. $$ To illustrate, if $I=\emptyset$, then $$ P(\emptyset;n) = \{\pi\in{\mathfrak S}_n \mid \text{$\pi_1>\dots >\pi_i<\pi_{i+1}<\dots<\pi_n$ for some $1\le i\le n$}\}. $$ Noting that $\pi_i$ must be $1$, such a permutation is determined by picking some subset of $[2,n]$ to be to the left of $\pi_i$, then arranging those elements in decreasing order, and finally making the rest an increasing sequence to the right of $\pi_i$. It follows that $$ \#P(\emptyset;n) = 2^{n-1}, $$ which is certainly not a polynomial in $n$. But nearly one hundred years after MacMahon's theorem, Billey, Burdzy, and Sagan~\cite{bbs:pps} proved the following result. \begin{thm}[\cite{bbs:pps}] \label{th:bbs} For any admissible $I$ and all $n>m$ we have that $$ \#P(I;n) = p(I;n) 2^{n-\#I-1}, $$ where $p(I;n)$ is a polynomial in $n$ taking on integer values in the range $(m,\infty)$.
\qed \end{thm} As might be expected, $p(I;n)$ is called the {\em peak polynomial} of $I$. Inspired by this theorem, a number of papers have been written about properties of peak and related polynomials~\cite{bbps:mew,bft:crp,cvdlopz:nps,dlhio:ppp,dlhip:psc,dnpt:psp,kas:mfp}. It turns out that many of our results about descent polynomials have analogues for peak polynomials.
The rest of this paper is organized as follows. In the next section we derive two recursions for $d(I;n)$ that prove useful in the sequel. Section~\ref{sec:coe} is devoted to the study of the coefficients of $d(I;n)$ when expanded in an appropriately centered binomial coefficient basis for the polynomial ring ${\mathbb Q}[n]$. In particular, we give a combinatorial interpretation for these constants which permits us to prove a log-concavity result. We also explore a conjecture that the coefficients of $d(I;n)$ when expanded in a differently centered basis alternate in sign. In Section \ref{sec:roo}, we study the roots of the descent polynomial, including those which are complex. It will be shown that the elements of $I$ are always integral zeros, and progress will be made towards a conjecture about the location of the full set of roots in the complex plane. Analogues of $d(I;n)$ in Coxeter groups of type $B$ and $D$ are considered in Section~\ref{sec:ocg}. We end with a section containing comments and open questions. There we present a result that unifies Theorems~\ref{th:mac} and~\ref{th:bbs} using the concept of consecutive pattern avoidance.
\section{Two recursions} \label{sec:rec}
In this section we derive two recursions for $d(I;n)$. The first will be useful in a number of ways, for example in determining the degree of $d(I;n)$ and in finding some of its roots.
If $I\neq\emptyset$, then we let $$ I^-= I-\{m\}. $$ We first express $d(I;n)$ in terms of $d(I^-;n)$ which will permit latter proofs by induction on $m$ or on $\#I$. \begin{prop}\label{prop:recursion} If $I\neq\emptyset$, then \begin{equation} \label{eq:I^-} d(I;n)=\binom{n}{m} d(I^-;m)-d(I^-;n). \end{equation} \end{prop} \begin{proof} Consider the set $P$ of permutations $\pi\in{\mathfrak S}_n$ that can be written as a concatenation $\pi=\pi'\pi''$ satisfying \begin{enumerate} \item $\#\pi'=m$ and $\#\pi''=n-m$, and \item $\Des\pi'=I^-$ and $\pi''$ is increasing. \end{enumerate} We can write $P$ as the disjoint union of those $\pi$ where $\pi_m'>\pi_1''$ and those where the reverse inequality holds. So $\#P = d(I;n)+d(I^-;n)$.
On the other hand, the elements of $P$ can be constructed as follows. Pick $m$ elements of $[n]$ to be in $\pi'$ which can be done in $\binom{n}{m}$ ways. Arrange those elements to have descent set $I^-$ which can be done in $d(I^-;m)$ ways. Finally, put the remaining elements in $\pi''$ in increasing order which can only be done in one way. It follows that $\#P=\binom{n}{m} d(I^-;m)$. Comparing this with the expression for $\#P$ at the end of the previous paragraph completes the proof. \end{proof}
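To illustrate the recursion, take $I=\{1,2\}$, so that $m=2$ and $I^-=\{1\}$. Since $d(\{1\};n)=n-1$ (a permutation with descent set $\{1\}$ is determined by the choice of $\pi_1\in[2,n]$, after which the remaining elements must appear in increasing order), equation~\ree{eq:I^-} gives $$d(\{1,2\};n)=\binom{n}{2}d(\{1\};2)-d(\{1\};n)=\binom{n}{2}-(n-1)=\frac{(n-1)(n-2)}{2},$$ in agreement with~\ree{eq:d12}.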
We can use the previous result to provide a new proof of MacMahon's theorem and to also obtain the degree of $d(I;n)$. \begin{thm} \label{thm:polydegreen} For all $I$ we have that $d(I;n)$ is a polynomial in $n$ with $\deg d(I;n)=m$. \end{thm} \begin{proof} We prove this by induction on $\#I$. If $I=\emptyset$, then $d(I;n)=1$ and the result clearly holds. For nonempty $I$, we examine~\ree{eq:I^-}. We have that $\binom{n}{m}$ is a polynomial in $n$ of degree $m$. Multiplying by the nonzero constant $d(I^-;m)$ does not change this. And, by induction, $d(I^-;n)$ is a polynomial of lesser degree so that the first term in the difference is dominant. \end{proof}
MacMahon also gave an explicit formula for $d(I;n)$ using the Principle of Inclusion and Exclusion. As a further application of ~\ree{eq:I^-}, we will now rederive this expression. Before doing so, we set the following notation. Recall that a {\em composition of $n$} is a sequence of positive integers summing to $n$. Given a set of positive integers $I=\{i_1<\dots<i_k\}$ and $n>i_k$ it will be convenient to let $i_0=0$ and $i_{k+1}=n$. Now we can form the {\em difference composition} \begin{equation}\label{delta(J)} \delta(I)=(i_1-i_0,\ i_2-i_1,\ \dots,\ i_{k+1}-i_k). \end{equation} To any composition $\delta=(\delta_1,\dots,\delta_k)$ of $n$ we associate the multinomial coefficient $$ \binom{n}{\delta}=\frac{n!}{\delta_1!\dots \delta_k!}. $$ Finally, we let $\binom{I}{i}$ be the set of all $i$-element subsets of $I$. \begin{thm}[\cite{mac:ca}] \label{PIE} If $I$ is a set of positive integers with $\#I=k$, then \begin{equation} \label{PIEeq} d(I;n)=\sum_{i\ge0} (-1)^{k-i} \sum_{J\in \binom{I}{i}} \binom{n}{\delta(J)}. \end{equation} \end{thm} \begin{proof} We proceed by induction on $\#I$. If $I=\emptyset$, then $d(I;n)=1$. In this case the right-hand side of \eqref{PIEeq} is $\binom{n}{\delta(\emptyset)}=1$. We assume that the result holds for all sets $I$ with $\#I\leq k$. Consider $\#I=k+1$ and $m=\max(I)$. Note that if $\delta^-$ is a composition of $m$ then $\binom{n}{m}\binom{m}{\delta^-}=\binom{n}{\delta}$ where $\delta$ is $\delta^-$ with $n-m$ appended. Now using this fact, equation~\eqref{eq:I^-}, and the induction hypothesis we have \begin{align*} d(I;n)&=\binom{n}{m}\left[\sum_{i\ge0} (-1)^{k-i} \sum_{J\in \binom{I^-}{i}} \binom{m}{\delta(J)}\right] -\sum_{i\ge0} (-1)^{k-i} \sum_{J\in \binom{I^-}{i}} \binom{n}{\delta(J)}\\ &=\sum_{i\ge0}(-1)^{k+1-i}\left[\sum_{J\in\binom{I}{i},\;m\in J}\binom{n}{\delta(J)}+\sum_{J\in\binom{I}{i},\; m\notin J}\binom{n}{\delta(J)}\right]\\ &=\sum_{i\ge0}(-1)^{k+1-i}\sum_{J\in\binom{I}{i}}\binom{n}{\delta(J)}, \end{align*} as desired. \end{proof}
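For example, if $I=\{1,2\}$ then $\delta(\{1,2\})=(1,1,n-2)$, $\delta(\{1\})=(1,n-1)$, $\delta(\{2\})=(2,n-2)$ and $\delta(\emptyset)=(n)$, so \eqref{PIEeq} reads $$d(\{1,2\};n)=\binom{n}{\delta(\{1,2\})}-\binom{n}{\delta(\{1\})}-\binom{n}{\delta(\{2\})}+\binom{n}{\delta(\emptyset)}=n(n-1)-n-\frac{n(n-1)}{2}+1=\frac{(n-1)(n-2)}{2},$$ again in agreement with~\ree{eq:d12}.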
It will be useful to have a recursion that does not contain any negative terms. We will see an application of this recursion when we investigate the expansion of $d(I;n)$ in a certain binomial basis. A similar recursion was used by Diaz-Lopez, Harris, Insko and Omar~\cite{dlhio:ppp} when they proved the peak polynomial positivity conjecture of Billey, Burdzy, and Sagan~\cite{bbs:pps}. To state our recursion, we need some notation.
Suppose $I=\{i_1,\dots,i_\ell\}$ where the integers are listed in increasing order. We define two related sets of positive integers. Specifically, for $1\le k\le\ell$ we let $$ I_k=\{i_1,\dots,i_{k-1},i_k-1,\dots,i_\ell-1\}-\{0\}, $$ and $$ \hat{I}_k=\{i_1,\dots,i_{k-1},i_{k+1}-1,\dots,i_\ell-1\}. $$ Note that subtracting $\{0\}$ in $I_k$ is only necessary when $k=1$ and $i_1=1$ so that $I_k$ is still a set of positive integers. The reason these sets are interesting is that if one removes $n+1$ from a $\pi\in D(I;n+1)$ then the resulting $\pi'$ has $\Des\pi'=I_k$ or $\Des\pi'=\hat{I}_k$ for some $k$.
Also note that $n+1$ can only appear at the end of $\pi$ or at a position $i_k$ where $i_k-1\not\in I$. So define $$ I'=\{i_k \mid i_k-1\not\in I\} $$ and $I''=I'-\{1\}$. Note $I'$ and $I''$ are only different if $1\in I'$. \begin{thm} \label{thm:rec2} If $I\neq\emptyset$, then $$ d(I;n+1)=d(I;n)+\sum_{i_k\in I''} d(I_k;n) +\sum_{i_k\in I'} d(\hat{I}_k;n). $$ \end{thm} \begin{proof} We partition $D(I;n+1)$ according to the position of $n+1$. If $\pi\in D(I;n+1)$ then we let $\pi'$ be the permutation obtained from $\pi$ by deleting $n+1$. If $\pi_{n+1}=n+1$ then the corresponding $\pi'$ are the elements of $D(I;n)$ which gives the first term in the sum for $d(I;n+1)$. Now suppose $\pi_{i_k}=n+1$ where $i_k>1$ and $\pi_{i_k-1}>\pi_{i_k+1}$. Then the possible $i_k$ where this could occur are exactly the elements of $I''$, and the $\pi'$ which result form the set $D(I_k;n)$. This explains the first summation. Similarly, suppose $\pi_{i_k}=n+1$ where either $i_k=1$, or $i_k>1$ and $\pi_{i_k-1}<\pi_{i_k+1}$. Then the corresponding $\pi'$ are counted by the second sum and we are done. \end{proof}
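To see this recursion in action, let $I=\{1,2\}$ once more. Here $I'=\{1\}$ since $i_1-1=0\notin I$ while $i_2-1=1\in I$, so $I''=\emptyset$ and the only set appearing in the sums is $\hat{I}_1=\{i_2-1\}=\{1\}$. The recursion therefore reads $$d(\{1,2\};n+1)=d(\{1,2\};n)+d(\{1\};n)=\frac{(n-1)(n-2)}{2}+(n-1)=\frac{n(n-1)}{2},$$ which agrees with~\ree{eq:d12} evaluated at $n+1$.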
\section{Coefficients} \label{sec:coe}
In this section we show that the coefficients of descent polynomials, written in a certain polynomial basis, are nonnegative by providing a combinatorial interpretation for them. Based on a partial result and computer evidence, we then conjecture that these coefficients form a log-concave sequence. We also make a conjecture that the coefficients in another polynomial basis alternate in sign and prove it in a special case.
The study of coefficients of polynomials has a rich history and many important examples. For instance, Ehrhart polynomials \cite{s:drcp} and chromatic polynomials \cite{b:ecpl} can be written in certain polynomial bases using {nonnegative} coefficients. {In 2013} Billey, Burdzy, and Sagan conjectured that peak polynomials {could} be written with non-negative coefficients in a binomial basis \cite{bbs:pps}. This conjecture was {proved} in 2017 by Diaz-Lopez et al. \cite{dlhio:ppp}. We restate their result here and then prove a similar, {but stronger,} result for descent polynomials in Theorem \ref{comb interp}. \begin{thm} [{\cite{dlhio:ppp}}]\label{thm:dlhio} For any non-empty admissible set $I$ we have \[ p(I;n)=b_0(I) \binom{n-m}{0}+b_1(I)\binom{n-m}{1}+\dots+ b_{m-1}(I) \binom{n-m}{m-1},\] where $b_0(I)=0$ and for $ 1\leq k \leq m-1$ the constant $b_k(I)$ is positive.
\qed \end{thm}
Before proving our main result of this section, we need a lemma which is of interest in its own right. Recall the definition that for integers $\ell,n$ $$ [\ell,n]=\{\ell,\ell+1,\ell+2,\dots,n\}. $$ We also use this notation for the sequence $\ell,\ell+1,\dots,n$. Context should make it clear which interpretation is meant.
\begin{lem} \label{lem:pos} For any finite set of positive integers $I$ and $n>m$ we have $D(I;n)\neq\emptyset$. \end{lem} \begin{proof} We induct on $\#I$. If $I=\emptyset$ then the identity permutation is in $D(I;n)$. If $I\neq\emptyset$ then by induction there is a permutation $\pi \in \mathfrak{S}_m$ with $\pi\in D(I^-;m)$ where, as usual, $I^-=I-\{m\}$. It follows that $D(I;n)$ contains the concatenation $\sigma=\pi' 1 [m+2,n]$ where $\pi'$ is $\pi$ with all its elements increased by one. \end{proof}
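For example, if $I=\{1,3\}$ then $m=3$ and $I^-=\{1\}$, and taking $\pi=213\in D(\{1\};3)$ gives $\pi'=324$ and $\sigma=3\,2\,4\,1\,[5,n]$, which indeed has $\Des\sigma=\{1,3\}$.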
We can now state the main result of this section for descent polynomials.
\begin{thm} \label{comb interp} For any {finite set of positive integers} $I$ we have that \begin{equation} \label{a_k(I) exp} d(I;n)=a_0(I) \binom{n-m}{0}+a_1(I)\binom{n-m}{1}+\dots+ a_m(I) \binom{n-m}{m}, \end{equation} where $a_0(I)=0$ and for $k\geq 1$ the constant $a_k(I)$ is the number of $\pi\in D(I;2m)$ such that \begin{equation} \label{pi cap int} \{\pi_1,\dots,\pi_m\}\cap [m+1,2m]=[m+1,m+k]. \end{equation} Moreover, $a_k(I)>0$ for $1 \leq k \leq m$. \end{thm} \begin{proof} By Theorem \ref{thm:polydegreen}, $d(I;n)$ is a polynomial in $n$ of degree $m$, so we can write it uniquely as a linear combination of the polynomial basis $$\left\{ \binom{n-m}{0}, \binom{n-m}{1},\dots,\binom{n-m}{m}\right\}. $$
For ease of notation, given $\pi\in D(I;n)$ we let $$ \pi[m] = \{\pi_1,\dots,\pi_m\}\cap [m+1,n]. $$ Now consider $$
D_k(I;n)=\{\pi\in D(I;n)\ |\ \#\pi[m]=k\}. $$ Clearly $D(I;n)$ is the disjoint union of the sets $D_k(I;n)$ for $k\geq 0$. So to prove the summation formula in~\eqref{a_k(I) exp}, it suffices to demonstrate that $\#D_k(I;n)=a_k(I)\binom{n-m}{k}$. We also claim that $D_0(I;n)=\emptyset$ which forces $a_0(I)=0$. Indeed, if there is an element $\pi\in D_0(I;n)$ then $\pi[m]=\emptyset$. This implies that $\{\pi_1,\dots,\pi_m\}=[m]$. Thus $\pi_m\le m$ and $\pi_{m+1}>m$ which contradicts the fact that $m$ is a descent.
For the rest of the proof we will assume $n\ge 2m$. This assumption is without loss of generality since if we can show that the polynomials on both sides of equation~\ree{a_k(I) exp} agree for an infinite number of values, then they must agree everywhere. For $k\geq 1$, consider the elements $\pi \in D_k(I;n)$. There are $\binom{n-m}{k}$ ways to pick the $k$ elements of $\pi[m]$. Furthermore, given any two $k$-element subsets $X$ and $Y$ of $[m+1,n]$, there is an order preserving bijection $f:X\rightarrow Y$. This induces a bijection from the $\pi\in D_k(I;n)$ with $\pi[m]=X$ to the $\sigma\in D_k(I;n)$ with $\sigma[m]=Y$ by applying $f$ to the elements of $\pi[m]$, leaving the elements in the first $m$ positions from $[m]$ unchanged, and then listing the remaining elements in increasing order. Note that all the elements of $[m]$ remain unchanged as $f$ is only applied to elements of $[m+1,n]$. This bijection clearly preserves the descent set everywhere except possibly at position $m$. To see that the descent at $m$ is preserved, note that $\pi_{m+1}\in[m]$ since the subsequence $\pi_{m+1}\cdots \pi_n$ is increasing
and there is at least one element of $[m]$ not in $\{\pi_1,\dots,\pi_m\}$ {because of the assumption} $k\ge1$. But then in $\sigma=f(\pi)$ we have $\sigma_{m+1}=\pi_{m+1}$ since elements of $[m]$ are unchanged. So if $\pi_m\in[m]$ then $\sigma_m=\pi_m>\pi_{m+1}=\sigma_{m+1}$ and if $\pi_m>m$ then $\sigma_m>m\ge\sigma_{m+1}$ as desired.
Letting $X=[m+1,m+k]$ and writing $D_X$ for the set of $\pi\in D_k(I;n)$ with $\pi[m]=X$, we have shown that
\[\#D_k(I;n)=\#D_X \cdot \binom{n-m}{k}.\]
Furthermore $k=\#X$ is less than or equal to $m$, which means that the largest interval we need to consider is $[m+1,2m]$ and this is contained in $[m+1,n]$ by our assumption that $n\ge2m$. Since an element of $D_X$ is determined by its first $m$ entries, which are drawn from $[m]\cup X\subseteq[2m]$, the quantity $\#D_X$ is a constant independent of $n$, and comparing with the case $n=2m$ shows that $\#D_X=a_k(I)$. This completes the proof of the summation formula \eqref{a_k(I) exp}.
To prove the last statement of the theorem, suppose $1\leq k \leq m$. It is enough to show that $D_k(I;2m) \neq \emptyset$. By Lemma~\ref{lem:pos} there is $\pi\in D(I^-;m)$. Thus the concatenation $\sigma=\pi' [1,k] [m+k+1,2m]$ is in $D_k(I;2m)$ where $\pi'$ is $\pi$ with every element increased by~$k$. \end{proof}
To illustrate this result, let $I=\{1,2\}$. Then $a_1(I)$ is the number of $\pi=\pi_1\pi_2\pi_3\pi_4 \in D(I;4)$ such that $\{ \pi_1,\pi_2\} \cap [3,4] = [3]$. Similarly, $a_2(I)$ is the number of $\pi \in D(I;4)$ such that $\{ \pi_1,\pi_2\} \cap [3,4] = [3,4]$. Out of the three elements in $D(I;4)$ one can quickly check that only $\pi=3214$ satisfies the condition for $a_1(I)$, thus $a_1(I)=1$. Similarly, only $\pi=4312$ satisfies the condition for $a_2(I)$, so $a_2(I)=1$. Theorem \ref{comb interp} states that \[ d (I;n)= \binom{n-2}{1} + \binom{n-2}{2}.\] By the binomial recursion, this expression agrees with \eqref{eq:d12}.
Many coefficient sequences of combinatorial polynomials have interesting properties, one of which we will investigate in the context of the previous theorem. A sequence of real numbers $(a_k)=(a_k)_{k\ge0}$ is {\em log-concave} if, for every $k\ge1$, we have $a_{k-1} a_{k+1}\le a_k^2$. Log-concave sequences appear naturally in combinatorics, {algebra, and geometry}; we refer the reader to \cite{s:logconc} and \cite{b:logconc} for important examples and results. We make the following conjecture about the sequence $(a_k(I))$ which has been verified for any set $I$ with $m\leq 18$.
\begin{conj} \label{con:lc} For any finite set of positive integers $I$, the sequence $(a_k(I))$ is log-concave. \end{conj}
We are able to prove this conjecture for certain $I$, but first we need a lemma. In it, the sequence $(a_k)$ is said to have a certain property, such as nonnegativity, if all the individual $a_k$ do. Also, the sequence has {\em no internal zeros} if the elements between any two nonzero elements of the sequence are also nonzero. \begin{lem} \label{log conc lem} \begin{enumerate} \item If $(a_k)$ and $(b_k)$ are log-concave sequences, then so is $(a_k b_k)$. \item Let $(a_k)$ be a nonnegative log-concave sequence with no internal zeros and let $\ell$ be a positive integer. Then the sequence $(a_k+a_{k+1}+\dots+a_{k+\ell})$ is log-concave. \end{enumerate} \end{lem} \begin{proof} Statement 1 follows easily from the definition of log-concavity. For statement 2 note that if we can prove the case $\ell=1$ then the general case will follow by induction since $a_k+\dots+a_{k+\ell}=(a_k+\dots+a_{k+\ell-1})+a_{k+\ell}$. A nonnegative log-concave sequence $(a_k)$ with no internal zeros satisfies $a_{k+1}/a_{k} \leq a_{k}/a_{k-1}$ for all $k$. In particular, if $j\le k$ then $a_{k+1}/a_k \leq a_j/a_{j-1}$ and thus $a_{j-1} a_{k+1}\le a_j a_k$. So \begin{align*} (a_{k-1}+a_k)(a_{k+1}+a_{k+2}) &=a_{k-1}a_{k+1}+a_{k-1}a_{k+2}+a_k a_{k+1} + a_k a_{k+2}\\ &\le a_k^2+a_k a_{k+1}+a_k a_{k+1} + a_{k+1}^2\\ &=(a_k+a_{k+1})^2, \end{align*} as desired. \end{proof}
\begin{figure}
\caption{The diagram of a $\pi\in D(I;2m)$. The binomial numbers correspond to the possible ways of choosing each of the three highlighted segments.}
\label{fig:pi_k}
\end{figure}
The next result shows that the sequence $(a_k(I))$ is log-concave in a special case. \begin{prop}\label{prop:logconc} Let $\ell\le m$ be positive integers and let $I=\{\ell,\ell+1,\dots,m\}$. Then $(a_k(I))$ is log-concave. \end{prop} \begin{proof} We first use the combinatorial description of {$a_k(I)$} in Theorem~\ref{comb interp} to derive an explicit formula for this quantity. Let $\pi\in D(I;2m)$ satisfy equation~\ree{pi cap int}. In Figure \ref{fig:pi_k} we create a diagram of the permutation $\pi$ by plotting the points $(i,\pi_i)$ and connecting them by, possibly dotted or dashed, segments. Note that the form of $I$ implies that $\pi_1\dots\pi_m$ has a single local maximum at $\pi_\ell$. Combining this with~\ree{pi cap int} we see that $\pi_\ell=m+k$ and the elements of $[m+1,m+k]$ are $\pi_{\ell-k+i},\pi_{\ell-k+i+1},\dots,\pi_{\ell+i-1}$ for some $i$ with $1\le i\le m-\ell+1$. Now there are $\binom{k-1}{i-1}$ ways of selecting the elements $\pi_{\ell+1},\dots,\pi_{\ell+i-1}$. Once these elements are put in a decreasing sequence just after $\pi_\ell$, the rest of the elements {of $[m+1,m+k]$} must form an increasing sequence just before $\pi_\ell$. Next we choose the elements of the increasing sequence $\pi_1,\dots,\pi_{\ell-k+i-1}$ from $[m]$ in $\binom{m}{\ell-k+i-1}$ ways. The remaining $m-\ell+k-i+1$ elements of $[m]$ must be arranged as the elements $\pi_{\ell+i},\dots,\pi_{m+k}$ with unique local minimum at $\pi_{m+1}$. So the number of ways to choose $\pi_{m+2},\dots,\pi_{m+k}$ is $\binom{m-\ell+k-i}{k-1}$. And once these elements are chosen there is only one way to arrange them and the remaining elements since they are all in increasing or decreasing order. So $$ a_k(I)=\sum_{i=1}^{m-\ell+1} \binom{k-1}{i-1}\binom{m}{\ell-k+i-1}{\binom{m-\ell+k-i}{k-1}.} $$ Now for any fixed $c$, the binomial coefficient sequences $\left( \binom{k}{c} \right)_{k\ge0}$ and $\left( \binom{c}{k} \right)_{k\ge0}$
are well known to be log-concave. In combination with Lemma \ref{log conc lem}, this shows that the sequence $(a_k(I))$ is log-concave. \end{proof}
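As a quick check of this formula, take $\ell=1$ and $m=2$, so that $I=\{1,2\}$. For $k=1$ only the summand $i=1$ is nonzero and equals $\binom{0}{0}\binom{2}{0}\binom{1}{0}=1$, while for $k=2$ only the summand $i=2$ is nonzero and equals $\binom{1}{1}\binom{2}{0}\binom{1}{1}=1$. Thus $a_1(I)=a_2(I)=1$, matching the values computed after Theorem~\ref{comb interp}.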
If we expand $d(I;n)$ in the binomial basis centered at $-1$ then the resulting coefficients also seem to be well behaved. The following conjecture has been verified for all $I$ with $m\le 12$. \begin{conj} \label{-1 basis} For any $I$ we have $$ d(I;n) = \sum_{k=0}^m (-1)^{m-k} c_k(I) \binom{n+1}{k}, $$ where $c_k(I)$ is a nonnegative integer for all $0\le k\le m$. \end{conj}
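For instance, when $I=\{1,2\}$ one checks that $$d(\{1,2\};n)=\frac{(n-1)(n-2)}{2}=\binom{n+1}{2}-2\binom{n+1}{1}+3\binom{n+1}{0},$$ so $c_0(I)=3$, $c_1(I)=2$ and $c_2(I)=1$ are indeed nonnegative integers.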
We are able to prove this conjecture for $c_0(I)$. To do so, we need a couple of lemmas. Recall that since $d(I;n)$ is a polynomial in $n$, it is defined for all complex numbers. \begin{lem}\label{lem:d at 0} We have
\[ d(I;0)=(-1)^{ \#I}.\] \end{lem} \begin{proof} We proceed by induction on $\#I$. The result is clear when $I=\emptyset$ as $d(\emptyset;n)=1$. Consider any set $I$ with $\#I\geq 1$, then by Proposition \ref{prop:recursion} and the inductive hypothesis
\[ d(I;0)=\binom{0}{m}d(I^-;m)-d(I^-;0) = 0- (-1)^{\#I^-}=(-1)^{\#I},\] which is what we wished to prove. \end{proof}
Keeping the notation of Conjecture~\ref{-1 basis}, we note that \begin{equation} \label{c0} d(I;-1)=(-1)^m c_0(I). \end{equation} This is why our next result will be useful. \begin{prop}\label{prop:ineqonN} For any $I$ and any $n\geq m+2$ we have
\[ d(I;n) \geq |d(I;-1)|.\] \end{prop} \begin{proof} Note that $d(I;n)$ is an increasing function of $n$ for integral $n>m$ because any permutation $\pi\in D(I;n)$ can be extended to one in $D(I;n+1)$ by merely appending $n+1$. So it suffices to prove the result when $n=m+2$.
We proceed by induction on $m$. If $m=0$ then $I=\emptyset $ and $d(I;n)=1$ and the result follows. For the induction step, we first note that by Lemmas \ref{lem:pos} and \ref{lem:d at 0} $$
|d(I;0)|=1\leq d(I;m+1). $$ We now apply Theorem~\ref{thm:rec2}, keeping the notation therein, as well as induction and the previous displayed equation to obtain
\begin{align*}
d(I;m+2)&=d(I;m+1)+\displaystyle\sum_{i_k\in I''}d(I_k;m+1)+\displaystyle\sum_{{i_k\in I'}}d(\hat{I}_k;m+1)\\
&\geq d(I;m+1)+\displaystyle\sum_{i_k\in I''}|d(I_k;-1)|+\displaystyle\sum_{{i_k\in I'}}|d(\hat{I}_k;-1)|\\
&\geq |d(I;0)|+\displaystyle\sum_{i_k\in I''}|d(I_k;-1)|+\displaystyle\sum_{{i_k\in I'}}|d(\hat{I}_k;-1)|\\
&\geq \left|d(I;0)-\displaystyle\sum_{i_k\in I''}d(I_k;-1)-\displaystyle\sum_{{i_k\in I'}}d(\hat{I}_k;-1)\right|\\
&= |d(I;-1)|,
\end{align*} as desired. \end{proof}
\begin{prop} \label{prop:c_0} For any $I$ we have $c_0(I)\ge0$. \end{prop} \begin{proof} By equation~\ree{c0}, it suffices to show that the sign of $d(I;-1)$ is $(-1)^m$. We will proceed by induction on $\#I$. As usual, the case $I=\emptyset$ is trivial. For $I\neq\emptyset$, applying recursion~\ree{eq:I^-} yields \begin{equation}\label{eq:dat-1} d(I;-1)=\binom{-1}{m}d(I^-;m)-d(I^-;-1)=(-1)^m d(I^-;m)-d(I^-;-1). \end{equation} By Lemma~\ref{lem:pos} we have $d(I^-;m)>0$. And by induction, the sign of $d(I^-;-1)$ is $(-1)^{m^-}$ where $m^-=\max(I^-\cup\{0\})$.
So if $m$ and $m^-$ have opposite parity, then the result follows from \eqref{eq:dat-1}. If they have the same parity, then $m\geq m^-+2$. Applying Proposition \ref{prop:ineqonN} to $I^-$ we get $d(I^-;m) \geq |d(I^-;-1)|$. So, using equation~\eqref{eq:dat-1} again, the sign of $d(I;-1)$ is $(-1)^m$ in this case as well. \end{proof}
\section{Roots} \label{sec:roo}
We defined $d(I;n)$ only for $n>m$ because we wished to count a nonempty set of permutations. However, by Theorem~\ref{th:mac}, $d(I;n)$ is a polynomial in $n$ so we can extend the definition to $d(I;z)$ for any complex number $z$. In this context, it makes sense to talk about the roots of $d(I;z)$ and we study them in this section. We start by showing that elements of $I$ are roots of $d(I;z)$, a result analogous to one for peak polynomials \cite{bft:crp}.
\begin{thm} \label{roots} If $I$ is a set of positive integers and $i\in I$ then $d(I;i)=0$. \end{thm}
\begin{proof}
We induct on $\#I$ using the recursion~\ree{eq:I^-}. The result is vacuously true when $I$ is empty. If $i\in I^{-}$ then, by the induction hypothesis, $d(I^{-};i)=0$. Also $\binom{i}{m}=0$ since $i<m$. Substituting these values into~\ree{eq:I^-} shows that $d(I;i)=0$. The only other case is $i=m$. But then, using equation~\ree{eq:I^-} again, we have that $$d(I;m)=\binom{m}{m}d(I^{-};m)-d(I^{-};m)=0,$$ as desired.
\end{proof}
Now that we have established that the elements of $I$ are themselves roots of $d(I;z)$, the remainder of this section focuses on understanding the remaining roots of this polynomial lying in the complex plane. Throughout we denote by
$|z_0|$, ${\mathfrak R}(z_0)$ and ${\mathfrak I}(z_0)$ the norm, real and imaginary parts, respectively, of the complex number $z_0$.
We begin by commenting on the analogous problem for peak polynomials. Billey, Fahrbach and Talmage \cite{bft:crp} extensively studied the roots of peak polynomials. Their observations led to the following conjecture regarding the position of the roots in the complex plane. \begin{conj}[\cite{bft:crp}] \label{con:peakroots} For any admissible $I$ and $z_0 \in{\mathbb C}$ which is a root of $p(I;z)$, we have \begin{enumerate}
\item $|z_0|\le m$, and \item ${\mathfrak R}(z_0)\ge-3$. \end{enumerate} \end{conj}
In fact, in Section 2 of their paper, Billey, Fahrbach and Talmage establish that Theorem~\ref{thm:dlhio} for peak polynomials is implied by this conjecture. They verified Conjecture~\ref{con:peakroots} computationally for all polynomials $p(I;z)$ where $m \leq 15$. We have computed the roots of descent polynomials $d(I;z)$ for all sets $I$ with $m \leq 12$ and arrived at a similar, but more restrictive, conjecture.
\begin{conj} \label{con:roots} For any $I$ and $z_0\in{\mathbb C}$ which is a root of $d(I;z)$ we have \begin{enumerate}
\item $|z_0|\le m$, and \item ${\mathfrak R}(z_0)\ge-1$. \end{enumerate} \end{conj}
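As a small example, Proposition~\ref{prop:recursion} gives $$d(\{1,3\};z)=2\binom{z}{3}-(z-1)=\frac{(z+1)(z-1)(z-3)}{3},$$ whose roots are $1$ and $3$, the elements of $I$ as guaranteed by Theorem~\ref{roots}, together with $-1$. Both bounds of the conjecture hold here, and the root at $-1$ shows that the bound ${\mathfrak R}(z_0)\ge-1$ cannot be improved.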
We start by establishing that this conjecture holds for $\#I=1$ by ad hoc means. Although this approach does not seem to generalize, it gives some intuition about why the two bounds hold. \begin{thm} \label{thm:roots I=m} If $I=\{m\}$ and $d(I;z_0)=0$ then \begin{enumerate}
\item $|z_0|\le m$, and \item ${\mathfrak R}(z_0)\ge-1$. \end{enumerate} \end{thm} \begin{proof} Consider the equation $$ 0=d(I;z)=\binom{z}{m}-1. $$
First suppose that $|z|>m$. Then, by the triangle inequality, $|z-k|\ge |z|-k>m-k$ and it follows that $$
\left| \binom{z}{m}\right| = \frac{|z|\cdot |z-1|\cdots |z-m+1|}{m!}>1. $$ So such $z$ can not be a root of $d(I;z)$ and the first statement in the theorem is proved.
Now suppose ${\mathfrak R}(z)<-1$. Then $|z-k|\ge |{\mathfrak R}(z-k)|>k+1$ and the previous displayed equation still holds. This finishes the proof of the second statement. \end{proof}
We note that one can use similar techniques to show that if $I=\{1,m\}$ then the roots of $d(I;z)$ satisfy the conjecture. But since we were not able to push this method further we will not present the proof.
In order to establish further bounds for $|z_0|$, we introduce some necessary background on bounding the moduli of roots of polynomials. Recall that given a nonconstant polynomial $f(z)=\sum_{i=0}^dc_iz^i$, the maximum modulus of a root of $f(z)$ is bounded above by the Cauchy bound of $f$, denoted $\rho(f)$, which is the unique positive real solution to the equation \begin{equation} \label{eq:zero}
|c_0|+|c_1|z+\cdots+|c_{d-1}|z^{d-1}=|c_d|z^d, \end{equation} when $f$ is not a monomial, and zero otherwise \cite[Theorem 8.1.3]{rs:atp}.
Although the Cauchy bound of $f(z)$ is not given by an explicit formula, there are many results that provide explicit upper estimates for it, and hence for the moduli of the roots of $f(z)$. For example \cite[Corollary 8.1.8]{rs:atp} gives various bounds for $\rho(f)$ including
\begin{equation} \label{eq:simplebound}
\rho(f)<1+\;\;\max_{0\leq i\leq d-1}\left|\frac{c_i}{c_d}\right|, \end{equation} which we will use in the proof of Theorem \ref{thm:roots}.
It is possible to obtain bounds for polynomials expressed in other bases, such as Newton bases, which we define now. Given a sequence of complex numbers $\xi_1,\xi_2,\ldots,$ the polynomials \[P_k(z)=\prod_{i=1}^{k}(z-\xi_i),\] $k\ge0$, form a basis for the vector space of all real polynomials called the Newton basis with respect to the nodes $\xi_1,\xi_2,\ldots$. Furthermore, since $\deg(P_k(z))=k$, the set $\{P_0(z),P_1(z),\ldots,P_d(z)\}$ forms a basis for the vector space of real polynomials of degree at most $d$, for any $d$. \begin{thm}[Theorem 8.6.3 in \cite{rs:atp}]\label{thm:Union}
Let $f(z)=\sum_{k=0}^dc_kP_k(z)$ be a polynomial of degree $d$ where the $P_k$'s form the Newton basis with respect to the nodes $\xi_1,\ldots,\xi_d$. Then $f$ has all of its zeros in the union of the discs \begin{equation} \label{eq:discs}
{\cal D}_k:=\{z\in\mathbb{C} \mid |z-\xi_k|\leq\rho\}, \end{equation} where $k=1,\ldots,d$ and $\rho$ is the Cauchy bound of $\sum_{k=0}^d c_kz^k$.
\qed \end{thm} Theorem \ref{thm:Union} played an important role in the work of Brown and Erey that improved known bounds for the moduli of the roots of chromatic polynomials for dense graphs \cite{be:nbcpcr}. We will use this result to make progress on Conjecture~\ref{con:roots}. Because of recursion~\ree{eq:I^-} we consider the Newton bases with respect to the nodes $0,1,2,3,\ldots$, which is \[ z\hs{-2pt}\downarrow_k = z(z-1) \cdots (z-k+1), \] $k\ge0$. This is known as the \emph{falling factorial basis}. Expanding $d(I;z)$ in terms of this basis and using the previous theorem immediately gives us the following bounds on the roots of $d(I;z)$. \begin{lem} \label{lem:CauchyBound} Suppose $d(I;z)=\sum_{k=0}^{{m}} c_k z\hs{-2pt}\downarrow_k.$ Then the roots of $d(I;z)$ lie in the union of the discs \[
{\cal D}_k=\{z \in{\mathbb C} \mid |z-k|\le \rho(I)\}, \] where $k=0,\ldots,m-1$ and $\rho(I)$ is the Cauchy bound of the polynomial $\sum_{k=0}^{{m}} c_k z^k.$
\qed \end{lem}
We now present bounds, linear in $m$, for roots of descent polynomials in the special cases when $\#I \leq 2$, and bounds which appear to be less tight for general $I$. We begin by revisiting the case when $\# I = 1$.
\begin{thm} \label{thm:I=m} Let $I=\{m\}$ and $$ \rho_m=\frac{m}{e}\sqrt[m]{me}. $$ Then the roots of $d(I;z)$ lie in the union of the discs $$
{\cal D}_k=\{z\in{\mathbb C} \mid |z-k|\le \rho_m\}, $$ where $k=0,\dots,m-1$. \end{thm} \begin{proof} By Lemma~\ref{lem:CauchyBound}, it suffices to show that $\rho(I) \leq \rho_m$. Since $d(I;z)=\binom{z}{m}-1$ has the same roots as $z\hs{-2pt}\downarrow_m - m!$, it suffices to show that $\rho_m$ is an upper bound for the unique positive real solution to the equation $z^m=m!$. This solution is $\sqrt[m]{m!}$, and comparing $\ln m!$ with $\int \ln x \ dx$ via Riemann sums establishes that $m!\le m^{m+1}/e^{m-1}$. The result follows. \end{proof}
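For example, when $m=4$ the positive real solution of $z^4=4!$ is $\sqrt[4]{24}\approx 2.21$, while $\rho_4=\frac{4}{e}\sqrt[4]{4e}\approx 2.67$, so $\rho_m$ is indeed an upper bound in this case.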
We can use the previous result to derive somewhat different bounds from those in Theorem~\ref{thm:roots I=m} for the special case $\# I=1$. \begin{cor} \label{cor:I=mroots} If $I=\{m\}$ and $d(I;z_0)=0$ then \begin{enumerate}
\item $|z_0|\le \rho_m+m-1$, \item ${\mathfrak R}(z_0)\ge-\rho_m$, and
\item $|{\mathfrak I}(z_0)| \le \rho_m$. \end{enumerate} Furthermore, for all $m\ge1$, we have $$ \frac{m}{e} < \rho_m\le m. $$ \end{cor} \begin{proof} Assertions 1, 2 and 3 follow immediately from the description of the discs in Theorem~\ref{thm:I=m}. To obtain the bounds on $\rho_m$, consider the function $f(m)=\sqrt[m]{me}$. Taking the derivative gives $$ f'(m)=\sqrt[m]{me}\cdot \frac{-\ln m}{m^2} \le 0, $$ for $m\ge1$. So $f(m)$ is decreasing on the interval $[1,\infty)$ and thus is bounded above by $f(1)=e$. Applying l'H\^opital's Rule shows that $\lim_{m\rightarrow\infty} f(m) = 1$ and this limit is a lower bound. The desired inequalities follow from observing $\rho_m = mf(m)/e$. \end{proof}
We note that close to the imaginary axis this corollary gives a tighter bound on $|{\mathfrak I}(z_0)|$ than Theorem~\ref{thm:roots I=m} since $\rho_m\le m$, reducing the area being considered in the earlier theorem by roughly half for large $m$. We now turn to the case $\#I=2$.
\begin{thm} \label{thm:size2} Let $I=\{\ell,m\}$ with $1 \leq \ell < m$. Then the roots of $d(I;z)$ lie in the union of the discs \[
{\cal D}_k = \{z \in \mathbb{C} \mid |z-k| \leq m\}, \] for $k=0,\ldots,m-1$.
\end{thm}
\begin{proof} We established through computation that the result is true for $m \leq 4$ so we assume $m \geq 5$. By definition, $I^{-}=\{\ell\}$, so by repeatedly applying equation~\ree{eq:I^-} we have \begin{align*} d(I;z) &= \binom{z}{m}d(I^{-};m) - d(I^{-};z) \\ &= \binom{z}{m}\left(\binom{m}{\ell}-1\right) - \binom{z}{\ell}+1 \\ &= \frac{1}{m!}\left(\binom{m}{\ell}-1\right) z\hs{-2pt}\downarrow_m- \frac{1}{\ell!} z\hs{-2pt}\downarrow_{\ell} + 1. \end{align*} Multiplying the previous equation by $\ell !$ and using Lemma~\ref{lem:CauchyBound}, the roots of $d(I;z)$ are contained in the union of the discs \[{\cal D}_k=
\{z \in \mathbb{C} \mid |z-k| \leq \rho \}, \hspace{0.2in} k=0,1,\ldots,m-1, \] where $\rho$ is any upper bound on the unique positive real solution to the equation \[ \frac{\ell!}{m!} \left(\binom{m}{\ell} - 1 \right)z^m = z^{\ell}+\ell!. \] Since $\binom{m}{\ell} - 1\ge \binom{m}{\ell}/2$, replacing the former expression by the latter in the previous displayed equation just increases the unique positive real solution. Rewriting the result, it suffices to show that $m$ is an upper bound for the positive real solution of \[ z^{\ell}\left( \frac{1}{2} \frac{1}{(m-\ell)!}z^{m-\ell} - 1 \right)=\ell!. \] To do so, observe that $m^{\ell}>\ell!$ and $\frac{m^{m-\ell}}{(m-\ell)!}\ge m>4$. So evaluating the left side of the previous equality at $z=m$ gives \[ m^{\ell} \left( \frac{1}{2} \frac{1}{(m-\ell)!} \cdot m^{m-\ell}-1 \right) > \ell! \cdot \left(\frac{1}{2} \cdot 4 - 1 \right) = \ell! \] and so $m$ must exceed the unique positive real solution. \end{proof}
Similar to Corollary~\ref{cor:I=mroots}, we can use Theorem~\ref{thm:size2} to bound the norm, real and imaginary parts of roots of $d(I;z)$ when $\# I = 2$.
\begin{cor} If $\#I = 2$ and $d(I;z_0)=0$ then \begin{enumerate}
\item $|z_0|\le 2m-1$, \item ${\mathfrak R}(z_0)\ge-m$, and
\item $|{\mathfrak I}(z_0)| \le m$.
\qed \end{enumerate} \end{cor}
Similar bounds on the roots of $d(I;z)$ can be established when $\# I = 3$ by first repeatedly applying equation~\ree{eq:I^-} to express $d(I;z)$ as a linear combination of the falling factorials, and then applying a strategy like the one in the proof of Theorem~\ref{thm:size2}. But applying these techniques as $\# I$ grows becomes increasingly complicated, so it is not clear that this method will be able to produce a linear bound in general.
We now discuss how to find general bounds on the roots of $d(I;z)$ regardless of the size of $I$. We begin with the following result.
\begin{lem} \label{le:c_i's} We have $$ d(I;z) = c_0 + \sum_{k\in I} c_k z\hs{-2pt}\downarrow_k, $$ where $$
\frac{1}{k!} \le |c_k| \le 1, $$ for all $k\in I\cup\{0\}$. \end{lem} \begin{proof} Induct on $\#I$. We have $d(\emptyset;n)=1$ which satisfies the lemma. By induction we can write $$ d(I^-;z)= c_0^- + \sum_{k\in I^-} c_k^- z\hs{-2pt}\downarrow_k, $$ where $$
\frac{1}{k!} \le |c_k^-| \le 1, $$ for all $k\in I^-\cup\{0\}$. Now using equation~\ree{eq:I^-} we have that \begin{align*} d(I;z)&= \binom{z}{m} d(I^-;m) - d(I^-;z)\\[5pt] &=\frac{d(I^-;m)}{m!} z\hs{-2pt}\downarrow_m - \left( c_0^- + \sum_{k\in I^-} c_k^- z\hs{-2pt}\downarrow_k\right)\\[5pt] &=-c_0^- - \sum_{k\in I^-} c_k^- z\hs{-2pt}\downarrow_k + c_m z\hs{-2pt}\downarrow_m, \end{align*} where $c_m=d(I^-;m)/m!$. The lemma now follows for $k<m$ from the bounds on the $c_k^-$, and for $k=m$ from the fact that $1\le d(I^-;m)\le m!$. \end{proof}
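For instance, for $I=\{1,3\}$ we have $d(\{1,3\};z)=2\binom{z}{3}-\binom{z}{1}+1=\frac{1}{3}\, z\hs{-2pt}\downarrow_3 - z\hs{-2pt}\downarrow_1 + 1$, so the nonzero coefficients occur exactly at the indices in $I\cup\{0\}$ and satisfy the stated bounds, namely $\frac{1}{3!}\le \frac{1}{3}\le 1$, $\frac{1}{1!}\le|-1|\le 1$ and $|c_0|=1$.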
The previous lemma permits us to find general bounds for the roots of $d(I;z)$.
\begin{thm} \label{thm:roots} Let $I$ satisfy $\# I \geq 2$, and let $m^{-}= \max I^{-}$. Furthermore let \begin{equation} \label{eq:min} \rho=\min\left(m!+1,\ (m! \cdot \# I)^{1/(m-m^-)}\right). \end{equation} The roots of $d(I;z)$ all lie in the union of the discs \begin{equation} \label{m!+1}
{\cal D}_k=\{z\in\mathbb{C}\mid\; |z-k|\le \rho\}, \end{equation} where $k=0,1,\dots,m-1$. In particular, if $d(I;z_0)=0$ then \begin{enumerate}
\item $|z_0|\le \rho+m-1$, \item ${\mathfrak R}(z_0)\ge -\rho$, and
\item $|{\mathfrak I}(z_0)|\le \rho$. \end{enumerate} \end{thm}
\begin{proof} The bounds on $|z_0|$, ${\mathfrak R}(z_0)$ and $|{\mathfrak I}(z_0)|$ all follow from~\ree{m!+1}. Define coefficients $c_k$ as in Lemma \ref{le:c_i's}. To prove~\ree{m!+1} itself, it suffices to show that $\rho$ is an upper bound for the unique positive real solution of $$
|c_m| z^m = |c_0|+\sum_{k\in I^-} |c_k| z^k. $$
Replacing $|c_m|$ by its smallest possible value and the other $|c_k|$ by their largest possible value will only increase the value of the positive solution. So, using the bounds on the $c_k$, it suffices to show that $\rho$ is an upper bound on the unique positive real solution of \begin{equation}\label{eq:auxeq} \frac{1}{m!} z^m = 1 +\sum_{k\in I^-} z^k. \end{equation}
Applying equation~\ree{eq:simplebound} establishes that $\rho \leq m!+1$. On the other hand, since $z^k \leq z^{m-}$ for all $k \in I^{-}$ and real $z \geq 1$, $\rho$ is bounded above by the unique positive real solution of the equation $z^m/m! = (\# I) \cdot z^{m^{-}}$, which is $(m! \cdot \# I)^{1/(m-m^-)}$. \end{proof}
On the right side of~\ree{eq:min} the first argument achieves the minimum if $m-m^-=1$, since then the assumed bound $\#I\ge2$ yields $$ (m!\cdot \#I)^{1/(m-m^-)}\ge 2 m!>m!+1. $$ But if $m-m^-\ge2$ then the second argument is smaller since $$ (m!\cdot \#I)^{1/(m-m^-)}\le (m\cdot m!)^{1/2} < m!+1. $$ In fact, if $m^-$ is held constant and $m\rightarrow\infty$ then the bound grows only linearly in $m$. An illustration of these two cases is given in Figure~\ref{fig:boundsm4}, where the graph on the left is for $I=\{1,3,4\}$ and the one on the right is for $I=\{1,2,4\}$.
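Concretely, for $I=\{1,3,4\}$ we have $m-m^-=1$ and so $\rho=\min(4!+1,\ 4!\cdot 3)=25$, whereas for $I=\{1,2,4\}$ we have $m-m^-=2$ and $\rho=\min(25,\sqrt{4!\cdot 3})=\sqrt{72}\approx 8.49$, which is why the bounding discs in the right-hand graph of Figure~\ref{fig:boundsm4} are so much smaller.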
\begin{figure}
\caption{The roots of descent polynomials for $I=\{1,3,4\}$ and $I=\{1,2,4\}$ are plotted as dots and the corresponding
bounding discs from Theorem \ref{thm:roots} are shaded in grey.}
\label{fig:boundsm4}
\end{figure}
We now use a technique from linear algebra to obtain a different sort of restriction on the roots of $d(I;n)$. In fact, we will restrict the position of the zeros of any polynomial whose expansion in the falling factorial basis has nonnegative coefficients. Because of the generality of this result, it will often be less restrictive than Theorem~\ref{thm:roots}. But along the real axis it will give a linear bound for any $I$ and so it will be an improvement.
Throughout the remainder of this section, we move freely between a complex number $z=x+iy$ and the vector ${\bf v}=(x,y)\in{\mathbb R}^2$. So if $z=\rho e^{i\theta}$ then we call $\theta$ an {\em argument} of ${\bf v}$ and write $\arg {\bf v}=\theta$. The {\em principal value} of ${\bf v}$, denoted $\Arg {\bf v}$, is the argument of ${\bf v}$ satisfying $-\pi<\Arg{\bf v}\le\pi$. It will be convenient to let $\Arg (0,0) = \infty$.
Given vectors ${\bf v}_0,\dots,{\bf v}_m$ we say they are {\em nonnegatively linearly independent} if the only linear combination $c_0{\bf v}_0+\dots+c_m{\bf v}_m=(0,0)$ with all the $c_i$ nonnegative is the trivial combination where $c_0=\dots=c_m=0$. Otherwise the vectors are {\em nonnegatively linearly dependent}. An {\em open half-plane} consists of all points on one side of a line $L$ through the origin. The corresponding {\em closed half-plane} is obtained by also including the points on $L$. The easy backward direction of the following lemma is a well-known tool in the literature. But we present a proof for completeness as well as showing that the two statements are actually equivalent. \begin{lem} \label{nli} Vectors ${\bf v}_0,\dots,{\bf v}_m$ are nonnegatively linearly independent if and only if they all lie in some open half-plane. \end{lem} \begin{proof} If the vectors all lie in an open half-plane then clearly so will any nontrivial nonnegative linear combination. Since the half-plane is open, such a linear combination can not be zero.
Now suppose the vectors do not lie in any open half-plane. There are two cases. If they all lie in a closed half-plane then, since they do not lie in any open half-plane, there must be two of the vectors, say ${\bf v}_0$ and ${\bf v}_1$, such that ${\bf v}_0=-c {\bf v}_1$ for some scalar $c>0$. Thus ${\bf v}_0+c{\bf v}_1=(0,0)$ and the vectors are nonnegatively linearly dependent.
Now suppose that the vectors do not lie in any closed half-plane and consider the vector ${\bf v}_0$. We will find two other vectors satisfying a nonnegative linear dependence relation with ${\bf v}_0$. Rotating each of ${\bf v}_0,\dots,{\bf v}_m$ through the angle
$-\Arg {\bf v}_0$, we can assume that ${\bf v}_0$ lies along the positive $x$-axis. Since all the vectors do not lie in the half-plane $x\ge0$ there must be some vector, say ${\bf v}_1$, with $|\Arg{\bf v}_1|>\pi/2$. Consider the line $L$ through ${\bf v}_1$. Note that by construction, ${\bf v}_0$ and the negative $x$-axis are on opposite sides of $L$. And, by the closed half-plane hypothesis again, there must be some ${\bf v}_2$ on the same side of $L$ as the negative $x$-axis but on the opposite side of the $x$-axis from ${\bf v}_1$. It follows that there is some nonnegative linear combination $a{\bf v}_1+b{\bf v}_2$ which lies on the negative $x$-axis. So $a{\bf v}_1+b{\bf v}_2=-c{\bf v}_0$ for $c> 0$ which gives the nonnegative linear dependency $c{\bf v}_0+a{\bf v}_1+b{\bf v}_2=(0,0)$. \end{proof}
Since the linear dependencies in the previous proof only involve at most three vectors, we have actually proved the following result. \begin{lem} Vectors ${\bf v}_0,\dots,{\bf v}_m$ are nonnegatively linearly independent if and only if any three of them lie in an open half-plane.
\qed \end{lem}
To make the connection with roots of polynomials, let $P_m(z)$ be the vector space of polynomials in a variable $z$ with real coefficients and let ${\cal B}(z)=\{b_0(z),\dots,b_m(z)\}$ be a basis for $P_m(z)$. Consider the subset of $P_m(z)$ defined by $$ P_{\cal B}(z)=\left\{f(z)\neq 0 \mid \text{$f(z) =\sum_{k=0}^m c_k b_k(z)$ with $c_k\ge0$ for all $k$}\right\}, $$ where in the above definition $0$ represents the zero polynomial. Translating Lemma~\ref{nli} into this language we immediately have the following result. \begin{cor} \label{P_B(z)} The complex number $w$ is not a root of any polynomial in $P_{\cal B}(z)$ if and only if the vectors corresponding to the complex numbers in ${\cal B}(w)$ lie in some open half-plane.
\qed
\end{cor} We now specialize to the falling factorial basis $\{z\hs{-2pt}\downarrow_k | \ k\ge 0\}$. As usual $\bar{z}$ denotes the complex conjugate of $z$, and if $S$ is a set of complex numbers, then we let $\ol{S}=\{\bar{z} \mid z\in S\}$. \begin{thm} \label{cF} Let $$ {\cal F}(z)=\{z\hs{-2pt}\downarrow_0,\dots,z\hs{-2pt}\downarrow_m\}. $$ The complex number $w$ is not a root of any polynomial in $P_{\cal F}(z)$ if and only if $w$ is in the region $R=S\cup\ol{S}$ where \begin{equation} \label{region} S=\left\{z\in {\mathbb C} \mid \text{$\Arg z \ge 0$ and $\sum_{i=1}^m \Arg(z-i+1)<\pi$}\right\}. \end{equation}
\end{thm} \begin{proof} Since the coefficients of polynomials $f(z)\in P_{\cal F}(z)$ are real, we have $f(w)=0$ if and only if $f(\bar{w})=0$. So, letting $R$ be the region of $w$ which are not roots of any such $f(z)$, we have $R=S\cup\ol{S}$ where $S=\{z\in R \mid \Arg z\ge0\}$. So it suffices to show that $S$ is given as in the statement of the theorem. Equivalently, by the previous corollary, we must show that for $z$ with $\Arg z\ge 0$ we have $z\in S$ as defined by equation~\ree{region} if and only if the elements of ${\cal F}(z)$ lie in an open half-plane.
Suppose first that the sum inequality in~\ree{region} holds for $z$. Since $z\hs{-2pt}\downarrow_0=1$, we wish to show that for $1\le k \le m$ the complex numbers $z\hs{-2pt}\downarrow_k$ lie either on the positive $x$-axis or in the open half-plane above the $x$-axis. For then the elements of ${\cal F}(z)$ will lie in the open half-plane above the line $y=\epsilon x$ for a sufficiently small negative $\epsilon$. Since $\Arg z\ge 0$, we have $\Arg(z-r)\ge 0$ for all reals $r$. Using this and the fact that $1\le k\le m$, we have $$ 0 \le \sum_{i=1}^k \Arg(z-i+1) \le \sum_{i=1}^m \Arg(z-i+1)<\pi. $$ But $z\hs{-2pt}\downarrow_k=\prod_{i=1}^k (z-i+1)$, so the displayed inequalities imply $0\le \Arg(z\hs{-2pt}\downarrow_k)<\pi$ which is what we wished to show.
To complete the proof we must show that if $\sum_{i=1}^m \Arg(z-i+1)\ge\pi$ then the elements of ${\cal F}(z)$ will not all lie in any open half-plane. From the argument in the preceding paragraph we see that $s_k:=\sum_{i=1}^k \Arg(z-i+1)$ is an increasing function of $k$. And $s_0=0$. Thus there must be a nonnegative integer $\ell$ such that $s_\ell<\pi\le s_{\ell+1}$. If $s_{\ell+1}=\pi$ then $z\hs{-2pt}\downarrow_0$ and $z\hs{-2pt}\downarrow_{\ell+1}$ are nonnegatively linearly dependent and we are done by Lemma~\ref{nli}. If $s_{\ell+1}>\pi$ then we must have $0<\Arg z <\pi$. It follows that $0< \Arg(z-\ell)<\pi$. Since $z\hs{-2pt}\downarrow_{\ell+1}=(z-\ell) z\hs{-2pt}\downarrow_\ell$, the previous inequalities force a point on the negative $x$-axis to be a nonnegative linear combination of $z\hs{-2pt}\downarrow_\ell$ and $z\hs{-2pt}\downarrow_{\ell+1}$. So, together with $z\hs{-2pt}\downarrow_0=1$ we have a nonnegative linear dependency in this case as well. This concludes the proof of the theorem. \end{proof}
Finally, we return to descent polynomials. If $S$ is any set of complex numbers and $w\in{\mathbb C}$ then let $S+w =\{z+w\mid z\in S\}$. \begin{cor} \label{co:lin alg} Let $I$ be a finite set of positive integers. Then any element of $R+m$ where $R$ is defined as in Theorem~\ref{cF} is not a root of $d(I;z)$. \end{cor} \begin{proof} By Theorem~\ref{comb interp}, we can write $$ d(I;z)=\sum_{k=0}^m a_k(I) \binom{z-m}{k}=\sum \frac{a_k(I)}{k!} (z-m)\hs{-2pt}\downarrow_k, $$ where $a_k(I)/k!\ge0$ for all $k$. So $f(z):=d(I;z+m)\in P_{\cal F}(z)$. Applying the previous theorem and using the fact that $z\in R+m$ if and only if $z-m\in R$ finishes the proof. \end{proof}
Figure \ref{fig:RootBounds411and415cropped} plots all of the roots of descent polynomials corresponding to subsets $I \subseteq [4]$ as small dots; the worst-case bounds described in Theorem \ref{thm:roots} for such roots are shaded in light grey, and the dark grey arc is the region $R+4$ where $R$ is as described in Theorem \ref{cF}. The image on the right gives a close-up view of the region $R+4$ near the real axis. While in the first image the region $R+4$ appears to be bounded by a curve passing through the real axis near $z=6.65$, it actually passes through the real axis at $z=7$ and then curves back to include complex numbers whose real parts are less than $7$.
We can use the previous corollary to get our best bound for the size of roots along the positive $x$-axis which holds for general $I$. \begin{prop} If $z_0$ is a real root of $d(I;z)$ then $z_0\le 2m-1$. \end{prop} \begin{proof} For a real number $z_0$ we have $\Arg z_0 =0$ if $z_0>0$ and $\Arg z_0=\pi$ if $z_0<0$. So to be in the region $S$ of equation~\ree{region} we must have $z_0>m-1$. Applying Corollary~\ref{co:lin alg} we see that if $z_0>2m-1$ then it can not be a zero of $d(I;z)$ and the result follows. \end{proof}
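The real-root bound can be checked numerically on small cases. The following Python sketch is our own illustration (the helper names and the choice $I=\{1,3\}$ are ours, not part of the argument): it counts $d(I;n)$ by brute force, interpolates the degree-$m$ polynomial, and verifies that its real roots are at most $2m-1$.
\begin{verbatim}
from itertools import permutations
import numpy as np

def descent_set(pi):
    return {i + 1 for i in range(len(pi) - 1) if pi[i] > pi[i + 1]}

def d(I, n):
    # number of permutations of [n] with descent set exactly I
    return sum(1 for pi in permutations(range(1, n + 1))
               if descent_set(pi) == set(I))

I = {1, 3}
m = max(I)
ns = list(range(m + 1, 2 * m + 2))          # m+1 interpolation points
coeffs = np.polyfit(ns, [d(I, n) for n in ns], m)
real_roots = [r.real for r in np.roots(coeffs) if abs(r.imag) < 1e-8]
print(real_roots, all(r <= 2 * m - 1 + 1e-6 for r in real_roots))
\end{verbatim}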
\begin{figure}
\caption{Roots of descent polynomials $d(I;n)$ with $I \subseteq [4]$ plotted inside the
two bounding regions and close-up view of the region $R+4$ near the real axis.}
\label{fig:RootBounds411and415cropped}
\end{figure}
\section{Other Coxeter groups} \label{sec:ocg}
Recall that for any finite Coxeter system $(W,S)$, the {\em (right) descent set} of $w\in W$ is \begin{equation} \label{Des w}
\Des w=\{s\in S\ |\ \ell(ws)<\ell(w)\}, \end{equation} where $\ell$ is the length function. In this section we will consider the Coxeter groups $B_n$ and $D_n$. We will use symbols near the beginning of the Greek alphabet for elements of $B_n$ and $D_n$ to distinguish them from the permutations in $A_{n-1}={\mathfrak S}_n$.
We view $B_n$ as the group of signed permutations $\beta=\beta_1\dots\beta_n$ where $\beta_i\in\{\pm1,\dots,\pm n\}$ for all $i \in [n]$
and the sequence $|\beta_1|\dots|\beta_n|$ is a permutation in $A_{n-1}$, and we view $D_n$ as the subgroup of $B_n$ consisting of all $\beta=\beta_1\dots\beta_n$ where there are an even number of $\beta_i$ in $\{-1,-2,\dots, -n\}$. Since $D_n$ is a subgroup of $B_n$, the notation defined below in terms of $B_n$ also applies to $D_n$. We will use the common convention that $-b$ will be written as $\bar{b}$. For example two elements of $B_6$ are $\beta=\bar{3} 4 \bar{1} \bar{5} 6 2$ and $\gamma= \bar{3}\bar{4}\bar{1}\bar{5} 6 2$, and the second element is also an element of the subgroup $D_6$, whereas the first is not.
The simple reflections in $B_n$ are $S_B=S_A\cup\{s_0\}$ where $s_0=(1,\bar{1})$ and $S_A$ denotes the set of adjacent transpositions generating the Coxeter group of type $A_{n-1}$.
Identifying reflections and subscripts as we have done in the symmetric group, we see that for $\beta\in B_n$ we have $\Des\beta\subseteq[n-1]\cup\{0\}$. Because of this, it will be convenient to extend permutations in $B_n$ by writing $\beta=\beta_0\beta_1\dots\beta_n$ where $\beta_0=0$. In this notation, our previous examples would be written $\beta=0 \bar{3} 4 \bar{1} \bar{5} 6 2$ and $\gamma=0 \bar{3}\bar{4}\bar{1}\bar{5} 6 2$. Translating definition~\ree{Des w} using our conventions, we see that if $\beta=\beta_0\beta_1\dots\beta_n\in B_n$ then \begin{equation} \label{Des be}
\Des\beta = \{i\ge0\ |\ \beta_i>\beta_{i+1}\} , \end{equation} where we are using the usual order on the integers for the inequalities. To continue our examples in $B_6$, we have $\Des\beta=\{0, 2, 3, 5\}$ and $\Des\gamma=\{0, 1, 3, 5\}$.
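For concreteness, the following short Python sketch (ours, purely illustrative) computes the type $B$ descent set using the convention $\beta_0=0$; it reproduces the two descent sets just computed.
\begin{verbatim}
def des_B(beta):
    # descent set of a signed permutation, with beta_0 = 0 prepended
    ext = (0,) + tuple(beta)
    return {i for i in range(len(beta)) if ext[i] > ext[i + 1]}

print(des_B((-3, 4, -1, -5, 6, 2)))     # {0, 2, 3, 5}
print(des_B((-3, -4, -1, -5, 6, 2)))    # {0, 1, 3, 5}
\end{verbatim}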
Now given a finite set of nonnegative integers $I$ and $n>m$ where $m$ continues to be defined by equation~\ree{eq:m}, we let \begin{equation} \label{D_B def}
D_B(I;n)=\{\beta\in B_n\ |\ \Des(\beta)=I\} \qmq{and} d_B(I;n)=\#D_B(I;n). \end{equation} We will first derive a recursive formula for $d_B(I;n)$ analogous to the one for $d(I;n)$ in Proposition~\ref{prop:recursion}.
\begin{thm}\label{recursiveBn}
Let $I$ be a nonempty, finite set of nonnegative integers. Then we have
\begin{equation} d_B(I;n)=\binom{n}{m} 2^{n-m} d_B(I^-;m) - d_B(I^-;n). \label{eq:recB} \end{equation}
\end{thm}
\begin{proof} Consider the set $P$ of signed permutations $\beta \in B_n$ which can be written as a concatenation $\beta=0\beta'\beta''$ satisfying \begin{enumerate} \item $\#\beta'=m$ and $\#\beta''=n-m$, and \item $\Des\beta'=I^-$ and $\beta''$ is increasing. \end{enumerate} We can write $P$ as the disjoint union of those $\beta$ where $\beta_m'>\beta_1''$ and those where the reverse inequality holds. So $\#P = d_B(I;n)+d_B(I^-;n)$.
On the other hand, the elements of $P$ can be constructed as follows. Pick a subset $S$ of $m$ elements of $[n]$ which can be done in $\binom{n}{m}$ ways. Form a signed permutation from the elements of $S$ whose descent set is $I^-$ which can be done in $d_B(I^-;m)$ ways. Next choose the sign of the $n-m$ elements in $[n] - S$ which can be done in $2^{n-m}$ ways. Then arrange them in increasing order to form $\beta''$ which can be done in only one way. It follows that $\#P=\binom{n}{m}2^{n-m} d_B(I^-;m)$. Comparing this with the expression for $\#P$ at the end of the previous paragraph completes the proof. \end{proof}
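The recursion is also easy to confirm by brute force for small parameters. The sketch below (ours; the helper names are illustrative) enumerates $B_n$ directly and checks equation~\eqref{eq:recB} for $I=\{1,3\}$, $m=3$, $I^-=\{1\}$ and $n=5$.
\begin{verbatim}
from itertools import permutations, product
from math import comb

def des_B(beta):
    ext = (0,) + tuple(beta)
    return {i for i in range(len(beta)) if ext[i] > ext[i + 1]}

def d_B(I, n):
    # brute-force count over all n! * 2^n signed permutations
    I = set(I)
    total = 0
    for pi in permutations(range(1, n + 1)):
        for signs in product((1, -1), repeat=n):
            if des_B(tuple(s * p for s, p in zip(signs, pi))) == I:
                total += 1
    return total

I, m, n = {1, 3}, 3, 5
lhs = d_B(I, n)
rhs = comb(n, m) * 2 ** (n - m) * d_B({1}, m) - d_B({1}, n)
print(lhs, rhs, lhs == rhs)
\end{verbatim}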
Next we prove the type $B$ analogue of Theorem~\ref{PIE}. To state it, we let $$I^+=I - \{0\}.$$
Also, if $J$ is a set of positive integers then we will let $\delta_1(J)$ denote the first component of the composition $\delta(J)$. Note that
$$
\delta_1(J)=\case{\min J}{if $J\neq\emptyset$,}{n}{if $J=\emptyset$.}
$$
\begin{thm} \label{PIEB} If $I$ is a set of nonnegative integers with $\# I^+=k$, then \begin{equation} d_B(I;n)=\sum_{i\ge0} (-1)^{k-i} \sum_{J\in \binom{I^+}{i}} \binom{n}{\delta(J)} \cdot \case{2^{n-\delta_1(J)}}{if $0\not\in I$,}{(2^n-2^{n-\delta_1(J)})}{if $0\in I$.} \label{eq:Balt} \end{equation} \end{thm}
\begin{proof} We first consider the case where $0 \notin I$ so that $I=I^+$, and proceed by induction on $\#I$. If $I=\emptyset$, then $d_B(I;n)=1$. In this case, the right-hand side of equation \eqref{eq:Balt} also gives $\binom{n}{\delta(\emptyset)}=1$. We assume that the result holds for all sets $I$ not containing $0$ with $\#I\leq k$. Consider $\#I=k+1$ and $m=\max(I)$. Using recursion~\eqref{eq:recB}, and the induction hypothesis we have \begin{align*} d_B(I;n)&=\binom{n}{m}2^{n-m} \left[\sum_{i\ge0} (-1)^{k-i} \sum_{J\in \binom{I^-}{i}} \binom{m}{\delta(J)}2^{m-\delta_1(J)}\right] -\sum_{i\ge0} (-1)^{k-i} \sum_{J\in \binom{I^-}{i}} \binom{n}{\delta(J)}2^{n-\delta_1(J)}\\ &=\sum_{i\ge0}(-1)^{k+1-i}\left[\sum_{J\in\binom{I}{i},\;m\in J}\binom{n}{\delta(J)} 2^{n-\delta_1(J)}+\sum_{J\in\binom{I}{i},\; m\notin J}\binom{n}{\delta(J)}2^{n-\delta_1(J)}\right]\\ &=\sum_{i\ge0}(-1)^{k+1-i}\sum_{J\in\binom{I}{i}}\binom{n}{\delta(J)}2^{n-\delta_1(J)}. \end{align*} Since $I=I^+$ when $0 \notin I$, this completes the proof for this case.
Next we consider when $0 \in I$. If $I=\{0\}$ then Theorem~\ref{recursiveBn} shows $d_B(I;n)=2^n-1$, and the right-hand side of equation~\eqref{eq:Balt} above gives $\binom{n}{\delta(\emptyset)}(2^n-2^{n-n})$. So equation~\eqref{eq:Balt} holds in this case. The induction argument is exactly the same as that of the case when $0 \notin I$, but one replaces $2^{m-\delta_1(J)}$ with $2^m-2^{m-\delta_1(J)}$ and $2^{n-\delta_1(J)}$ with $2^n-2^{n-\delta_1(J)}$. \end{proof}
Using Theorems~\ref{PIE} and~\ref{PIEB}, we can also give a simple numerical relationship between the descent formulas in types $A$ and $B$.
\begin{cor}
\label{descentBA} Let $I$ be a finite set of positive integers and $I_0 = I \cup \{0\}$. Then
\vs{5pt}
\eqqed{ d_B(I;n)+d_B(I_0;n) = 2^n d(I;n). } \end{cor}
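As a small sanity check of this identity: for $I=\{1\}$ and $n=2$ one verifies directly that $d_B(\{1\};2)=3$ (the signed permutations $2\,1$, $2\,\bar{1}$ and $1\,\bar{2}$) and $d_B(\{0,1\};2)=1$ (only $\bar{1}\,\bar{2}$), so the left-hand side equals $4=2^2\, d(\{1\};2)$.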
Since the right-hand side of equation~\ree{eq:Balt} is well defined for all real numbers $n$, we use it to extend the definition of $d_B(I;n)$ to ${\mathbb R}$ and talk about its roots. The proof of the following theorem is similar to that of Theorem~\ref{roots} and so is omitted.
\begin{thm}\label{rootsB} If $I$ is a set of nonnegative integers and $i\in I$ then $d_B(I;i)=0$.
\qed \end{thm}
The remaining results of this section pertain to the Coxeter group $D_n$. We continue to use all the conventions for $B_n$ with this subgroup. In particular, we will use the same definition of $\Des\beta$ as in equation~\ree{Des be}, and the notation $D_D(I;n)$ and $d_D(I;n)$ is defined exactly as in equation~\ree{D_B def} except that $\beta$ runs over $D_n$ rather than $B_n$. Our results in type $D_n$ are very similar to those in type $B_n$ except with some changes imposed by using a different power of two and the intermingling of $d_D$ and $d_B$ in the same formula.
\begin{thm}\label{recursiveDn} Let $I$ be a nonempty, finite set of nonnegative integers. Then
\begin{equation} d_D(I;n)=\binom{n}{m} 2^{n-m-1} d_B(I^-;m) - d_D(I^-;n). \label{eq:recD} \end{equation} \end{thm}
\begin{proof} Consider the set $P$ of signed permutations $\beta \in D_n$ satisfying the same two conditions as in the proof of Theorem~\ref{recursiveBn}. As before, $\#P = d_D(I;n)+d_D(I^-;n)$.
An alternative construction of the elements of $P$ is as follows. Pick $m$ elements from $[n]$ which can be done in $\binom{n}{m}$ ways. Use those elements to create a type $B$ signed permutation $\beta'$ with descent set $I^-$ which can be done in $d_B(I^-;m)$ ways. Since a type $D_n$ permutation must have an even number of negative signs, of the remaining $n-m$ elements choose the sign of the first $n-m-1$ of them; the sign of the last element in the set of numbers appearing in $\beta''$ is then determined by the number of negative signs assigned previously. Thus choosing the signs of the elements appearing in $\beta''$ can be done in $2^{n-m-1}$ ways. Now form the unique increasing arrangement of these signed integers to form $\beta''$. It follows that $\#P=\binom{n}{m}2^{n-m-1} d_B(I^-;m)$ and we are done as in the proof of Theorem~\ref{recursiveBn}. \end{proof}
Next we can use Theorem \ref{recursiveDn} to prove a Type $D_n$ analogue of Theorems~\ref{PIE} and \ref{PIEB}.
As the proofs are similar to those we have seen before, we omit them.
\begin{thm} \label{PIED} If $I$ is a set of nonnegative integers with $\# I^+=k$, then $$
d_D(I;n)= \case{\displaystyle (-1)^k+\sum_{i>0} (-1)^{k-i} \sum_{J\in \binom{I^+}{i}} \binom{n}{\delta(J)} \cdot 2^{n-\delta_1(J)-1}}{if $0\not\in I$,} {\displaystyle (-1)^k(2^{n-1}-1)+\sum_{i>0} (-1)^{k-i} \sum_{J\in \binom{I^+}{i}} \binom{n}{\delta(J)} \cdot (2^{n-1}-2^{n-\delta_1(J)-1})}{if $0\in I$. \rule{0pt}{30pt}} $$ for all $n>m$.
\qed \end{thm}
Finally we present the analogues of Corollary~\ref{descentBA} and Theorem~\ref{rootsB} for type $D_n$.
\begin{cor}\label{recursiveDn1}
Let $I$ be a nonempty set of positive integers and $I_0=I\cup\{0\}$. Then
\begin{enumerate}
\item $d_D(I;n)+d_D(I_0;n) = 2^{n-1} d(I;n)$, and
\item $d_D(I;i)=d_D(I_0;i)=0$ whenever $i\in I^-$.
\qed
\end{enumerate}
\end{cor}
\section{Comments and open questions} \label{sec:coq}
We end with some comments about our results. These include avenues for future research and more conjectures.
{\bf (1) Consecutive pattern avoidance.} One way to unify Theorems~\ref{th:mac} and~\ref{th:bbs} is through the theory of consecutive pattern avoidance. Call two sequences of integers $a_1 a_2\dots a_k$ and $b_1 b_2\dots b_k$ {\em order isomorphic} provided $a_i<a_j$ if and only if $b_i<b_j$ for all pairs of indices $1\le i,j\le k$. Given $\sigma\in{\mathfrak S}_k$ called the {\em pattern}, we say that $\pi\in{\mathfrak S}_n$ {\em contains a consecutive copy of $\sigma$ at index $i$} if the factor $\pi_i\pi_{i+1}\dots\pi_{i+k-1}$ is order isomorphic to $\sigma$. If $\pi$ contains no consecutive copies of $\sigma$ then we say that $\pi$ {\em consecutively avoids} $\sigma$. Note that a consecutive copy of $21$ is just a descent while a peak is a consecutive copy of $132$ or $231$.
Given any finite set of patterns $\Pi$ and a finite set of positive integers $I$ define $$ \Pi(I;n)=\{\pi\in{\mathfrak S}_n \mid \text{$\pi$ has a consecutive copy of some $\sigma\in\Pi$ precisely at the indices in $I$}\}. $$ Also define the function $$ \av_\Pi(n) =\#\Pi(\emptyset;n), $$ the number of permutations in ${\mathfrak S}_n$ consecutively avoiding all permutations in $\Pi$. Given $\Pi\subseteq{\mathfrak S}_k$ say that $\Pi$ is {\em nonoverlapping} if for any (not necessarily distinct) $\sigma,\tau\in\Pi$ and any $l$ with $1<l<k$ the prefix of $\sigma$ of length $l$ is not order isomorphic to the suffix of $\tau$ of length $l$. We will now prove our analogue of Theorems~\ref{th:mac} and~\ref{th:bbs} in this setting. \begin{thm} \label{th:consec} Let $\Pi\subseteq{\mathfrak S}_k$ be a nonoverlapping set of patterns and let $I$ be a finite set of positive integers. Then for all $n\ge m+k-1$ we have $\#\Pi(I;n) \in V_\Pi$ where $V_\Pi$ is the vector space of all ${\mathbb Q}$-linear combinations of functions in the set $$ \{n^k \av_\Pi(n+l) \mid k\in{\mathbb Z}_{\ge0},\ l\in{\mathbb Z}\}. $$ \end{thm} \begin{proof} We induct on $m$. We have $\#\Pi(\emptyset;n)=\av_\Pi(n)$ and so the result clearly holds when $m=0$. For $m\ge1$, consider the set $P$ of permutations $\pi\in{\mathfrak S}_n$ which can be written as a concatenation $\pi=\pi'\pi''$ such that $\pi'\in\Pi(I^-;m)$ and $\pi''\in\Pi(\emptyset;n-m)$. Since $\Pi$ is nonoverlapping, copies of consecutive patterns from $\Pi$ in $\pi$ occur at the positions in $I^-$ and possibly also at exactly one of the indices $m,m-1,\dots,m-k+2$. It follows that $$ \#P=\#\Pi(I^-;n)+\#\Pi(I;n)+\sum_{i=1}^{k-2} \#\Pi(I^-\cup \{m-i\};n). $$
We can also construct the elements of $P$ as follows. Pick the $m$ elements of $[n]$ to be in $\pi'$ which can be done in $\binom{n}{m}$ ways. Arrange those elements to have consecutive copies of elements of $\Pi$ at the indices of $I^-$ which can be done in $\#\Pi(I^-;m)$ ways. Finally, put the remaining elements in $\pi''$ so that it avoids consecutive copies of elements of $\Pi$ which can be done in $\av_\Pi(n-m)$ ways. Equating the two counts for $P$ and rearranging terms we get $$ \#\Pi(I;n)=\binom{n}{m}\av_\Pi(n-m) \#\Pi(I^-;m)-\#\Pi(I^-;n)-\sum_{i=1}^{k-2} \#\Pi(I^-\cup \{m-i\};n), $$ from which the theorem follows by induction. \end{proof}
Note that if $\Pi=\{21\}$ then $\av_\Pi(n)=1$ for all $n$. So $V_\Pi={\mathbb Q}[n]$ and thus Theorem~\ref{th:mac} is a special case of the previous result. On the other hand, if $\Pi=\{132,231\}$ then $\av_\Pi(n)=2^{n-1}$ which explains the appearance of the power of $2$ in Theorem~\ref{th:bbs}. Theorem~\ref{th:consec} suggests that there might be other sets of patterns which would yield interesting enumerative results, and that such sets could be found by looking at $\Pi$ such that the numbers $\av_\Pi(n)$ have nice combinatorial properties.
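Both of these counts are easy to confirm by brute force; the following Python sketch (ours, for illustration only) checks that $\av_{\{21\}}(n)=1$ and $\av_{\{132,231\}}(n)=2^{n-1}$ for small $n$.
\begin{verbatim}
from itertools import permutations

def order_isomorphic(a, b):
    return all((a[i] < a[j]) == (b[i] < b[j])
               for i in range(len(a)) for j in range(len(a)))

def has_consecutive_copy(pi, patterns):
    k = len(next(iter(patterns)))
    return any(order_isomorphic(pi[i:i + k], s)
               for i in range(len(pi) - k + 1) for s in patterns)

def av(patterns, n):
    return sum(1 for pi in permutations(range(1, n + 1))
               if not has_consecutive_copy(pi, patterns))

for n in range(2, 8):
    assert av([(2, 1)], n) == 1                            # no descents
    assert av([(1, 3, 2), (2, 3, 1)], n) == 2 ** (n - 1)   # no peaks
print("checks passed")
\end{verbatim}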
{\bf (2) The sequence $(a_k(I))$.} On reading a version of this paper on the arXiv, Ferenc Bencs~\cite{ben:scs} has found a proof of Conjecture~\ref{con:lc}. But there is a stronger condition which could also be investigated. Consider a finite, real sequence $(a_k)_{0\le k\le n}$ and the corresponding generating function $f(x)=\sum_{k\ge0} a_k x^k$. It is well known that if the $a_k$ are positive and $f(x)$ has only real roots then the original sequence is log-concave. However, if one takes $I=\{1,3\}$ then the corresponding generating function is $f(x)=2x^3+6x^2+5x$ which has complex roots. So this stronger condition does not always apply to the $(a_k(I))$ sequence.
{\bf (3) Remarks on Conjecture~\ref{-1 basis}.} Bencs~\cite{ben:scs} has proved this conjecture as well. His argument is inductive, using the recursions we derived in Section~\ref{sec:rec} as well as Proposition~\ref{prop:c_0} as the base case. It would be very interesting to prove nonnegativity by finding a combinatorial interpretation of the $c_k(I)$. Also, one can now further improve the bounds of the roots of $d(I;n)$ on the left side of the $i$-axis by using the linear algebraic method from Section~\ref{sec:roo} on the binomial basis centered at $-1$.
{\bf (4) Limiting behavior of roots.} Bencs~\cite{ben:scs} has proved a result about the behavior of the roots of $d(I;n)$ for certain sets $I$. Given $I$, consider the set $I^k=I\cup\{m+1,m+2,\dots,m+k\}$. Using Neumaier's Gershgorin-type results on the location of polynomial roots~\cite{neu:ecz}, Bencs has demonstrated the following. \begin{thm} Suppose $I$ is a finite set of positive integers with $m-1\not\in I$. Then as $k\rightarrow\infty$ the roots of $d(I^k;n)$ converge to $[0,m+k]-\{m-1\}$.
\qed \end{thm}
{\em Acknowledgement.} We wish to thank Marcelo Aguiar for asking the question that led to this research. Thanks also to Marcelo Aguiar, Jason Brown, Petter Br\"and\'en, Ira Gessel, John Stembridge, and Richard Stanley for helpful discussions and useful references. {A. Diaz-Lopez thanks the AMS and Simons Foundation for support under the AMS-Simons Travel Grant.} {P.~E.~Harris was partially supported by NSF grant DMS--1620202.} {M. Omar thanks the Harvey Mudd College Faculty Research, Scholarship, and Creative Works Award.}
\end{document}
\begin{document}
\title{The extremal function for disconnected minors} \baselineskip 20pt \begin{abstract}
For a graph $H$ let $c(H)$ denote the supremum of $|E(G)|/|V(G)|$ taken over all non-null graphs $G$ not containing $H$ as a minor. We show that $$c(H) \leq \frac{|V(H)|+\brm{comp}(H)}{2}-1,$$ when $H$ is a union of cycles, verifying conjectures of Reed and Wood~\cite{ReeWoo14}, and Harvey and Wood~\cite{HarWoo15}.
We derive the above result from a theorem which allows us to find two vertex disjoint subgraphs with prescribed densities in a sufficiently dense graph, which might be of independent interest. \end{abstract}
\section{Introduction}
A classical theorem of Erd\H{o}s and Gallai determines the minimum number of edges necessary to guarantee existence of a cycle of length at least $k$ in a graph with a given number of vertices. (All the graphs considered in this paper are simple.)
\begin{thm}[Erd\H{o}s and Gallai~\cite{ErdGal59}]\label{thm:ErdGal} Let $k \geq 3$ be an integer and let $G$ be a graph with $n$ vertices and more than $(k-1)(n-1)/2$ edges. Then $G$ contains a cycle of length at least $k$. \end{thm}
One of the main results of this paper generalizes Theorem~\ref{thm:ErdGal} to a setting where, instead of a single cycle with prescribed minimum length, we are interested in obtaining a collection of vertex disjoint cycles. In the case when there are no restrictions on the lengths of cycles this problem was completely solved by Dirac and Justesen, who proved the following.
\begin{thm}[Dirac and Justesen~\cite{Just85}]\label{thm:Just} Let $k \geq 2$ be an integer and let $G$ be a graph with $n\geq 3k$ vertices and more than $$\max\left\{(2k-1)(n-k), n - \frac{(3k-1)(3k-4)}{2} \right\}$$ edges. Then $G$ contains $k$ vertex disjoint cycles. \end{thm}
We phrase our extensions of the above results in the language of minors. A graph $H$ is \emph{a minor} of a graph $G$ if a graph isomorphic to $H$ can be obtained from a subgraph of $G$ by contracting edges. Mader~\cite{Mader68} proved that for every graph $H$ there exists a constant $c$ such that every graph on $n \geq 1$ vertices with at least $cn$ edges contains $H$ as a minor. A well-studied extremal question in graph minor theory is determining the optimal value of $c$ for a given graph $H$. Denote by $v(G)$ and $e(G)$ the number of vertices and edges of a graph $G$, respectively. Following Myers and Thomason~\cite{Myers2005}, for a graph $H$ with $v(H) \geq 2$ we define $c(H)$ as the supremum of $e(G)/v(G)$ taken over all non-null graphs $G$ not containing $H$ as a minor. We refer to $c(H)$ as \emph{the extremal function of $H$}.
The extremal function of complete graphs has been extensively studied. Dirac~\cite{Dirac64}, Mader~\cite{Mader68}, J{\o}rgensen~\cite{Jorgensen94}, and Song and Thomas~\cite{SonTho06} proved that $c(K_t)=t-2$ for $t \leq 5$, $t \leq 7$, $t=8$ and $t=9$, respectively. Thomason~\cite{Thomason01} determined the precise asymptotics of $c(K_t)$, proving $$c(K_t)=(\alpha+o_t(1))t\sqrt{\log{t}},$$ for an explicit constant $\alpha=0.37...$. Myers and Thomason~\cite{Myers2005} have extended the results of~\cite{Thomason01} to general dense graphs, while Reed and Wood~\cite{ReeWoo14} and Harvey and Wood~\cite{HarWooAverage15} have recently proved bounds on $c(H)$ for sparse graphs, with the main result of~\cite{ReeWoo14} implying that $$c(H) \leq 3.895v(H)\sqrt{\ln d(H)},$$ for graphs $H$ with average degree $d(H) \geq d_0$ for some absolute constant $d_0$.
The extremal function was explicitly determined for several structured families of graphs. In particular, Chudnovsky, Reed and Seymour~\cite{ChuReeSey11} have shown that $c(K_{2,t})=(t+1)/2$ for $t \geq 2$, and Kostochka and Prince~\cite{KosPri10} proved that $c(K_{3,t})=t+3$ for $t \geq 6300$.
We determine the extremal function of $2$-regular graphs in which every component has an odd number of vertices. Let $kH$ denote the disjoint union of $k$ copies of the graph $H$. Note that Theorems~\ref{thm:ErdGal} and~\ref{thm:Just} imply that $c(C_k)=(k+1)/2$ for $k \geq 3$, and $c(kC_3)=2k-1$ for $k \geq 1$. For a general $2$-regular graph $H$ Reed and Wood~\cite{ReeWoo14} conjectured that $c(H) \leq 2v(H)/3 -1$, and Harvey and Wood~\cite[Conjecture 5.5]{HarWoo15} conjectured that $c(kC_r) \leq k(r+1)/2-1$ for $r \geq 3$, $k \geq 1$. Our first result verifies these conjectures.
\begin{thm}\label{thm:cycles} Let $H$ be a disjoint union of cycles. Then \begin{equation}\label{e:cycledensity} c(H) \leq \frac{v(H)+\brm{comp}(H)}{2}-1. \end{equation} \end{thm}
It is not hard to see and is shown in Section~\ref{sec:minors} that, if every component of $H$ is odd, then the bound (\ref{e:cycledensity}) is tight.
Theorem~\ref{thm:cycles} follows immediately from Theorem~\ref{thm:ErdGal} and the following more general result, which we prove in Section~\ref{sec:minors}.
\begin{thm}\label{thm:union} Let $H$ be a disjoint union of $2$-connected graphs $H_1$,$H_2$,\ldots,$H_k$. Then $$ c(H) \leq c(H_1)+c(H_2)+\ldots+c(H_k)+k-1. $$ \end{thm}
Theorem~\ref{thm:union} additionally allows us to determine the extremal function for the disjoint union of small complete minors.
\begin{cor}\label{cor:complete} $c(kK_t) = kt-k-1$ for $k\geq 1$ and $3 \leq t \leq 9$. \end{cor}
Let us note that the restriction on connectivity of components of $H$ in Theorem~\ref{thm:union} is an artefact of the proof method, and the following conjecture of Qian, which motivated our work, relaxes this restriction.
\begin{conj}[Qian~\cite{Qian}]\label{conj:main} Let $H$ be a disjoint union of non-null graphs $H_1$ and $H_2$. Then $$c(H) \leq c(H_1) + c(H_2)+1.$$ \end{conj}
We prove Theorem~\ref{thm:union} by showing that a graph $G$ with more than $(c(H_1)+c(H_2)+\ldots+c(H_k)+k-1)v(G)$ edges contains $k$ vertex disjoint subgraphs $G_1,\ldots, G_k$ such that $G_i$ is sufficiently dense to guarantee an $H_i$ minor for every $1 \leq i \leq k$. The bulk of the paper is occupied by the proof of the following technical theorem, which accomplishes that.
\begin{thm}\label{thm:main} Let $s,t \geq 1$ be real, and let $G$ be a non-null graph with $e(G) > (s+t+1)(v(G)-1)$. Then there exist vertex disjoint non-null subgraphs $G_1$ and $G_2$ of $G$ such that $e(G_1) > s(v(G_1)-1)$ and $e(G_2) > t(v(G_2)-1)$. \end{thm}
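For intuition, the conclusion of Theorem~\ref{thm:main} can be verified exhaustively on very small graphs. The Python sketch below is ours and is a naive search, not the rounding argument used in the actual proof; it finds such a pair of subgraphs in $K_7$ for $s=t=1$.
\begin{verbatim}
from itertools import combinations

def edges_within(E, S):
    return sum(1 for u, v in E if u in S and v in S)

def find_dense_pair(V, E, s, t):
    for a in range(1, len(V)):
        for A in combinations(V, a):
            rest = [v for v in V if v not in A]
            for b in range(1, len(rest) + 1):
                for B in combinations(rest, b):
                    if (edges_within(E, set(A)) > s * (a - 1)
                            and edges_within(E, set(B)) > t * (b - 1)):
                        return A, B
    return None

V = list(range(7))
E = list(combinations(V, 2))    # K_7: e(G) = 21 > (1+1+1)*(7-1)
print(find_dense_pair(V, E, 1, 1))
\end{verbatim}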
In Section~\ref{sec:minors} we derive Theorem~\ref{thm:union} from Theorem~\ref{thm:main}. We prove Theorem~\ref{thm:main} in Section~\ref{sec:proof}.
\section{Proof of Theorem~\ref{thm:union}}\label{sec:minors}
In this section we derive Theorem~\ref{thm:union} from Theorem~\ref{thm:main} and prove a couple of easy related results.
Theorem~\ref{thm:main} is naturally applicable to the following variant of the extremal function. For a graph $H$ with $v(H) \geq 3$ define $c'(H)$ to be the supremum of $e(G)/(v(G)~-~1)$ taken over all graphs $G$ with $v(G) > 1$ not containing $H$ as a minor. Theorem~\ref{thm:main} implies the following variant of Conjecture~\ref{conj:main}.
\begin{cor}\label{cor:c1} Let $H$ be a disjoint union of graphs $H_1$ and $H_2$ such that $v(H_1),v(H_2) \geq 3$. Then $$ c'(H) \leq c'(H_1)+c'(H_2)+1. $$ \end{cor} \begin{proof} Let $s = c'(H_1)$ and $t=c'(H_2)$. Clearly $s,t \geq 1$. Let $G$ be a non-null graph such that $e(G) > (s+t+1)(v(G)-1)$. Let $G_1$ and $G_2$ be the subgraphs of $G$ satisfying the conclusion of Theorem~\ref{thm:main}. Then $G_i$ contains $H_i$ as a minor for $i=1,2$. Therefore $G$ contains $H$ as a minor, as desired. \end{proof}
We derive Theorem~\ref{thm:union} from Corollary~\ref{cor:c1} using the following observation.
\begin{lem}\label{lem:c1} Let $H$ be a $2$-connected graph. Then $c'(H)=c(H)$. \end{lem} \begin{proof} Let $c=c(H)$. Clearly $c'(H) \geq c$. Suppose for a contradiction that $c'(H)>c$. Then there exists a graph $G$ such that $e(G)>c(v(G)-1)$ and $G$ does not contain $H$ as a minor. Let the graph $G_k$ be obtained from $k$ disjoint copies of $G$ by gluing them together on a single vertex. (That is, $G_k = G^1 \cup G^2 \cup \ldots \cup G^k$, where $G^i$ is isomorphic to $G$ for $1 \leq i \leq k$ and there exists $v \in V(G)$ such that $V(G^i) \cap V(G^j)=\{v\}$ for all $1 \leq i < j \leq k$.) It is well known that if a graph contains a $2$-connected graph as a minor then one of its maximal $2$-connected subgraphs also contains it. Thus $G_k$ does not contain $H$ as a minor. However, for sufficiently large $k$ we have $$\frac{e(G_k)}{v(G_k)} = \frac{ke(G)}{k(v(G)-1)+1}= c+\frac{k(e(G) - c(v(G)-1))-c}{k(v(G)-1)+1}>c(H),$$ a contradiction. \end{proof}
\noindent \emph{Proof of Theorem~\ref{thm:union}.} By Corollary~\ref{cor:c1} and Lemma~\ref{lem:c1} we have $$c(H) \leq c'(H) \leq \sum_{i=1}^kc'(H_i) + k-1 = \sum_{i=1}^kc(H_i) + k-1. \qquad~\qed$$
In the remainder of the section we discuss lower bounds on the extremal function. Let $\tau(H)$ denote \emph{the vertex cover number} of the graph $H$, that is, the minimum size of a set $X \subseteq V(H)$ such that $H - X$ is edgeless.
\begin{lem}\label{lem:tau} $c(H) \geq \tau(H)-1$ for every graph $H$. \end{lem} \begin{proof} Let $t=\tau(H)-1$, and let $\bar{K}_{t,n-t}$ denote the graph on $n \geq t$ vertices obtained from the complete bipartite graph $K_{t,n-t}$ by making the $t$ vertices in the first part of the bipartition pairwise adjacent. Then $\tau(G) \leq t$ for every minor $G$ of $\bar{K}_{t,n-t}$. Therefore $H$ is not a minor of $\bar{K}_{t,n-t}$, and $$\frac{e(\bar{K}_{t,n-t})}{v(\bar{K}_{t,n-t})}= \frac{nt-t(t+1)/2}{n} \to t,$$ as $n \to \infty.$ \end{proof}
The following corollary follows immediately from Lemma~\ref{lem:tau} and implies that the bound in Theorem~\ref{thm:cycles} is tight whenever all components of $H$ are odd cycles, as claimed in the introduction.
\begin{cor} For every $2$-regular graph $H$ with $\brm{odd}(H)$ odd components we have $$c(H) \geq \frac{v(H)+\brm{odd}(H)}{2}-1.$$ \end{cor}
We finish this section by proving Corollary~\ref{cor:complete}.
\begin{proof}[Proof of Corollary~\ref{cor:complete}] By the results of ~\cite{Dirac64,Jorgensen94,Mader68,SonTho06} we have $c(K_t) = t-2$ for $3 \leq t \leq 9$. Therefore $c(kK_t) \leq kt-k-1$ by Theorem~\ref{thm:union}. On the other hand, $\tau(kK_t)=k\tau(K_t)=k(t-1)$. Thus $c(kK_t) \geq kt-k-1$ by Lemma~\ref{lem:tau}. \end{proof}
\section{Proof of Theorem~\ref{thm:main}}\label{sec:proof}
We prove Theorem~\ref{thm:main} by first constructing a fractional solution and then rounding it in two stages.
Let $n=v(G)$, and assume $V(G)=[n]:=\{1,2,\ldots,n\}$ for simplicity. Let $S^{G}:=[0,1]^{V(G)}$. We will use bold letters for elements of $S^{G}$ and denote components of a vector ${\bf x} \in S^{G}$ by $x_1,x_2,\ldots,x_n$. For $r \in [0,1]$, we denote by ${\bf r}$ a constant vector $(r,r,\ldots, r) \in S^G$. For ${\bf x} \in S^{G}$ let $e({\bf x})=\sum_{ij \in E(G)}x_ix_j$.
Suppose that $x_i \in \{0,1\}$ for every $i \in V(G)$, and let $A = \{ i \in V(G) \: | \: x_i =1\}$ and $B=V(G)-A$. If $e({\bf x}) > {\bf s} \cdot {\bf x} - s$, $e({\bf 1} - {\bf x}) > {\bf t} \cdot ({\bf 1} - {\bf x}) - t$, ${\bf{x}}\neq {\bf 1}$ and ${\bf x} \neq {\bf 0}$, then the subgraphs $G_1$ and $G_2$ of $G$ induced by $A$ and $B$, respectively, satisfy the conditions of the theorem.
The above observation motivates to consider the following functions. Let $$f({\bf x})=e({\bf x})-\left( {\bf s}+{\bf \frac{1}{2}}\right) \cdot {\bf x},$$ and let $$g({\bf x})=e({\bf 1} -{\bf x})-\left({\bf t}+{\bf \frac{1}{2}}\right) \cdot ({\bf 1}- {\bf x}).$$ We say that ${\bf x} \in S^G$ is \emph{balanced} if \begin{equation}\label{e:fbalanced} f({\bf x}) > -\frac{(s+\frac{1}{2})^2}{s+t+1}, \end{equation} \begin{equation}\label{e:gbalanced} g({\bf x})>-\frac{(t+\frac{1}{2})^2}{s+t+1}, \end{equation} \begin{equation}\label{e:xlarge}
\|{\bf x}\|_1 \geq s+1, \; \mathrm{and} \end{equation}
\begin{equation}\label{e:xsmall}
\|{\bf 1}- {\bf x}\|_1 \geq t+1. \end{equation}
\vskip 5pt \noindent {\bf Claim 1:} There exists a balanced $ {\bf x} \in S^G$.
\begin{proof} Let ${\bf x} \equiv ( s+ \frac{1}{2})/(s+t+1)$. Note that $v(G) \geq 2(s+t+1)$, as $v(G)(v(G)-1)/2 \geq e(G) > (s+t+1)(v(G)-1)$. Therefore $$\|{\bf x}\|_1 = \frac{s+\frac{1}{2}}{s+t+1}v(G) \geq 2s+1 \geq s+1, $$ and (\ref{e:xlarge}) holds for ${\bf x}$. Further, \begin{align*}f({\bf x}) &=\left(\frac{s+\frac{1}{2}}{s+t+1} \right)^2e(G) -\left(s+\frac{1}{2}\right)\frac{s+\frac{1}{2}}{s+t+1} n \\ &= \left(\frac{s+\frac{1}{2}}{s+t+1} \right)^2 \left(e(G) - (s+t+1)n\right)\\ &>-\frac{(s+\frac{1}{2})^2}{s+t+1}, \end{align*} implying (\ref{e:fbalanced}). The inequalities (\ref{e:gbalanced}) and (\ref{e:xsmall}) hold by symmetry. \end{proof}
For ${\bf x} \in S^G$ let $\brm{fr}({\bf x})=\{i \in [n] \:|\: 0< x_i <1 \}$ denote the set of vertices corresponding to the non-integral values of ${\bf x}$.
\vskip 5pt
\noindent {\bf Claim 2:} Let a balanced ${\bf x} \in S^G$ be chosen so that $|\brm{fr}({\bf x})|$ is minimum. Then $\brm{fr}({\bf x})$ is a clique in $G$. \begin{proof} Suppose for a contradiction that there exist $i,j \in \brm{fr}({\bf x})$ such that $ij \not \in E(G)$. Then $f({\bf x})$ and $g({\bf x})$ are linear as functions of $x_i$ and $x_j$. That is, there exist linear functions $\delta_f({\bf v}),\delta_g({\bf v})$, such that $f({\bf x}+{\bf v})=f({\bf x})+\delta_f({\bf v})$ and $g({\bf x}+{\bf v})=g({\bf x})+\delta_g({\bf v})$ for every ${\bf v}=(v_1,\ldots,v_n)$ satisfying $v_k=0$ for every $k\not \in \{i,j\}$. Therefore there exists a vector ${\bf v} \not \equiv 0$ as above, such that $\delta_f({\bf v}) \geq 0$ and $\delta_g({\bf v}) \geq 0$. Let $\varepsilon$ be chosen maximum so that $0 \leq {\bf x} + \varepsilon{\bf v} \leq 1$. Then inequalities (\ref{e:fbalanced}) and (\ref{e:gbalanced}) hold for ${\bf x}+\varepsilon{\bf v}$ by the choice of ${\bf v}$.
Suppose that
$\|{\bf x}+\varepsilon{\bf v}\|_1 < s+1$. Then there exists $0< \varepsilon' < \varepsilon$ such that
$\|{\bf x}'\|_1 = s+1$, where ${\bf x}' ={\bf x}+\varepsilon'{\bf v}$. Therefore \begin{align*} -\frac{(s+\frac{1}{2})^2}{s+t+1} <f({\bf x}+\varepsilon'{\bf v}) \leq \frac{(s+1)^2}{2} - \left(s+\frac{1}{2}\right)(s+1). \end{align*} The above implies $$\left(\frac{ 1}{2}-\frac{1}{s+t+1}\right)\left(s+\frac{1}{2}\right)^2< \frac{1}{8},$$ which is clearly contradictory for $s,t \geq 1$. Thus (\ref{e:xlarge}) (and, symmetrically, (\ref{e:xsmall})) holds for ${\bf x}+\varepsilon{\bf v}$. It follows that ${\bf x}+\varepsilon{\bf v}$ is balanced, contradicting the choice of ${\bf x}$.
\end{proof}
Let ${\bf y}$ be balanced such that $C:=\brm{fr}({\bf y})$ is a clique. As we can no longer continue to modify $f({\bf y})$ and $g({\bf y})$ linearly as in Claim 2, we adjust them as follows. Let $A=\{i \in [n] \: | \: y_i=1\}$, $B=\{i \in [n] \: | \: y_i=0\}$, $a=|A|$, $b=|B|$ and $c=|C|$. Let $q = \sum_{i \in C}y_i$, and let $r=\lfloor q \rfloor$. For ${\bf x} \in S^G$, let $$\bar{f}({\bf x})= r\sum_{i \in C}x_i - \frac{r(r+1)}{2}-\sum_{\{i,j\} \subseteq C}x_ix_j + e({\bf x}) -{\bf s} \cdot {\bf x},$$ and let \begin{align*} \bar{g}({\bf x})&= (c-r-1)\sum_{i \in C}(1-x_i) - \frac{(c-r)(c-r-1)}{2} \\&-\sum_{\{i,j\} \subseteq C}(1-x_i)(1-x_j) + e({\bf 1} -{\bf x}) -{\bf t} \cdot ({\bf 1} -{\bf x}). \end{align*}
\vskip 5pt \noindent {\bf Claim 3:} Let ${\bf x} \in S^G$ be such that $x_i \in \{0,1\}$ for $i \in C$. Then $\bar{f}({\bf x}) \leq e({\bf x}) -{\bf s} \cdot {\bf x}$, and $\bar{g}({\bf x}) \leq e({\bf 1} -{\bf x}) -{\bf t} \cdot ({\bf 1} -{\bf x})$. \begin{proof} To verify the first inequality it suffices to show that $$r\sum_{i \in C}x_i - \frac{r(r+1)}{2}-\sum_{\{i,j\} \subseteq C}x_ix_j \leq 0,$$ for every ${\bf x} \in \{0,1\}^C$. Let $p=\sum_{i\in C}x_i$. We have \begin{align*} r&\sum_{i \in C}x_i - \frac{r(r+1)}{2}-\sum_{\{i,j\} \subseteq C}x_ix_j \\&= rp - \frac{r(r+1)}{2} - \frac{p(p-1)}{2}= \frac{p-r -(p-r)^2}{2} \leq 0, \end{align*} as desired. The inequality $\bar{g}({\bf x}) \leq e({\bf 1} -{\bf x}) -{\bf t} \cdot ({\bf 1} -{\bf x})$ follows analogously. \end{proof}
By Claim 3 it suffices to find ${\bf x} \in \{0,1\}^{[n]}$ such that $\bar{f}({\bf x})> -s$, $\bar{g}({\bf x}) > -t$, ${\bf{x}}\neq {\bf 1}$ and ${\bf x} \neq {\bf 0}$. We start by estimating $\bar{f}({\bf y})$ and $\bar{g}({\bf y})$.
\vskip 5pt \noindent {\bf Claim 4:} We have \begin{equation}\label{e:fbary} \bar{f}({\bf y}) > \frac{a}{2}+\frac{q^2}{2c}-\frac{(s+\frac{1}{2})^2}{s+t+1} \end{equation} and \begin{equation}\label{e:gbary} \bar{g}({\bf y}) > \frac{b}{2}+\frac{(c-q)^2}{2c}-\frac{(t+\frac{1}{2})^2}{s+t+1} \end{equation}
\begin{proof} It suffices to prove (\ref{e:fbary}), as (\ref{e:gbary}) is symmetric. We have \begin{align*} \bar{f}({\bf y}) - f({\bf y}) &= \frac{1}{2}(q+a) + rq - \frac{r(r+1)}{2}- \sum_{\{i,j\} \subseteq C}y_iy_j \\ &= \frac{1}{2}(q+a) + rq - \frac{r(r+1)}{2}- \frac{q^2}{2}+\frac{1}{2}\sum_{i \in C}y^2_i\\ &\geq \frac{1}{2}(q+a) + rq - \frac{r(r+1)}{2}- \frac{q^2}{2}+\frac{q^2}{2c} \\ &=\frac{1}{2}(q+a)-\frac{r}{2}-\frac{(q-r)^2}{2}+\frac{q^2}{2c}\\ &\geq \frac{1}{2}(q+a)-\frac{q}{2}+\frac{q^2}{2c} =\frac{a}{2}+\frac{q^2}{2c}. \end{align*} As ${\bf y}$ is balanced, (\ref{e:fbary}) follows. \end{proof}
Note that Claim 4 implies that $\bar{f}({\bf y}) > -s$ and $\bar{g}({\bf y}) > -t$.
We assume now that \begin{equation}\label{e:qbounds} r \leq 2s \qquad \mathrm{and} \qquad c-r-1 \leq 2t. \end{equation} The other cases are easier, as we will exploit the fact that a complete subgraph $G_1$ of $G$ on more than $2s$ vertices satisfies the theorem requirements.
The proof of the next claim is analogous to that of Claim 2 and we omit it. \vskip 5pt
\noindent {\bf Claim 5:} There exists ${\bf z} \in S^G$ such that $\bar{f}({\bf z}) \geq \bar{f}({\bf y})$, $\bar{g}({\bf z}) \geq \bar{g}({\bf y})$, $z_i=y_i$ for every $i \in V(G)-C$, $\|{\bf z}\|_1 >1$, $\|{\bf 1} - {\bf z}\|_1 >1$ and
$|\brm{fr}({\bf z})| \leq 1$.
\vskip 10pt Consider a vector ${\bf z}$ that satisfies Claim 5. Let $i \in C$ be a vertex such that $z_j \in \{0,1\}$ for every $j \in V(G) - \{i\}$. We suppose without loss of generality that $z_i \leq \frac{1}{2}$, as the case $z_i \geq \frac{1}{2}$ is analogous due to symmetry between ${\bf z}$ and ${\bf 1} - {\bf z}$. Let ${\bf z}^*$ be obtained from ${\bf z}$ by setting $z^*_i=0$. Then ${\bf z}^* \neq {\bf 1}$, ${\bf z}^* \neq {\bf 0}$, and, as noted above, it suffices to show that $\bar{f}({\bf z}^*) > -s$ and $\bar{g}({\bf z}^*) > -t$. We do this in the next two claims.
\vskip 5pt \noindent {\bf Claim 6:} $\bar{f}({\bf z}^*) > -s$.
\begin{proof} Let $x = z_i$ for brevity. We have
$\bar{f}({\bf z}^*) \geq \bar{f}({\bf z}) - (r+a-s)x$. Recall that ${\bf y}$ is balanced, and $\|y\|_1 \leq r+a+1$. Therefore by (\ref{e:xlarge}) we have $s \leq r+a$, and using (\ref{e:fbary}) we have \begin{align*} \bar{f}({\bf z}^*) &> \frac{a}{2}+\frac{q^2}{2c}-\frac{(s+\frac{1}{2})^2}{s+t+1} - (r+a-s)x \\ &\geq\frac{s-q}{2}+\frac{q^2}{2c}-\frac{(s+\frac{1}{2})^2}{s+t+1}, \end{align*} as $x \leq \frac{1}{2}$, $r \leq q$. By (\ref{e:qbounds}), it suffices to show \begin{equation*}\label{e:c6case2} \frac{3}{2}s-\frac{(s+\frac{1}{2})^2}{s+t+1}\geq \frac{q}{2} - \frac{q^2}{2(q+2t+1)}. \end{equation*} As the right side increases with $q$ for fixed $s$ and $t$, it suffices to verify this inequality when $q=2s+1$. In this case we have \begin{align*} \frac{3}{2}s&-\frac{(s+\frac{1}{2})^2}{s+t+1} = \frac{2s^2 + 6st +2s-1}{4(s+t+1)} \\ &\geq \frac{2s + 4st +2t +1}{4(s+t+1)} = \frac{2s+1}{2} - \frac{(2s+1)^2}{2(2s+2t+2)}. \end{align*} as desired.
\end{proof}
\vskip 5pt \noindent {\bf Claim 7:} $\bar{g}({\bf z}^*) > -t$. \begin{proof} To simplify the notation we prove the symmetric statement for $\bar{f}$ instead. That is, if $z_i \geq \frac{1}{2}$ and ${\bf z}^*$ is obtained from ${\bf z}$ by setting $z_i$ to $1$, we show that $\bar{f}({\bf z}^*) > -s$. Denote $1-z_i$ by $x$ for the duration of this claim. Then $\bar{f}({\bf z}^*) \geq \bar{f}({\bf z})+(r-s)x.$ If $r \geq s$ the claim follows directly from Claim 4, and so we assume $s \geq r$. Using (\ref{e:fbary}) and the inequality $s \leq r+a$, which was shown to hold in Claim 6, we have \begin{align*} \bar{f}({\bf z}^*) &\geq \frac{a}{2}+\frac{q^2}{2c}-\frac{(s+\frac{1}{2})^2}{s+t+1} + (r-s)x \\ &\geq \frac{a+r-s}{2}+\frac{q^2}{2c}-\frac{(s+ \frac{1}{2})^2}{s+t+1} \\& \geq \frac{q^2}{2c}-\frac{(s+ \frac{1}{2})^2}{s+t+1} \geq -s, \end{align*} as desired.
\end{proof}
We have now proved the theorem in the case when (\ref{e:qbounds}) holds. Therefore without loss of generality we assume that $c-r-1 > 2t$. We will need the following variant of Claims 2 and 5.
\vskip 5pt \noindent {\bf Claim 8:} There exists ${\bf z} \in \{0,1\}^{V(G)}$ such that $\bar{f}({\bf z}) \geq \bar{f}({\bf y})$, $\sum_{i \in C}z_i \leq \lceil \sum_{i \in C}y_i \rceil$, and $z_i=y_i$ for every $i \in V(G)-C$. \begin{proof} An argument analogous to the proof of Claim 2, applied to the linear functions $\bar{f}$ and $-\sum_{i \in C} x_i$, instead of $f$ and $g$, implies existence of ${\bf z}' \in S^G$ such that $\bar{f}({\bf z}') \geq \bar{f}({\bf y})$, $\sum_{i \in C}z'_i \leq \sum_{i \in C}y_i $,
$z'_i=y_i$ for every $i \in V(G)-C$, and $|\brm{fr}({\bf z}')| \leq 1$.
Let $i \in C$ be such that $z'_j \in \{0,1\}$ for every $j \in C-\{i\}$. Let $k = r +|\{j \in A \: |\: ij \in E(G)\}| -s$ be the coefficient of $z_i$ in $\bar{f}$ considered as a linear function of $z_i$. Let ${\bf z}$ be obtained from ${\bf z}'$ by setting $z_i = 1$ if $k \geq 0$, and by setting $z_i = 0$ otherwise. Then $\bar{f}({\bf z}) \geq \bar{f}({\bf z}')$, and ${\bf z}$ satisfies the claim.
\end{proof}
Finally, we consider a vector ${\bf z}$ that satisfies Claim 8, and let $W=\{i\in C \:|\: z_i=0 \}$. As $$\sum_{i \in C}z_i \leq\left \lceil \sum_{i \in C}y_i\right \rceil \leq r+1,$$ we have $|W| \geq c-r-1 > 2t$. Thus the subgraphs $G_1$ and $G_2$ of $G$ induced on $\{i\in V(G) \:|\: z_i=1 \}$ and $W$, respectively, satisfy the conditions of the theorem.
\section{Concluding remarks}
\subsubsection*{Improving Theorem~\ref{thm:main}.}
The following conjecture, strengthening several aspects of Theorem~\ref{thm:main}, appears to be plausible and implies Conjecture~\ref{conj:main}.
\begin{conj}\label{conj:partition} Let $s,t \geq 0$ be real, and let $G$ be a non-null graph with $e(G) \geq (s+t+1)v(G)$. Then there exist vertex disjoint non-null subgraphs $G_1$ and $G_2$ of $G$ such that $e(G_1) \geq sv(G_1)$, $e(G_2) \geq tv(G_2)$, and $V(G_1) \cup V(G_2)=V(G)$. \end{conj}
Adjusting the parameters involved in the proof of Theorem~\ref{thm:main} one can prove a number of weakenings of Conjecture~\ref{conj:partition}. In particular, Wu, using these methods, proved the following.
\begin{thm}[Wu~\cite{WuPrivate}]\label{thm:Wu} Conjecture~\ref{conj:partition} holds if $s=t$, or $e(G) \geq (s+t+\frac{3}{2})v(G)$. \end{thm}
Finally, let us note that a beautiful theorem of Stiebitz can be considered as a direct analogue of Conjecture~\ref{conj:partition} for minimum, rather than average, degrees.
\begin{thm}[Stiebitz~\cite{Stiebitz96}]\label{thm:Stiebitz} Let $s,t \geq 0$ be integers, and let $G$ be a graph with minimum degree $s+t+1$. Then there exist vertex disjoint subgraphs $G_1$ and $G_2$ with $V(G_1) \cup V(G_2)=V(G)$ such that the minimum degree of $G_1$ is at least $s$ and the minimum degree of $G_2$ is at least $t$. \end{thm}
Unfortunately, we were unable to adapt the proof of Theorem~\ref{thm:Stiebitz} to Conjecture~\ref{conj:partition}.
\subsubsection*{Improving Theorem~\ref{thm:cycles}.}
The bound on the extremal function provided by Theorem~\ref{thm:cycles} is not tight when some, but not all, components of $H$ are even cycles. A stronger conjecture below, which differs only slightly from~\cite[Conjecture 5.7]{HarWoo15}, if true would determine the extremal function for all $2$-regular graphs.
\begin{conj}\label{conj:chgeneral} Let $H$ be a $2$-regular graph with $\brm{odd}(H)$ odd components, then $$c(H)= \frac{v(H)+\brm{odd}(H)}{2}-1,$$ unless $H=C_{2l}$, in which case $c(H)=(2l-1)/2$, or $H=kC_4$, in which case $c(H)=2k -\frac{1}{2}$. \end{conj}
\subsubsection*{Asymptotic density.}
Let $\brm{ex}_m(n,H)$ denote the maximum number of edges in a graph on $n$ vertices not containing $H$ as a minor. Then $$c(H)=\sup_{n \geq 1}\left\{\frac{\brm{ex}_m(n,H)}{n}\right\}.$$ The asymptotic density of graphs not containing $H$ as a minor is determined by a different function $$c_{\infty}(H)=\limsup_{n \to \infty}\left\{\frac{\brm{ex}_m(n,H)}{n}\right\},$$ defined by Thomason in~\cite{Thomason08}. If $H$ is connected then $c(H)=c_\infty(H)$, however the equality does not necessarily hold for disconnected graphs which are the subject of this paper. Some of the more advanced tools in graph minor theory could be used to bound $c_{\infty}(H)$, and Kapadia and Norin~\cite{KapNorDensity} were able to establish the following asymptotic analogues of Conjectures~\ref{conj:main} and~\ref{conj:chgeneral}.
\begin{thm}\label{thm:asymptotic1} Let $H$ be a disjoint union of non-null graphs $H_1$ and $H_2$. Then $$c_{\infty}(H) \leq c_{\infty}(H_1) + c_{\infty}(H_2)+1.$$ \end{thm}
\begin{thm}\label{thm:asymptotic2} Let $H$ be a $2$-regular graph with $\brm{odd}(H)$ odd components, then $$c_{\infty}(H)= \frac{v(H)+\brm{odd}(H)}{2}-1,$$ unless $H=C_{2l}$, in which case $c_{\infty}(H)=(2l-1)/2$, or $H=kC_4$, in which case $c_{\infty}(H)=2k -\frac{1}{2}$. \end{thm}
\vskip 10pt \noindent {\bf Acknowledgement.} This research was partially completed at a workshop held at the Bellairs Research Institute in Barbados in April 2015. We thank the participants of the workshop and Rohan Kapadia for helpful discussions. We are especially grateful to Katherine Edwards, who contributed to the project, but did not want to be included as a coauthor.
\end{document}
\begin{document}
\title{On Periods: from Global to Local} \author{Lucian M. Ionescu} \address{Department of Mathematics, Illinois State University, IL 61790-4520} \email{[email protected]} \date{June, 2018}
\begin{abstract} Complex periods are algebraic integrals over complex algebraic domains, also appearing as Feynman integrals and multiple zeta values. The Grothendieck-de Rham period isomorphism for p-adic algebraic varieties, defined via Monsky-Washnitzer cohomology, is briefly reviewed.
The relation to various p-adic analogues of periods is considered, as well as their relation to Buium-Manin arithmetic differential equations. \end{abstract}
\maketitle \setcounter{tocdepth}{3} \tableofcontents
\section{Introduction} In this article we discuss periods and their applications, as a continuation of \cite{Ionescu-Sumitro}, focusing on the relation between global periods in characteristic zero, and their local counterparts.
The main goal of this research is to question the ``stability'' of the connection between scattering amplitudes and periods \cite{Schnetz:QuantumPeriods,Brown:FeynmanIntegrals,Brown:ICMP} when passing from global to local. This question, approached via the analogy between Veneziano amplitudes and Jacobi sums, is addressed in a follow-up article \cite{LI:p-adicFrobenius}, which also adopts a Deformation Theory point of view when introducing p-adic numbers. It is expected to provide some feedback on the Feynman amplitudes and Multiple Zeta Values correspondence \cite{QuantaMagazine,LI:Periods-FI-JS-Talk}.
Periods are values of algebraic integrals, extending the field of algebraic numbers. Non-trivial examples are Feynman amplitudes from experimentally ``dirty-gritty'' Quantum Field Theory \cite{Schnetz:QuantumPeriods}, yet which happen to be also linear combinations of multiple zeta values from ``pure'' Number Theory \cite{QuantaMagazine}. That Mathematics is unreasonably effective, we know; but to the point of starting to reconsider Plato's thesis that reality is a mirror of the world of (mathematical) ideas?! So, {\em Number}, (once categorified) does rule the (Quantum) Universe after all ...
After reviewing the idea and concept of period, the article explores the connection with {\em quantization functors}, i.e. representations of (generalized) categories of cobordisms, as a perhaps more physical route than that of abstract motives.
At a more concrete level, the power of analogy \cite{Weil-analogy} between the Veneziano amplitude as a String Theory analogue of the Feynman amplitude, and the Jacobi sum in finite characteristic \cite{Ireland-Rosen} (a finite characteristic analog of the Euler beta function), is used to investigate a possible global-to-local correspondence for periods (factorization or reduction of cohomology): $$\xymatrix@R=.2pc{ Veneziano\ Amplitude: & & Jacobi\ Sum: \\ A(a,a')=\frac{\Gamma(\alpha)\Gamma(\beta)}{\Gamma(\alpha+\beta)} & \quad \leftrightarrow \quad & J(c,c')=\frac{g(c)g(c')}{g(cc')} } $$ where $\alpha=-1+(k_1+k_2)^2, \beta=-1+(k_3+k_4)^2$ relate the in/out momenta of the interacting strings, and $c,c':F_p^\times\to C^\times$ are multiplicative characters of the finite field $F_p$.
The first measures the correlation (interaction amplitude) of two strings, with momenta expressed in Mandelstam's variables, while the second measures the ``intersection correlation'' between two multiplicative subgroups (e.g. squares and cubes), yielding the correction term (``defect'' $a_p$) for the number of points $N_p$ (like a constructive or destructive amplitude for the ``volume integral'') of a finite Riemann surface $C(F_p)$, over a finite field $F_p$. This connection has been studied, for example in \cite{Kholodenko}.
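The Jacobi-sum side of the analogy is easy to experiment with numerically. The following Python sketch is ours (the choice $p=13$ and the character indexing are purely illustrative); it builds multiplicative characters of $F_{13}$ from a primitive root and checks the identity $J(c,c')=g(c)g(c')/g(cc')$ together with $|J(c,c')|=\sqrt{p}$ for nontrivial $c$, $c'$, $cc'$.
\begin{verbatim}
import numpy as np

p, g = 13, 2                                    # 2 is a primitive root mod 13
dlog = {pow(g, k, p): k for k in range(p - 1)}  # discrete logarithm table

def chi(j):                      # character sending g^k to exp(2 pi i j k/(p-1))
    return lambda a: np.exp(2j * np.pi * j * dlog[a % p] / (p - 1))

def gauss_sum(c):                # g(c) = sum_a c(a) exp(2 pi i a / p)
    return sum(c(a) * np.exp(2j * np.pi * a / p) for a in range(1, p))

def jacobi_sum(c, cp):           # J(c,c') = sum_{a != 0,1} c(a) c'(1-a)
    return sum(c(a) * cp(1 - a) for a in range(2, p))

c, cp = chi(3), chi(4)           # nontrivial characters with nontrivial product
J = jacobi_sum(c, cp)
print(abs(J), np.sqrt(p))                                         # both ~ 3.6056
print(abs(J - gauss_sum(c) * gauss_sum(cp) / gauss_sum(chi(7))))  # ~ 0
\end{verbatim}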
The Local-to-Global Principle could be used even informally, via an analogy with the algebraic number theory case, to guide more experienced investigators in dealing with the global case of periods, Feynman Diagrams and Mirror Symmetry. In the other direction, it can be used to guide the development of p-adic String Theory, beyond a mere formal replacement of real (complex) numbers by p-adic numbers.
The interplay between Galois symmetries and periods (Feynman amplitudes) \cite{Brown:ICMP}, will be investigated in the framework of Noether's Theorems, connecting conserved quantities (and e.g. unitarity as conservation of probability), and symmetries of the system.
The article is organized as follows. We review the basic ideas regarding periods in \S \ref{S:Periods-AG}, starting from their simple introduction as algebraic integrals, followed by a cohomological interpretation. Remarks on periods, motives and Galois group are followed by considering p-adic periods, in connection with Buium calculus.
Further considerations are postponed for a Deformation Theory approach to p-adic numbers \cite{LI:p-adicFrobenius}, and an investigation of a connection between Grothendieck's algebraic de Rham cohomology, and the discrete analog of de Rham cohomology of the present author \cite{LI:DiscreteDeRham}, as well as possible connections with the discrete periods of \cite{Mercat:DiscretePeriods}.
\section{Periods: from integrals to cohomology classes} The arithmetic notion of period refers essentially to the value of rational integrals over rational domains \cite{KZ}. For example the ubiquitous ``Euclidean circle-radius ratio'' $\pi=\int_{[-1,1]}dx/\sqrt{1-x^2}$, residues like $2\pi i = \int dz/z$ or path integrals as $\log(n)=\int_1^n dx/x$ \cite{K}, are {\em numeric periods}.
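These first examples are easy to reproduce numerically; the short sketch below (ours, purely illustrative) evaluates two of the integrals above with scipy's adaptive quadrature, which copes with the integrable endpoint singularities of the first integrand.
\begin{verbatim}
import numpy as np
from scipy.integrate import quad

pi_val, _ = quad(lambda x: 1.0 / np.sqrt(1.0 - x**2), -1.0, 1.0)
log5, _ = quad(lambda x: 1.0 / x, 1.0, 5.0)
print(pi_val, np.pi)        # ~ 3.141592653589793
print(log5, np.log(5.0))    # ~ 1.6094379124341003
\end{verbatim}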
The representation of a period as an integral is not unique. When placed in the context of de Rham isomorphism of a compact manifold, or algebraic variety as in the early work by Grothendieck \cite{Grothendieck-deRham}, they are matrix coefficients of the corresponding integration pairing \cite{Muller-Stach}. The resulting isomorphism between de Rham cohomology and singular cohomology: $$de \ Rham\ Theorem: \quad H_{dR}^*(X,D)\otimes_Q C \quad \overset{\cong}{\to}\quad H_{sing}^*(X,D)\otimes_Q C,$$ is called the {\em period isomorphism}.
\subsection{Periods of Algebraic Varieties} \label{S:Periods-AG}
Specifically, from the algebraic geometry point of view, a {\em numeric period} $p$ is represented by a quadruple consisting of a smooth algebraic variety $X$ over $Q$ of dimension $d$, a regular algebraic $d$-form $\omega$, a normal crossing divisor $D$, and a singular chain $\gamma$ on $X(C)$ with boundary on $D(C)$: $$(X,D,\omega,\gamma) \quad \mapsto \quad p=\int_\gamma \omega.$$ Fixing $X$ and such a normal crossing divisor $D$, choosing a rational basis in both cohomology groups allows one to represent the above {\em period isomorphism} as a {\em period matrix}.
Of course, there are elementary transformations on such quadruples (linearity, change of variables and Stokes formula), which leave the corresponding period unchanged \cite{KZ}, p.31. Whether the {\em effective periods}, i.e. equivalence classes of quadruples modulo the elementary moves, correspond isomorphically to the numeric periods, is the content of the corresponding Kontsevich Conjecture.
Since the history of periods and period domains goes back to the very beginning of algebraic geometry \cite{CG}, we will proceed with two such elementary examples: the Riemann Sphere (genus zero) and the case of elliptic curves (genus one).
\begin{example} With $X=P^1-\{0,\infty\}$, $D=\emptyset$, $\omega=dz/z$ and $\gamma=S^1$ the unit circle, we find $2\pi i$ as the (only) entry of the period matrix of the period isomorphism $H^1(X;C)\to H_1(X;C)$.
If we change the divisor to $D=\{1,n\}$ and take $\gamma=[1,n]$, then the numeric period $\log(n)$ becomes one of the periods of $H^1(X,D)$. \end{example}
\begin{example}\label{Example:EC} Given an elliptic curve $X:y^2=x^3+ax+b$ with canonical homology basis $\gamma_1,\gamma_2$ \cite{RS} and differential form $\omega=dx/y$, the period matrix (vector) is (\cite{CG}, p.1418): $$(A,B)=\left( \int_{\gamma_1}\omega, \int_{\gamma_2} \omega\right).$$ It is customary to go to the fraction field (of periods) and divide by $A$ to get $(1,\tau)$ with the {\em normalized $B$ period} denoted $\tau=1+it$, having positive imaginary part\footnote{Due to the fact that $i\int_X \omega\cup \bar{\omega}>0$ \cite{CG}.} and constituting an invariant of the elliptic curve \cite{Carlson-Stach-PD}, p.9.
For example, with $\lambda=-1$ the elliptic curve $E:y^2=(x-1)x(x+1)$ has invariant $\tau=i$, and $E=C/(Z\oplus Zi)$ has an additional automorphism of order $4$ (complex multiplication) \cite{Chowla-Selberg}; a numerical check of this invariant is sketched after this example.
Such normalized periods provide a simple example of {\em period domain}, here the upper-half plane $\C{H}$.
A change of the homology basis by a unimodular transformation in $\Gamma=SL_2(Z)$ corresponds to a fractional linear transformation relating the corresponding two points of the period domain. Thus the moduli space of genus one Riemann surfaces corresponds to the quotient $\C{H}/\Gamma$.
Additional details and examples can be found in \cite{Carlson-Stach-PD}, Ch.1. \end{example}
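Returning to the case $\lambda=-1$ above, the invariant $\tau=i$ can also be seen numerically: up to orientation and normalization conventions, the period integrals of $\omega=dx/y$ over the two real cycles of $y^2=x^3-x$ have equal absolute value, so the normalized period ratio is $i$. The following sketch is ours and only illustrates this equality of absolute values.
\begin{verbatim}
import numpy as np
from scipy.integrate import quad

f1 = lambda x: 1.0 / np.sqrt(x**3 - x)   # cycle over [1, infinity)
f2 = lambda x: 1.0 / np.sqrt(x - x**3)   # cycle over [-1, 0], sent to [0, 1] by x -> -x

I1 = quad(f1, 1.0, 2.0)[0] + quad(f1, 2.0, np.inf)[0]
I2, _ = quad(f2, 0.0, 1.0)
print(I1, I2)    # both ~ 2.62206, so the two periods agree in absolute value
\end{verbatim}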
Before we move on, a crude physics interpretation of periods will be later useful. \begin{rem} Think of an elliptic curve with a 1-form $\omega$ as a 2D-universe with a flow, or perhaps a world sheet of a string, with a given capacity for propagating action (Quantum computing: duplex channel). In a conformal metric, for convenience of relating to metric picture, the corresponding vector field will represent a free propagation, with certain circulation and flux (harmonic dual pair: streamlines and equipotential lines). The two periods then measure these two: circulation and flux. \end{rem}
\subsection{Families of periods} Continuing the discussion of the above case of Riemann surfaces of genus one, one often has a family of such elliptic curves $E_t$ depending holomorphically on a parameter $t$. The resulting map $t\mapsto \tau(t)$ is the {\em period map}.
For example, if the base space is the Riemann sphere, one finds a globally defined map $S\to \C{H}/\Gamma$ \cite{CG}, p.1418.
\subsection{Interpretation} Comparing periods and algebraic numbers is probably the first thing to do, before developing a theory of periods.
\subsubsection{On algebraic numbers}\label{S:AlgebraicNumbers}
Algebraic numbers (over $Q$) extend rational numbers via extensions $Q[x]/(f(x))$. If focusing on integers, and contenting ourselves with systematically viewing field extensions as fields of fractions (as long as we stay within the commutative world), then we may choose to interpret these algebraic extensions geometrically, as lattices \cite{IonescuMina}, and algebraically as {\em group representations}. For example, $Q(i)$ is the fraction field of its ring of algebraic integers $Z[i]$, which in turn is the group ring of its group of units $U=<i>$, which is a subgroup of the rotation group of the {\em rational plane} $Z\times Z$ \footnote{... in the spirit of the geometric interpretation of complex numbers, starting with Argand, Gauss, followed by Riemann and perhaps the modern CFT and String Theory developments.}
At this stage, finite fields (finite characteristic) can be constructed via quotients of lattices of algebraic integers. For example $F_5\cong Z[i]/(2+i), F_{3^2}=Z[i]/(3)$ etc. \footnote{See \cite{IonescuMina} for developments of this direction of reasoning.}.
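To spell out the first of these identifications: in $Z[i]/(2+i)$ one has $i\equiv -2$, so every class is represented by an ordinary integer via $a+bi\equiv a-2b$, and since the norm of $2+i$ is $5$ there are exactly five classes, recovering $F_5$.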
Then, increasing the dimension, i.e. the number of variables, from the above $D=0$ case, we obtain algebraic varieties $X:Z[x_1,...,x_n]/<f_1,...,f_k>$, suited for a geometric study via homology and cohomology. As a rich class of examples, we have the Riemann surfaces $RS:y^2=f(x)$, with de Rham / Dolbeault cohomology over the complex numbers.
\subsubsection{Cohomology pairings and their coefficients} Integrals, as a non-degenerate pairing, e.g. the de Rham isomorphism, on the other hand go beyond the arithmetic realm. Nevertheless periods should probably be better compared with algebraic integers, since the inverse of a period is not necessarily a period: $1/\pi$ is conjecturally not a period.
Another important aspect is that other important numbers like $e$ and Euler's constant $\gamma$ are conjecturally not periods; why? What lies still beyond periods, but before the ``transcendental junk'' like Liouville's number and such!? Is there a numerical shadow of a Lie correspondence, so that exponential periods like $e^{period}$ form another important class?
\subsubsection{Speculative remarks} Speaking of shadows, the cohomology theory lies ``above'' the periods themselves, reminiscent of {\em categorification}, a process relating numbers and algebraic structures (e.g. Grothendieck ring etc.).
But how to isolate the ``cohomology theory'' from the actual implementation, based on a specific manifold? Is there such a ``thing'' as an isomorphism class of the functor representing the respective cohomology theory? And how are its various matrices, corresponding to its values, related?
If algebraic numbers can be viewed in fact as representations corresponding to their multiplicative structure, as suggested above, then periods should probably relate to representations of groupoids, maybe?
This leads to the ``3-rd level'' of abstraction, beyond arithmetic and algebraic-geometric.
\subsection{Periods, Motives and Galois Group} \cite{Wiki-motives} ``The theory of motives was originally conjectured as an attempt to unify a rapidly multiplying array of cohomology theories, including Betti cohomology, de Rham cohomology, l-adic cohomology, and crystalline cohomology.''
In a more abstract direction, following the work of Grothendieck on pure and mixed motives, in the 1990s the work of M. Nori, starting from directed graphs encoding ``equivalence moves'' between periods (with a certain similarity to Reidemeister's moves and theorem on isotopy classes of knots), led to a degree of abstraction which seems not to serve our purpose here, namely to understand the ``knots, braids and links'' of {\em real} ``elementary'' particles in decays and collisions.
Now on the one hand, Feynman integrals, which are also periods, are ``closer'' to Chen's theory of iterated integrals, which forms a homotopy theory analog of de Rham cohomology \cite{Chen,Hains}. This explains the ``coincidence'' with (linear combinations of) periods arising from Number Theory, e.g. multiple zeta values, which can also be expressed as iterated integrals. This ``non-commutative side'' of {\em homotopical motives} is probably better suited to be approached from the (``Cosmic'') Galois action viewpoint \cite{Brown:Galois}.
On the other hand, the de Rham {\em cohomology} pairing framework for understanding and generalizing periods is reasonably close/similar to the {\em representation theory} viewpoint for algebraic integers.
Then what is the connection between the two directions? It seems that generalizing the idea of a Galois action on roots of polynomials (the representation point of view) allows one to view periods as having infinite orbits under bigger analogues of ``Galois groups'' \cite{Brown:Galois}. For example $\pi$ can be viewed as associated to a 1-dimensional representation of a group \cite{Brown:FeynmanIntegrals}, p.11. This direction provides a framework for studying amplitudes, perhaps not unrelated to the cornerstone idea in high energy physics that ``elementary particles'' are associated to irreducible representations, but definitely to be pursued by ``Mathematicians only!''.
Alternatively, viewing {\em motives} as universal summands of cohomology theories allows one to connect with the direct approach to periods via the period isomorphism.
This perhaps allows one to connect the global and the local. Indeed, since {\em Weil cohomologies} are such universal summands, one should be able to identify the natural analog of the concept of period in finite characteristic \cite{Weil-Conjectures}.
We will confine ourselves to the simplest nontrivial case of elliptic curves (Example \ref{Example:EC}), and investigate in what follows the reduction modulo a prime, in the context of {\em Ramification Theory}, within the {\em Algebraic Number Theory framework}, suited for the algebraic integers and representation viewpoint, and which is ``close enough'' to the Algebraic Geometry framework mentioned in \S\ref{S:Periods-AG}.
\section{p-Adic Periods} The above periods in characteristic zero correspond to the ``real world'' of the ``prime at infinity'' (arguably on both accounts: \cite{Real-fish} \footnote{Real numbers result from completing the rationals the ``other way'' than the direction of the carryover 2-cocycle!}).
But what about the periods of p-adic analysis? How are these defined, and how do they relate to p-adic analogues like the p-adic gamma function, Gauss sums and Jacobi sums? Of course, beyond the natural motivation to generalize and applications to Number Theory, such a study would shed light on p-adic String Theory and CFT, via their connection to Veneziano amplitudes and other such iterated integrals on moduli spaces of punctured Riemann spheres \cite{Brown:ModuliSpaces}.
\subsection{p-Adic De Rham Cohomology} (Algebraic/Geometric) Number Theory in finite characteristic may be thought of as the ``infinitesimal/linear analysis'' of p-adic analysis\footnote{It is rather {\em Deformation Theory}, as it will be argued elsewhere \cite{LI:p-adicFrobenius}.}, and algebraic de Rham cohomology of a variety does not reduce ``nicely'', requiring a lift to the characteristic-zero setting of p-adic number fields, called Monsky-Washnitzer cohomology \cite{Hartog}, p.27 (see also \cite{Kedlaya} and references therein).
Briefly, if $X$ is an algebraic variety over $Q$ and $A=Q[x,y]/f(x,y)$ its coordinate ring, one considers the ``overconvergent'' subalgebra $A^\dagger$ of its lift to p-adic numbers (loc. cit.), in order to have exact forms closed under p-adic completion. Then the {\em Monsky-Washnitzer cohomology of $X$}, also called here the {\em p-adic algebraic de Rham cohomology of $X$}, is: $$H^i_{MW}(A):= H^i_{dR}(A^\dagger)\otimes_{Z_q} Q_q,$$ where $q=p^n$, $Z_q$ is the ring of p-adic integers of the $n$-th degree unramified extension of $Q_p$, and $Q_q$ its field of fractions.
Constructing a lift to $Z_p$ of the Frobenius $x\mapsto x^q$ as a ring endomorphism turns out to be difficult, being equivalent to specifying a p-derivation in the sense of Buium-Manin \cite{Buium-Manin}: $$\phi_p(x)=Frob_p(x)+p\delta_p(x), \quad \delta(x+y)=\delta(x)+\delta(y)+C_p(x,y),$$ $$C_p(x,y)=[x^p+y^p-(x+y)^p]/p\ \in \ Z[x,y].$$ Note at this stage that, if one is concerned only with its action on MW-cohomology, then one may relax the endomorphism requirement, and examples like $\phi_1(x)=x^p$ or $\phi_2(x)=x^p+px$ will do \cite{Hartog}, Ch.3, p.24.
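For instance, for the first two primes one computes directly
$$C_2(x,y)=\frac{x^2+y^2-(x+y)^2}{2}=-xy,\qquad C_3(x,y)=\frac{x^3+y^3-(x+y)^3}{3}=-xy(x+y),$$
and in general $C_p\in Z[x,y]$ because $p$ divides the binomial coefficients ${p\choose k}$ for $0<k<p$.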
\subsection{p-adic Period Isomorphism} The p-adic analog of the period isomorphism is defined via Hodge Theory and the Hodge isomorphism.
Since the presentation may benefit from a Deformation Theory approach to p-adic numbers, as sketched in \cite{LI:p-adicFrobenius}, it will be continued in loc. cit. Indeed, the understanding of p-adic periods would benefit from a comparison of the MW-cohomology with Hochschild cohomology, and the corresponding period isomorphism.
\section{Conclusions} The subject of {\em Periods}, although with a long history, has become essential for understanding the deep mysteries underlying the ``coincidence'' between quantum physics scattering amplitudes and number theoretical special values, like MZVs.
Since Grothendieck's algebraic de Rham cohomology is instrumental in studying the global (conceptual) aspects of periods, we will also point to its connection to classical de Rham cohomology, via the discrete version of de Rham cohomology, as defined for finite abelian groups \cite{LI:DiscreteDeRham}, which will be addressed elsewhere, in connection with p-adic periods.
Finally, the analogy between Euler's integrals, beta and gamma functions, and the Jacobi and Gauss sums, is used to question the ``stability'' of the (Veneziano) amplitude-period connection, when passing from global to local.
Understanding the connections with Veneziano amplitudes and Jacobi sums requires an understanding of the ``discrete case'' of finite characteristic. A parallel with characteristic zero can be achieved via a Deformation Theory viewpoint, and can be found in \cite{LI:p-adicFrobenius}.
\end{document} |
\begin{document}
\title{Area integral functions and $H^{\infty}$ functional calculus for sectorial operators on Hilbert spaces}
\thanks{{\it 2010 Mathematics Subject Classification:} 47D06, 47A60} \thanks{{\it Key words:} Sectorial operator, $H^{\infty}$ Functional calculus, Area integral function, Square function, Hilbert space.}
\author{Zeqian Chen}
\address{Wuhan Institute of Physics and Mathematics, Chinese Academy of Sciences, 30 West District, Xiao-Hong-Shan, Wuhan 430071,China} \email{[email protected]}
\thanks{Z. Chen is partially supported by NSFC grant No. 11171338.}
\author{Mu Sun}
\address{Wuhan Institute of Physics and Mathematics, Chinese Academy of Sciences, 30 West District, Xiao-Hong-Shan, Wuhan 430071, China and Graduate University of Chinese Academy of Sciences, Beijing 100049, China}
\date{} \maketitle
\markboth{Z. Chen and M. Sun} {$H^{\infty}$ functional calculus}
\begin{abstract} Area integral functions are introduced for sectorial operators on Hilbert spaces. We establish the equivalence relationship between the square and area integral functions. This immediately extends McIntosh/Yagi's results on $H^{\infty}$ functional calculus of sectorial operators on Hilbert spaces to the case when the square functions are replaced by the area integral functions. \end{abstract}
\section{preliminaries}\label{pre}
The theory of sectorial operators, their $H^{\infty}$ functional calculus, and their associated square functions on Hilbert spaces grew out of McIntosh's seminal paper \cite{M1986} and a subsequent work by McIntosh/Yagi \cite{MY1989}, and then was generalized to the setting of Banach spaces by Cowling-Doust-McIntosh-Yagi \cite{CDMY1996} and by Kalton/Weis \cite{KW2001}. The aim of this paper is to introduce so-called area integral functions for sectorial operators on Hilbert spaces and to extend McIntosh/Yagi's theory to the case when the square functions are replaced by the area integral functions. The corresponding $L_p$ case will be given elsewhere \cite{CS}.
To this end, in this section we give a brief review of $H^{\infty}$ functional calculus on general Banach spaces, and preliminary results that will be used for what follows. We mainly follow the fundamental works \cite{CDMY1996, M1986, MY1989}. See also \cite{ADM1996, LeM2007} for further details. We refer to \cite{Gold1985} for the necessary background on semigroup theory.
\subsection{Sectorial operators and $C_0$-semigroups}
Let $\mathbf{X}$ be a complex Banach space. We denote by $\mathcal{B} ( \mathbf{X})$ the Banach algebra of all bounded operators on $\mathbf{X}.$ Let $A$ be a closed and densely defined operator on $\mathbf{X}.$ We let $\mathrm{D} (A),$ $\mathrm{N} (A)$ and $\mathrm{R} (A)$ denote the domain, kernel and range of $A$ respectively. Further we let $\sigma (A)$ and $\rho (A)$ denote the spectrum and resolvent set of $A$ respectively. Then, for any $\lambda \in \rho(A),$ we let \begin{eqnarray*} R(\lambda, A)=(\lambda-A)^{-1} \end{eqnarray*} denote the corresponding resolvent operator.
For any $\omega\in(0,\pi)$, we let \begin{eqnarray*}
\Sigma_\omega = \{z \in \mathbb{C}\setminus \{0\} : \; | \mathrm{Arg} (z) | < \omega\} \end{eqnarray*} be the open sector of angle $2\omega$ around the half-line $(0, \infty).$ Then, $A$ is said to be a sectorial operator of type $\omega$ if $A$ is closed and densely defined, $\sigma (A) \subset \overline{\Sigma}_\omega,$ and for any $\theta \in(\omega,\pi)$ there is a constant $K_\theta>0$ such that \begin{equation}\label{eq:EsitSectorialOper}
| z R(z,A) | \le K_\theta, \quad z \in \mathbb{C} \setminus \overline{\Sigma}_\theta . \end{equation} We say that $A$ is sectorial of type $0$ if it is of type $\omega$ for any $\omega >0.$
Let $(T_t)_{t \ge 0}$ be a bounded $C_0$-semigroup on $\mathbf{X}$ and let $- A$ denote its infinitesimal generator. Then $A$ is closed and densely defined. Moreover, $\sigma (A) \subset \overline{\Sigma}_{\frac{\pi}{2}}$ and, for any $\lambda \in \mathbb{C} \setminus \overline{\Sigma}_{\frac{\pi}{2}}$ we have \begin{eqnarray*} R (\lambda, A ) = - \int^{\infty}_0 e^{\lambda t} T_t d t \end{eqnarray*} in the strong operator topology, from which it follows that $A$ is a sectorial operator of type $\frac{\pi}{2}.$
\begin{prop}\label{prop:AnalyticSemigroup}{\rm ( see e.g. \cite{Gold1985})} Let $(T_t)_{t \ge 0}$ be a bounded $C_0$-semigroup on $\mathbf{X}$ with the infinitesimal generator $- A.$ Given $\omega \in (0, \frac{\pi}{2}),$ the following are equivalent: \begin{enumerate}[{\rm (i)}]
\item $A$ is sectorial of type $\omega.$
\item For any $\alpha \in (0, \frac{\pi}{2} - \omega ),$ $(T_t)_{t \ge 0}$ admits a bounded analytic extension $(T_z)_{z \in \Sigma_{\alpha}}$ in $\mathcal{B} ( \mathbf{X}).$
\end{enumerate} \end{prop}
By definition, a $C_0$-semigroup $(T_t)_{t \ge 0}$ is called a bounded analytic semigroup if there exist a positive angle $0 < \alpha <\frac{\pi}{2}$ and a bounded analytic extension of $(T_t)_{t \ge 0}$ on $\Sigma_\alpha.$ That is, there exists a bounded family of operators $(T_z)_{z \in \Sigma_\alpha}$ extending $(T_t)_{t \ge 0}$ and such that $z \mapsto T_z$ is analytic from $\Sigma_\alpha$ into $\mathcal{B} (\mathbf{X}).$ Note that such an extension necessarily satisfies $T_z T_w = T_{z + w}$ for all $z, w \in \Sigma_\alpha.$
By Proposition \ref{prop:AnalyticSemigroup}, a $C_0$-semigroup $(T_t)_{t \ge 0}$ with the infinitesimal generator $- A$ is a bounded analytic semigroup if and only if $A$ is a sectorial operator of type $\omega$ for some $\omega \in (0, \frac{\pi}{2}).$
\subsection{$H^{\infty}$ functional calculus}
Given any $\theta \in (0,\pi),$ we let $H^\infty(\Sigma_\theta)$ be the set of all bounded analytic functions $f:\Sigma_\theta \to \mathbb{C}.$ This is a Banach algebra for the supremum norm \begin{eqnarray*}
\| f \|_{\infty, \theta}: = \sup_{z \in \Sigma_\theta} | f (z) |. \end{eqnarray*} Then we let $H^\infty_0(\Sigma_\theta)$ be the subalgebra of all $f\in H^\infty(\Sigma_\theta)$ for which there exist two positive numbers $s,c>0$ such that \begin{equation}\label{eq:EstiH_0funct}
| f(z) | \le c \frac{| z |^s}{(1+ | z | )^{2s}},\quad z \in \Sigma_\theta. \end{equation}
Now given a sectorial operator $A$ of type $\omega\in(0,\pi)$ on a Banach space $\mathbf{X}$, a number $\theta\in(\omega, \pi ),$ and a function $f\in H^\infty_0(\Sigma_\theta),$ one may define an operator $f(A)\in \mathcal{B} ( \mathbf{X} )$ as follows. We let $\gamma \in (\omega,\theta)$ be an intermediate angle and consider the oriented contour $\Gamma_\gamma$ defined by \begin{eqnarray*} \Gamma_\gamma(t)= \left\{ \begin{split} & -te^{i\gamma},\quad t\in \mathbb{R}_-;\\[0.6mm] & te^{-i\gamma},\quad t\in \mathbb{R}_+. \end{split} \right. \end{eqnarray*} In other words, $\Gamma_\gamma$ is the boundary of $\Sigma_\gamma$ oriented counterclockwise. For any $f\in H^\infty_0(\Sigma_\theta),$ we set \begin{equation}\label{eq:f(A)} f(A)=\frac{1}{2\pi i}\int_{\Gamma_\gamma} f(z)R(z,A)dz. \end{equation} By \eqref{eq:EsitSectorialOper} and \eqref{eq:EstiH_0funct}, it follows that this integral is absolutely convergent. Indeed, \eqref{eq:EstiH_0funct} implies that for any $\gamma \in (0, \theta),$ we have \begin{eqnarray*}
\int_{\Gamma_{\gamma}} \Big | \frac{f(z)}{z} \Big | | d z| < \infty. \end{eqnarray*} Thus $f(A)$ is a well defined element of $\mathcal{B} (\mathbf{X}).$ It follows from Cauchy's Theorem that the definition of $f(A)$ does not depend on the choice of $\gamma.$ Furthermore, it can be shown that the mapping $f \mapsto f(A)$ is an algebra homomorphism from $H^\infty_0(\Sigma_\theta)$ into $\mathcal{B} ( \mathbf{X}).$
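For instance, the convergence can be quantified as follows (a standard estimate, recorded here only as a sketch): picking an auxiliary angle $\theta' \in (\omega, \gamma),$ so that $\Gamma_\gamma \setminus \{0\} \subset \mathbb{C} \setminus \overline{\Sigma}_{\theta'},$ the bounds \eqref{eq:EsitSectorialOper} and \eqref{eq:EstiH_0funct} yield \begin{eqnarray*}
\| f(A) \| \le \frac{1}{2 \pi} \int_{\Gamma_\gamma} | f(z) |\, \| R(z, A) \|\, |d z| \le \frac{c\, K_{\theta'}}{\pi} \int^{\infty}_0 \frac{t^{s-1}}{(1+t)^{2 s}}\, d t < \infty,
\end{eqnarray*} the last integral being the Euler Beta integral $B(s,s).$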
\begin{defi}\label{def:HinftyCalculus} Let $A$ be a sectorial operator of type $\omega \in(0,\pi)$ on $\mathbf{X}$ and let $\theta \in (\omega, \pi)$. We say that $A$ admits a bounded $H^\infty(\Sigma_\theta)$ functional calculus if there is a constant $K>0$ such that \begin{equation}\label{eq:HinftyInequa}
\| f(A)\| \leq K \| f \|_{\infty,\theta}, \quad \forall f \in H^\infty_0(\Sigma_\theta). \end{equation} \end{defi}
\begin{remark}\label{rk:HinftyFunctDuality}\rm Suppose that $\mathbf{X}$ is reflexive and that $A$ is a sectorial operator of type $\omega \in (0, \pi)$ on $\mathbf{X}.$ Then $A^*$ is a sectorial operator of type $\omega$ on $\mathbf{X}^*$ as well. Given $0< \omega < \theta< \pi$ and any $f \in H^{\infty} (\Sigma_\theta),$ let us define \begin{eqnarray*} \tilde{f} (z) = \overline{f ( \bar{z} )},\quad \forall z \in \Sigma_\theta.
\end{eqnarray*} Then $\tilde{f} \in H^{\infty} (\Sigma_\theta)$ and $\| \tilde{f} \|_{\infty, \theta} = \| f \|_{\infty, \theta}.$ Moreover, \begin{eqnarray*} \tilde{f} (A^*) = f(A)^*,\quad \forall f \in H^{\infty}_0 (\Sigma_\theta). \end{eqnarray*} Consequently, $A^*$ admits a bounded $H^\infty(\Sigma_\theta)$ functional calculus whenever $A$ does. \end{remark}
\begin{remark}\label{rk:HinftyFunctExtension}\rm For any $\lambda \in \mathbb{C} \setminus \overline{\Sigma}_\theta,$ define $R_\lambda (z) = (\lambda -z)^{-1}.$ Then $R_\lambda \in H^{\infty} (\Sigma_\theta).$ Set \begin{eqnarray*} \widetilde{H}^{\infty}_0 (\Sigma_\theta) = H^{\infty}_0 (\Sigma_\theta) \oplus \mathrm{span} \{ 1, R_{-1} \} \subset H^{\infty} (\Sigma_\theta). \end{eqnarray*} This is a subalgebra of $H^{\infty} (\Sigma_\theta).$ Now we define \begin{eqnarray*} u_A : \widetilde{H}^{\infty}_0 (\Sigma_\theta) \to \mathcal{B} (\mathbf{X}) \end{eqnarray*} to be the linear mapping such that \begin{eqnarray*} u_A (1) = I_{\mathbf{X}},\quad u_A (R_{-1}) = - (1 + A)^{-1}, \end{eqnarray*} and $u_A (f) = f(A)$ for any $f \in H^{\infty}_0 (\Sigma_\theta).$ Then, it is easy to check that $u_A$ is an algebra homomorphism and for any $\lambda \in \mathbb{C} \setminus \overline{\Sigma}_\theta,$ we have \begin{eqnarray*} R_\lambda \in \widetilde{H}^{\infty}_0 (\Sigma_\theta) \quad \text{and}\quad u_A (R_\lambda) = R(\lambda, A). \end{eqnarray*} $u_A$ is said to be the holomorphic functional calculus of $A$ on $\widetilde{H}^{\infty}_0 (\Sigma_\theta).$
Evidently, $A$ admits a bounded $H^\infty(\Sigma_\theta)$ functional calculus if and only if the homomorphism $u_A$ is continuous. \end{remark}
Let $A$ be a sectorial operator of type $\omega \in (0, \pi)$ and assume that $A$ has dense range. Let $\varphi (z) = z (1+z)^{-2}$ and so $\varphi (A) = A (1 + A)^{-2}.$ Then $\varphi (A)$ is one-one and has dense range (see e.g. \cite[Proposition 2.4]{LeM2007}). Following \cite{M1986, CDMY1996}, we can define an operator $f(A)$ for any $f \in H^{\infty} (\Sigma_\theta)$ whenever $\omega < \theta < \pi.$ Indeed, for each $f \in H^{\infty} (\Sigma_\theta)$ the product function $f \varphi$ belongs to $H^{\infty}_0 (\Sigma_\theta).$ Then using the fact that $\varphi (A)$ is one-one we set \begin{eqnarray*} f(A) = \varphi (A)^{-1} (f \varphi) (A) \end{eqnarray*} with the domain being \begin{eqnarray*} \mathrm{D} ( f(A)) = \big \{ x \in \mathbf{X}:\; (f \varphi) (A) (x) \in \mathrm{D} (A) \cap \mathrm{R} (A) \big \}. \end{eqnarray*} This domain contains $\mathrm{D} (A) \cap \mathrm{R} (A)$ and so is dense in $\mathbf{X}.$ Since $\varphi (A)$ is bounded, $f(A)$ is closed. Therefore, $f(A)$ is bounded if and only if $\mathrm{D} ( f(A)) = \mathbf{X}.$ Note however that $f(A)$ may be unbounded in general.
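As a consistency check (using only the multiplicativity of $f \mapsto f(A)$ on $H^{\infty}_0 (\Sigma_\theta)$ and the fact that $\varphi (A)$ is one-one), for $f \in H^{\infty}_0 (\Sigma_\theta)$ this extended definition returns the operator already given by \eqref{eq:f(A)}: \begin{eqnarray*}
\varphi (A)^{-1} (f \varphi) (A) x = \varphi (A)^{-1} \varphi (A) f(A) x = f(A) x, \quad \forall x \in \mathbf{X}.
\end{eqnarray*}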
\begin{thm}\label{th:HinftyCalculus}{\rm (\cite{M1986, CDMY1996})} Let $0< \omega < \theta < \pi$ and let $A$ be a sectorial operator of type $\omega$ on $\mathbf{X}$ with dense range. Then $f(A)$ is bounded for any $f \in H^{\infty} (\Sigma_\theta)$ if and only if $A$ admits a bounded $H^\infty(\Sigma_\theta)$ functional calculus. In that case, we have \begin{eqnarray*}
\| f(A) \| \le K \| f \|_{\infty, \theta},\quad \forall f \in H^{\infty} (\Sigma_\theta), \end{eqnarray*} where the constant $K$ is the one appearing in \eqref{eq:HinftyInequa}. \end{thm}
\begin{remark}\label{rk:HinftyCalculusReflexiveSpace}\rm Let $A$ be a sectorial operator on $\mathbf{X}.$ If $\mathbf{X}$ is a reflexive Banach space, then $\mathbf{X}$ has a direct sum decomposition \begin{eqnarray*} \mathbf{X} = \mathrm{N} (A) \oplus \overline{\mathrm{R} (A)} \end{eqnarray*} (see \cite[Theorem 3.8]{CDMY1996}). Then $A$ is one-one if and only if $A$ has dense range. Moreover, the restriction of $A$ to $\overline{\mathrm{R} (A)}$ is a sectorial operator with dense range. Thus changing $\mathbf{X}$ into $\overline{\mathrm{R} (A)},$ or changing $A$ into $A+P$ where $P$ is the projection onto $\mathrm{N} (A)$ with kernel equal to $\overline{\mathrm{R} (A)},$ one reduces to the case when a sectorial operator has dense range. \end{remark}
\begin{remark}\label{rk:HinftyCalculusImaginaryPowers}\rm Given $s \in \mathbb{R},$ let $f_s$ be the analytic function on $\mathbb{C} \setminus (-\infty, 0]$ defined by $f_s (z) = z^{\mathrm{i} s}.$ Then $f_s \in H^{\infty} (\Sigma_\theta)$ for any $\theta \in (0, \pi)$ with \begin{eqnarray*}
\| f_s \|_{\infty, \theta} = e^{\theta |s|}. \end{eqnarray*} The imaginary powers of a sectorial operator $A$ with dense range may be defined by letting $A^{\mathrm{i}s}= f_s (A)$ for any $s \in \mathbb{R}.$ In particular, $A^{\mathrm{i}s}$ is bounded for any $s \in \mathbb{R}$ if $A$ admits a bounded $H^\infty(\Sigma_\theta)$ functional calculus for some $\theta \in (0, \pi)$ (see e.g. \cite[Section 5]{CDMY1996}). \end{remark}
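The norm computation above can be verified directly (an elementary check): for $z = r e^{\mathrm{i} \varphi}$ with $r > 0$ and $| \varphi | < \theta,$ \begin{eqnarray*}
| z^{\mathrm{i} s} | = \big| e^{\mathrm{i} s (\ln r + \mathrm{i} \varphi)} \big| = e^{- s \varphi} \le e^{|s| \theta},
\end{eqnarray*} and the bound is approached as $\varphi \to \mp \theta$ (according to the sign of $s$), which gives $\| f_s \|_{\infty, \theta} = e^{\theta |s|}.$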
\subsection{Square functions on Hilbert spaces}
Square functions for sectorial operators on Hilbert spaces were introduced by McIntosh in \cite{M1986} and developed further with applications to $H^{\infty}$ functional calculus in \cite{MY1989}. We give a brief description of this theory in this subsection.
To this end, we let $\mathbb{H}$ be a Hilbert space throughout the paper. Let $A$ be a sectorial operator of type $\omega \in (0, \pi)$ on $\mathbb{H}.$ We set \begin{eqnarray*} H^{\infty}_0 (\Sigma_{\omega +}) = \bigcup_{\omega < \theta < \pi} H^{\infty}_0 (\Sigma_\theta). \end{eqnarray*} Then for any $F \in H^{\infty}_0 (\Sigma_{\omega +}),$ we set \begin{equation}\label{eq;SquareFunct}
\| x \|_F : = \left ( \int^{\infty}_0 \| F (t A ) x \|^2 \frac{d t}{ t} \right )^{\frac{1}{2}}, \quad \forall x \in \mathbb{H}.
\end{equation} In the above definition, $F(t A)$ means $F_t (A)$ where $F_t (z) = F (t z).$ By Lebesgue's dominated convergence theorem it is easy to check that for any $x \in \mathbb{H},$ the mapping $t \mapsto F(t A)x$ is continuous and hence $\| x \|_F$ is well defined. However we may have $\| x \|_F = \infty$ for some $x.$ We call $\| x \|_F$ a square function associated with $A.$
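For orientation, if $A$ is in addition a positive self-adjoint one-one operator on $\mathbb{H}$ (a special case considered here only for illustration), the square functions can be computed explicitly from the spectral theorem: writing $E$ for the spectral measure of $A$ (which gives no mass to $\{0\}$ since $A$ is one-one), Fubini's theorem and the substitution $u = t \lambda$ give \begin{eqnarray*}
\| x \|_F^2 = \int^{\infty}_0 \int_{(0, \infty)} | F (t \lambda) |^2 \, d \langle E_\lambda x, x \rangle \, \frac{d t}{t} = \left ( \int^{\infty}_0 | F (u) |^2 \frac{d u}{u} \right ) \| x \|^2,
\end{eqnarray*} the constant being finite by \eqref{eq:EstiH_0funct}. Thus in this case every square function is a constant multiple of the norm, consistent with the well-known fact that such operators admit a bounded $H^{\infty}$ functional calculus.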
\begin{thm}\label{th:SquareFunctEquiv}{\rm (McIntosh/Yagi \cite{MY1989})} Let $\mathbb{H}$ be a Hilbert space. Let $A$ be a sectorial operator of type $\omega \in (0, \pi)$ on $\mathbb{H},$ and suppose that $A$ is one-one. Given $\theta \in (\omega, \pi),$ let $F$ and $G$ be two nonzero functions in $H^{\infty}_0 (\Sigma_\theta).$ \begin{enumerate}[{\rm (i)}]
\item There is a constant $K>0$ such that for any $f \in H^{\infty} (\Sigma_\theta),$ \begin{eqnarray*}
\left ( \int^{\infty}_0 \| f (A) F (t A ) x \|^2 \frac{d t}{ t} \right )^{\frac{1}{2}} \le K \| f \|_{\infty, \theta} \| x \|_G, \quad \forall x \in \mathbb{H}. \end{eqnarray*}
\item There is a constant $C>0$ such that
\begin{eqnarray*} C^{-1} \| x \|_G \le \| x \|_F \le C \| x \|_G,\quad \forall x \in \mathbb{H}. \end{eqnarray*}
\end{enumerate} \end{thm}
Let $G \in H^{\infty}_0 (\Sigma_{\omega +}).$ We denote by $\| \cdot \|^*_G$ the square function for $G$ associated with the adjoint operator $A^*,$ that is, \begin{eqnarray*}
\| x \|^*_G = \left ( \int^{\infty}_0 \| G (t A^* ) x \|^2 \frac{d t}{ t} \right )^{\frac{1}{2}}, \quad \forall x \in \mathbb{H}. \end{eqnarray*} The following theorem establishes the close connection between $H^{\infty}$ functional calculus and square functions on Hilbert spaces.
\begin{thm}\label{th:SquareFunctHinftyCalculus}{\rm (McIntosh \cite{M1986})} Let $\mathbb{H}$ be a Hilbert space. Let $A$ be a sectorial operator of type $\omega \in (0, \pi)$ on $\mathbb{H},$ and suppose that $A$ is one-one. Given $\theta \in (\omega, \pi),$ the following assertions are equivalent: \begin{enumerate}[{\rm (i)}]
\item $A$ has a bounded $H^\infty(\Sigma_\theta)$ functional calculus.
\item For some (equivalently, for any) pair $(F, G)$ of nonzero functions in $H^{\infty}_0 (\Sigma_{\omega +}),$ there is a constant $K>0$ such that \begin{eqnarray*}
\| x\|_F \le K \| x \| \quad \text{and}\quad \|x\|^*_G \le K \|x\| \end{eqnarray*} for all $x \in \mathbb{H}.$
\item For some (equivalently, for any) nonzero function $F \in H^{\infty}_0 (\Sigma_{\omega +}),$ there is a constant $C>0$ such that
\begin{eqnarray*} C^{-1} \| x \| \le \| x \|_F \le C \| x \|,\quad \forall x \in \mathbb{H}. \end{eqnarray*}
\end{enumerate} \end{thm}
Consequently, for a sectorial operator $A$ of type $\omega \in (0, \pi)$ on a Hilbert space $\mathbb{H},$ if $A$ has a bounded $H^\infty(\Sigma_\theta)$ functional calculus for some $\theta \in (\omega, \pi)$ then it has a bounded $H^\infty(\Sigma_\theta)$ functional calculus for all $\theta \in (\omega, \pi).$ In this case, we simply say that $A$ has a bounded $H^\infty$ functional calculus.
\begin{remark}\label{rk:SquareFunctHinftyCalculus}\rm
$A$ is said to satisfy a square function estimate if for some (equivalently, for any) $F \in H^{\infty}_0 (\Sigma_{\omega +}),$ there is a constant $C>0$ such that $\| x \|_F \le C \| x \|$ for all $x \in \mathbb{H}.$ As a consequence of Theorem \ref{th:SquareFunctHinftyCalculus} (and Remark \ref{rk:HinftyCalculusReflexiveSpace}), $A$ has a bounded $H^\infty$ functional calculus if and only if both $A$ and $A^*$ satisfy a square function estimate. Note that an example was given in \cite{LeM2003} of a sectorial operator $A$ which satisfies a square function estimate, but does not have a bounded $H^\infty$ functional calculus. \end{remark}
The goal of this paper is to extend Theorems \ref{th:SquareFunctEquiv} and \ref{th:SquareFunctHinftyCalculus} to the case where square functions are replaced by so-called area integral functions defined below.
\section{Area integral functions}\label{AreaFunct}
First of all, we introduce so-called area integral functions associated with sectorial operators on Hilbert spaces.
\begin{defi}\label{df:AreaFunct} Let $\omega \in (0, \pi)$ and $\theta \in (\omega, \pi).$ Let $A$ be a sectorial operator of type $\omega$ on a Hilbert space $\mathbb{H}.$ Given $0 < \alpha < \frac{\theta - \omega}{2},$ for any $F \in H^\infty_0(\Sigma_{\theta +})$ we define \begin{equation}\label{eq:AreaFunct}
\| x \|_{F, \alpha}: = \left ( \int_{\Sigma_\alpha} \| F (z A ) x \|^2 \frac{d m(z)}{|z|^2} \right )^{\frac{1}{2}}, \quad \forall x \in \mathbb{H}, \end{equation} where $d m$ is the Lebesgue measure in $\mathbb{R}^2 \cong \mathbb{C}.$ Here, $F (z A )$ is understood as $F_z (A)$ where $F_z (w) = F (z w)$ for $w \in \Sigma_{\theta-\alpha}.$
We will call $\| x \|_{F, \alpha}$ the area integral function associated with $A.$ \end{defi}
Evidently, for any $z \in \Sigma_\alpha$ one has \begin{eqnarray*} F_z \in H^\infty_0(\Sigma_{\theta -\alpha}) \subset H^\infty_0(\Sigma_{\omega +}).
\end{eqnarray*} Also, by Lebesgue's dominated convergence theorem, for any $x \in \mathbb{H}$ the mapping $z \mapsto F_z (A) x$ is continuous from $\Sigma_\alpha$ into $\mathbb{H}.$ Hence, $\| x \|_{F, \alpha}$ is well defined but possibly $\| x \|_{F, \alpha} = \infty.$
The corresponding area integral function associated with $A^*$ is defined as \begin{equation}\label{eq:AreaFunctDualOperator}
\| x \|^*_{F, \alpha}: = \left ( \int_{\Sigma_\alpha} \| F (z A^* ) x \|^2 \frac{d m(z)}{|z|^2} \right )^{\frac{1}{2}}, \quad \forall x \in \mathbb{H}. \end{equation}
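As in the self-adjoint illustration given for the square functions (again assuming, only for illustration, that $A$ is positive self-adjoint and one-one), the area integral functions can be computed from the spectral theorem: since the substitution $w = \lambda z$ ($\lambda > 0$) leaves both $\Sigma_\alpha$ and the measure $\frac{d m(z)}{|z|^2}$ invariant, one gets \begin{eqnarray*}
\| x \|_{F, \alpha}^2 = \int_{\Sigma_\alpha} \int_{(0, \infty)} | F (\lambda z) |^2 \, d \langle E_\lambda x, x \rangle \, \frac{d m(z)}{|z|^2} = \left ( \int_{\Sigma_\alpha} | F (w) |^2 \frac{d m(w)}{|w|^2} \right ) \| x \|^2.
\end{eqnarray*} Hence $\| x \|_{F, \alpha}$ and $\| x \|_F$ are then both constant multiples of $\| x \|,$ which illustrates, in this special case, the equivalences stated in the theorems below.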
Our main results read as follows.
\begin{thm}\label{th:AreaIntFunctEquiv} Let $\mathbb{H}$ be a Hilbert space. Let $A$ be a sectorial operator of type $\omega \in (0, \pi)$ on $\mathbb{H},$ and suppose that $A$ is one-one. Given $\theta \in (\omega, \pi)$ and $0 < \alpha, \beta < \frac{\theta - \omega}{2},$ let $F$ and $G$ be two nonzero functions in $H^{\infty}_0 (\Sigma_\theta).$ \begin{enumerate}[{\rm (i)}]
\item There is a constant $K>0$ such that for any $f \in H^{\infty} (\Sigma_\theta),$ \begin{eqnarray*}
\left ( \int_{\Sigma_\alpha} \| f (A) F (z A ) x \|^2 \frac{d m(z)}{|z|^2} \right )^{\frac{1}{2}} \le K \| f \|_{\infty, \theta} \| x \|_{G, \beta}, \quad \forall x \in \mathbb{H}. \end{eqnarray*}
\item There is a constant $C>0$ such that
\begin{eqnarray*} C^{-1} \| x \|_{G, \beta} \le \| x \|_{F, \alpha} \le C \| x \|_{G, \beta},\quad \forall x \in \mathbb{H}. \end{eqnarray*}
\end{enumerate} \end{thm}
\begin{thm}\label{th:AreIntFunctHinftyCalculus} Let $\mathbb{H}$ be a Hilbert space. Let $A$ be a sectorial operator of type $\omega \in (0, \pi)$ on $\mathbb{H},$ and suppose that $A$ is one-one. Given $\theta \in (\omega, \pi)$ and $0 < \alpha < \frac{\theta - \omega}{2},$ the following assertions are equivalent: \begin{enumerate}[{\rm (i)}]
\item $A$ has a bounded $H^\infty(\Sigma_\theta)$ functional calculus.
\item For some (equivalently, for any) pair $(F, G)$ of nonzero functions in $H^{\infty}_0 (\Sigma_{(\omega + \alpha) +}),$ there is a constant $K>0$ such that \begin{eqnarray*}
\| x \|_{F, \alpha} \le K \| x \| \quad \text{and}\quad \| x \|^*_{G, \alpha} \le K \|x\| \end{eqnarray*} for all $x \in \mathbb{H}.$
\item For some (equivalently, for any) nonzero function $F \in H^{\infty}_0 (\Sigma_{(\omega + \alpha) +}),$ there is a constant $C>0$ such that
\begin{eqnarray*} C^{-1} \| x \| \le \| x \|_{F, \alpha} \le C \| x \|,\quad \forall x \in \mathbb{H}. \end{eqnarray*}
\end{enumerate} \end{thm}
\begin{ex} Similar to the square functions used in Stein's book \cite{Stein1970}, area integral functions associated with sectorial operators originate naturally in harmonic analysis. We mention a few classical ones for illustration. For any $k \ge 1,$ let \begin{eqnarray*} G_k (z) = z^k e^{-z},\quad \forall z \in \mathbb{C}. \end{eqnarray*} Then $G_k \in H^{\infty}_0 (\Sigma_{\omega +})$ for any $\omega \in (0, \frac{\pi}{2}).$ Hence, if $A$ is a sectorial operator of type $\omega$ on a Hilbert space for some $\omega \in (0, \frac{\pi}{2}),$ then $G_k$ gives rise to area integral functions associated with $A.$ Indeed, if $(T_t)_{t \ge 0}$ is the bounded analytic semigroup generated by $- A,$ we have \begin{eqnarray*} G_k (z A)x = z^k A^k e^{-z A} x = (-z)^k \frac{\partial^k}{\partial z^k} (T_z x), \quad z \in \Sigma_{\frac{\pi}{2}- \omega} \; \text{and}\; x \in \mathbb{H}. \end{eqnarray*} Hence the corresponding area integral function is \begin{eqnarray*}
\| x \|_{G_k, \alpha} = \left ( \int_{\Sigma_\alpha} |z|^{2(k -1)} \Big \| \frac{\partial^k}{\partial z^k} (T_z x) \Big \|^2 d m (z) \right )^{\frac{1}{2}}, \quad \forall x \in \mathbb{H} \end{eqnarray*} for any $0 < \alpha < \frac{\pi}{2}- \omega.$ We thus have that \begin{eqnarray*}
\| x \|_{G_k, \alpha} \thickapprox \| x \|_{G_m, \beta},\quad \forall x \in \mathbb{H} \end{eqnarray*} for any $k, m \ge 1$ and any $0< \alpha, \beta < \frac{\pi}{2}- \omega.$ \end{ex}
\section{Proofs of main results}\label{pf}
This section is devoted to the proofs of Theorems \ref{th:AreaIntFunctEquiv} and \ref{th:AreIntFunctHinftyCalculus}. Our proofs require two technical variants of the square and area integral functions $\| x \|_F$ and $\| x \|_{F, \alpha}.$
Let $A$ be a sectorial operator of type $\omega \in (0, \pi)$ on $\mathbb{H}.$ Let $\theta \in (\omega, \pi)$ and $0 < \alpha < \frac{\theta - \omega}{2}.$ Given $\epsilon > 0$ and $\delta>0,$ we set for any $F \in H^{\infty}_0 (\Sigma_{\theta}),$ \begin{equation}\label{eq:SuqareFunctVariant}
G_\epsilon (F)(x): = \left ( \int^{\infty}_\epsilon \| F (t A ) x \|^2 \frac{d t}{ t} \right )^{\frac{1}{2}}, \quad \forall x \in \mathbb{H}, \end{equation} and \begin{equation}\label{eq:AreaIntFunctVariant}
S_{\alpha, \delta} ( F ) (x) : = \left ( \int_{\Sigma_{\alpha, \delta}} \| F (z A ) x \|^2 \frac{d m(z)}{|z|^2} \right )^{\frac{1}{2}}, \quad \forall x \in \mathbb{H},
\end{equation} where $\Sigma_{\alpha, \delta} = \{z \in \mathbb{C}:\; |z| > \delta,\; | \mathrm{Arg} (z)| < \alpha \},$ respectively. Evidently, \begin{eqnarray*}
\| x \|_F = \lim_{\epsilon \to 0} G_\epsilon (F)(x) \quad \text{and} \quad \| x \|_{F, \alpha} = \lim_{\delta \to 0} S_{\alpha, \delta} ( F ) (x). \end{eqnarray*}
\begin{lem}\label{le:SquareAreaFunct} For any $\epsilon >0,$ \begin{eqnarray*} G_\epsilon (F)(x) \le \frac{2}{\sqrt{\pi \sin \alpha}} S_{\alpha, \epsilon (1- \sin \alpha)} ( F ) (x),\quad \forall x \in \mathbb{H}. \end{eqnarray*} Consequently, for every $0 < \alpha < \frac{\theta - \omega}{2}$ we have \begin{equation}\label{eq:Square<AreaFunct}
\| x \|_F \le \frac{2}{\sqrt{\pi \sin \alpha}}\| x \|_{F, \alpha},\quad \forall x \in \mathbb{H}. \end{equation} \end{lem}
\begin{proof} Given $t > \epsilon,$ let $D_t$ be the disc in $\mathbb{R}^2 \cong \mathbb{C}$ centered at $(t, 0)$ and tangent to the boundary of $\Sigma_{\alpha, \epsilon (1- \sin \alpha )},$ so that $D_t$ has radius $t \sin \alpha.$ Since the mapping $z \mapsto F(z A) x$ is analytic in $\Sigma_\alpha,$ the mean value property gives \begin{eqnarray*} F (t A) x = \frac{1}{ (\pi \sin^2 \alpha) t^2}\int_{D_t} F (z A) x \, d m (z). \end{eqnarray*} Consequently, by the Cauchy-Schwarz inequality, \begin{eqnarray*}
\| F (t A) x \|^2 \le \frac{C_\alpha}{ t^2} \int_{D_t} \| F (z A) x \|^2 d m (z) \end{eqnarray*} with $C_\alpha = \frac{2}{\pi \sin^2 \alpha}.$ Then \begin{eqnarray*}
[ G_\epsilon (F)(x)]^2 \le C_\alpha \int^{\infty}_\epsilon \int_{D_t} \| F (z A) x \|^2 \frac{d m (z) d t}{t^3}.
\end{eqnarray*} However, since $\frac{|z|}{1+ \sin \alpha} \le t \le \frac{|z|}{1 - \sin \alpha}$ for any $z \in D_t,$ we have \begin{eqnarray*}\begin{split}
[ G_\epsilon (F)(x)]^2 & \le C_\alpha \int_{\Sigma_{\alpha, \epsilon (1- \sin \alpha)}} \| F( z A) x\|^2 \int^{\frac{|z|}{1- \sin \alpha}}_{\frac{|z|}{1 + \sin \alpha}} \frac{d t}{t^3} d m(z)\\
& = 2 C_\alpha \sin \alpha \int_{\Sigma_{\alpha, \epsilon (1 - \sin \alpha )}} \| F( z A) x\|^2 \frac{ d m (z)}{ |z|^2}\\ & = \frac{4}{\pi \sin \alpha} [ S_{\alpha, \epsilon (1- \sin \alpha)} (F)(x)]^2. \end{split}\end{eqnarray*} This completes the proof. \end{proof}
\
{\it Proof of Theorem \ref{th:AreaIntFunctEquiv}.}\; Note that the second assertion follows from the first one. Indeed, applying (i) with the constant function $f=1$ yields an estimate $\| x \|_{F, \alpha} \le K \| x \|_{G, \beta}.$ Then (ii) follows by switching the roles of $F$ and $G$ as well as $\alpha$ and $\beta.$
To prove (i), note that \begin{eqnarray*}
\int_{\Sigma_\alpha} \| f (A) F (z A ) x \|^2 \frac{d m(z)}{|z|^2} = \int^\alpha_{-\alpha} d s \int^{\infty}_0 \| f (A) F (t e^{\mathrm{i} s} A ) x \|^2 \frac{d t}{t}. \end{eqnarray*} By the proof of Theorem \ref{th:SquareFunctEquiv} (i) (see e.g. \cite{ADM1996, MY1989}), there exists a constant $K>0$ such that for any $f \in H^{\infty} (\Sigma_\theta)$ and any $s \in (- \alpha, \alpha),$ \begin{eqnarray*}
\left ( \int^{\infty}_0 \| f (A) F (t e^{\mathrm{i} s}A ) x \|^2 \frac{d t}{ t} \right )^{\frac{1}{2}} \le K \| f \|_{\infty, \theta} \| x \|_G, \quad \forall x \in \mathbb{H}. \end{eqnarray*} Thus, we deduce that \begin{equation}\label{eq:Area<SquareFunct}
\left ( \int_{\Sigma_\alpha} \| f (A) F (z A ) x \|^2 \frac{d m(z)}{|z|^2} \right )^{\frac{1}{2}} \le \sqrt{2 \alpha} K \| f \|_{\infty, \theta} \| x \|_G, \quad \forall x \in \mathbb{H}. \end{equation} By Lemma \ref{le:SquareAreaFunct} (applied to $G$ and the angle $\beta$), we have $\| x \|_G \le \frac{2}{\sqrt{\pi \sin \beta}} \| x \|_{G, \beta},$ and combining this with \eqref{eq:Area<SquareFunct} we conclude (i).
$\Box$
\begin{rk}\label{rk:Sqare=AreaFunct} Taking $f =1$ in \eqref{eq:Area<SquareFunct}, we obtain that \begin{eqnarray*}
\| x \|_{F, \alpha} \le \sqrt{2 \alpha} K \| x \|_G,\quad \forall x \in \mathbb{H}. \end{eqnarray*} Combining this inequality with \eqref{eq:Square<AreaFunct} implies that \begin{equation}\label{eq:Square=AreaFunct}
\| x \|_{F, \alpha} \thickapprox \| x \|_G,\quad \forall x \in \mathbb{H}. \end{equation} \end{rk}
\
{\it Proof of Theorem \ref{th:AreIntFunctHinftyCalculus}.}\; This is a straightforward consequence of Theorem \ref{th:SquareFunctHinftyCalculus} and the equivalence relationship \eqref{eq:Square=AreaFunct} between the square and area integral functions.
$\Box$
\end{document} |
\begin{document}
\title{Compact equations for the envelope theory}
\author{Lorenzo \surname{Cimino}} \email[E-mail: ]{[email protected]} \thanks{ORCiD: 0000-0002-6286-0722}
\author{Claude \surname{Semay}} \email[E-mail: ]{[email protected]} \thanks{ORCiD: 0000-0001-6841-9850}
\affiliation{Service de Physique Nucl\'{e}aire et Subnucl\'{e}aire, Universit\'{e} de Mons, UMONS Research Institute for Complex Systems, Place du Parc 20, 7000 Mons, Belgium} \date{\today}
\begin{abstract} \textbf{Abstract} The envelope theory is a method to easily obtain approximate, but reliable, solutions for some quantum many-body problems. Quite general Hamiltonians can be considered for systems composed of an arbitrary number of different particles in $D$ dimensions. In the case of identical particles, a compact set of 3 equations can be written to find the eigensolutions. This set also provides a nice interpretation and a starting point to improve the method. It is shown here that a similar set of 7 equations can be determined for a system containing arbitrary numbers of particles of two different types. \keywords{Envelope theory; Many-body quantum systems; Approximation methods}
\end{abstract}
\maketitle
\section{introduction} \label{sec:intro}
The envelope theory (ET) \cite{hall80,hall83,hall04} is a technique to compute approximate eigenvalues and eigenvectors of $N$-body systems. This method, first developed for systems with identical particles, has been extended to treat non-standard kinematics in $D$ dimensions in \cite{sema13,sema19}, and it has been recently generalized for systems with different particles \cite{sema20}. The big advantage of this method is that the computation cost is independent of the number of particles. Quite general Hamiltonians can be considered, and the approximate eigenvalues are lower or upper bounds in favorable cases. The method relies on the existence of an exact solution for the $N$-body harmonic oscillator Hamiltonian \cite{hall79,cint01}. The accuracy of the method has been checked for various three-dimensional systems \cite{sema15a} and one-dimensional systems containing up to 100 bosons \cite{sema19}.
It is worth noting that the ET method was rediscovered in 2008 under the name of the auxiliary field method, following an approach different from the one used by Hall \cite{hall80,hall83,hall04}. It was later recognized that both methods are actually completely equivalent. This story is described in \cite{silv12}, where a lot of information is given about this approximation method.
The ET has been used to obtain physical results about hadronic systems as in \cite{sema09}, and is especially useful when the number of particles can be arbitrarily large, as in the large-$N$ formulation of QCD \cite{buis11,buis12}. The method has allowed the study of a possible quasi Kepler's third law for quantum many-body systems \cite{sema21}. It can also be simply used to test accurate numerical calculations as in \cite{char15}.
Let us consider the $N$-body Hamiltonian \begin{equation}\label{trueH}
H=\sum_{i=1}^N T_i(p_i) + \sum_{i<j=2}^N V_{ij}(r_{ij}), \end{equation} where $T_i$ is an arbitrary kinetic energy with some constraints \cite{sema18a} and $V_{ij}$ is a two-body central potential. We also define $p_i=\abs{\bm{p}_i}$ and $r_{ij}=\abs{\bm{r}_i-\bm{r}_j}$, where $\bm{r}_i$ and $\bm{p}_i$ are respectively the position and the momentum of the $i$th particle. It is assumed in the following that we are always working in the centre of mass (CM) frame, $\bm{P} = \sum_{i=1}^N \bm{p}_i=\bm{0}$, and with natural units ($\hbar=c=1$).
As explained in \cite{silv10,sema20}, in the framework of the ET, Hamiltonian (\ref{trueH}) is replaced by an auxiliary Hamiltonian (it is the origin of the other name of the method) \begin{equation}\label{auxH}
\tilde{H}(\{\alpha\})=\sum_{i=1}^N \left[\frac{\bm{p}_i^2}{2\mu_i}+T_i(G_i(\mu_i))-\dfrac{G_i^2(\mu_i)}{2\mu_i}\right] + \sum_{i<j=2}^N\left[ \rho_{ij}\bm{r}_{ij}^2+V_{ij}(J_{ij}(\rho_{ij}))-\rho_{ij}J_{ij}^2(\rho_{ij})\right], \end{equation} where $\{\alpha\} = \{\{\mu_i\},\{\rho_{ij}\}\}$ is a set of auxiliary parameters to determine later, and where the auxiliary functions $G_i$ and $J_{ij}$ are such that \begin{equation}\label{deffunc}
\begin{array}{cc}
T'_i(G_i(x))-\dfrac{G_i(x)}{x}=0,\\[0.5cm]
V'_{ij}(J_{ij}(x))-2xJ_{ij}(x)=0,
\end{array} \end{equation} where $U'(x)=dU(x)/dx$. It is useful to write Hamiltonian (\ref{auxH}) in the form \begin{equation}
\tilde{H}(\{\alpha\}) = H_\text{ho}(\{\alpha\})+B(\{\alpha\}), \end{equation} where $H_\text{ho}$ is the harmonic oscillator part and $B$ is a function obtained by subtracting the harmonic oscillator contributions from (\ref{auxH}). An eigenvalue of (\ref{auxH}) is given by \begin{equation}\label{energy}
\tilde{E}(\{\alpha\})=E_\text{ho}(\{\alpha\})+B(\{\alpha\}), \end{equation} where $E_\text{ho}$ is an eigenvalue of $H_\text{ho}$. A procedure in \cite{silv10,sema20} explains how to compute $E_\text{ho}$ but an example will be given below. An eigenvalue $\tilde{E}$ also depends on the set of parameters $\{\alpha\}=\{\{\mu_i\},\{\rho_{ij}\}\}$. The principle of the method is to search for the set of parameters $\{\alpha_0\}=\{\{\mu_{i0}\},\{\rho_{ij0}\}\}$ such that \begin{equation}\label{mineq}
\frac{\partial\tilde{E}}{\partial\mu_i}\biggr\rvert_{\{\alpha_0\}}=\frac{\partial\tilde{E}}{\partial\rho_{ij}}\biggr\rvert_{\{\alpha_0\}}=0 \hspace{5 mm} \forall\ i,j. \end{equation} Equations (\ref{mineq}) can be easily implemented and solutions $\{\alpha_0\}$ are easily found since we only need to find an extremum \cite{sema20}. After solving (\ref{mineq}), we obtain the desired approximate energy by substituting the set $\{\alpha_0\}$ back into (\ref{energy}), $\tilde{E}(\{\alpha_0\})=\tilde{E}_0$.
In the case of identical particles, it has been shown \cite{sema13,sema19} that we can equivalently find the eigenvalue $\tilde{E}_0$ by using a set of three compact equations
\begin{subequations}\label{compacteq}
\begin{equation}\label{compacteq1}
\tilde{E}_0 = N\,T(p_0)+C^2_N\,V(\rho_0),
\end{equation}
\begin{equation}\label{compacteq3}
N\,T'(p_0)\,p_0=C^2_N\,V'(\rho_0)\,\rho_0,
\end{equation}
\begin{equation}\label{compacteq2}
Q(N) = \sqrt{C^2_N}\,p_0\,\rho_0,
\end{equation}
\end{subequations} where $C^2_N=N(N-1)/2$ is the number of pairs, and where $p_0^2=\bk{\bm{p}_i^2}$ and $\rho_0^2 = \bk{\bm{r}_{ij}^2} \hspace{2mm} \forall\ i,j$. The mean values are taken with an eigenstate of the auxiliary Hamiltonian corresponding to the global quantum number $Q(N)$ for the set $\{\alpha_0\}$ ensuring the constraints (\ref{mineq}). The eigenstate is also completely (anti)symmetric for the exchange between particles. The global quantum number is given by \begin{equation}
Q(N)=
\begin{cases}
\sum\limits_{i=1}^{N-1} \left(2n_i+l_i+\frac{D}{2}\right) & \text{ if }D\geq2\\[15pt]
\sum\limits_{i=1}^{N-1} \left(n_i+\frac{1}{2}\right) & \text{ if }D=1
\end{cases}, \end{equation} where the quantum numbers $\{n_i,l_i\}$ are associated with the internal Jacobi variables. Some values of $Q(N)$ for the bosonic and fermionic ground states are given in \cite{sema20,sema19}. In previous papers \cite{sema13,sema19}, the variable $r_0^2 = N^2 \bk{\left(\bm{r}_i-\bm{R}\right)^2}$, where $\bm{R}$ is the CM position, was used instead of $\rho_0$ because one-body and two-body potentials are treated together.
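As an elementary illustration of the compact equations (a sketch; the harmonic interaction is chosen because the ET is exact for it), take nonrelativistic kinematics $T(p)=p^2/(2m)$ and $V(r)=k\,r^2$. Equation (\ref{compacteq3}) gives $N p_0^2/m=2\,C^2_N\,k\,\rho_0^2$, so that (\ref{compacteq1}) reduces to $\tilde{E}_0=N p_0^2/m$, and eliminating $\rho_0$ with (\ref{compacteq2}) yields
\begin{equation*}
p_0^2=Q(N)\sqrt{\frac{2 m k}{N}} \quad\text{and}\quad \tilde{E}_0=Q(N)\sqrt{\frac{2 N k}{m}},
\end{equation*}
which is the exact spectrum of the $N$-body harmonic oscillator, as expected since the method is built on this solvable case.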
These equations are called compact because all the relevant variables appear in 3 equations giving the definition of the energy (\ref{compacteq1}), the equation of motion (\ref{compacteq3}) and the rule for the quantization (\ref{compacteq2}). Moreover, the uninteresting auxiliary parameters and functions are not present. Equations (\ref{compacteq}) can also be easily implemented and solved. There are good reasons to prefer the compact equations (\ref{compacteq}) over the ``extremization'' equations (\ref{mineq}). First, the quantities $p_0$ and $\rho_0$ give direct access to more interesting expectation values than $\{\alpha_0\}$. Secondly, these equations have a nice semiclassical interpretation as explained in \cite{sema13}. Thirdly, it is possible to improve the ET with the dominantly orbital state method starting from these equations \cite{sema15b}, which is the main motivation to write these equations. As the improvement obtained can be significant in some cases, it is worth generalizing it beyond systems of identical particles. In the following section, we will present the compact equations for a system composed of two different sets of $N_a$ and $N_b$ identical particles.
\section{$\bm{N_a + N_b}$ systems} \label{sec:nanb}
Let us specify the auxiliary Hamiltonian (\ref{auxH}) for this system. The harmonic oscillator Hamiltonian for a system of $N_a$ particles of type $a$ and $N_b$ particles of type $b$ is given by \begin{equation}\label{ho}
H_\text{ho}=\sum_{i=1}^{N_a} \frac{\bm{p}_i^2}{2\mu_a}+\sum_{j=1}^{N_b} \frac{\bm{p}_j^2}{2\mu_b}+\sum_{i<i'=2}^{N_a}\rho_{aa}\bm{r}_{ii'}^2+\sum_{j<j'=2}^{N_b}\rho_{bb}\bm{r}_{jj'}^2+\sum_{i=1}^{N_a}\sum_{j=1}^{N_b}\rho_{ab}\bm{r}_{ij}^2. \end{equation} In the following, letters $i\text{ }(j)$ are reserved for particles of type $a\text{ }(b)$. As explained in \cite{sema20,hall79}, it is useful to write (\ref{ho}) in the form
\begin{subequations}\label{ho2}
\begin{equation}\label{ho20}
H_\text{ho}=H_a+H_b+H_\text{CM} \hspace{1cm} \text{with}
\end{equation}
\begin{equation}\label{ho21}
H_a = \sum_{i=1}^{N_a}{\frac{\bm{p}_i^2}{2\mu_a}}-\frac{\bm{P}_a^2}{2M_a}+\sum_{i<i'=2}^{N_a}\left(\rho_{aa}+\frac{N_b}{N_a}\rho_{ab}\right)\bm{r}_{ii'}^2,
\end{equation}
\begin{equation}\label{ho22}
H_b = \sum_{j=1}^{N_b}{\frac{\bm{p}_j^2}{2\mu_b}}-\frac{\bm{P}_b^2}{2M_b}+\sum_{j<j'=2}^{N_b}\left(\rho_{bb}+\frac{N_a}{N_b}\rho_{ab}\right)\bm{r}_{jj'}^2,
\end{equation}
\begin{equation} \label{ho23}
H_\text{CM} = \frac{\bm{p}^2}{2\mu}+N_aN_b\rho_{ab}\bm{r}^2,
\end{equation}
\end{subequations} where $\bm{P}_\alpha$ and $M_\alpha=N_\alpha\,\mu_\alpha$ are the total momentum and mass for the set $\alpha \in \{a,b\}$, $\mu=\frac{M_aM_b}{M_a+M_b}$ is a reduced mass, and $\bm{p}=\frac{M_b\bm{P}_a-M_a\bm{P}_b}{M_a+M_b}$ and $\bm{r}=\bm{R}_a-\bm{R}_b$ are the relative momentum and position between the CM of the two sets, respectively. The three parts of (\ref{ho20}) are entirely decoupled since (\ref{ho21}) and (\ref{ho22}) depend on the internal coordinates of their respective sets, and (\ref{ho23}) on the relative coordinates between the two CM.
Then, an eigenvalue $E_\text{ho}$ is easily obtained since (\ref{ho2}) is composed of three decoupled parts \cite{sema20} \begin{equation}\label{enho}
E_\text{ho}=Q(N_a)\sqrt{\frac{2}{\mu_a}(N_a\rho_{aa}+N_b\rho_{ab})}+Q(N_b)\sqrt{\frac{2}{\mu_b}(N_b\rho_{bb}+N_a\rho_{ab})}+Q(2)\sqrt{\frac{2}{\mu}N_aN_b\rho_{ab}}. \end{equation} To be complete, the expression of the function $B(\{\alpha\})$ is given by \begin{equation}\label{B}
\begin{aligned}
B & = N_a\left[T_a(G_a(\mu_a))-\frac{G^2_a(\mu_a)}{2\mu_a}\right]+C^2_{N_a}\left[V_{aa}(J_{aa}(\rho_{aa}))-\rho_{aa}J^2_{aa}(\rho_{aa})\right]\\
& +N_b\left[T_b(G_b(\mu_b))-\frac{G^2_b(\mu_b)}{2\mu_b}\right]+C^2_{N_b}\left[V_{bb}(J_{bb}(\rho_{bb}))-\rho_{bb}J^2_{bb}(\rho_{bb})\right] \\
& +N_aN_b\left[V_{ab}(J_{ab}(\rho_{ab}))-\rho_{ab}J^2_{ab}(\rho_{ab})\right].
\end{aligned} \end{equation} When combining (\ref{ho2}) and (\ref{B}), we can see that our auxiliary Hamiltonian (\ref{auxH}) is also composed of three distinct parts: one for the particles of type $a$, another for the particles of type $b$ and a last one for the relative motion between the two sets.
The compact equations can then be established in a similar way as done for identical particles \cite{silv12}. First, we apply the Hellmann-Feynman theorem \cite{hell} on Hamiltonian (\ref{auxH}) to evaluate extremization conditions (\ref{mineq}). By using definitions (\ref{deffunc}) we get the following results \begin{equation}\label{hf}
\begin{array}{lllll}
& G_a^2(\mu_{a0})=p_a^2+\frac{P_0^2}{N_a^2} = {p^\prime_a}^2, \\[0.3cm]
& G_b^2(\mu_{b0})=p_b^2+\frac{P_0^2}{N_b^2} = {p^\prime_b}^2, \\[0.3cm]
& J^2_{aa}(\rho_{aa0})=r_{aa}^2, \\[0.3cm]
& J^2_{bb}(\rho_{bb0})=r_{bb}^2, \\[0.3cm]
& J^2_{ab}(\rho_{ab0})=\frac{N_a-1}{2N_a}r_{aa}^2+\frac{N_b-1}{2N_b}r_{bb}^2+R_0^2= {r^\prime_0}^2,
\end{array} \end{equation} where we have defined the six physical parameters \begin{equation}
\begin{array}{lll}
& p_a^2=\bk{\bm{p}_i^2-\frac{\bm{P}_a^2}{N_a^2}}\text{ and } p_b^2=\bk{\bm{p}_j^2-\frac{\bm{P}_b^2}{N_b^2}},\\[0.3cm]
& r_{aa}^2= \bk{\bm{r}_{ii'}^2}\text{ and } r_{bb}^2= \bk{\bm{r}_{jj'}^2},\\[0.3cm]
& P_0^2=\bk{\bm{p}^2}\text{ and } R_0^2=\bk{\bm{r}^2}.
\end{array} \end{equation} The mean values are taken with an eigenstate of the auxiliary Hamiltonian corresponding to the quantum numbers $Q(N_a)$, $Q(N_b)$ and $Q(2)$ for the set $\{\alpha_0\}$ ensuring the constraints (\ref{mineq}). The eigenstate is also completely (anti)symmetric for the exchange between the $N_a$ or the $N_b$ particles.
Then, by evaluating $\tilde{E}_0=\bk{\tilde{H}(\{\alpha_0\})}$ and using results (\ref{hf}), we obtain the following equation for the energy \begin{equation}\label{eqen}
\tilde{E}_0=N_aT_a\left(p'_a\right)+N_bT_b\left(p'_b\right)+C^2_{N_a}V_{aa}\left(r_{aa}\right)+C^2_{N_b}V_{bb}\left(r_{bb}\right)+N_aN_bV_{ab}\left(r_0'\right). \end{equation} It is interesting to look at the meaning of the linear combinations $p'_a$, $p'_b$ and $r'_0$ since they appear in (\ref{eqen}). As shown in \cite{sema20}, we can derive a similar equation for the energy by using the form (\ref{ho}), instead of (\ref{ho2}), of the harmonic oscillator. By comparison, one can identify ${p'_a}^2 = \bk{\bm{p}_i^2}$, ${p'_b}^2=\bk{\bm{p}_j^2}$ and ${r'_0}^2=\bk{\bm{r}_{ij}^2}$.
In order to find these parameters, we need 6 additional equations. We can find three of them by applying the virial theorem separately on each of the three parts of the auxiliary Hamiltonian \cite{sema20}. One gets \begin{subequations}\label{eqvirial}
\begin{equation}\label{eqvirial1}
N_aT'_a(p'_a)\frac{p_a^2}{p'_a}=C^2_{N_a}V'_{aa}(r_{aa})r_{aa}+\frac{N_b}{N_a}C^2_{N_a}V'_{ab}(r'_0)\frac{r_{aa}^2}{r'_0},
\end{equation}
\begin{equation}\label{eqvirial2}
N_bT'_b(p'_b)\frac{p_b^2}{p'_b}=C^2_{N_b}V'_{bb}(r_{bb})r_{bb}+\frac{N_a}{N_b}C^2_{N_b}V'_{ab}(r'_0)\frac{r_{bb}^2}{r'_0},
\end{equation}
\begin{equation}\label{eqvirial3}
\frac{1}{N_a} T'_a(p'_a)\frac{P_0^2}{p'_a}+\frac{1}{N_b} T'_b(p'_b)\frac{P_0^2}{p'_b}=N_aN_bV'_{ab}(r'_0)\frac{R_0^2}{r'_0}.
\end{equation} \end{subequations}
Finally, we obtain three last equations by using the exact eigenvalue (\ref{enho}) of the harmonic oscillator and comparing it to $\bk{H_\text{ho}\{\alpha_0\}}$. Thanks to (\ref{ho2}), the comparison is done in a similar way as in \cite{silv12} and one gets \begin{subequations}\label{eqcomp}
\begin{equation}\label{eqcomp1}
Q(N_a)=\sqrt{C^2_{N_a}}p_ar_{aa},
\end{equation}
\begin{equation}\label{eqcomp2}
Q(N_b)=\sqrt{C^2_{N_b}}p_br_{bb},
\end{equation}
\begin{equation}\label{eqcomp3}
Q(2)=P_0R_0.
\end{equation} \end{subequations}
Equations (\ref{eqvirial}) and (\ref{eqcomp}) form a set of six equations which, combined with (\ref{eqen}), constitute the ET compact equations for a system of $N_a+N_b$ particles, and allow us to compute the approximate eigenvalue $ \tilde{E}_0$. We have verified on several systems that these equations give the same results as those found with the extremization equations in \cite{sema20}. We can note that the three equations (\ref{eqvirial}) can be derived by minimizing (\ref{eqen}) with respect to $r_{aa}$, $r_{bb}$ and $R_0$, and using (\ref{eqcomp}).
Equations (\ref{eqen})-(\ref{eqcomp}) are more complicated than equations (\ref{compacteq}). But, when comparing the two sets, it is possible to find an interpretation for equations (\ref{eqen})-(\ref{eqcomp}). Equation (\ref{eqen}) is obviously the energy computed in terms of the mean momenta and relative distances. Equations (\ref{eqvirial}) are the equations of motion determining these mean quantities, and equations (\ref{eqcomp}) are the semiclassical quantizations of the various orbital and radial motions. These equations make clear which quantities are relevant in a quantum system containing two different sets of identical particles. It is worth recalling that solutions obtained by the ET are full quantum ones with associated eigenfunctions \cite{silv10,sema20} and that observables can be computed \cite{sema15a}.
As a first check for these equations, we need to recover the three equations (\ref{compacteq}) when considering all particles identical. In this case $T_a = T_b$ and $V_{aa}=V_{bb}=V_{ab}$, and we must impose the following symmetries \begin{equation}\label{sym}
\begin{array}{cc}
& \bk{\bm{p}_i^2} = \bk{\bm{p}_j^2}, \hspace{5 mm} \forall\ i,j, \\[0.5cm]
& \bk{\bm{r}_{ii'}^2} = \bk{\bm{r}_{jj'}^2} = \bk{\bm{r} _{ij}^2}, \hspace{5mm} \forall\ i,i',j,j'.
\end{array} \end{equation} From the definitions of our 6 parameters, we conclude \begin{equation}\label{sym2}
\begin{array}{cc}
& p'_a = p'_b = p_0, \\[0.5cm]
& r_{aa} = r_{bb} = r'_0=\rho_0,
\end{array} \end{equation} where $p_0$ and $\rho_0$ are defined as before in (\ref{compacteq}). Then, we easily see that equation (\ref{eqen}) reduces to (\ref{compacteq1}) with $N = N_a + N_b$. It is a matter of algebra to show that the sum of the three equations (\ref{eqvirial}) \begin{equation}
N_aT'_a(p'_a)p'_a + N_bT'_b(p'_b)p'_b = C^2_{N_a}V'_{aa}(r_{aa})r_{aa} + C^2_{N_b}V'_{bb}(r_{bb})r_{bb} + N_aN_bV'_{ab}(r'_0)r'_0, \end{equation} reduces to (\ref{compacteq3}). When all the particles are identical, it is not relevant to separate the energy into contributions from several subsets. We notice that $Q(N_a)+Q(N_b)+Q(2) = Q(N_a+N_b)$. This is a hint that the sum of the three equations (\ref{eqcomp}) must reduce to (\ref{compacteq2}), but the proof is more subtle. Thanks to the symmetries (\ref{sym}) and (\ref{sym2}), one can express $R_0$ in terms of $\rho_0$, and $p_a$, $p_b$ and $P_0$ in terms of $p_0$. Then, simple calculations show that (\ref{eqcomp}) reduces to (\ref{compacteq2}). Finally, all equations (\ref{compacteq}) are recovered. Note that (\ref{sym2}) also implies symmetries on the auxiliary parameters, $\mu_a = \mu_b$ and $\rho_{aa} = \rho_{bb} = \rho_{ab}$, which is also expected as explained in \cite{sema20}.
As a second test, we have substituted the harmonic oscillator Hamiltonian (\ref{ho}) into our 7 equations. Then, it is a matter of algebra to find the exact solution (\ref{enho}). A third check is given in the following section.
\section{$\bm{N_a=1}$ or/and $\bm{N_b=1}$} \label{sec:na1}
The 7 equations (\ref{eqen}), (\ref{eqvirial}) and (\ref{eqcomp}) were computed for a system with $N_a+N_b$ particles. It is interesting to look at what happens when only one particle is present in a set. For example, let's look at the case $N_b=1$. Then, all the terms in $C^2_{N_b}$ and $Q(N_b)$ vanish. Equation (\ref{eqcomp2}) becomes trivial and (\ref{eqvirial2}) leads to $p_b = 0$. As $p_b=0$, we also have $p'_b = P_0$. At the end, we are left with a system of 5 equations \begin{subequations}\label{eqn+1}
\begin{equation}
\tilde{E}_0=N_aT_a\left(p'_a\right)+T_b\left(P_0\right)+C^2_{N_a}V_{aa}\left(r_{aa}\right)+N_aV_{ab}\left(r_0'\right),
\end{equation}
\begin{equation}\label{eqn+12}
N_aT'_a(p'_a)\frac{p_a^2}{p'_a}=C^2_{N_a}V'_{aa}(r_{aa})r_{aa}+\frac{N_a-1}{2}V'_{ab}(r'_0)\frac{r_{aa}^2}{r'_0},
\end{equation}
\begin{equation}
\frac{1}{N_a} T'_a(p'_a)\frac{P_0^2}{p'_a}+T'_b(P_0)P_0=N_aV'_{ab}(r'_0)\frac{R_0^2}{r'_0},
\end{equation}
\begin{equation}\label{eqn+14}
Q(N_a)=\sqrt{C^2_{N_a}}p_ar_{aa},
\end{equation}
\begin{equation}
Q(2)=P_0R_0,
\end{equation} \end{subequations} where our four parameters are now defined as $p_a^2=\bk{\bm{p}_i^2-\frac{\bm{P}_a^2}{N_a^2}}$, $P_0^2=\bk{\left(\frac{\mu_b\bm{P}_a-M_a\bm{p}_b}{M_a + \mu_b}\right)^2}$, $r_{aa}^2= \bk{\bm{r}_{ii'}^2}$ and $R_0^2=\bk{\left(\bm{R}_a-\bm{r}_b\right)^2}$. We also have ${p'_a}^2=p_a^2+\frac{P_0^2}{N_a^2}$ and ${r'_0}^2=\frac{N_a-1}{2N_a}r_{aa}^2+R_0^2$. The five equations (\ref{eqn+1}) can also be found from scratch with the procedure explained above.
Another special case is when $N_a=N_b=1$, that is we have a two-body system. We then have similar simplifications as in the previous case and we obtain the equations of the envelope theory at $N=2$, which are a generalization of the results obtained in \cite{sema13,sema12} \begin{subequations}
\begin{equation}
\tilde{E}_0=T_a(P_0)+T_b(P_0) + V_{ab}(R_0),
\end{equation}
\begin{equation}
T'_a(P_0)P_0 + T'_b(P_0)P_0 = V'_{ab}(R_0)R_0,
\end{equation}
\begin{equation}
Q(2)=P_0R_0,
\end{equation} \end{subequations} where $P_0^2=\bk{\left(\frac{\mu_b\bm{p}_a-\mu_a\bm{p}_b}{\mu_a + \mu_b}\right)^2}$ and $R_0^2=\bk{\left(\bm{r}_a-\bm{r}_b\right)^2}$. The fact that the correct limits are obtained for $N_a=1$ or $N_b=1$ is also a test of coherence for the set~(\ref{eqen})-(\ref{eqvirial})-(\ref{eqcomp}).
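As an elementary illustration of this two-body set (a sketch, with nonrelativistic kinematics and an attractive Coulomb interaction chosen here only as an example), take $T_a(p)=p^2/(2m_a)$, $T_b(p)=p^2/(2m_b)$ and $V_{ab}(r)=-e^2/r$. The second equation gives $P_0^2/m_{ab}=e^2/R_0$, with $m_{ab}=m_am_b/(m_a+m_b)$ the reduced mass, and eliminating $R_0=Q(2)/P_0$ yields $P_0=m_{ab}\, e^2/Q(2)$, so that
\begin{equation*}
\tilde{E}_0=\frac{P_0^2}{2 m_{ab}}-\frac{e^2}{R_0}=-\frac{m_{ab}\, e^4}{2\, Q(2)^2}.
\end{equation*}
In $D=3$, $Q(2)=2n+l+3/2$, so this approximation simply replaces the principal quantum number $n+l+1$ of the exact Coulomb spectrum by $2n+l+3/2$.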
\section{concluding remarks} \label{sec:conclu}
We were able to build the 7 compact equations of the envelope theory for a system of $N_a+N_b$ particles. These equations reduce to the 3 usual ones when considering identical particles. We also presented the special cases when $N_b =1$ or/and $N_a=1$. Starting from these equations, it is possible to improve the envelope theory using a procedure similar to the one used in \cite{sema15b}. This is performed for a system of $N_a+1$ particles in \cite{chev21}. With these 7 equations, it is possible to open new domains of applicability of the envelope theory, especially in hadronic physics where the method has proven to be useful, as mentioned in the introduction. But the method can also be used to estimate the binding energies of other systems such as nuclei or clusters of cold atoms for which ab-initio calculations are already available, as for instance in \cite{gatt13}. In particular, accurate calculations have been performed for large helium clusters \cite{gatt11,kiev20}. For such systems, it is necessary to take into account three-body forces, which can be handled by the envelope theory \cite{sema18b}.
\begin{acknowledgments} L.C. would like to thank the Fonds de la Recherche Scientifique - FNRS for the financial support. This work was also supported under Grant Number 4.45.10.08. \end{acknowledgments}
\end{document} |
\begin{document}
\noindent
\title[Finiteness of meromorphic mappings from K\"{a}hler manifold]{Finiteness of meromorphic mappings from \\K\"{a}hler manifold into projective space} \author{Pham Duc Thoan} \address[Pham Duc Thoan]{Department of Mathematics, National University of Civil Engineering\\
55 Giai Phong street, Hai Ba Trung, Hanoi, Vietnam} \email{[email protected]}
\author{Nguyen Dang Tuyen} \address[Nguyen Dang Tuyen]{Department of Mathematics, National University of Civil Engineering\\
55 Giai Phong street, Hai Ba Trung, Hanoi, Vietnam} \email{[email protected]}
\author{Noulorvang Vangty} \address[Noulorvang Vangty]{Department of Mathematics, National University of Education\\ 136-Xuan Thuy str., Hanoi, Vietnam} \email{[email protected]}
\maketitle
\begin{abstract}
The purpose of this paper is to prove finiteness theorems for meromorphic mappings of a complete connected K\"{a}hler manifold into projective space sharing few hyperplanes in subgeneral position without counting multiplicity, where all zeros with multiplicities greater than a certain number are omitted. Our results extend and generalize some recent ones. \end{abstract}
\def\empty{\empty} \footnotetext{\textit{2010 Mathematics Subject Classification}: Primary 32H30, 32A22; Secondary 30D35.\\ \hskip8pt Key words and phrases: finiteness theorems, meromorphic mapping, complete K\"{a}hler manifold.}
\section{Introduction}
Let $f$ be a non-constant meromorphic mapping of $\mathbb C^m$ into $\mathbb P^n(\mathbb C)$ and let $H$ be a hyperplane in $\mathbb P^n(\mathbb C)$. Denote by $\nu_{(f, H)}(z)$ the intersection multiplicity of the mapping $f$ with the hyperplane $H$ at the point $f(z)$.
For a divisor $\nu$ on $\mathbb C^m$ and for a positive integer $k$ or $k=+\infty$, we set $$ \nu_{\leqslant k}(z)= \begin{cases} 0& {\text{ if }} \nu(z)>k,\\ \nu(z)&{\text{ if }} \nu(z)\leqslant k. \end{cases} $$ Similarly, we define $\nu_{>k}(z).$ If $\varphi$ is a meromorphic function, the zero divisor of $\varphi$ is denoted by $\nu_{\varphi}.$
Let $H_1,H_2,\ldots,H_{q}$ be hyperplanes of $\mathbb P^n(\mathbb C)$ (in subgeneral position or in general position) and let $k_1,\ldots,k_q$ be positive integers or $+\infty$. Assume that $f$ is a meromorphic mapping satisfying $$ \dim \{z:\nu_{(f,H_i),\leqslant k_i}(z)\cdot\nu_{(f,H_j),\leqslant k_j}(z)>0\}\leqslant m-2\ \ (1\leqslant i<j\leqslant q).$$
Let $d$ be a positive integer. We denote by $\mathcal {F}(f,\{H_j,k_j\}_{j=1}^q,d)$ the set of all meromorphic mappings $g: \mathbb C^m \to \mathbb P^n(\mathbb C)$ satisfying the following two conditions:
\begin{itemize} \item[(a)] $\min(\nu_{(f, H_j),\leqslant k_j},d)=\min(\nu_{(g, H_j),\leqslant k_j},d)$ \ \ ($1\leqslant j \leqslant q$). \item[(b)] $f(z)=g(z)$ on $\bigcup_{j=1}^q \{z:\nu_{(f,H_j),\leqslant k_j}(z)>0\}$.
\end{itemize}
If $k_1=\cdots=k_q=+\infty$, we will simply use notation $\mathcal {F}(f,\{H_j\}_{j=1}^q,d)$ instead of $\mathcal {F}(f,\{H_j,\infty\}_{j=1}^q,d).$
In 1926, Nevanlinna \cite{Ne} showed that two distinct nonconstant meromorphic functions $f$ and $g$ on the complex plane cannot have the same inverse images for five distinct values, and that $g$ is a linear fractional transformation of $f$ if they have the same inverse images counted with multiplicities for four distinct values. After that, many authors have extended and improved Nevanlinna's results to the case of meromorphic mappings into complex projective spaces, such as Fujimoto \cite{Fu0, Fu2, F98}, Smiley \cite{LS}, Ru-Sogome \cite{R-S2}, Chen-Yan \cite{CY}, Dethloff-Tan \cite{DT}, Quang \cite{Q, Q1, Q2, Q3}, Nhung-Quynh \cite{NQ} and others. These theorems are called uniqueness theorems or finiteness theorems. The first finiteness theorem for meromorphic mappings from $\mathbb C^m$ into complex projective space $\mathbb P^n(\mathbb C)$ sharing $2n+2$ hyperplanes was given by Quang \cite{Q1} in 2012, with a correction \cite{QQ} in 2015. Recently, he \cite{Q2} extended his results and obtained the following finiteness theorem, in which not all zeros with multiplicities greater than certain values need to be counted. \vskip0.2cm \noindent
\textbf{Theorem A} (see \cite[Theorem 1.1]{Q2})\ {\it Let $f$ be a linearly nondegenerate meromorphic mapping of $\mathbb C^m$ into $\mathbb P^n(\mathbb C)$. Let $H_1,\ldots, H_{2n+2}$ be $2n+2$ hyperplanes of $\mathbb P^n(\mathbb C)$ in general position and let $k_1,\ldots,k_{2n+2}$ be positive integers or $+\infty$. Assume that $$ \sum_{i=1}^{2n+2}\frac1{k_i+1}<\min\left\{\frac{n+1}{3n^2+n}, \frac{5n-9}{24n+12},\frac{n^2-1}{10n^2+8n}\right\}.$$ Then $\sharp\mathcal F(f,\{H_i,k_i\}_{i=1}^{2n+2},1)\leq2.$}
Note that the condition $\displaystyle\sum_{i=1}^{2n+2}\frac1{k_i+1}<\min\left\{\frac{n+1}{3n^2+n}, \frac{5n-9}{24n+12},\frac{n^2-1}{10n^2+8n}\right\}$ in Theorem A becomes $\displaystyle\sum_{i=1}^{2n+2}\frac1{k_i+1}<\frac{n+1}{3n^2+n}$ when $n\geq5.$
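This can be checked by a direct comparison of the three quantities. For instance, for the first and the third one,
$$\frac{n+1}{3n^2+n}\leqslant\frac{n^2-1}{10n^2+8n}\Longleftrightarrow 2(5n+4)\leqslant(n-1)(3n+1)\Longleftrightarrow n^2-4n-3\geqslant0,$$
which holds exactly for the integers $n\geq5$; the comparison with the second quantity is similar.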
We now consider the general case, where $f : M \to \mathbb{P}^n(\mathbb{C})$ is a meromorphic mapping of an $m$-dimensional complete connected K\"{a}hler manifold $M$, whose universal covering is biholomorphic to a ball $B(R_0)=\{z\in\mathbb{C}^m\ :\ ||z||<R_0\}$ $(0<R_0\leqslant \infty)$, into $\mathbb{P}^n(\mathbb{C})$.
Let $H_1,\ldots,H_q$ be hyperplanes of $\mathbb P^n(\mathbb C)$ and let $k_1,\ldots,k_q$ be integers or $+\infty$. Then the family $\mathcal F(f,\{H_i,k_i\}_{i=1}^{q},d)$ is defined similarly to the above, where $d$ is a positive integer.
For $\rho \geqslant 0,$ we say that $f$ satisfies the condition $(C_\rho)$ if there exists a nonzero bounded continuous real-valued function $h$ on $M$ such that $$\rho \Omega_f + dd^c\log h^2\ge \text{Ric}\omega,$$ where $\Omega_f$ is the pull-back of the Fubini-Study form $\Omega$ on $\mathbb{P}^n(\mathbb{C})$, $\omega = \dfrac{\sqrt{-1}}{2}\sum_{i,j}h_{i\bar{j}}dz_i\wedge d\overline{z}_j$ is the K\"{a}hler form on $M$, $\text{Ric}\omega=dd^c\log(\det(h_{i\overline{j}}))$, $d = \partial + \overline{\partial}$ and $d^c = \dfrac{\sqrt{-1}}{4\pi}(\overline{\partial} - \partial)$.
Very recently, Quang \cite{Q3} obtained a finiteness theorem for meromorphic mappings from such a K\"{a}hler manifold $M$ into $\mathbb P^n(\mathbb C)$ sharing hyperplanes regardless of multiplicities, by giving new definitions of ``functions of small integration'' and ``functions of bounded integration'' as well as proposing a new method to deal with the difficulties he met on the K\"{a}hler manifold. We would like to emphasize that Quang's result is also the first finiteness theorem for meromorphic mappings on a K\"{a}hler manifold, although uniqueness theorems were discovered earlier by Fujimoto \cite{Fu2} and later by many authors such as Ru-Sogome \cite{R-S2} or Nhung-Quynh \cite{NQ} and others. Here is his result. \vskip0.2cm \noindent \textbf{Theorem B}\ (see \cite[Theorem 1.1]{Q3}).
{\it Let $M$ be an $m$-dimensional connected K\"{a}hler manifold whose universal covering is biholomorphic to $\mathbb C^m$ or the unit ball $B(1)$ of $\mathbb C^m$, and let $f$ be a linearly nondegenerate meromorphic mapping of $M$ into $\mathbb P^n(\mathbb C)\ (n\geqslant2)$. Let $H_1,\ldots,H_q$ be $q$ hyperplanes of $\mathbb P^n(\mathbb C)$ in general position. Assume that $f$ satisfies the condition $(C_{\rho})$. If $$\displaystyle q>n+1+\frac{3nq}{6n+1}+\rho\frac{(n^2+4q-3n)(6n+1)}{6n^2+2}$$ then $\sharp\mathcal F(f,\{H_i\}_{i=1}^{q},1)\leq2.$ }
Unfortunately, in this result all zeros must be counted, whatever their multiplicities, and hence Theorem B cannot be an extension or a generalization of Theorem A.
Our purpose in this article is to prove a result similar to Theorems A and B for the case of a meromorphic mapping from a complete connected K\"{a}hler manifold into projective space, in which all zeros with multiplicities greater than a certain number are omitted. However, the key technique used in the proof of Theorem A is ``rearranging counting functions'' in order to compare counting functions with characteristic functions, which is not valid on the K\"{a}hler manifold. In addition, the proof of Theorem B does not work in the case $k_i<\infty$. To overcome these difficulties, we use the technique in \cite{TN} and the methods in \cite{Q3}, as well as considering new auxiliary functions, to obtain a new finiteness theorem which generalizes and extends the theorems cited above. Namely, we will prove the following theorem.
\begin{Theorem}\label{theo1}
Let $M$ be an $m$-dimensional connected K\"{a}hler manifold whose universal covering is biholomorphic to $\mathbb C^m$ or the unit ball $B(1)$ of $\mathbb C^m$, and let $f$ be a linearly nondegenerate meromorphic mapping of $M$ into $\mathbb P^n(\mathbb C)\ (n\geqslant2)$. Let $H_1,\ldots,H_q$ be $q$ hyperplanes of $\mathbb P^n(\mathbb C)$ in $N$-subgeneral position and let $k_1,\ldots,k_q$ be integers or $+\infty$. Assume that $f$ satisfies the condition $(C_{\rho})$. Let $k$ be the largest integer number not exceeding $\dfrac{q-2N-2}{2}$ and let $l$ be the smallest integer number not less than $\dfrac{2N-2}{k+2}+2$ if $k>0$ or let $l=2N+1$ if $k=0.$ Then $\sharp\mathcal F(f,\{H_i,k_i\}_{i=1}^q,1)\leqslant2$ if \begin{align*} q&>2N-n+1+\sum_{i=1}^q\frac{n}{k_i+1}+\rho\big( n(2N-n+1)+\frac{4(q-n)n}{n-1}\big)\\ &+\max\left\{\frac{3nq}{2\big(3n+1+\frac{n-1}l\big)}, \frac{4q+3nq-14}{4q+3n-14},\frac{3nq^2}{6nq+(n-2)(q-2)+4q-6n-8}\right\}. \end{align*} \end{Theorem}
\noindent \noindent {\bf Remark 1.} It is easy to see that $$\dfrac{3nq}{2\big(3n+1+\frac{n-1}l\big)}<\dfrac{3nq}{6n+2}<\dfrac{3nq}{6n+1},$$ and $$\dfrac{3nq^2}{6nq+(n-2)(q-2)+4q-6n-8}<\dfrac{3nq^2}{6nq+q}=\dfrac{3nq}{6n+1}, \forall n\geq2.$$ We now show that $$ \frac{4q+3nq-14}{4q+3n-14}<\dfrac{3nq}{6n+1}, \forall n\geq 3.$$ Indeed, it suffices to prove that $12nq^2-9n^2q-69nq-4q+84n+14>0$ for all $n\geq3.$ Since $q\geq2n+2$, we have $12nq^2-9n^2q-69nq-4q\geq q(15n^2-45n-4)> 0$ for all $n\geq4.$ For $n=3,$ we have $12nq^2-9n^2q-69nq-4q+84n+14=36q^2-292q+266>0$ since $q\geq8.$
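For clarity, the reduction of the inequality $\frac{4q+3nq-14}{4q+3n-14}<\frac{3nq}{6n+1}$ to the polynomial condition used above is simply cross-multiplication; both denominators are positive since $q\geq2n+2$ and $n\geq3$, and
\begin{align*}
\frac{4q+3nq-14}{4q+3n-14}<\frac{3nq}{6n+1}
&\Longleftrightarrow (4q+3nq-14)(6n+1)<3nq(4q+3n-14)\\
&\Longleftrightarrow 12nq^2-9n^2q-69nq-4q+84n+14>0.
\end{align*}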
Hence, when $k_1=\cdots=k_q=+\infty$ and $N=n$, Theorem \ref{theo1} is an extension of Theorem B.
When $q=2n+2$, $M=\mathbb C^m$ and $H_1,\ldots, H_q$ are in general position, taking $\rho=0$, $N=n$, $k=0$ and $l=2n+1,$ we obtain the following corollary from Theorem \ref{theo1}.
\begin{corollary} \label{theo2} Let $f$ be a linearly nondegenerate meromorphic mapping of $\mathbb C^m$ into $\mathbb P^n(\mathbb C)$. Let $H_1,\ldots, H_{2n+2}$ be $2n+2$ hyperplanes of $\mathbb P^n(\mathbb C)$ in general position and let $k_1,\ldots,k_{2n+2}$ be positive integers or $+\infty$. Then $\sharp\mathcal F(f,\{H_i,k_i\}_{i=1}^{2n+2},1)\leq2$ provided $$ \sum_{i=1}^{2n+2}\frac1{k_i+1}<\min\left\{\frac{1}{2n},\frac{n^3+2n+3}{n(7n^2+5n+3)}\right\}.$$ In particular, if $n\geq 4$ then $\sharp\mathcal F(f,\{H_i,k_i\}_{i=1}^{2n+2},1)\leq2$ provided $$ \sum_{i=1}^{2n+2}\frac1{k_i+1}<\frac{1}{2n}.$$
\end{corollary}
\noindent {\bf Remark 2.} Consider the quantities $A=\min\left\{\frac{n+1}{3n^2+n}, \frac{5n-9}{24n+12},\frac{n^2-1}{10n^2+8n}\right\}$ in Theorem A and $B=\min\left\{\frac{1}{2n},\frac{n^3+2n+3}{n(7n^2+5n+3)}\right\}$ in Corollary \ref{theo2}. We have the following estimates.
$\bullet$ For $n\geq 5$, $A=\frac{n+1}{3n^2+n}<\frac1{2n}=B.$
$\bullet$ For $n=4$, $A=\frac{n^2-1}{10n^2+8n}<\frac1{2n}=B.$
$\bullet$ For $n=3$, $A=\frac{n^2-1}{10n^2+8n}<\frac{n^3+2n+3}{n(7n^2+5n+3)}=B.$
$\bullet$ For $n=2$, $A=\frac{5n-9}{24n+12}<\frac{n^3+2n+3}{n(7n^2+5n+3)}=B.$
In all cases $A<B$ (a sample verification is given below). Therefore, Corollary \ref{theo2} is an improvement of Theorem A.
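For instance, the first estimate follows from
$$\frac{n+1}{3n^2+n}<\frac{1}{2n}\Longleftrightarrow 2n(n+1)<3n^2+n\Longleftrightarrow n^2-n>0,$$
which holds for every $n\geqslant2$; the remaining cases are checked in the same elementary way.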
In order to prove our results, we first give a new estimate for the counting function of the Cartan auxiliary function (see Lemma 2.8). Secondly, we improve the algebraic dependence theorem for three meromorphic mappings (see Lemma 3.3). After that, we use arguments similar to those used by Quang \cite{Q3} to finish the proofs.
\section{Basic notions and auxiliary results from Nevanlinna theory}
We will recall some basic notions in Nevanlinna theory from \cite{R-S1,T-Q}.
\noindent
{\bf 2.1. Counting function.}\ We set $||z|| = \big(|z_1|^2 + \dots + |z_m|^2\big)^{1/2}$ for $z = (z_1,\dots,z_m) \in \mathbb{C}^m$ and define \begin{align*}
B(r) := \{ z \in \mathbb{C}^m : ||z|| < r\},\quad S(r) := \{ z \in \mathbb{C}^m : ||z|| = r\}\ (0 < r \leqslant \infty), \end{align*} where $B(\infty) = \mathbb{C}^m$ and $S(\infty) = \emptyset$.
Define
$$v_{m-1}(z) := \big(dd^c ||z||^2\big)^{m-1}\quad \quad \text{and}$$
$$\sigma_m(z):= d^c \log||z||^2 \wedge \big(dd^c \log||z||^2\big)^{m-1} \quad \text{on } \mathbb{C}^m \setminus \{0\}.$$
A divisor $E$ on a ball $B(R_0)$ is given by a formal sum $E=\sum\mu_{\nu}X_{\nu}$, where $\{X_\nu\}$ is a locally finite family of distinct irreducible analytic hypersurfaces in $B(R_0)$ and $\mu_{\nu}\in \mathbb{Z}$. We define the support of the divisor $E$ by setting $\mathrm{Supp}\, (E)=\cup_{\mu_{\nu}\ne 0} X_\nu$. Sometimes, we identify the divisor $E$ with a function $E(z)$ from $B(R_0)$ into $\mathbb{Z}$ defined by $E(z):=\sum_{X_{\nu}\ni z}\mu_\nu$.
Let $M,k$ be positive integers or $+\infty$. We define the truncated divisor $E^{[M]}$ by \begin{align*} E^{[M]}:= \sum_{\nu}\min\{\mu_\nu, M \}X_\nu , \end{align*} and the truncated counting function to level $M$ of $E$ by \begin{align*} N^{[M]}(r,r_0;E) := \int\limits_{r_0}^r \frac{n^{[M]}(t,E)}{t^{2m-1}}dt\quad (r_0 < r < R_0), \end{align*} where \begin{align*} n^{[M]}(t,E): = \begin{cases} \int\limits_{\mathrm{Supp}\, (E) \cap B(t)} E^{[M]}v_{m-1} &\text{ if } m \geqslant 2,\\
\sum_{|z| \leqslant t} E^{[M]}(z)&\text{ if } m = 1. \end{cases} \end{align*} We omit the character $^{[M]}$ if $M=+\infty$.
Let $\varphi$ be a non-zero meromorphic function on $B(R_0)$. We denote by $\nu^0_\varphi$ (resp. $\nu^{\infty}_\varphi$) the divisor of zeros (resp. divisor of poles ) of $\varphi$. The divisor of $\varphi$ is defined by $$\nu_\varphi=\nu^0_\varphi-\nu^\infty_\varphi.$$
For a positive integer $M$ or $M= \infty$, we define the truncated divisors of $\nu_\varphi$ by $$\nu^{[M]}_\varphi(z)=\min\ \{M,\nu_\varphi(z)\}, \quad \nu^{[M]}_{\varphi, \leqslant k}(z):=\begin{cases} \nu^{[M]}_\varphi(z)&\text{ if }\nu_\varphi(z)\leqslant k,\\ 0&\text{ if }\nu_\varphi(z)> k. \end{cases} $$
For convenience, we will write $N_\varphi(r,r_0)$ and $N^{[M]}_{\varphi,\leqslant k}(r,r_0)$ for $N(r,r_0;\nu^0_\varphi)$ and $N^{[M]}(r,r_0;\nu^0_{\varphi,\leqslant k})$ respectively.
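For illustration, take $m=1$, $\varphi(z)=z^3$ and $M=2$. Then $\nu_\varphi(0)=3$ and $\nu^{[2]}_\varphi(0)=2$, so that $\nu^{[2]}_{\varphi,\leqslant 2}(0)=0$ (the zero is discarded because its full multiplicity exceeds $2$), while $\nu^{[2]}_{\varphi,\leqslant k}(0)=2$ for every $k\geqslant3$.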
\vskip0.2cm \noindent {\bf 2.2. Characteristic function.}\ Let $f : B(R_0)\longrightarrow \mathbb{P}^n(\mathbb{C})$ be a meromorphic mapping. Fix a homogeneous coordinates system $(w_0 : \cdots : w_n)$ on $\mathbb{P}^n(\mathbb{C})$. We take a reduced representation $f = (f_0 : \cdots : f_n)$, which means $f_i\ (0\leqslant i\leqslant n)$ are holomorphic functions and $f(z) = \big(f_0(z) : \dots : f_n(z)\big)$ outside the analytic subset
$\{ f_0 = \dots = f_n= 0\}$ of codimension at least two. Set $\Vert f \Vert = \big(|f_0|^2 + \dots + |f_n|^2\big)^{1/2}$. Let $H$ be a hyperplane in $\mathbb{P}^n(\mathbb{C})$ defined by $H = \{(\omega_0,\ldots,\omega_n): a_0\omega_0 + \cdots + a_n\omega_n = 0 \}$. We set $H(f) = a_0f_0 + \cdots + a_nf_n$ and $\Vert H \Vert = \big(|a_0|^2 + \dots + |a_n|^2\big)^{1/2}.$
The characteristic function of $f$ (with respect to Fubini Study form $\Omega$) is defined by \begin{align*} T_f(r,r_0) := \int_{t=r_0}^r\dfrac{dt}{t^{2m-1}}\int_{B(t)}f^*\Omega\wedge v_{m-1} ,\quad\quad 0 < r_0 < r < R_0. \end{align*}
By Jensen's formula we have \begin{align*}
T_f(r,r_0) = \int_{S(r)}\log ||f||\sigma_m - \int_{S(r_0)}\log ||f||\sigma_m,\quad\quad 0 < r_0 < r < R_0. \end{align*}
Throughout this paper, we assume that the numbers $r_0$ and $R_0$ are fixed with $0<r_0<R_0$. By the notation ``$||\ P$'', we mean that the assertion $P$ holds for all $r\in [r_0, R_0)$ outside a set $E$ such that $\int_E dr < \infty$ in case $R_0 = \infty$ and $\int_E \dfrac{1}{R_0-r}dr < \infty$ in case $R_0 < \infty$.
\vskip0.2cm \noindent {\bf 2.3. Functions of small integration.} We recall some definitions due to Quang \cite{Q3}.
Let $f^1,\ldots,f^k$ be $k$ meromorphic mappings from the complete K\"{a}hler manifold $B(1)$ into $\mathbb P^n(\mathbb C)$ which satisfy the condition $(C_{\rho})$ for a non-negative number $\rho$. For each $1\leqslant u\leqslant k$, we fix a reduced representation $f^u=(f_0^u:\cdots:f_n^u)$ of $f^u$.
A non-negative plurisubharmonic function $g$ on $B(1)$ is said to be of small integration with respect to $f^1,\ldots,f^k$ at level $l_0$ if there exist an element $\alpha=(\alpha_1,\ldots,\alpha_m)\in\mathbb N^m$ with $|\alpha|\leqslant l_0$ and a positive number $K$ such that for every $t,p$ with $0\leqslant tl_0<p<1$,
$$ \int_{S(r)}|z^{\alpha}g|^t\sigma_m\leqslant K\left(\frac{R^{2m-1}}{R-r}\sum_{u=1}^kT_{f^u}(R,r_0)\right)^p $$ for all $r$ with $0<r_0<r<R<1,$ where $z^{\alpha}=z_1^{\alpha_1}\cdots z_m^{\alpha_m}.$
We denote by $S(l_0;f^1,\ldots,f^k)$ the set of all non-negative plurisubharmonic functions on $B(1)$ which are of small integration with respect to $f^1,\ldots,f^k$ at level $l_0.$ We see that if $g\in S(l_0;f^1,\ldots,f^k)$ then $g\in S(l;f^1,\ldots,f^k)$ for all $l>l_0.$ Moreover, if $g$ is a constant function then $g\in S(0;f^1,\ldots,f^k)$.
By \cite[Proposition 3.2]{Q3}, if $g_i\in S(l_i;f^1,\ldots,f^k)$, then $g_1\cdots g_s\in S(\sum_{i=1}^sl_i;f^1,\ldots,f^k)$.
A meromorphic function $h$ on $B(1)$ is said to be of bounded integration with bi-degree $(p,l_0)$ for the family $\{f^1,\ldots,f^k\}$ if there exists $g\in S(l_0;f^1,\ldots,f^k)$ satisfying $$ |h|\leqslant||f^1||^p\cdots||f^k||^p\cdot g,$$ outside a proper analytic subset of $B(1).$
We denote by $B(p,l_0;f^1,\ldots,f^k)$ the set of all meromorphic functions on $B(1)$ which are of bounded integration of bi-degree $(p,l_0)$ for $\{f^1,\ldots,f^k\}$. We have the following assertions:
$\bullet$ For a meromorphic function $h$, $|h|\in S(l_0;f^1,\ldots,f^k)$ if and only if $h\in B(0,l_0;f^1,\ldots,f^k)$.
$\bullet$ $B(p,l_0;f^1,\ldots,f^k)\subset B(p,l;f^1,\ldots,f^k)$ for all $0\leqslant l_0<l.$
$\bullet$ If $h_i\in B(p_i,l_i;f^1,\ldots,f^k)$ then $h_1\cdots h_s\in B(\sum_{i=1}^sp_i,\sum_{i=1}^sl_i;f^1,\ldots,f^k)$.
\vskip0.2cm \noindent {\bf 2.4. Some Lemmas and Propositions.}
\begin{lemma}\label{lem2.1}\cite[Lemma 3.4]{F98}
If $\Phi^{\alpha}(F,G,H)=0$ and $\Phi^{\alpha}\left(\frac1F,\frac1G,\frac1H\right)=0$ for all $\alpha$ with $|\alpha|\leq1$, then one of the following assertions holds:
(i) $F=G, G=H$ or $H=F$.
(ii) $\frac FG, \frac{G}H$ and $\frac HF$ are all constants. \end{lemma}
\begin{proposition}[see \cite{NK, NGC}]\label{B0011} \emph {\it Let $H_1,\ldots,H_q $\ $( q > 2N - n+ 1)$ be hyperplanes in $\mathbb{P}^n(\mathbb{C})$ located in $N$-subgeneral position. Then there exists a function $\omega:\{1,\ldots, q\}\to (0,1]$ called a Nochka weight and a real number $\tilde{\omega}\geqslant1$ called a Nochka constant satisfying the following conditions:\\
\indent (i) If $j\in \{1,\ldots, q\}$, then $0<\omega_j\tilde{\omega}\leqslant1.$\\
\indent (ii) $q-2N+n-1=\tilde{\omega}(\sum^{q}_{j=1}\omega_j-n-1).$\\
\indent (iii) For $R\subset \{1,\ldots, q\}$ with $ |R|=N+1$, then $\sum_{i\in R}\omega_i\leqslant n+1.$\\
\indent (iv) $\frac{N}{n}\leqslant \tilde{\omega} \leqslant \frac{2N-n+1}{n+1}.$\\
\indent (v) Given real numbers $\lambda_1, \ldots,\lambda_q$ with $\lambda_j\geqslant1$ for $1\leqslant j\leqslant q$ and given any $R\subset \{1,\ldots, q\}$ and $|R|= N+1,$ there exists a subset $R^1\subset R$ such that $ |R^1|=\text{rank}\{H_i\}_{i\in R^1}=n+1 $ and $$ \prod_{j\in R}\lambda_j^{\omega_j}\leqslant\prod_{i\in R^1}\lambda_i.$$ } \end{proposition}
\noindent \begin{proposition}[see \cite{T-Q}, Lemma 3.2]\label{prop4}
Let $\{H_i\}_{i=1}^q\ (q\geqslant n+1)$ be a set of hyperplanes of $\mathbb{P}^n(\mathbb{C})$ satisfying $\cap_{i=1}^{q}H_i = \emptyset$ and let $f: B(R_0) \longrightarrow \mathbb{P}^n(\mathbb{C})$ be a meromorphic mapping. Then there exist positive constants $\alpha$ and $\beta$ such that $$\alpha\Vert f\Vert \leqslant \max\limits_{i\in \{1,\ldots,q\}} |H_i(f)|\leqslant \beta\Vert f\Vert.$$ \end{proposition}
\begin{proposition}[see \cite{Fu1}, Proposition 4.5]\label{prop1}
Let $F_1,\ldots,F_{n+1}$ be meromorphic functions on $B(R_0)\subset\mathbb{C}^m$ such that they are linearly independent over $\mathbb{C}$. Then there exists an admissible set $\{\alpha_i=(\alpha_{i1},\ldots,\alpha_{im})\}_{i=1}^{n+1}$ with $\alpha_{ij}\ge 0$ being integers, $|\alpha_i|=\sum_{j=1}^m|\alpha_{ij}|\leqslant i$ for $1\leqslant i\leqslant n+1$ such that the generalized Wronskian $W_{\alpha_1,\ldots,\alpha_{n+1}}(F_1,\ldots,F_{n+1})\not\equiv 0$ where $W_{\alpha_1,\ldots,\alpha_{n+1}}(F_1,\ldots,F_{n+1}) = det \left(\mathcal{D}^{\alpha_i}F_j\right)_{1\leqslant i, j \leqslant n+1}.$ \end{proposition}
Let $L_1,\ldots,L_{n+1}$ be linear forms of $n+1$ variables and assume that they are linearly independent. Let $F=(F_1:\cdots:F_{n+1}): B(R_0)\to\mathbb{P}^n(\mathbb{C})$ be a meromorphic mapping and $(\alpha_1,\ldots,\alpha_{n+1})$ be an admissible set of $F$. Then we have following proposition.
\noindent \begin{proposition} [see \cite{R-S1}, Proposition 3.3]\label{prop3}
In the above situation, set $l_0=|\alpha_1|+\cdots+|\alpha_{n+1}|$ and take $t,p$ with $0<tl_0<p<1.$ Then, for $0<r_0<R_0$ there exists a positive constant $K$ such that for $r_0 < r < R < R_0,$
$$\int\limits_{S(r)}\left |z^{\alpha_1+\cdots+\alpha_{n+1}}\dfrac{W_{\alpha_1,\ldots,\alpha_{n+1}}(F_1,\ldots,F_{n+1})}{L_1(F)\cdots L_{n+1}(F)}\right|^t \sigma_m\leqslant K\left(\dfrac{R^{2m-1}}{R-r}T_F(R,r_0)\right)^{p},$$
where $z^\alpha = z_1^{\alpha_1}\cdots z_m^{\alpha_m}$ for $z = (z_1,\ldots,z_m)$ and $\alpha = (\alpha_1,\ldots,\alpha_m)$. \end{proposition}
For convenience of presentation, for meromorphic mappings $f^u: B(R) \to \mathbb{P}^n(\mathbb{C})$ and hyperplanes $\{H_i\}_{i=1}^q$ of $\mathbb{P}^n(\mathbb{C})$, we denote by $\mathcal{S}$ the closure of $$\bigcup_{1\leqslant u \leqslant 3} I(f^u)\cup \bigcup_{1 \leqslant i<j \leqslant q} \{z: \nu_{(f,H_i),\leqslant k_i}(z) \cdot \nu_{(f,H_j),\leqslant k_j}(z) > 0 \}.$$ We see that $\mathcal{S}$ is an analytic subset of codimension at least two of $B(R)$.
\begin{lemma}\cite[Lemma 2.6]{TN}\label{2.4} Let $f^1, f^2, f^3$ be three mappings in $\mathcal F(f,\{H_i, k_i\}_{i=1}^q,1)$. Suppose that there exist $s,t,l\in\{1,\ldots ,q\}$ such that $$ P:=Det\left (\begin{array}{ccc} (f^1,H_s)&(f^1,H_t)&(f^1,H_l)\\ (f^2,H_s)&(f^2,H_t)&(f^2,H_l)\\ (f^3,H_s)&(f^3,H_t)&(f^3,H_l) \end{array}\right )\not\equiv 0. $$ Then we have \begin{align*} \nu_P(z)\geq \sum_{i=s,t,l}(\min_{1\leqslant u\leqslant 3}\{\nu_{(f^u,H_i),\leqslant k_i}(z)\}-\nu^{[1]}_{(f^1,H_i),\leqslant k_i}(z))+ 2\sum_{i=1}^q\nu^{[1]}_{(f^1,H_i),\leqslant k_i}(z), \forall z \not\in \mathcal{S}. \end{align*} \end{lemma}
\begin{lemma}\label{lem1}\cite[Lemma 2.7]{TN}
Let $f$ be a linearly nondegenerate meromorphic mapping from $B(R_0)$ into $\mathbb{P}^n(\mathbb{C})$ and let $H_1, H_2,\ldots,H_q$ be $q$ hyperplanes of $\mathbb{P}^n(\mathbb{C})$ in $N$-subgeneral position. Set $l_0=|\alpha_0|+\cdots+|\alpha_n|$ and take $t,p$ with $0 < tl_0 < p < 1.$ Let $\omega(j)$ be Nochka weights with respect to $H_j$, $1\leqslant j\leqslant q$ and let $k_j\ (j=1,\ldots, q)$ be positive integers not less than $n$. For each $j$, we put $\hat{\omega}(j):=\omega{(j)}\big(1-\frac{n}{k_j+1}\big).$ Then, for $0 < r_0 < R_0$ there exists a positive constant $K$ such that for $r_0 < r < R < R_0,$ $$
\int\limits_{S(r)}\left|z^{\alpha_0+\cdots+\alpha_n}\frac{W_{\alpha_0\ldots\alpha_n}(f)}{(f,H_1)^{\hat{\omega}(1)}\cdots (f,H_q)^{\hat{\omega}(q)}} \right|^{t}\bigl(\Vert f\Vert ^{\sum_{j=1}^q\hat{\omega}(j)-n-1}\bigr)^{t} \sigma_m \leqslant K\bigl(\frac{R^{2m-1}}{R-r} T_f(R,r_0) \bigr)^p. $$
\end{lemma}
In fact, Lemma \ref{lem1} is another version of Lemma 8 in \cite{NT}, in which $\omega{(j)}$ is replaced by $\hat{\omega}(j)$.
\begin{lemma}\label{lem22} Let $M$, $f$ and $H_1, H_2,\ldots,H_q$ be as in Theorem \ref{theo1}. Let $P$ be a holomorphic function on $M$ and $\beta$ be a positive real number such that $P^{\beta}\in B(\alpha,l_0; f^1, f^2, f^3)$ and \begin{align*} \sum_{u=1}^3\sum_{i=1}^q\nu^{[n]}_{H_i(f^u),\leqslant k_i}\leqslant\beta\nu_{P}, \end{align*} where $f^1, f^2, f^3\in\mathcal F(f,\{H_j,k_j\}_{j=1}^q,1)$. Then $$q\leqslant 2N-n+1+\sum_{i=1}^q\frac{n}{k_i+1}+\rho\big( n(2N-n+1)+\frac23l_0\big)+{\alpha}.$$ \end{lemma}
\begin{proof} Let $F_u=(f^u_0:\cdots:f^u_n)$ be a reduced representation of $f^u\ (1\leqslant u\leqslant 3)$. By routine arguments in the Nevanlinna theory and using Proposition \ref{B0011} (i), we have \begin{equation*} \begin{aligned}
\sum\limits_{i=1}^q\omega_i\nu_{H_i(f^u)}(z)&-\nu_{W_{\alpha_{u,0}\cdots\alpha_{u,n}}(F_u)}(z)\\
&\leqslant \sum\limits_{i=1}^q\omega_i \min\{n,\nu_{H_i(f^u)}(z)\}\\
&= \sum\limits_{i=1}^q\omega_i \min\{n,\nu_{H_i(f^u),\leqslant k_i}(z)\} + \sum\limits_{i=1}^q\omega_i \min\{n,\nu_{H_i(f^u),> k_i}(z)\}\\
&\leqslant \sum\limits_{i=1}^q\frac1{\tilde\omega} \nu^{[n]}_{H_i(f^u),\leqslant k_i}(z) +\sum\limits_{i=1}^q\omega_i \dfrac{n}{k_i+1} \nu_{H_i(f^u)}(z).
\end{aligned} \end{equation*} Hence, it is easy to see from the assumption that \begin{align}\label{th11} \sum_{i=1}^q {\hat{\omega}_{i}}(\nu_{H_i(f^1)}+\nu_{H_i(f^2)}+\nu_{H_i(f^3)}) - (\nu_{W_{\alpha_1}(F_1)} + \nu_{W_{\alpha_2}(F_2)}+ \nu_{W_{\alpha_3}(F_3)}) \leqslant\frac{\beta}{\tilde{\omega}} \nu_P, \end{align} where $\hat{\omega}_{i}:=\omega_i\big(1-\dfrac{n}{k_i+1}\big)$ for all $1\leqslant i\leqslant q$.
Since the universal covering of $M$ is biholomorphic to $B(R_0), 0<R_0\leqslant\infty$, by using the universal covering if necessary, we may assume that $M = B(R_0)\subset {\mathbf{C}}^m$. We consider the following cases.
\noindent{\bf $\bullet$ First case:} $R_0 = \infty$ or $\lim\sup_{r\to R_0}\dfrac{T_{f^1}(r,r_0)+ T_{f^2}(r,r_0) + T_{f^3}(r,r_0)}{\log(1/(R_0-r))}=\infty$.
Integrating both sides of inequality (\ref{th11}), we get
\begin{equation}\label{th12} \begin{aligned} \beta N_{P}(r)&\geqslant {\tilde\omega}\sum_{u=1}^3\Big(\sum_{i=1}^q {\omega_i}N_{H_i(f^u)}(r,r_0)-N_{W_\alpha(F_u)}(r,r_0)\Big)-\sum_{u=1}^3\sum_{i=1}^q\frac{\tilde\omega\omega_in}{k_i+1}T_{f^u}(r,r_0)+O(1). \end{aligned} \end{equation}
Applying Lemma \ref{lem1} to $\omega_{i}\ (1\leqslant i\leqslant q),$ we have
$$
\int\limits_{S(r)}\left|z^{\alpha_0+\cdots+\alpha_{n}}\frac{W_{\alpha_0\ldots\alpha_{n}}(F_u)}{H_1^{{\omega}_1}(f^u)(z)\cdots H_q^{{\omega}_q}(f^u)(z)} \right|^{t_u}\left(\Vert f^u\Vert ^{\sum_{i=1}^q{\omega}_i-n-1}\right)^{t_u} \sigma_m \leqslant K\bigl(\frac{R^{2m-1}}{R-r} T_{f^u}(R,r_0) \bigl)^{p_u}.
$$
By the concavity of the logarithmic function, we obtain
\begin{align*}
\int\limits_{S(r)}\log|z^{\alpha_0+\cdots+\alpha_{n}}|\sigma_m&+(\sum_{i=1}^q{\omega}_i-n-1)\int\limits_{S(r)}\log||f^u||\sigma_m+\int\limits_{S(r)}\log|W_{\alpha_0\ldots\alpha_{n}}(F_u)|\sigma_m\\
&-\sum_{i=1}^q\omega_i\int\limits_{S(r)}\log|H_i(f^u)|\sigma_m\leqslant \frac{p_uK}{t_u}\big(\log^{+}\frac1{R_0-r}+\log^+T_{f^u}(r,r_0)\big).
\end{align*}
By the definition of the characteristic function and the counting function, we get the following estimate
\begin{align*}
||\ (\sum_{i=1}^q{\omega}_i-n-1)T_{f^u}(r,r_0)&\leqslant\sum_{i=1}^q\omega_iN_{H_i(f^u)}(r,r_0)-N_{W_{\alpha_0\ldots\alpha_{n}}(F_u)}(r,r_0)\\
&+K_1\big(\log^{+}\frac1{R_0-r}+\log^+T_{f^u}(r,r_0)\big).
\end{align*}
Using Proposition \ref{B0011} (ii), we get
\begin{equation*} \begin{aligned}
||\ (q-2N+n-1)T_{f^u}(r,r_0)&\leqslant{\tilde\omega}\left(\sum_{i=1}^q{\omega_i}N_{H_i(f^u)}(r,r_0)-N_{W_{\alpha_0\ldots\alpha_{n}}(F_u)}(r,r_0)\right)\\&+{\tilde\omega}{K_1}\big(\log^{+}\frac1{R_0-r}+\log^+T_{f^u}(r,r_0)\big). \end{aligned} \end{equation*} Combining these inequalities with (\ref{th12}) and noticing that $\tilde\omega\omega_i\leq1$, we get \begin{equation}\label{th3} \begin{aligned}
||\ \beta N_{P}(r)&\geqslant (q-2N+n-1)T(r,r_0)-\sum_{i=1}^q\frac{n}{k_i+1}T(r,r_0)+O(1), \end{aligned} \end{equation} where $T(r,r_0):=T_{f^1}(r,r_0)+T_{f^2}(r,r_0)+T_{f^3}(r,r_0).$
Since the assumption $P^{\beta}\in B(\alpha,l_0; f^1, f^2, f^3)$, there exists $g\in S(l_0;f^1,f^2,f^3)$ satisfying $$ |P|^{\beta}\leqslant||f^1||^{\alpha}\cdot||f^2||^{\alpha}\cdot||f^3||^{\alpha}\cdot g,$$ outside a proper analytic subset of $B(1).$ Hence, by Jensen's formula and the definition of the characteristic function, we have the following estimate \begin{equation}\label{th4} \begin{aligned}
||\ \beta N_{P}(r)=&\int_{S(r)}\log |P|^{\beta}\sigma_m + O(1)\\
\leqslant &\int_{S(r)}({\alpha}\sum_{u=1}^3\log ||f^u||+\log g)\sigma_m +O(1)\\ =&{\alpha}T(r,r_0)+o(T(r,r_0)). \end{aligned} \end{equation} Combining (\ref{th3}) with (\ref{th4}), we obtain \begin{align*} (q-2N+n-1)T(r,r_0)-\sum_{i=1}^q\frac{n}{k_i+1}T(r,r_0)\leqslant {\alpha}T(r,r_0)+o(T(r,r_0)) \end{align*} for every $r$ outside a Borel set of finite measure. Letting $r\rightarrow R_0$, we deduce that $$q-2N+n-1-\sum_{i=1}^q\frac{n}{k_i+1}\leqslant{\alpha}\leqslant\rho\big( n(2N-n+1)+\frac23l_0\big)+{\alpha},$$ which is the desired inequality.
\vskip0.2cm \noindent {\bf $\bullet$ Second Case:} $R_0 < \infty$ and $\lim\sup_{r\to R_0}\dfrac{T_{f^1}(r,r_0)+T_{f^2}(r,r_0) + T_{f^3}(r,r_0)}{\log(1/(R_0-r))} < \infty$.\\ It suffices to prove the lemma in the case where $B(R_0) = B(1)$.
Suppose that $$q>2N-n+1+\sum_{i=1}^q\frac{n}{k_i+1}+\rho\big( n(2N-n+1)+\frac23l_0\big)+{\alpha}.$$ Then, we have $$q>2N-n+1+\sum_{i=1}^q{\tilde\omega}{\omega_i}\frac{n}{k_i+1}+\rho \big( n(2N-n+1)+\frac23l_0\big)+\alpha.$$ It follows from Proposition \ref{B0011} ii), iv) that \begin{equation*} \begin{aligned} \sum_{i=1}^q{{\omega_i}}\big(1-\frac{n}{k_i+1}\big)-(n+1)-\dfrac{\alpha}{\tilde{\omega}}&>\rho\big(\frac{n(2N-n+1)}{\tilde\omega}+\frac23\frac{l_0}{\tilde\omega}\big)\\ &\geqslant\rho \big(n(n+1)+\frac23\frac{l_0}{\tilde\omega}\big). \end{aligned} \end{equation*} Put $$t=\dfrac{\frac{2\rho}3}{\displaystyle\sum_{i=1}^q{\hat{\omega}_{i}}-(n+1)-\dfrac{\alpha}{\tilde{\omega}}}.$$ It implies that \begin{equation}\label{th01} \begin{aligned}
\big(\frac{3n(n+1)}2+\frac{l_0}{\tilde\omega}\big)t<1. \end{aligned} \end{equation}
Put $\psi_u=z^{\alpha_{u,0}+\cdots+\alpha_{u,n}}\dfrac{W_{\alpha_{u,0}\cdots\alpha_{u,n}}(F_u)}{H_1^{\hat{\omega}_{1}}(f^u)\cdots H_q^{\hat{\omega}_{q}}(f^u)}\ \ (1\leqslant u\leqslant 3)$. It follows from (\ref{th11}) that $\psi_1^{t}\psi_2^{t}\psi_3^{t} P^{\frac{ t\beta}{\tilde{\omega}}}$ is holomorphic. Hence $a=\log|\psi_1^{t}\psi_2^{t}\psi_3^{t} P^{\frac{t\beta}{\tilde\omega }}|$ is plurisubharmonic on $B(1)$.
We now write the given K\"{a}hler metric form as $${\omega}=\frac{\sqrt{-1}}{2\pi}\sum\limits_{i,j}h_{i\bar{j}}dz_i\wedge d\bar{z}_j.$$ From the assumption that $f^1$, $f^2$ and $f^3$ satisfy condition $(C_\rho)$, there are continuous plurisubharmonic functions $a'_u$ on $B(1)$ such that $$e^{a'_u}\text{det}(h_{i\bar{j}})^{\frac{1}{2}}\leqslant \Vert f^u\Vert ^\rho, u=1,2,3.$$ Put $a_u=\frac23a'_u$,\ $u=1,2,3$ and we get $$e^{a_u}\text{det}(h_{i\bar{j}})^{\frac{1}{3}}\leqslant \Vert f^u\Vert ^{\frac{2\rho}{3}}.$$ Therefore, by the definition of $t$, we get
\begin{align*} e^{a+a_1+a_2+a_3}\text{det}(h_{i\bar{j}})&\leqslant e^{a}\Vert f^1\Vert^{\frac{2\rho}{3}}\Vert f^2\Vert^{\frac{2\rho}{3}} \Vert f^3\Vert^{\frac{2\rho}{3}}\\
&= |\psi_1|^{t}|\psi_2|^{t}|\psi_3|^{t}|P|^{\frac{t\beta}{\tilde\omega}}\Vert f^1\Vert^{\frac{2\rho}{3}}\Vert f^2\Vert^{\frac{2\rho}{3}}\Vert f^3\Vert^{\frac{2\rho}{3}}\\
&\leqslant |\psi_1|^{t}|\psi_2|^{t}|\psi_3|^{t}\big(\Vert f^1\Vert \Vert f^2 \Vert \Vert f^3\Vert\big)^{\frac{t \alpha}{\tilde\omega}}\Vert f^1\Vert^{\frac{2\rho}{3}}\Vert f^2\Vert^{\frac{2\rho}{3}}\Vert f^3\Vert^{\frac{2\rho}{3}}\cdot|g|^{\frac{t}{\tilde\omega}}\\
&= |\psi_1|^{t}|\psi_2|^{t}|\psi_3|^{t}\big(\Vert f^1 \Vert \Vert f^2 \Vert \Vert f^3 \Vert\big)^{t(\frac{\alpha}{\tilde\omega}+\frac{2\rho}{3t})}\cdot|g|^{\frac{t}{\tilde\omega}}\\
&= |\psi_1|^{t}|\psi_2|^{t}|\psi_3|^{t}\big(\Vert f^1 \Vert \Vert f^2 \Vert \Vert f^3 \Vert\big)^{t(\sum_{i=1}^q\hat{\omega}_{i}-n-1)}\cdot|g|^{\frac{t}{\tilde\omega}}. \end{align*} Note that the volume form on $B(1)$ is given by
$$dV:=c_m\text{det}(h_{i\bar{j}})v_m;$$
therefore,
$$\int\limits_{B(1)} e^{a+a_1+a_2+a_3}dV\leqslant C\int\limits_{B(1)}\prod_{u=1}^3\big( |\psi_u|\Vert f^u \Vert^{\sum_{i=1}^q\hat{\omega}_{i}-n-1}\big)^{t}\cdot|g|^{\frac{t}{\tilde\omega}}v_m,$$ with some positive constant $C.$
Setting $x=\dfrac{l_0/\tilde\omega}{3n(n+1)/2+l_0/\tilde\omega},\ y=\dfrac{n(n+1)/2}{3n(n+1)/2+l_0/\tilde\omega}$, then $x+3y=1$. Thus, by the H\"{o}lder inequality and by noticing that
$$v_m=(dd^c\Vert z\Vert^2)^m=2m\Vert z\Vert^{2m-1}\sigma_m\wedge d\Vert z\Vert,$$
we obtain
\begin{align*}
\int\limits_{B(1)} e^{a+a_1+a_2+a_3}dV&\leqslant C\prod_{u=1}^3\left (\int\limits_{B(1)}\big( |\psi_u|\Vert f^u\Vert ^{\sum_{i=1}^q\hat{\omega}_{i}-n-1}\big)^{\frac{t}y} v_m \right)^{y}\left(\int\limits_{B(1)} |z^{\beta }g|^{\frac{t}{x\tilde\omega}}v_m \right)^{x}\\
&\leqslant C\prod_{u=1}^3\bigl(2m\int\limits_0\limits^1 r^{2m-1}\bigl(\int\limits_{S(r)} \big(|\psi_u|\Vert f^u\Vert ^{\sum_{i=1}^q\hat{\omega}_{i}-n-1}\big)^{\frac{t}y} \sigma_m\bigl)dr\bigl)^{y}\\
&\times\bigl(2m\int\limits_0\limits^1 r^{2m-1}\bigl(\int\limits_{S(r)} |z^{\beta}g|^{\frac{t}{x\tilde\omega}}\sigma_m\bigl)dr\bigl)^{x}.
\end{align*} We see from (\ref{th01}) that $\dfrac{l_0t}{\tilde\omega x}=\big(\dfrac{3n(n+1)}2+\dfrac{l_0}{\tilde\omega}\big)t<1$ and
$$\sum\limits_{s=0}^{n}|\alpha_{u,s}|\dfrac{t}y\leqslant\dfrac{n(n+1)}2\dfrac{t}y=\big(\dfrac{3n(n+1)}2+\dfrac{l_0}{\tilde\omega}\big)t<1.$$
Then, we can choose a positive number $p$ such that $\dfrac{l_0t}{\tilde\omega x}<p<1$ and $\sum\limits_{s=0}^{n}|\alpha_{u,s}|\dfrac{t}y<p<1.$ Applying Lemma \ref{lem1} to $\hat{\omega}_{i}$, and from the property of $g$, we get
$$\int\limits_{S(r)}\big(|\psi_u|\Vert f^u\Vert ^{\sum_{i=1}^q\hat{\omega}_{i}-n-1}\big)^{\frac{t}y} \sigma_m\leqslant K_1\left(\frac{R^{2m-1}}{R-r} T_f^u(R,r_0) \right)^p$$ and
$$\int\limits_{S(r)} |z^{\beta}g|^{\frac{t}{\tilde\omega x}}\sigma_m\leqslant K\left(\frac{R^{2m-1}}{R-r} T_g(R,r_0) \right)^{p}$$
outside a subset $E\subset [0,1]$ such that $\displaystyle\int\limits_{E}\dfrac{1}{1-r}dr< +\infty.$ Choosing $R=r+\dfrac{1-r}{eT_{f^u}(r,r_0)},$ we have
$$T_{f^u}(R,r_0)\leqslant 2T_{f^u}(r,r_0),$$
Hence, the above inequality implies that
$$\int\limits_{S(r)}\big(|\psi_u|\Vert f^u\Vert ^{\sum_{i=1}^q\hat{\omega}_{i}-n-1}\big)^{\frac{t}y}\sigma_m\leqslant\frac{K_2}{(1-r)^p}(T_{f^u}(r,r_0))^{2p}
\leqslant \frac{K_2}{(1-r)^p}(\log\frac{1}{1-r})^{2p},$$
since $\limsup \limits_{r\to R_0}\dfrac{T_{f^1}(r,r_0)+T_{f^2}(r,r_0)+T_{f^3}(r,r_0)}{\log (1/(R_0-r))}<\infty.$
It implies that
$$\int\limits_0\limits^1 r^{2m-1}\left(\int\limits_{S(r)} \big(|\psi_u|\Vert f^u\Vert ^{\sum_{i=1}^q\hat{\omega}_{i}-n-1}\big)^{\frac{t}y} \sigma_m\right)dr\leqslant \int\limits_0\limits^1 r^{2m-1}\frac{K_2}{(1-r)^p}\left(\log\frac{1}{1-r}\right)^{2p} dr <\infty.$$
Similarly,
$$\int\limits_0\limits^1 r^{2m-1}\left(\int\limits_{S(r)} |z^{\beta}g|^{\frac{t}{\tilde\omega x}}\sigma_m\right)dr\leqslant \int\limits_0\limits^1 r^{2m-1}\frac{K_2}{(1-r)^p}\left(\log\frac{1}{1-r}\right)^{2p} dr <\infty.$$ Hence, we conclude that $\int\limits_{B(1)} e^{a+a_1+a_2+a_3}dV<\infty,$
which contradicts Yau's result \cite{Y} and Karp's result \cite{K}. The proof of Lemma \ref{lem22} is complete. \end{proof}
\section{Proof of Theorem \ref{theo1}}
\begin{lemma}[see \cite{TN}, Lemma 3.1]\label{lem23} If $q>2N+1+\sum_{v=1}^{q}\frac{n}{k_v+1}+\rho n(2N-n+1)$, then every $g\in\mathcal F(f,\{H_i,k_i\}_{i=1}^q,1)$ is linearly nondegenerate. \end{lemma}
\begin{lemma}[see \cite{NT}, Lemma 12]\label{lem4.2}
Let $q, N$ be two integers satisfying $q\geq 2N+2$, $N \geq 2$ and $q$ even. Let $\{a_1, a_2,\ldots,a_q\}$ be a family of vectors in a 3-dimensional vector space such that $\text{rank}\{a_j\}_{j\in R}=2$ for any subset ${R}\subset Q= \{1,\ldots,q\}$ with cardinality $|R|=N+1$. Then there exists a partition $\bigcup_{j=1}^{q/2}I_j$ of $\{1,\ldots,q\}$ satisfying $|I_j|=2$ and $\text{rank}\{a_i\}_{i\in I_j}=2$ for all $j=1,\ldots,q/2.$ \end{lemma}
We need the following result which slightly improves \cite[Theorem 1.3]{TN}.
\begin{lemma}\label{theo3} Let $k$ be the largest integer not exceeding $\dfrac{q-2N-2}{2}$. If $n\geqslant2$ then $f^1\wedge f^2\wedge f^3\equiv0$ for every $f^1, f^2, f^3\in\mathcal F(f,\{H_i,k_i\}_{i=1}^q,1)$ provided $$q>2N-n+1+\sum_{i=1}^q\frac{n}{k_i+1}+\rho n(2N-n+1)+\frac{3nq}{2\big(q+(n-1)\frac{l+1}{l}\big)},$$ where $l$ is the smallest integer not less than $\dfrac{2N+2+2k}{k+2}$ if $k>0$ or $l=2N+1$ if $k=0.$ \end{lemma} \begin{proof} We consider $\mathcal M^{3}$ as a vector space over the field $\mathcal M$ and denote $Q=\{1,\ldots,q\}$. For each $i\in Q$, we set $$ V_i=\left ((f^1,H_i),(f^2,H_i),(f^3,H_i)\right )\in \mathcal M^{3}.$$
By Lemma \ref{lem23}, $f^1, f^2, f^3$ are linearly nondegenerate. Suppose that $f^1\wedge f^2\wedge f^3\not\equiv 0$. Since the family of hyperplanes $\{H_1,H_2,\ldots,H_q\}$ are in $N$-subgeneral position, for each subset $R\subset Q$ with cardinality $|R|=N+1$, there exist three indices $l, t, s\in R$ such that the vectors $V_l, V_t$ and $V_s$ are linearly independent. This means that $$ P_I:=\det\left ( \begin{array}{ccc} (f^1,H_l)&(f^1,H_t)&(f^1,H_s)\\ (f^2,H_l)&(f^2,H_t)&(f^2,H_s)\\ (f^3,H_l)&(f^3,H_t)&(f^3,H_s) \end{array}\right )\not\equiv 0,
$$ where $I:=\{l, t, s\}.$ We separate into the following cases.
\noindent{$\bullet$ \bf Case 1: $q\mod2=0$}
By the assumption, we have $q=2N+2+2k$\ $(k\geq0)$. Applying Lemma \ref{lem4.2}, we can find a partition $\{J_1,\ldots, J_{q/2}\}$ of $Q$ satisfying $|J_j|=2$ and $\text{rank}\{V_v\}_{v\in J_j}=2$ for all $j=1,2,\ldots,q/2.$ Take a fixed subset $S_j=\{j_1,\ldots,j_{k+2}\}\subset \{1,\ldots,q\}$. We claim that:
{\it There exists a partition $J^j_1,\ldots,J^j_{N+1+k}$ with $k+2$ indices ${r^j_1,\ldots,r^j_{k+2}}\in\{1,\ldots,N+1+k\}$ satisfying $\text{rank}\{V_v,V_{j_i}\}_{v\in J^j_{r^j_i}}=3$ for all $1\leqslant i\leqslant k+2$.}
Indeed, consider $N$ sets $J_1,\ldots, J_{N}$ and $j_1$. Assume that $\text{rank}\{V_{j_1}, V_{t_2} \ldots, V_{t_u}\}=1$ where $u$ is maximal. By the assumption, we have $1\leqslant u\leqslant N-1.$ It follows that there exist $N-u$ pairs, for instance $\{V_v\}_{v\in J_1},\ldots, \{V_v\}_{v\in J_{N-u}}$ which do not contain $V_{j_1}$ or $V_{t_i}$ with $2\leqslant i\leqslant u$. Obviously, $N-u\geq1$. Without loss of generality, we can assume that $V_{j_1}\in\{V_v\}_{v\in J_{N}}$.
If $u=N-1$ then obviously, $\text{rank}\{V_v, V_{j_1}\}_{v\in J_{1}}=3$ since $\sharp(\{V_{j_1}, V_{t_2}, \ldots, V_{t_{N-1}}\}\cup\{V_v\}_{v\in J_1})=N+1.$
If $u\leqslant N-2$, there are at least two pairs of vectors which do not contain $V_{j_1}$ or $V_{t_i}$ with $2\leqslant i\leqslant u$. If $V_{j_1}\in\text{span}\{V_v\}_{v\in J_{r_1}}$ for some $r_1\in\{1,\ldots,N-u\}$, then there exists at least one pair, for instance $\{V_v\}_{v\in J_{j_0}}$
with $j_0\in\{1,\ldots,N-u\}$ such that $\text{rank}\{V_v\}_{v\in (J_{r_1}\cup J_{j_0})}=3$. Indeed, otherwise $\text{rank}\{V_v\}_{v\in (\cup_{i=1}^{N-u}J_i)\cup\{j_1, t_2\ldots,t_u\}}=\text{rank}\{V_v\}_{v\in J_{r_1}}=2$. This is impossible since $\{V_v\}_{v\in (\cup_{i=1}^{N-u}J_i)\cup\{j_1,t_2\ldots,t_u\}}$ has at least $N+2$ vectors. From sets $\{V_v\}_{v\in J_{r_1}}$ and $\{V_v\}_{v\in J_{j_0}}$, we can rebuild two linearly independent pairs $\{V_{i_1},V_{i_2}\}$ and $\{V_{i_3},V_{i_4}\}$ such that $\text{rank}\{V_{i_1},V_{i_2},V_{j_1}\}=3$, where $\{i_1,i_2,i_3,i_4\}=J_{r_1}\cup J_{j_0}.$ We redenote by $J_{r_1}=\{i_1, i_2\}$ and $J_{j_0}=\{i_3, i_4\}$.
Therefore, we obtain a partition still denoted by $J_1,\ldots,J_{N+1+k}$ such that there exists an index ${r^j_1}\in\{1,\ldots,N\}$ satisfying $\text{rank}\{V_v,V_{j_1}\}_{v\in J_{r^j_1}}=3$.
Next, we consider $N$ sets $J_1,\ldots,J_{r^j_1-1},J_{r^j_1+1},\ldots, J_{N+1}$ and $j_2$. Repeating the above argument, we get a partition still denoted by $J_1,\ldots,J_{q/2}$ such that there exists an index ${r^j_2}\in\{1,\ldots,{r^j_1-1},{r^j_1+1},\ldots, N+1\}$ satisfying $\text{rank}\{V_v,V_{j_2}\}_{v\in J_{r^j_2}}=3$. Of course, this partition still satisfies $\text{rank}\{V_v,V_{j_1}\}_{v\in J_{r^j_1}}=3$.
Continue to the process, after $k+2$ times, we will obtain a new partition denoted by $J^j_1,\ldots,J^j_{N+1+k}$ such that there exists $k+2$ indices ${r^j_1,\ldots,r^j_{k+2}}\in\{1,\ldots,N+1+k\}$ satisfying $\text{rank}\{V_v,V_{j_i}\}_{v\in J^j_{r^j_i}}=3$ for all $1\leqslant i\leqslant k+2$. The claim is proved.
Put $I^j_{r^j_i}=J^j_{r^j_i}\cup\{j_i\}$, then $P_{{I^j_{r^j_i}}}\not\equiv 0$ for all $1\leqslant i\leqslant k+2$.
For each remained index $i\in\{1,\ldots,N+1+k\}\setminus\{r^j_1,\ldots,r^j_{k+2}\}$, we choose a vector $V_{s_i}$ such that $\text{rank}\{V_v\}_{v\in J^j_i\cup\{s_i\}}=3.$ Put $I^j_i=J^j_i\cup\{s_i\}$, then $P_{{I^j_i}}\not\equiv 0$ for all $i.$
\noindent$\bullet$ If $k=0$ then $l=2N+1$ and $q=2N+2$. Put $S_1=\{1\}, S_2=\{2\},\ldots, S_{l-1}=\{2N\}, S_l=\{2N+1,2N+2\}.$
\noindent$\bullet$ If $k>0$ then $q=(k+2)(l-1)+t$ with $0<t\leqslant k+2.$ Put $S_1=\{1,\ldots,k+2\},S_2=\{(k+2)+1,\ldots,2(k+2)\},\ldots,S_{l-1}=\{(k+2)(l-2)+1,\ldots,(k+2)(l-1)\}, S_l=\{(k+2)(l-1)+1,\ldots,2N+2+2k\}.$
Applying the claim to each set $S_j$ $(1\leqslant j\leqslant l)$, we get a partition $J^j_1,\ldots,J^j_{N+1+k}$ with $s_j=\sharp S_j$ indices ${r^j_1,\ldots,r^j_{s_j}}\in\{1,\ldots,N+1+k\}$ satisfying $\text{rank}\{V_v,V_{u}\}_{v\in J^j_{r^j_i}, u\in S_j}=3$ for all $1\leqslant i\leqslant s_j$.
We put $$P_Q=\prod_{j=1}^{l}\prod_{i=1}^{N+1+k}P_{I^j_i},$$ where $I^j_i$ is defined as in the above.
Since $(\min\{a,b,c\}-1)\geq\min\{a,n\}+\min\{b,n\}+\min\{c,n\}-2n-1$ for any positive integers $a,b,c$, we have \begin{align*}
\min_{1\leqslant u\leqslant 3}\{\nu_{(f^u,H_v),\leqslant k_v}(z)\}-\nu^{[1]}_{(f^k,H_v),\leqslant k_v}(z)&\geq\sum_{u=1}^3\nu^{[n]}_{(f^u,H_v),\leqslant k_v}(z)-(2n+1)\nu^{[1]}_{(f^k,H_v),\leqslant k_v}(z), \end{align*} for all $z\in\mathrm{Supp}\,\nu_{(f^k,H_v),\leqslant k_v}$.
Putting $\nu_v(z)=\sum_{u=1}^3\nu^{[n]}_{(f^u,H_v),\leqslant k_v}(z)-(2n+1)\nu^{[1]}_{(f^k,H_v),\leqslant k_v}(z)\ (1\leqslant k\leqslant 3,\ v\in Q),$ from Lemma \ref{2.4}, we have
\begin{align*}
\nu_{P_{{I}^j_i}}(z)\geq\sum_{v\in{I}^j_i}\nu_v(z)+ 2\sum_{v=1}^q\nu^{[1]}_{(f^k,H_v),\leqslant k_v}(z) \end{align*} and \begin{align*}
\nu_{P_{{I}^j_i}}(z)\geq\sum_{v\in{J}^j_i}\nu_v(z)+ 2\sum_{v=1}^q\nu^{[1]}_{(f^k,H_v),\leqslant k_v}(z). \end{align*} Note that for $k=0$ we have $l(q-2N-1)-(2N+1)=0$, while for $k>0$ we have $2N+1\leqslant\frac{q}{k+2}(2k+1)\leqslant l(2k+1)=l(q-2N-1)$. Therefore, we always have $l(q-2N-1)-(2N+1)\geqslant0.$ This implies that $l(q-2n-1)-(2n+1)\geqslant0$ since $N\geq n.$ Then, for all $z \not\in \mathcal{S}$, we obtain \begin{align*}
\nu_{P_Q}(z)&\geq l\sum_{v=1}^q\nu_v(z)+\sum_{v=1}^q\nu_v(z)+lq\sum_{v=1}^q\nu^{[1]}_{(f^k,H_v),\leqslant k_v}(z)\\ &=(l+1)\sum_{v=1}^q(\sum_{u=1}^3\nu^{[n]}_{(f^u,H_v),\leqslant k_v}(z)-(2n+1)\nu^{[1]}_{(f^k,H_v),\leqslant k_v}(z))+lq\sum_{v=1}^q\nu^{[1]}_{(f^k,H_v),\leqslant k_v}(z)\\ &=(l+1)\sum_{v=1}^q\sum_{u=1}^3\nu^{[n]}_{(f^u,H_v),\leqslant k_v}(z)+\big(l(q-2n-1)-(2n+1)\big)\sum_{v=1}^q\nu^{[1]}_{(f^k,H_v),\leqslant k_v}(z)\\
&\geq\left(l+1+\frac{l(q-2n-1)-(2n+1)}{3n}\right)\sum_{v=1}^q\sum_{u=1}^3\nu^{[n]}_{(f^u,H_v),\leqslant k_v}(z)\\ &\geq\frac{l(q+n-1)+n-1}{3n}\sum_{v=1}^q\sum_{u=1}^3\nu^{[n]}_{(f^u,H_v),\leqslant k_v}(z). \end{align*} We put $P:=P_Q$. The above inequality implies that \begin{align*}
\sum_{v=1}^q\sum_{u=1}^3\nu^{[n]}_{(f^u,H_v),\leqslant k_v}(z)\leqslant\frac{3n}{l(q+n-1)+n-1}\nu_P(z), \forall z \not \in \mathcal{S}. \end{align*} Define $\beta:=\dfrac{3n}{l(q+n-1)+n-1}$ and $\gamma:=\dfrac{lq}2$.
\noindent{$\bullet$ \bf Case 2: $q\mod2=1$.}
By the assumption, we have $q-1=2N+2+2k.$ We consider any subset ${R}=\{j_1,\ldots,j_{q-1}\}$ of $\{1,\ldots,q\}$. By the same argument as in Case 1 for $R$, we get \begin{align*}
\nu_{P_R}(z)&\geq(l+1)\sum_{v=1}^{q-1}\nu_{j_v}(z)+l(q-1)\sum_{v=1}^q\nu^{[1]}_{(f^k,H_v),\leqslant k_v}(z), \forall z \not \in \mathcal{S}. \end{align*}
We now define $P:=\prod_{|R|=q-1}P_{R},$ so we obtain \begin{align*}
\nu_{P}(z)&=\sum_{|R|=q-1}\nu_{P_R}\\ &\geq(q-1)(l+1)\sum_{v=1}^{q}\nu_{v}(z)+ql(q-1)\sum_{v=1}^q\nu^{[1]}_{(f^k,H_v),\leqslant k_v}(z)\\ &\geq(q-1)\frac{l(q+n-1)+n-1}{3n}\sum_{v=1}^q\sum_{u=1}^3\nu^{[n]}_{(f^u,H_v),\leqslant k_v}(z). \end{align*} Hence, we have \begin{align*}
\sum_{v=1}^q\sum_{u=1}^3\nu^{[n]}_{(f^u,H_v),\leqslant k_v}(z)\leqslant\frac{3n}{(l(q+n-1)+n-1)(q-1)}\nu_P(z), \forall z \not \in \mathcal{S}. \end{align*} Define $\beta:=\dfrac{3n}{\big(l(q+n-1)+n-1\big)(q-1)}$ and $\gamma:=\dfrac{(q-1)lq}2.$ Then, from all the above cases, we always get $$\alpha:=\beta\gamma=\dfrac{3nlq}{2(l(q+n-1)+n-1)}=\frac{3nq}{2\big(q+(n-1)\frac{l+1}{l}\big)},$$ and \begin{align*} \sum_{u=1}^3\sum_{v=1}^q\nu_{(f^u,H_v),\leqslant k_v}^{[n]}(z)\leqslant\beta\nu_{P}(z), \forall z \not \in \mathcal{S}. \end{align*}
It is easy to see that $|P|^{\beta}\leqslant C(\Vert f^1\Vert\Vert f^2\Vert\Vert f^3\Vert)^{\beta\gamma}=C(\Vert f^1\Vert\Vert f^2\Vert\Vert f^3\Vert)^{\alpha}$, where $C$ is some positive constant. This means that $P^{\beta}\in B(\alpha,0; f^1, f^2, f^3)$. Applying Lemma \ref{lem22}, we obtain \begin{align*} q &\leqslant 2N-n+1+\sum_{j=1}^q\frac{n}{k_j+1}+\rho n(2N-n+1)+\alpha\\ &=2N-n+1+\sum_{j=1}^q\frac{n}{k_j+1}+\rho n(2N-n+1)+\frac{3nq}{2\big(q+(n-1)\frac{l+1}{l}\big)}, \end{align*}
which contradicts the assumption. Therefore, $f^1\wedge f^2\wedge f^3 \equiv 0$ on $M$. The proof of Lemma \ref{theo3} is complete. \end{proof}
Based on the proofs of Quang \cite[Lemma 3.3, 3.4, 3.5, 3.6]{Q2} or \cite[Lemma 4.4, 4.5, 4.6, 4.8]{Q3}, we obtain the following lemmas, which are necessary for the proof of our theorem.
First, for three mappings $f^1, f^2, f^3\in\mathcal F(f,\{H_i,k_i\}_{i=1}^{q},1)$, we define
$\bullet F^{ij}_k=\frac{(f^k,H_i)}{(f^k,H_j)}, \ \ 1\leqslant k\leqslant 3,\ 1\leqslant i,j\leqslant q,$
$\bullet V_i=((f^1,H_i), (f^2,H_i), (f^3,H_i))\in\mathcal M^3_m,$
$\bullet \nu_i: \text{ the divisor whose support is the closure of the set } $ $\{z:\nu_{(f^u,H_i),\leqslant k_i}(z)\geqslant\nu_{(f^v,H_i),\leqslant k_i}(z)=\nu_{(f^t,H_i),\leqslant k_i}(z) \text{ for a permutation } (u,v,t) \text{ of } (1,2,3)\}.$
We write $V_i\cong V_j$ if $V_i \wedge V_j\equiv 0$, otherwise we write $V_i\not\cong V_j$. For $V_i \not\cong V_j$, we write $V_i\sim V_j$ if there exist $1 \leqslant u < v\leqslant 3$ such that $F_u^{ij} = F_v^{ij}$, otherwise we write $V_i\not\sim V_j.$
\begin{lemma}\label{3.3}\cite[Lemma 3.3]{Q2} or \cite[Lemma 4.4]{Q3} With the assumption of Theorem \ref{theo1}, let $h$ and $g$ be two elements of the family $\mathcal F(f, \{H_i, k_i\}_{i=1}^{q}, 1)$. If there exists a constant $\lambda$ and two indices $i, j$ such that $\frac{(h, H_i)}{(h, H_j)} = \lambda\frac{(g, H_i)}{(g, H_j)},$ then $\lambda = 1.$ \end{lemma}
\begin{lemma}\label{3.4} \cite[Lemma 3.4]{Q2} or \cite[Lemma 4.5]{Q3} Let $f^1, f^2, f^3$ be three elements of $\mathcal F(f, \{H_i, k_i\}_{i=1}^{q}, 1)$. Suppose that $f^1\wedge f^2 \wedge f^3 \equiv 0$ and $V_i\sim V_j$ for some distinct indices $i$ and $j$. Then $f^1, f^2, f^3$ are not distinct. \end{lemma}
\begin{lemma}\label{3.5}\cite[Lemma 3.5]{Q2} or \cite[Lemma 4.6]{Q3} With the assumption of Theorem \ref{theo1}, let $f^1, f^2, f^3$ be three maps in $\mathcal F(f, \{H_i, k_i\}_{i=1}^{q}, 1)$. Suppose that $f^1, f^2, f^3$ are distinct and there are two indices $i, j\in \{1, 2,\ldots, q\} \ (i \not= j)$ such that $V_i\not\cong V_j$ and
$$\Phi^{\alpha}_{ij} := \Phi^{\alpha}(F_1^{ij}, F_2^{ij}, F_3^{ij}) \equiv 0$$ for every $\alpha = (\alpha_1,\ldots , \alpha_m)\in \mathbb Z^m_+$ with $|\alpha| = 1.$ Then for every $t\in\{1,\ldots, q\} \setminus \{i\}$, the following assertions hold:
(i) $\Phi^{\alpha}_{it} \equiv 0$ for all $|\alpha|\leq1,$
(ii) if $V_i\not\cong V_t$, then $F^{ti}_1,F^{ti}_2,F^{ti}_3$ are distinct and there exists a meromorphic function $h_{it}\in B(0,1; f^1, f^2, f^3)$ such that \begin{equation*} \begin{aligned} \nu_{h_{ti}}\geq-\nu^{[1]}_{(f,H_i),\leqslant k_i}-\nu^{[1]}_{(f,H_t),\leqslant k_t}+\sum_{j\not=i,t}\nu^{[1]}_{(f,H_j),\leqslant k_j}. \end{aligned} \end{equation*} \end{lemma}
\begin{lemma}\label{3.6}\cite[Lemma 3.6]{Q2} or \cite[Lemma 4.8]{Q3}
With the assumption of Theorem \ref{theo1}, let $f^1, f^2, f^3$ be three maps in $\mathcal F(f, \{H_i, k_i\}_{i=1}^{q}, 1)$. Assume that there exist $i, j\in \{1, 2,\ldots, q\} \ (i \not= j)$ and $\alpha \in \mathbb Z^m_+$ with $|\alpha| = 1$ such that $\Phi^{\alpha}_{ij} \not\equiv 0$. Then there exists a holomorphic function $g_{ij}\in B(1,1;f^1,f^2,f^3)$ such that \begin{equation*} \begin{aligned} \nu_{g_{ij}}&\geq\sum_{u=1}^3\nu^{[n]}_{(f^u,H_i),\leqslant k_i}+\sum_{u=1}^3\nu^{[n]}_{(f^u,H_j),\leqslant k_j}+2\sum_{t=1,t\not=i,j}\nu^{[1]}_{(f,H_t),\leqslant k_t}-(2n+1)\nu^{[1]}_{(f,H_i),\leqslant k_i}\\ &-(n+1)\nu^{[1]}_{(f,H_j),\leqslant k_j}+\nu_j. \end{aligned} \end{equation*}
\end{lemma}
{\it We now prove Theorem \ref{theo1}.} \noindent
Suppose that there exist three distinct meromorphic mappings $f^1, f^2, f^3$ belonging to $\mathcal F(f, \{H_i, k_i\}_{i=1}^{q}, 1)$. By Lemma \ref{theo3}, we get $f^1\wedge f^2\wedge f^3 \equiv 0.$ We may assume that $$ \underset{group\ 1}{ \underbrace{{V_1\cong\cdots\cong V_{l_1}}}}\not\cong\underset{group\ 2}{ \underbrace{V_{l_1+1}\cong\cdots\cong V_{l_2}}}\not\cong\underset{group\ 3}{ \underbrace{V_{l_2+1}\cong\cdots\cong V_{l_3}}}\not\cong \cdots\not\cong\underset{group\ s}{ \underbrace{V_{l_{s-1}+1}\cong\cdots\cong V_{l_s}}},$$ where $l_s=q.$
Denote by $P$ the set of all $i\in \{1,\ldots, q\}$ satisfying that there exists $j\in \{1,\ldots, q\} \setminus \{i\}$ such that $V_i\not\cong V_j$ and $\Phi^{\alpha}_{ij}\equiv 0$ for all $\alpha\in\mathbb Z^m_+$ with $|\alpha| \leqslant 1.$ We separate into three cases.
\noindent$\bullet$ {\bf Case 1:} $\sharp P \geq 2.$ It follows that $P$ contains two elements $i, j.$ We get $\Phi^{\alpha}_{ij}=\Phi^{\alpha}_{ji}=0$ for all
$\alpha\in\mathbb Z^m_+$ with $|\alpha|\leqslant 1.$ By Lemma \ref{lem2.1}, there exist two functions, for instance $F_1^{ij}$ and $F_2^{ij}$, and a constant $\lambda$ such that $F_1^{ij}=\lambda F_2^{ij}.$ Applying Lemma \ref{3.3}, we have $F_1^{ij}= F_2^{ij}$. Hence, by Lemma \ref{3.5} (ii), we can see that $V_i\cong V_j$, i.e., $V_i$ and $V_j$ belong to the same group in the partition. We may assume that $i = 1$ and $j = 2.$ Since, by our assumption, $f^1, f^2, f^3$ are distinct, the number of elements of each group in the partition is less than $N + 1.$ Thus, we get $V_1\cong V_2\not\cong V_t$ for all $t\in \{N+ 1,\ldots, q\}.$ By Lemma \ref{3.5} (ii), we obtain \begin{equation*}\begin{aligned} \nu_{h_{1t}}\geq-\nu^{[1]}_{(f,H_1),\leqslant k_1}-\nu^{[1]}_{(f,H_t),\leqslant k_t}+\sum_{s\not=1,t}\nu^{[1]}_{(f,H_s),\leqslant k_s}, \end{aligned}\end{equation*} and \begin{equation*}\begin{aligned} \nu_{h_{2t}}\geq-\nu^{[1]}_{(f,H_2),\leqslant k_2}-\nu^{[1]}_{(f,H_t),\leqslant k_t}+\sum_{s\not=2,t}\nu^{[1]}_{(f,H_s),\leqslant k_s}. \end{aligned}\end{equation*} By summing up both sides of the above two inequalities, we have \begin{equation*}\begin{aligned} \nu_{h_{1t}}+\nu_{h_{2t}}\geq-2\nu^{[1]}_{(f,H_t)\leqslant k_t}+\sum_{s\not=1,2,t}\nu^{[1]}_{(f,H_s),\leqslant k_s}. \end{aligned}\end{equation*} Summing up both sides of the above inequalities over all $t\in\{N+ 1,\ldots, q\},$ we obtain \begin{equation*}\begin{aligned} \sum_{t=N+1}^q(\nu_{h_{1t}}+\nu_{h_{2t}})&\geq(q-N)\sum_{t=3}^N\nu^{[1]}_{(f,H_t)\leqslant k_t}+(q-N-3)\sum_{t=N+1}^q\nu^{[1]}_{(f,H_t)\leqslant k_t}\\ &\geq (q-N-3)\sum_{t=3}^q\nu^{[1]}_{(f,H_t)\leqslant k_t}\geq\frac{q-N-3}{3n}\sum_{u=1}^3\sum_{t=3}^q\nu^{[n]}_{(f,H_t)\leqslant k_t}. \end{aligned}\end{equation*} Hence, we get \begin{equation*}\begin{aligned} \sum_{u=1}^3\sum_{t=3}^q\nu^{[n]}_{(f,H_t)\leqslant k_t}\leqslant\frac{3n}{q-N-3}\nu_{\prod_{t=N+1}^q(h_{1t}h_{2t})}. \end{aligned}\end{equation*} Since $({\prod_{t=N+1}^q}(h_{1t}h_{2t}))^{\frac{3n}{q-N-3}}\in B(0,2(q-N)\frac{3n}{q-N-3};f^1,f^2,f^3)$, applying Lemma \ref{lem22}, we obtain $$q-2\leqslant 2N-n+1+\sum_{i=1}^q\frac{n}{k_i+1}+\rho\big( n(2N-n+1)+4(q-N)\frac{n}{q-N-3}\big).$$ From the definition of $l$ and the condition on $q$, it is easy to see that $l\geq3.$ Moreover, $$2\leqslant\frac{3nq}{2\big(q+n-1+\frac{n-1}3\big)}\leqslant\frac{3nq}{2\big(q+n-1+\frac{n-1}l\big)},$$ and $$ 4(q-N)\frac{n}{q-N-3}\leqslant \frac{4(q-n)n}{n-1}.$$ These inequalities imply that $$q\leqslant 2N-n+1+\sum_{i=1}^q\frac{n}{k_i+1}+\rho\big( n(2N-n+1)+\frac{4(q-n)n}{n-1}\big)+\frac{3nq}{2\big(q+n-1+\frac{n-1}l\big)},$$ which is a contradiction.
\noindent$\bullet$ {\bf Case 2:} $\sharp P=1$. We assume that $P = \{1\}.$ It is easy to see that $V_1\not\cong V_i$ for all $i = 2,\ldots,q$. By Lemma \ref{3.5} (ii), we obtain \begin{equation*} \begin{aligned} \nu_{h_{1i}}\geq-\nu^{[1]}_{(f,H_1)\leqslant k_1}-\nu^{[1]}_{(f,H_i)\leqslant k_i}+\sum_{s\not=1,i}\nu^{[1]}_{(f,H_s)\leqslant k_s}. \end{aligned} \end{equation*} Summing up both sides of the above inequalities over all $i = 2,\ldots,q,$ we have \begin{equation}\label{thoan1} \begin{aligned} \sum_{i=2}^q\nu_{h_{1i}}\geq(q-3)\sum_{i=2}^q\nu^{[1]}_{(f,H_i)\leqslant k_i}-(q-1)\nu^{[1]}_{(f,H_1)\leqslant k_1}. \end{aligned} \end{equation} Obviously, $i\not\in P$ for all $i = 2,\ldots,q.$ Now put $$\sigma(i)=\begin{cases} i+N,& \text{ if } i+N\leqslant q \\ i+N-q+1,& \text{ if }i+N>q,
\end{cases}$$ then $i$ and $\sigma(i)$ belong to distinct groups, i.e., $V_i\not\cong V_{\sigma(i)}$ for all $i = 2,\ldots,q$ and hence $\Phi^{\alpha}_{i\sigma(i)}\not\equiv0$ for some $\alpha\in\mathbb Z^m_+$ with $|\alpha|\leq1.$ By Lemma \ref{3.6}, we get \begin{equation*} \begin{aligned} \nu_{g_{i\sigma(i)}}&\geq\sum_{u=1}^3\sum_{t=i,\sigma(i)}\nu^{[n]}_{(f^u,H_t)\leqslant k_t}-(2n+1)\nu^{[1]}_{(f,H_i)\leqslant k_i}-(n+1)\nu^{[1]}_{(f,H_{\sigma(i)})\leqslant k_{\sigma(i)}}\\ &+2\sum_{t=1,t\not=i,\sigma(i)}\nu^{[1]}_{(f,H_t)\leqslant k_t}. \end{aligned} \end{equation*} Summing up both sides of this inequality over all $i\in\{2, \ldots, q\}$ and using (\ref{thoan1}), we obtain \begin{equation*} \begin{aligned} \sum_{i=2}^q\nu_{g_{i\sigma(i)}}&\geq2\sum_{i=2}^q\sum_{u=1}^3\nu^{[n]}_{(f^u,H_i),\leqslant k_i}+(2q-3n-8)\sum_{i=2}^q\nu^{[1]}_{(f,H_i),\leqslant k_i}+2(q-1)\nu^{[1]}_{(f,H_1),\leqslant k_1}\\ &\geq2\sum_{i=2}^q\sum_{u=1}^3\nu^{[n]}_{(f^u,H_i),\leqslant k_i}+\frac{4q-3n-14}3\sum_{u=1}^3\sum_{i=2}^q\nu^{[1]}_{(f^u,H_i),\leqslant k_i}-2\sum_{i=2}^q\nu_{h_{1i}}\\ &\geq\frac{4q+3n-14}{3n}\sum_{i=2}^q\sum_{u=1}^3\nu^{[n]}_{(f^u,H_i),\leqslant k_i}-2\sum_{i=2}^q\nu_{h_{1i}}. \end{aligned} \end{equation*} It implies that $$\sum_{u=1}^3\sum_{i=2}^q\nu^{[n]}_{(f^u,H_i),\leqslant k_i}\leqslant\frac{3n}{4q+3n-14}\nu_{\prod_{i=2}^q(g_{i\sigma(i)}h^2_{1i})}.$$ Obviously, $\prod_{i=2}^q(g_{i\sigma(i)}h^2_{1i})\in B(q-1,3(q-1);f^1,f^2,f^3)$. Applying Lemma \ref{lem22}, we obtain $$q-1\leqslant 2N-n+1+\sum_{i=1}^q\frac{n}{k_i+1}+\rho\big( n(2N-n+1)+\frac{6n(q-1)}{4q+3n-14}\big)+\frac{3n(q-1)}{4q+3n-14}.$$ Since $q\geq2n+2$, by a simple calculation, we have $$\frac{6n(q-1)}{4q+3n-14}\leqslant \frac{6n(q-1)}{11n-6}<\frac{4(q-n)n}{n-1}.$$ It implies that $$q\leqslant 2N-n+1+\sum_{i=1}^q\frac{n}{k_i+1}+\rho\big( n(2N-n+1)+\frac{4(q-n)n}{n-1}\big)+\frac{4q+3nq-14}{4q+3n-14},$$
which is a contradiction.
\noindent$\bullet$ {\bf Case 3:} $\sharp P=0$. By Lemma \ref{3.6}, for all $i\not=j$, we get \begin{equation*} \begin{aligned} \nu_{g_{ij}}&\geq\sum_{u=1}^3\nu^{[n]}_{(f^u,H_i),\leqslant k_i}+\sum_{u=1}^3\nu^{[n]}_{(f^u,H_j),\leqslant k_j}+2\sum_{t=1,t\not=i,j}\nu^{[1]}_{(f,H_t),\leqslant k_t}-(2n+1)\nu^{[1]}_{(f,H_i),\leqslant k_i}\\ &-(n+1)\nu^{[1]}_{(f,H_j),\leqslant k_j}+\nu_j. \end{aligned} \end{equation*} Put $$ \gamma(i)=\begin{cases} i+N& \text{ if } i\leqslant q-N\\ i+N-q& \text{ if } i> q-N. \end{cases} $$ By summing up both sides of the above inequality over all pairs $(i, \gamma(i)),$ we obtain \begin{equation}\label{eq3.8} \begin{aligned} \sum_{i=1}^q\nu_{g_{i\gamma(i)}}\geq2\sum_{u=1}^3\sum_{i=1}^q\nu^{[n]}_{(f^u,H_i),\leqslant k_i}+(2q-3n-6)\sum_{t=1}^q\nu^{[1]}_{(f,H_t),\leqslant k_t}+\sum_{t=1}^q\nu_t. \end{aligned} \end{equation} By Lemma \ref{3.4}, we can see that $V_j\not\sim V_l$ for all $j\not=l.$ Thus, we have $$ P^{i\gamma(i)}_{st}:=(f^s,H_i)(f^t,H_{\gamma(i)})-(f^t,H_i)(f^s,H_{\gamma(i)})\not\equiv0,\ s\not= t, 1\leqslant i\leqslant q.$$
We claim that: {\it For every $j$ with $j\not=i$ and $j\not=\gamma(i)$, and for every $z\in f^{-1}(H_j)$, we have $$\sum_{1\leqslant s<t\leq3}\nu_{P^{i\gamma(i)}_{st}}(z)\geq4\nu^{[1]}_{(f,H_j),\leqslant k_j}(z)-\nu_j(z).$$}
Indeed, for $z\in f^{-1}(H_j)\cap\mathrm{Supp}\, {\nu_j},$ we have $$ 4\nu^{[1]}_{(f,H_j),\leqslant k_j}(z)-\nu_j(z)\leq4-1=3\leqslant\sum_{1\leqslant s<t\leq3}\nu_{P^{i\gamma(i)}_{st}}(z).$$
For $z\in f^{-1}(H_j)\setminus \mathrm{Supp}\, \nu_j$, we may assume that $\nu_{(f^1,H_j),\leqslant k_j}(z)<\nu_{(f^2,H_j),\leqslant k_j}(z)\leqslant\nu_{(f^3,H_j),\leqslant k_j}(z).$ Since $f^1\wedge f^2\wedge f^3\equiv0,$ we have $\det(V_i,V_{\gamma(i)}, V_j)\equiv0,$ and hence $$(f^1,H_j)P^{i\gamma(i)}_{23}=(f^2,H_j)P^{i\gamma(i)}_{13}-(f^3,H_j)P^{i\gamma(i)}_{12}.$$ This implies that $ \nu_{P^{i\gamma(i)}_{23}}(z)\geq2 $ and so $$\sum_{1\leqslant s<t\leq3}\nu_{P^{i\gamma(i)}_{st}}(z)\geq4=4\nu^{[1]}_{(f,H_j),\leqslant k_j}(z)-\nu_j(z).$$ The claim is proved.
On the other hand, with $j=i$ or $j=\gamma(i)$, for every $z\in f^{-1}(H_j)$, we see that \begin{align*}\nu_{P^{i\gamma(i)}_{st}}(z)&\geq\min\{\nu_{(f^s,H_j),\leqslant k_j}(z),\nu_{(f^t,H_j),\leqslant k_j}(z)\}\\ &\geq\nu^{[n]}_{(f^s,H_j),\leqslant k_j}(z)+\nu^{[n]}_{(f^t,H_j),\leqslant k_j}(z)-n\nu^{[1]}_{(f,H_j),\leqslant k_j}(z). \end{align*} Hence, $ \sum_{1\leqslant s<t\leq3}\nu_{P^{i\gamma(i)}_{st}}(z)\geq2\sum_{u=1}^3\nu^{[n]}_{(f^u,H_j),\leqslant k_j}(z)-3n\nu^{[1]}_{(f,H_j),\leqslant k_j}(z).$ Combining this inequality with the above claim, we obtain \begin{equation*} \begin{aligned} \sum_{1\leqslant s<t\leq3}\nu_{P^{i\gamma(i)}_{st}}(z)&\geq\sum_{j=i,\gamma(i)}\big(2\sum_{u=1}^3\nu^{[n]}_{(f^u,H_j),\leqslant k_j}(z)-3n\nu^{[1]}_{(f,H_j),\leqslant k_j}(z)\big)\\ &+\sum_{j=1,j\not=i,\gamma(i)}(4\nu^{[1]}_{(f,H_j),\leqslant k_j}(z)-\nu_j(z)). \end{aligned} \end{equation*} On the other hand, it is easy to see that $\prod_{1\leqslant s<t\leq3}P^{i\gamma(i)}_{st}\in B(2,0;f^1,f^2,f^3).$ Summing up both sides of the above inequality over all $i,$ we obtain \begin{equation*} \begin{aligned} \sum_{i=1}^q\sum_{1\leqslant s<t\leq3}\nu_{P^{i\gamma(i)}_{st}}\geq4\sum_{u=1}^3\sum_{i=1}^q\nu^{[n]}_{(f^u,H_i),\leqslant k_i}+(4q-6n-8)\sum_{i=1}^q\nu^{[1]}_{(f,H_i),\leqslant k_i}-(q-2)\sum_{i=1}^q\nu_i. \end{aligned} \end{equation*} Thus, $$ \sum_{i=1}^q\nu_i+\frac1{q-2}\sum_{i=1}^q\sum_{1\leqslant s<t\leq3}\nu_{P^{i\gamma(i)}_{st}}\geq\frac{4}{q-2}\sum_{u=1}^3\sum_{i=1}^q\nu^{[n]}_{(f^u,H_i),\leqslant k_i}+\frac{4q-6n-8}{q-2}\sum_{i=1}^q\nu^{[1]}_{(f,H_i),\leqslant k_i}.$$ Using this inequality and (\ref{eq3.8}), we have \begin{equation*} \begin{aligned} \sum_{i=1}^q\nu_{g_{i\gamma(i)}}&+\frac{1}{q-2}\sum_{i=1}^q\sum_{1\leqslant s<t\leq3}\nu_{P^{i\gamma(i)}_{st}}\\ &\geq\big(2+\frac{4}{q-2}\big)\sum_{u=1}^3\sum_{t=1}^q\nu^{[n]}_{(f^u,H_t),\leqslant k_t}+\big(n-2+\frac{4q-6n-8}{q-2}\big)\sum_{i=1}^q\nu^{[1]}_{(f,H_i),\leqslant k_i}\\ &\geq\big(2+\frac{4}{q-2}+\frac{n-2}{3n}+\frac{4q-6n-8}{3n(q-2)}\big)\sum_{u=1}^3\sum_{t=1}^q\nu^{[n]}_{(f^u,H_t),\leqslant k_t}. \end{aligned} \end{equation*} This implies that $$\sum_{u=1}^3\sum_{t=1}^q\nu^{[n]}_{(f^u,H_t),\leqslant k_t}\leqslant\frac{3n}{6nq+(n-2)(q-2)+4q-6n-8}\nu_{\prod_{i=1}^q(g^{q-2}_{i\gamma(i)}P^{i\gamma(i)}_{12}P^{i\gamma(i)}_{13}P^{i\gamma(i)}_{23})}.$$ Observe that $\prod_{i=1}^qg^{q-2}_{i\gamma(i)}P^{i\gamma(i)}_{12}P^{i\gamma(i)}_{13}P^{i\gamma(i)}_{23}\in B(q^2,q(q-2);f^1,f^2,f^3)$, hence applying Lemma \ref{lem22}, we obtain \begin{align*}q&\leqslant 2N-n+1+\sum_{i=1}^q\frac{n}{k_i+1}+\rho\big( n(2N-n+1)+\frac{2nq(q-2)}{6nq+(n-2)(q-2)+4q-6n-8}\big)\\ &+\frac{3nq^2}{6nq+(n-2)(q-2)+4q-6n-8}, \end{align*} which is impossible since $$ \frac{2nq(q-2)}{6nq+(n-2)(q-2)+4q-6n-8}<\frac{2nq(q-2)}{6nq+q-2}=\frac{2n(q-2)}{6n+1}\leqslant\frac{4(q-n)n}{n-1}.$$ The proof of Theorem \ref{theo1} is complete.
$\square$
\noindent \textbf{Acknowledgement:} This work was done while the first author was staying at the Vietnam Institute for Advanced Study in Mathematics (VIASM). He would like to thank VIASM for its support.
\end{document} |
\begin{document}
\title{Sequences and nets in topology}
In a metric space, such as the real numbers with their standard metric, a set $A$ is open if and only if no sequence with terms outside of $A$ has a limit inside $A$. Moreover, a metric space is compact if and only if every sequence has a converging subsequence. However, in a general topological space these equivalences may fail. Unfortunately this fact is sometimes overlooked in introductory courses on general topology, leaving many students with misconceptions, e.g. that compactness is always equivalent to sequential compactness. The aim of this article is to show how sequences can fail to characterize topological properties such as openness, continuity and compactness correctly. Moreover, I will define nets and show how they succeed where sequences fail.
This article grew out of a discussion I had at the University of Leeds with fellow PhD students Phil Ellison and Naz Miheisi. It also incorporates some work I did while enrolled in a topology module taught by Paul Igodt at the Katholieke Universiteit Leuven in 2010.
\section{Prerequisites and terminology}
I will assume that you are familiar with the basics of topological and metric spaces. Introductory reading can be found in many books, such as \cite{kelley} and \cite{munkres}.
I will frequently refer to a topological space $(X, \tau)$ by just the underlying set $X$, when it is irrelevant or clear from the context which topology on $X$ is considered. Remember that any metric space $(X,d)$ has a topology whose basic opens are the open balls \[ B(x,\delta) = \{ y \ \ |\ \ d(x,y) < \delta \} \] for all $x \in X$ and $\delta > 0$.
A \textbf{neighbourhood} of a point $x$ in a topological space is an open set $U$ with $x \in U$. Note that some people call $U$ a neighbourhood of $x$ if $U$ just contains an open set containing $x$ \cite[p.~97]{munkres}, but in this article neighbourhoods are always open themselves.
A \textbf{sequence} $(x_n)$ \textbf{converges} to a point $y$ if every neighbourhood of $y$ contains $x_n$ for $n$ large enough. We write $x_n \to y$ and say that $y$ is a \textbf{limit} of the sequence $(x_n)$. If $(x_n)$ converges to $y$, then so does every subsequence of $(x_n)$. If $f: X \to Y$ is a continuous function and $x_n \to y$ in $X$, then also $f(x_n) \to f(y)$ in $Y$. (We say that continuous functions \textbf{preserve} convergence of sequences.) Convergence in a product space is pointwise, i.e. a sequence $(x_n)$ in $\prod_{i \in I} X_i$ converges to $y$ if and only if $x_n(i) \to y(i)$ in $X_i$ for all $i \in I$.
A topological space is \textbf{Hausdorff} if for every two distinct points $x$ and $y$, we can find a neighbourhood of $x$ and a neighbourhood of $y$ that are disjoint. Sequences in general can have more than one limit, but in a Hausdorff space limits (if they exist at all) are unique. Indeed, a sequence cannot be eventually in two disjoint neighbourhoods at once.
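For a concrete (if extreme) illustration of non-uniqueness, consider a set $X$ with at least two elements carrying the indiscrete topology $\{\emptyset, X\}$. The only neighbourhood of any point is $X$ itself, so every sequence converges to every point: \[ x_n \to y \quad \mbox{for every sequence $(x_n)$ and every $y \in X$.} \] In particular limits are as far from unique as possible; of course, this space is also as far from Hausdorff as possible.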
A set $X$ is \textbf{countable} when there is a surjection from $\mathbb{N} = \{0, 1, 2, 3, \ldots \}$ onto $X$. So $X$ is countable if and only if $X$ is finite or in bijection with the natural numbers. A countable union of countable sets is still countable. Cantor's famous diagonal argument proves that the unit interval $[0,1]$ and $\mathbb{R}$ are \textbf{uncountable} \cite{cantor}.
A few of my examples will make use of \textbf{ordinal numbers}. If you are unfamiliar with ordinal numbers, you can either find background reading in \cite{hrbacek} or you can skip over these examples. I write the first infinite ordinal (i.e. the order type of the natural numbers) as $\omega_0$ and the first uncountable ordinal as $\omega_1$. Because a countable union of countable ordinals is still countable, no countable sequence of countable ordinals can have $\omega_1$ as limit. In other words: the cofinality of $\omega_1$ is $\omega_1$.
\section{Open versus sequentially open}
In a topological space $X$, a set $A$ is \textbf{open} if and only if every $a \in A$ has a neighbourhood contained in $A$. $A$ is \textbf{sequentially open} if and only if no sequence in $X \setminus A$ has a limit in $A$, i.e. sequences in the complement $X \setminus A$ cannot \emph{converge out of} it into $A$.
If $X$ is a metric space, then the two notions of open and sequentially open are equivalent. Indeed if $A$ is open, $(x_n)$ is a sequence in $X \setminus A$ and $y \in A$, then there is a neighbourhood $U$ of $y$ contained in $A$. Hence $U$ cannot contain any term of $(x_n)$, so $y$ is not a limit of the sequence and $A$ is sequentially open. Conversely, if $A$ is not open, then there is a $y \in A$ such that every neighbourhood of $y$ intersects $X \setminus A$. In particular we can pick an element \[ x_n \in (X \setminus A) \cap B(y, \frac{1}{n+1} ) \] for all $n \in \mathbb{N}$. The sequence $(x_n)$ in $X \setminus A$ then converges to $y \in A$, so $A$ is not sequentially open.
The implication from open to sequentially open is true in any topological space.
\begin{proposition} \label{openseqopen} In any topological space $X$, if $A$ is open, then $A$ is sequentially open. \end{proposition}
We can just copy the proof for metric spaces; it remains valid in any topological space.
\begin{proof} Suppose that $A$ is open, let $(x_n)$ be a sequence in $X \setminus A$ and take any $y \in A$. There is a neighbourhood of $y$ contained in $A$, so this neighbourhood doesn't contain any terms of $(x_n)$. Hence the sequence doesn't converge to $y$, as required. \end{proof}
It is tempting to think that the converse might also hold in any topological space. When this is indeed the case, we call the space sequential.
\begin{definition} A topological space is \textbf{sequential} when any set $A$ is open if and only if $A$ is sequentially open. \end{definition}
However, importantly, not every space is sequential.
\begin{proposition} \label{notseq} There is a topological space that is not sequential. \end{proposition}
\begin{proof} Any of the three examples below constitutes a proof.
\begin{description} \item{Example 1:} Let $X$ be an uncountable set, such as the set of real numbers. Consider $(X, \tau_{cc})$, the countable complement topology on $X$. Thus $A \subseteq X$ is closed if and only if $A=X$ or $A$ is countable. Suppose that a sequence $(x_n)$ has a limit $y$. Then the neighbourhood \[ \left(X \setminus \{x_n \ \ |\ \ n \in \mathbb{N} \} \right) \cup \{y\}\] of $y$ must contain $x_n$ for $n$ large enough. This is only possible if $x_n = y$ for $n$ large enough. Consequently a sequence in any set $A$ can only converge to an element of $A$, so every subset of $X$ is sequentially open. But as $X$ is uncountable, not every subset is open. So $(X, \tau_{cc})$ is not sequential. \item{Example 2:} Consider the order topology on the ordinal $\omega_1 +1 = [0, \omega_1]$. Because $\omega_1$ has cofinality $\omega_1$, every sequence of countable ordinals has a countable supremum. Hence no sequence of countable ordinals converges to $\omega_1$, so $\{ \omega_1 \}$ is sequentially open. However, $\{ \omega_1 \}$ is not open as $\omega_1$ is a limit ordinal. So the order topology on $[0, \omega_1]$ is not sequential. \item{Example 3:} Let $X$ be an uncountable set and let $\{0,1\}$ have the discrete topology. Consider $\mathcal{P}(X) = \{0,1\}^X$ with the product topology. Let $\mathcal{A} \subseteq \mathcal{P}(X)$ be the collection of all uncountable subsets of $X$. $\mathcal{A}$ is not open; indeed every nonempty basic open set contains finite sets. However, we claim that $\mathcal{A}$ is sequentially open. Let $(X_n)$ be a sequence of countable subsets of $X$ and suppose that $X_n \to Y$. Then for every $x \in X$ we must have $x \in Y$ if and only if $x \in X_n$ for $n$ large enough. In particular \[ Y \subseteq \bigcup_{n \in \mathbb{N}} X_n.\] But $ \displaystyle \cup_{n \in \mathbb{N}} X_n$ is a countable union of countable sets. Hence a sequence of countable sets can only converge to countable sets, so $\mathcal{A}$ is sequentially open. \qedhere \end{description} \end{proof}
Still, a large class of topological spaces is sequential.
\begin{definition} A \textbf{countable basis at a point $x$} is a countable set \linebreak\mbox{$\{U_n \ \ |\ \ n \in \mathbb{N} \}$} of neighbourhoods of $x$, such that for any neighbourhood $V$ of $x$ there is an $n \in \mathbb{N}$ such that $U_n \subseteq V$.
A topological space is \textbf{first countable} if every point has a countable basis. \end{definition}
Every metric space is first countable, as \[\left\{ B\left(x, \frac{1}{n+1}\right) \ \ |\ \ n \in \mathbb{N} \right\} \] is a countable basis at any point $x$. We can prove that every first countable space is sequential by generalizing the proof that every metric space is sequential.
\begin{proposition} \label{firstcountableseq} Every first countable space $X$ (and hence every metric space) is sequential. \end{proposition}
\begin{proof} Because of Proposition \ref{openseqopen}, we only need to prove that every sequentially open set $A$ is also open. So suppose that $A$ is not open. Then there is a $y \in A$ such that every neighbourhood of $y$ intersects $X \setminus A$. Let $\{U_n \ \ |\ \ n \in \mathbb{N}\}$ be a countable basis at $y$. For every $n\in \mathbb{N}$, we can choose \[ x_n \in (X \setminus A) \cap \left( \bigcap_{i=0}^n U_i \right). \] Then for every neighbourhood $V$ of $y$, there is an $n \in \mathbb{N}$ such that $U_n \subseteq V$, and hence $x_m \in V$ for every $m \geq n$. So $(x_n)$ is a sequence in $X \setminus A$ that converges to $y \in A$. Therefore $A$ is not sequentially open, as required. \end{proof}
Sequential spaces are also exactly those spaces $X$ where sequences can correctly define continuity of functions from $X$ into another topological space.
\begin{lemma} \label{lemmaseq} Let $X$ be a topological space. Then $A \subseteq X$ is sequentially open if and only if every sequence with a limit in $A$ has all but finitely many terms in $A$. \end{lemma} \begin{proof} We prove that $A \subseteq X$ is \emph{not} sequentially open if and only if there is a sequence with infinitely many terms in $X \setminus A$ and with a limit in $A$.
If $A$ is not sequentially open, then by definition there is a sequence with terms in $X \setminus A$ but with limit in $A$.
Conversely, suppose $(x_n)$ is a sequence with infinitely many terms in $X \setminus A$ that converges to $y \in A$. Then $(x_n)$ has a subsequence in $X \setminus A$ that must still converge to $y \in A$, so $A$ is not sequentially open. \end{proof}
\begin{proposition} The following are equivalent for any topological space $X$:
\begin{enumerate}
\item $X$ is sequential;
\item for any topological space $Y$ and function $f: X \to Y$, $f$ is continuous if and only if $f$ preserves convergence (i.e. whenever $x_n \to y$ in $X$, also $f(x_n) \to f(y)$ in $Y$).
\end{enumerate} \end{proposition}
\begin{proof} \begin{description} \item[$1 \Rightarrow 2$: ] Suppose $X$ is sequential. Any continuous function preserves convergence of sequences, so we only need to prove that if $f: X \to Y$ preserves convergence, then $f$ is continuous. Suppose for contradiction that $f$ is not continuous. Then there is an open $U \subseteq Y$ such that $f^{-1}(U)$ is not open in $X$. As $X$ is sequential, $f^{-1}(U)$ is also not sequentially open, so there is a sequence $(x_n)$ in $X \setminus f^{-1}(U)$ that converges to a $y \in f^{-1}(U)$. However $\left(f(x_n)\right)$ is then a sequence in the closed set $Y \setminus U$, so it cannot have $f(y)$ as a limit. So $f$ does not preserve convergence, as required. \item[$2 \Rightarrow 1$: ] Suppose that the topological space $(X, \tau)$ is not sequential. Let $(X, \tau_{seq})$ be the topological space where $A \subseteq X$ is open if and only if $A$ is sequentially open in $(X, \tau)$. This is indeed a topology: it is trivial that $\emptyset$ and $X$ are sequentially open, and that any union of sequentially open sets is also sequentially open. It remains to prove that the intersection of two sequentially open sets $A$ and $B$ is sequentially open. Suppose that $(x_n)$ is a sequence with limit $y \in A \cap B$. By Lemma \ref{lemmaseq}, $(x_n)$ must have all but finitely many terms in $A$ and all but finitely many terms in $B$. So $(x_n)$ has all but finitely many terms in $A \cap B$. By Lemma \ref{lemmaseq} again, $A \cap B$ is sequentially open.
As $(X, \tau)$ is not already sequential, the topology $\tau_{seq}$ is strictly finer than $\tau$. Hence the identity map \[id : (X, \tau) \to (X, \tau_{seq}) \] is not continuous. We claim that $id$ nonetheless preserves convergence. Indeed, suppose $x_n \to y$ in $(X, \tau)$. Every open neighbourhood of $y$ in $(X, \tau_{seq})$ is sequentially open in $(X, \tau)$, so by Lemma \ref{lemmaseq} contains all but finitely many terms of $(x_n)$. Hence also $x_n \to y$ in $(X, \tau_{seq})$, as required.\qedhere \end{description} \end{proof}
\section{Sequential spaces as quotients of metric spaces}
First, we recall the definition of a quotient space. Let $X$ be a topological space and let $\sim$ be an equivalence relation on $X$. Consider the set of equivalence classes $X / \sim$ and the projection map $\pi: X \to X/\sim$. We topologize $X / \sim$ by defining $A \subseteq X / \sim$ to be open if and only if $\pi^{-1}(A)$ is open in $X$.
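For a concrete instance (a standard one, recalled here only as an illustration): take $X = [0,1]$ with its usual topology and let $\sim$ identify the two endpoints, i.e. $x \sim y$ if and only if $x = y$ or $\{x,y\} = \{0,1\}$. The quotient $[0,1]/\sim$ is homeomorphic to the circle, and a set $A \subseteq [0,1]/\sim$ containing the class $\{0,1\}$ is open precisely when $\pi^{-1}(A)$ is open in $[0,1]$; near the glued point this means that $\pi^{-1}(A)$ contains both an interval $[0,\epsilon)$ and an interval $(1-\epsilon,1]$.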
Note that given a surjective function $f: X \to Y$ such that $A \subseteq Y$ is open if and only if $f^{-1}(A)$ is open in $X$, we can consider $Y$ to be a quotient of $X$. Indeed, define an equivalence relation $\sim$ on $X$ such that $x \sim y$ if and only if $f(x) = f(y)$, i.e. the equivalence classes are the fibers of $f$. Then $X / \sim$ is isomorphic to $Y$ by mapping the equivalence class of $x$ to $f(x)$.
We are now ready to prove that the sequential spaces are exactly the quotients of metric spaces. This is a corollary of the following two propositions.
\begin{proposition} \label{quotientseq} Any quotient $X / \sim$ of a sequential space $X$ is sequential. \end{proposition}
\begin{proof} Suppose that $A \subseteq X / \sim$ is not open. We need to prove that $A$ is not sequentially open either. By definition of quotient space, $\pi^{-1}(A)$ is not open in $X$. As $X$ is sequential, there is a sequence $(x_n)$ in $X \setminus \pi^{-1}(A)$ that converges to some $y \in \pi^{-1}(A)$. But $\pi$ is continuous, so it preserves convergence of sequences. Hence $(\pi(x_n))$ is a sequence in $(X/\sim) \setminus A$ with limit $\pi(y) \in A$. Thus $A$ is not sequentially open, as required. \end{proof}
\begin{proposition}[Franklin \cite{franklin}] \label{franklin} Every sequential space $X$ is a quotient of some metric space. \end{proposition}
\begin{proof} Let $\mathcal{C}$ be the set of all sequences $(x_n)$ in $X$ that converge to their first term, i.e. $x_n \to x_0$.
Consider the subspace $Y = \{0\} \cup \{\frac{1}{n+1} \ \ |\ \ n \in \mathbb{N} \}$ of $\mathbb{R}$ with the standard metric. Thus, $A \subseteq Y$ is open if and only if $0 \not\in A$ or $A$ contains all but finitely many elements of $Y$. Note that $Y$ is a metric space, being a subspace of a metric space.
Now consider the disjoint sum (i.e. the coproduct in category theory jargon) \[ Z = \bigoplus_{(x_n) \in \mathcal{C}} \{(x_n)\} \times Y. \] The underlying set of $Z$ is \[ \bigcup_{(x_n) \in \mathcal{C}} \{(x_n)\} \times Y \]
and $A \subseteq Z$ is open if and only if for every $(x_n) \in \mathcal{C}$ the set \[ \{ y \in Y \ \ |\ \ ((x_n), y) \in A\} \] is open in $Y$. Note that $Z$ is metrizable as a disjoint sum of metric spaces.
Next consider the map \begin{align*} f: \ Z &\to X \\ ((x_n),0) &\mapsto x_0 \\ \left((x_n),\frac{1}{i+1}\right) &\mapsto x_i \quad \quad \quad \mbox{for all $i \in \mathbb{N}$} \end{align*} I claim that this map exhibits $X$ as a quotient of $Z$. Indeed $f$ is clearly surjective: for all $x \in X$ the constant sequence at $x$ converges to $x$, so $x=f((x),0)$. Hence it remains to show that $A \subseteq X$ is open if and only if $f^{-1}(A)$ is open in $Z$.
Suppose that $A \subseteq X$ is open. As $X$ is sequential, every sequence $(x_n)$ in $X$ converging to some $a \in A$ must have all but finitely many terms in $A$ by Lemma \ref{lemmaseq}. So if $((x_n), 0) \in f^{-1}(A)$ (which means that $(x_n)$ converges to $x_0 \in A$), then $f^{-1}(A)$ will contain all but finitely many elements of $\{(x_n)\} \times Y$. So for each $(x_n) \in \mathcal{C}$, the set \[ \{ y \in Y \ \ |\ \ ((x_n), y) \in f^{-1}(A) \} \] is open in $Y$. Hence $f^{-1}(A)$ is open in $Z$.
Conversely, if $A$ is not open in $X$, then there is some sequence $(x_n)$ in $X \setminus A$ that converges to some $a \in A$. But then \[ \{ y \in Y \ \ |\ \ ((x_n), y) \in f^{-1}(A) \} = \{0\} \] is not open in $Y$, so $f^{-1}(A)$ is not open in $Z$. \end{proof}
\begin{corollary} \label{seqquotientmetric} A topological space is sequential if and only if it is a quotient of a metric space. \end{corollary}
\begin{proof} One direction is the above proposition. For the other direction, note that by Proposition \ref{firstcountableseq} every metric space is sequential, so by Proposition \ref{quotientseq} any quotient of a metric space is also sequential. \end{proof}
We can now also easily prove that sequential is a strictly weaker notion than first countable.
\begin{proposition} \label{seqnotfirstcountable} There exists a sequential space which is not first countable. \end{proposition}
\begin{proof} Consider $\mathbb{R}$ with the standard topology, and the equivalence relation $\sim$ on $\mathbb{R}$ that identifies all the natural numbers, i.e. the equivalence classes are $\mathbb{N}$ and $\{x\}$ for every $x \in \mathbb{R} \setminus \mathbb{N}$.
The quotient space $\mathbb{R} / \sim$ is sequential as a quotient of a metric space.
I claim that $\mathbb{R} / \sim$ is not first countable, in particular that there is no countable basis at $\mathbb{N}$. Suppose that $\{U_n \ \ |\ \ n \in \mathbb{N} \}$ is any countable collection of neighbourhoods of $\mathbb{N}$. For all $n \in \mathbb{N}$, $\pi^{-1}(U_n)$ is a neighbourhood of $n$ in $\mathbb{R}$ with the standard topology, so there is a $\delta_n$ with $0 < \delta_n \leq \frac{1}{2}$ such that $B(n, \delta_n) \subseteq \pi^{-1}(U_n)$. Then consider \[ \pi\left(\bigcup_{n \in \mathbb{N}} B\left(n, \frac{\delta_n}{2} \right) \right). \] This is a neighbourhood of $\mathbb{N}$ in $\mathbb{R} / \sim$, but it doesn't contain $U_n$ for any $n \in \mathbb{N}$: the point $n + \frac{3\delta_n}{4}$ lies in $B(n,\delta_n) \subseteq \pi^{-1}(U_n)$, but (as all the $\delta_m$ are at most $\frac{1}{2}$) it lies in no ball $B\left(m, \frac{\delta_m}{2}\right)$. So $\{U_n \ \ |\ \ n \in \mathbb{N} \}$ is not a countable basis at $\mathbb{N}$, as required. \end{proof}
\section{Nets save the day}
Looking back at the section on open and sequentially open sets, we see that convergence of sequences doesn't give us full information on the topology. For example the discrete topology and the countable complement topology on an uncountable set $X$ have the same converging sequences (namely $x_n \to y$ if and only if $x_n = y$ for $n$ large enough), but the discrete topology is strictly finer than the countable complement topology. The discrete topology is sequential, but the countable complement topology contains sequentially open sets which are not open.
Convergence of sequences works fine when the space is first countable, because a countable basis at a point allows us to \emph{approach} that point nicely with a sequence. However, if a point $x$ does not have a countable basis, then sequences might not succeed in getting \emph{close to} $x$, i.e. eventually in every neighbourhood of $x$. Sequences fall short in two respects: they are \emph{too short} and \emph{too thin}.
Remember Example 2 from the proof of Proposition \ref{notseq}; $[0, \omega_1]$ with the order topology. Even though $\{\omega_1\}$ is not open, a sequence of countable ordinals can only have a countable limit. Because a sequence only has countable many terms, it never can advance \emph{deep enough} in the ordinal numbers to get close to $\omega_1$. A possible solution is to allow sequences indexed by any linearly ordered set, instead of just the natural numbers. Indeed the $\omega_1$-sequence \begin{align*} x: \omega_1 &\to [0, \omega_1] \\
\alpha &\mapsto \alpha \end{align*} converges to $\omega_1$, even though every term is countable.
This overcomes the \emph{shortness} of sequences, but is still not enough to solve all difficulties. Indeed, reconsider Example 3 from the proof of Proposition \ref{notseq}; the product space $\mathcal{P}(X) = \{0,1\}^X$ where $X$ is uncountable. We take the subspace $\mathcal{Q}$ of $\mathcal{P}(X)$ which consists of those subsets of $X$ that are either finite or uncountable. Define $\mathcal{A} \subseteq \mathcal{P}(X)$ to be the collection of all uncountable subsets of $X$ like before. $\mathcal{A}$ is still not open in $\mathcal{Q}$ as every basic open contains finite sets. But no sequence of finite sets can converge to an uncountable set, not even if we allow sequences indexed by any linearly ordered set. Indeed, let $(X_i)_{i\in I}$ be any $I$-sequence of finite subsets of $X$, where $I$ is any linearly ordered set. For all $i \in I$, define \[ Z_i = \{x \in X \ \ |\ \ \mbox{$x \in X_j$ for all $j \geq i$} \}. \] Every $Z_i$ is finite as $Z_i \subseteq X_i$. Moreover $Z_i \subseteq Z_j$ for $i \leq j$, so there can only be countably many distinct sets $Z_i$. (There can be at most one $Z_i$ with cardinality $1$, at most one with cardinality $2$, etc.) Hence $\cup_{i \in I} Z_i$ is countable as a countable union of countable sets. But if $X_i \to Y$ then we must have $Y = \cup_{i \in I} Z_i$, so $Y$ cannot be uncountable. Intuitively, the problem with sequences here is that they are linearly ordered, so they can only approach a point from \emph{one angle at a time}, whereas to capture the topology we need to consider all \emph{angles of approach} at the same time.
Nets are defined to overcome the shortcomings of sequences. Nets generalize sequences, but they can go both \emph{deeper} and \emph{wider} than sequences. Sequences associate a point to every natural number. Nets are more general, as they can associate a point to every element of a directed set.
\begin{definition} A \textbf{directed set} is a set $D$ with a preorder relation (i.e. a reflexive and transitive binary relation) such that every two elements have an upper bound. \end{definition}
Note that we \emph{don't} require that a pair of elements has a \emph{least} upper bound, we just require that some upper bound exists.
{ \setlength{\leftmargini}{5em} \begin{enumerate} \item[Example 4]: Every linearly ordered set (such as $\mathbb{N}$ with the usual order) is a directed set. \item[Example 5]: Any collection of sets that is closed under binary intersections is a directed set when ordered by reverse inclusion, i.e. $X \le Y$ if and only if $Y \subseteq X$.
In particular, given any point $x$ of a topological space, the collection of all neighbourhoods of $x$ ordered by reverse inclusion is a directed set, which we write as $\mathcal{N}(x)$. \item[Example 6]: If $D$ and $E$ are directed sets, then so is their product $D \times E$ ordered by $(d_1,e_1) \le (d_2,e_2)$ if and only if $d_1 \le d_2$ in $D$ and $e_1 \le e_2$ in $E$. \end{enumerate} }
\begin{definition} A \textbf{net} in a topological space $X$ is a function $f$ from a directed set $D$ to $X$. We usually write $f(d) = x_d$ for all $d \in D$, and refer to the net by $(x_d)_{d\in D}$.
A net $(x_d)_{d\in D}$ \textbf{converges} to a point $y \in X$ if for every neighbourhood $U$ of $y$, there is a $d \in D$ such that $x_e \in U$ for all $e \ge d$. \end{definition}
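As a small sanity check of the definition (the particular net is chosen only for illustration), take $X = \mathbb{R}$ with the standard topology and the directed set $D = \mathbb{N} \times \mathbb{N}$ of Example 6. The net \[ x_{(m,n)} = \frac{1}{m+n+1} \] converges to $0$: given a neighbourhood $U$ of $0$, pick $\delta > 0$ with $B(0,\delta) \subseteq U$ and then $(m,n)$ with $\frac{1}{m+n+1} < \delta$; for every $(m',n') \ge (m,n)$ we have $x_{(m',n')} \leq x_{(m,n)} < \delta$, so the net is eventually in $U$.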
Our motivation for defining nets was the hope that convergence of nets (in contrast to convergence of sequences) would completely determine the topology of the space. We now prove that this is indeed the case.
\begin{proposition} \label{netsopen} In any topological space $X$, a set $A \subseteq X$ is open if and only if no net in $X \setminus A$ has a limit in $A$. \end{proposition}
\begin{proof} Let $A$ be open, let $(x_d)_{d\in D}$ be a net in $X \setminus A$ and take any $y \in A$. As $A$ is open, there is a neighbourhood of $y$ that is contained in $A$. Hence this neighbourhood does not contain any terms of the net, so $y$ is not a limit of $(x_d)$.
Conversely suppose that $A$ is not open. Then there is an $y \in A$ such that every neighbourhood of $y$ intersects $X \setminus A$. So there is a net $(x_U)_{U \in \mathcal{N}(y)}$ such that \[ x_U \in (X \setminus A) \cap U \] for all neighbourhoods $U$ of $y$. For every neighbourhood $U$ of $y$, $U$ is an element of $\mathcal{N}(y)$ such that $x_V \in U$ for every $V \ge U$ (i.e. $V \subseteq U$). Hence $(x_U)$ is a net in $X \setminus A$ that converges to $y \in A$, as required. \end{proof}
Like for sequences, convergence of nets is preserved by continuous functions. But again, for nets the converse is also true.
\begin{proposition} \label{netscontinuous} Let $f: X \to Y$ be a function between two topological spaces. Then $f$ is continuous if and only if for every net $(x_d)_{d\in D}$ that converges to $y$ in $X$, we have $(f(x_d))_{d\in D} \to f(y)$ in $Y$. \end{proposition}
\begin{proof} Suppose that $f$ is continuous and $x_d \to y$ in $X$. Take a neighbourhood $U$ of $f(y)$ in $Y$. Then $f^{-1}(U)$ is a neighbourhood of $y$ in $X$. By definition of convergence of nets, there is a $d \in D$ such that $x_e \in f^{-1}(U)$ for all $e \ge d$. So also $f(x_e) \in U$ for all $e \ge d$. This means that $f(x_d) \to f(y)$, as required.
Conversely, suppose that $f$ is not continuous, say $U \subseteq Y$ is open but $f^{-1}(U)$ is not open. By Proposition \ref{netsopen} there is a net $(x_d)_{d\in D}$ in $X \setminus f^{-1}(U)$ that converges to some $y \in f^{-1}(U)$. But then $(f(x_d))_{d\in D}$ is a net in the closed set $Y \setminus U$ which (again by Proposition \ref{netsopen}) cannot converge to $f(y) \in U$. So $f$ doesn't preserve convergence of nets. \end{proof}
Like a sequence, a net can have more than one limit, although in Hausdorff spaces every converging sequence has a unique limit. Sequences can also have unique limits in spaces that are not Hausdorff. Consider for example an uncountable set $X$ with the countable complement topology. In Example 1 we saw that limits are unique, but the space is obviously not Hausdorff. On the contrary, nets do succeed in exactly characterizing Hausdorff spaces.
\begin{proposition} A space $X$ is Hausdorff if and only if no net has two distinct limits. \end{proposition}
\begin{proof} Suppose that $X$ is Hausdorff and consider a net $(x_d)_{d\in D}$. Suppose for contradiction that $x$ and $y$ are distinct limits of $(x_d)$. Take disjoint neighbourhoods $U$ of $x$ and $V$ of $y$. By definition of convergence, there is a $d_x$ such that $x_e \in U$ for all $e \ge d_x$ and a $d_y$ such that $x_e \in V$ for all $e \ge d_y$. In particular we have $x_e \in U \cap V$ for an upper bound $e$ of $d_x$ and $d_y$ in the directed set $D$, contradicting the disjointness of $U$ and $V$. Thus $(x_d)$ cannot have two distinct limits.
Conversely, suppose that $X$ is not Hausdorff, so there are two distinct points $x$ and $y$ such that any neighbourhood of $x$ intersects any neighbourhood of $y$. So there is a net $(x_{(U,V)})_{(U,V) \in \mathcal{N}(x) \times \mathcal{N}(y)}$ such that \[ x_{(U,V)} \in U \cap V \] for any neighbourhoods $U$ of $x$ and $V$ of $y$. Take any neighbourhood $U_0$ of $x$ and any $(U,V) \in \mathcal{N}(x) \times \mathcal{N}(y)$ with $(U,V) \ge (U_0, X)$. By definition we have $U \subseteq U_0$ and thus $x_{(U,V)} \in U \cap V \subseteq U_0$. This proves that $x_{(U,V)} \to x$ and we can similarly show that $x_{(U,V)} \to y$. So the net $(x_{(U,V)})_{(U,V) \in \mathcal{N}(x) \times \mathcal{N}(y)}$ has two distinct limits, as required.\end{proof}
\section{Compactness and sequential compactness}
Let's now look at compactness of topological spaces. Remember that a space $X$ is \textbf{compact} if and only if every open covering of $X$ (i.e. every collection of open sets whose union is $X$) has a finite subcovering. It is sufficient to consider coverings consisting of basic open sets.
Equivalently, $X$ is compact if and only if every collection of closed sets with the finite intersection property (i.e. all finite intersections are nonempty) has a nonempty intersection.
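The equivalence is just a matter of taking complements; it may be worth writing this out once. If $\{U_i \ \ |\ \ i \in I\}$ is an open covering of $X$ without a finite subcovering, then the closed sets $C_i = X \setminus U_i$ satisfy \[ \bigcap_{i \in F} C_i = X \setminus \bigcup_{i \in F} U_i \neq \emptyset \] for every finite $F \subseteq I$, yet $\bigcap_{i \in I} C_i = X \setminus \bigcup_{i \in I} U_i = \emptyset$. Running the same argument backwards gives the converse direction.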
An important theorem by Tychonoff \cite{tychonoff} says that any product of compact spaces is itself compact.
If every sequence in a topological space has a convergent subsequence, then we call the space \textbf{sequentially compact}. A metric space is compact if and only if it is sequentially compact \cite[Theorem~28.2]{munkres}. However, bearing in mind the difference between open and sequentially open, we should be very suspicious of this equivalence holding in general. And indeed, neither direction of the equivalence holds in every topological space.
\begin{proposition} \label{compactnotseqcompact} There is a topological space that is compact but not sequentially compact. \end{proposition}
\begin{proof} Let $\{0,1\}$ have the discrete topology and consider $\{0,1\}^{[0,1)}$ with the product topology.
By Tychonoff's theorem, $\{0,1\}^{[0,1)}$ is compact as product of compact spaces.
Consider the sequence $(x_n)$ where $x_n(r)$ equals the $n$'th digit in the binary expansion of $r$ for all $r \in [0,1)$, where we never pick an expansion that ends in all $1$s. We claim that $(x_n)$ does not have a convergent subsequence. Indeed if the subsequence $x_{k_n}$ has a limit $y$, then for any $r \in [0,1)$, we must have $x_{k_n}(r) = y(r)$ for $n$ large enough. But there is a real number $r \in [0,1)$ whose unique binary expansion has a $0$ in the $k_n$th position if $n$ is even and a $1$ in the $k_n$th position if $n$ is odd, so $x_{k_n}(r)$ is not eventually constant, contradicting $x_{k_n} \to y$. Hence $\{0,1\}^{[0,1)}$ is not sequentially compact. \end{proof}
\begin{proposition} \label{seqcompactnotcompact} There is a topological space that is sequentially compact but not compact. \end{proposition}
\begin{proof} Consider the order topology on $\omega_1 = [0, \omega_1)$, the set of all countable ordinals.
The open covering $\mathcal{A} = \{[0, \alpha) \ \ |\ \ \alpha < \omega_1\}$ does not have a finite subcovering. Indeed the supremum of the sets in any finite subset $\mathcal{B}$ of $\mathcal{A}$ is a finite union of countable ordinals, and hence itself a countable ordinal which has a successor in $[0, \omega_1)$ that is not covered by $\mathcal{B}$.
Now let $(x_n)$ be any sequence in $[0, \omega_1)$. Let $A \subseteq \mathbb{N}$ be the set of all indices $n$ such that $x_n$ is minimal in $\{x_m \ \ |\ \ m \geq n \}$. Note that $A$ is infinite: for every $n \in \mathbb{N}$ the set $\{x_m \ \ |\ \ m \geq n \}$ has a minimum, since it is a nonempty set of ordinals, and any index $m \geq n$ at which this minimum is attained belongs to $A$. Let $k_n$ be the $n$'th element of $A$ for all $n \in \mathbb{N}$. The sequence $(x_{k_n})$ is then a subsequence of $(x_n)$, which is nondecreasing and therefore converges to the supremum of its elements. This supremum is countable as a countable union of countable ordinals, so it is indeed an element of $[0, \omega_1)$. Thus every sequence in $[0, \omega_1)$ has a convergent subsequence, as required. \end{proof}
We can however characterize compact spaces using nets. To do this, we need the notion of a subnet.
\begin{definition} Let $(x_d)_{d \in D}$ be a net. A \textbf{subnet} of $(x_d)_{d \in D}$ is a net $(x_{f(e)})_{e \in E}$ where $E$ is a directed set and $f: E \to D$ is a function such that: \begin{enumerate} \item if $e_1 \le e_2$, then $f(e_1) \le f(e_2)$ (i.e. $f$ is order preserving), \item for all $d \in D$, there is an $e \in E$ such that $f(e) \ge d$ (i.e. $f(E)$ is cofinal in $D$). \end{enumerate} \end{definition}
\begin{proposition} A topological space $X$ is compact if and only if every net has a convergent subnet. \end{proposition}
\begin{proof} Suppose $X$ is compact and let $(x_d)_{d \in D}$ be a net. The sets \[ X_d = \{x_e \ \ |\ \ e \ge d\} \] have the finite intersection property, so by compactness their closures have a nonempty intersection. Thus we can take a $y \in \bigcap_{d \in D} \overline{X_d}$. Every neighbourhood $U$ of $y$ intersects $X_d$ for all $d \in D$. In other words, for all $d \in D$ there is an $e \ge d$ such that $x_e \in U$. (Such a $y$ is called a \textbf{cluster point} of the net $(x_d)$.)
Consider the set \[ E = \{ (d,U) \in D \times \mathcal{N}(y) \ \ |\ \ x_d \in U \}, \] preordered by $(d_1, U_1) \le (d_2, U_2)$ if and only if $d_1 \le d_2$ in $D$ and $U_2 \subseteq U_1$. We claim that this is a directed set. Indeed let $(d_1, U_1)$ and $(d_2, U_2)$ be any pair of elements of $E$. There is an upper bound $e$ of $d_1$ and $d_2$ in $D$, and as $y$ is a cluster point of $(x_d)$, there is an $e' \ge e$ such that $x_{e'} \in U_1 \cap U_2$. Then $(e', U_1 \cap U_2)$ is an upper bound of $(d_1, U_1)$ and $(d_2, U_2)$ in $E$. Therefore $E$ is indeed a directed set.
Define the projection \begin{align*} f:\quad E &\to D \\
(d, U) &\mapsto d. \end{align*} This is clearly an order preserving function and it is a surjection as $d = f(d, X)$ for all $d \in D$. So $(x_{f(d,U)})_{(d,U)\in E}$ is a subnet of $(x_d)_{d \in D}$. Moreover, if $U$ is any neighbourhood of $y$, then there is by choice of $y$ a $d \in D$ such that $x_d \in U$. By definition of $E$ we have $x_{f(e,V)} \in U$ for all $(e, V) \ge (d, U)$. Therefore $x_{f(d,U)} \to y$, as required.
Conversely, suppose that $X$ is not compact. Then there is a collection $\mathcal{A}$ of closed sets with the finite intersection property, but with empty intersection. Let $\mathbf{D}$ be the set of finite subcollections of $\mathcal{A}$, ordered by inclusion. This is clearly a directed set. We can choose a net $(x_\mathcal{B})_{\mathcal{B} \in \mathbf{D}}$ where \[ x_\mathcal{B} \in \cap_{A \in \mathcal{B}} A \] for every finite $\mathcal{B} \subseteq \mathcal{A}$. Suppose for contradiction that $(x_{f(e)})_{e \in E}$ is a subnet of $(x_\mathcal{B})_{\mathcal{B} \in \mathbf{D}}$ that converges to some $y \in X$. By assumption, there is an $A \in \mathcal{A}$ such that $y \not\in A$. As $A$ is closed, there is a neighbourhood $U$ of $y$ such that $U \cap A = \emptyset$, and hence $x_\mathcal{B} \not\in U$ for all $\mathcal{B} \ge \{A\}$. As $f(E)$ is cofinal in $\mathbf{D}$, there is an $e_1 \in E$ such that $\{A\} \le f(e_1)$. But there must also be an $e_2 \in E$ such that $x_{f(e)} \in U$ for all $e \ge e_2$. Let $e$ be an upper bound of $e_1$ and $e_2$. Then we must have $x_{f(e)} \in U$, but on the other hand $\{A\} \le f(e_1) \le f(e)$ so we must have $x_{f(e)} \not\in U$, a contradiction. So the net $(x_\mathcal{B})_{\mathcal{B} \in \mathbf{D}}$ does not have a convergent subnet. \end{proof}
It is now tempting to accept the following argument: \emph{In a compact space, every net has a convergent subnet. So every sequence, considered as a net, has a convergent subnet, which is a convergent subsequence. So every compact space is sequentially compact.} But we know from Proposition \ref{compactnotseqcompact} that this is not true. The mistake is the fact that not every subnet of a sequence is a subsequence. In particular note that the function $f$ in the definition of a subnet need not be injective.
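A minimal example (included only to make this concrete): let $(x_n)_{n \in \mathbb{N}}$ be any sequence and let $f: \mathbb{N} \to \mathbb{N}$ be given by $f(e) = \lfloor e/2 \rfloor$. Then $f$ is order preserving and its image is all of $\mathbb{N}$, hence cofinal, so \[ (x_{f(e)})_{e \in \mathbb{N}} = (x_0, x_0, x_1, x_1, x_2, x_2, \ldots) \] is a subnet of $(x_n)$ that is not a subsequence, precisely because $f$ is not injective. The convergent subnets produced in the proof above are worse still: they are typically indexed by directed sets much larger than $\mathbb{N}$.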
\section{Further reading}
\subsection{Nets and filters}
As an alternative to nets in $X$, one can consider filters on $X$. A \textbf{filter} on $X$ is a nonempty collection of subsets of $X$ that is closed under binary intersections and supersets, and does not contain $\emptyset$. A filter $\mathcal{F}$ \textbf{converges} to a point $x$ if $U \in \mathcal{F}$ for every neighbourhood $U$ of $x$. Given a net $(x_d)_{d \in D}$ in $X$ one can consider the filter \[ \mathcal{F} = \{ A \subseteq X \ \ |\ \ \exists d \in D: \mbox{$x_e \in A$ for all $e \geq d$} \} \] of sets which \emph{eventually} contain all points of the net. This filter has the same limits as $(x_d)_{d \in D}$. Conversely given a filter $\mathcal{F}$ on $X$ one can consider the directed set $\mathcal{F}$ ordered by reverse inclusion. Then $\mathcal{F}$ converges to a point if and only if any net \[ (x_A)_{A \in \mathcal{F}} \] with \[ x_A \in A \] for all $A \in \mathcal{F}$ converges to that point as well. Hence nets and filters are in many ways interchangeable. Most of the propositions that I've proved using nets can equally well be proven using filters. I've only considered nets, because they arise as a generalization of sequences, which was the starting point of this article. However it is instructive to also think about filters, as they give a different point of view. Only by combining both points of view can one get the best insight into the mathematics.
Filters have another advantage. An easy application of Zorn's lemma gives that every filter on $X$ can be refined to an \textbf{ultrafilter}. An ultrafilter on $X$ is a filter which contains either $A$ or $X \setminus A$ for all $A \subseteq X$. The corresponding proposition for nets is that every net has a \textbf{universal} subnet. A net in $X$ is universal (also called an \textbf{ultranet}) if for every $A \subseteq X$, the net is either eventually in $A$ or eventually in $X \setminus A$. There is however no nice direct proof of the fact that every net has a universal subnet; filters are the more natural tool here.
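To see that such objects exist at all, here is a minimal example: for a fixed point $x \in X$, the collection \[ \mathcal{F}_x = \{ A \subseteq X \ \ |\ \ x \in A \} \] is an ultrafilter on $X$ (the \emph{principal} ultrafilter at $x$), and correspondingly the constant net with value $x$ is universal. The real content of the statements above lies in the existence of non-principal refinements, and there some form of the axiom of choice is genuinely needed.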
From the fact that every filter has an ultrafilter refinement, an easy proof of Tychonoff's theorem is possible. Indeed, compact spaces can be characterized as those spaces where every ultrafilter has a limit. But then in a product of compact spaces, we can find a limit of any ultrafilter by considering the projections on every component and taking a limit in each of these compact spaces.
A proof of Tychonoff's theorem using just nets is also possible, but is not as elegant \cite{chernoff}.
Nets were introduced in 1922 by E.~H.~Moore and H.~L.~Smith in \cite{mooresmith}. Hence nets were at first called \emph{Moore-Smith sequences}. The theory of nets was further developed by Birkhoff \cite{birkhoff} (most of the propositions of this article first appear in his paper) and by Kelley \cite{kelley1950} (who introduces the terms \emph{net} and \emph{ultranet}, and who proves Tychonoff's theorem using ultrafilters). McShane \cite{mcshane} gives an extensive motivation for the definitions of nets and subnets.
Filters were introduced in 1937 by Cartan \cite{cartan1,cartan2}. Even though filters are now also used in very different contexts, Cartan's motivation for defining them was to generalize the notion of convergence for sequences. Indeed he starts off his article \emph{Th\'eorie des filtres} \cite{cartan1} by writing: \begin{quote} Malgr\'e les services rendus en topologie par la consid\'eration des \emph{suites d\'enombrables}, leur emploi n'est pas adapt\'e \`a l'\'etude des espaces g\'en\'eraux. Nous voulons indiquer ici quel est l'instrument qui semble devoir les remplacer.\footnote{Translation from French: In spite of the accomplishments of considering countable sequences in topology, their use is not suitable in the study of general spaces. We want to indicate here which tool apparently should replace them.} \end{quote} Bourbaki's book on General Topology \cite{bourbaki} was the first to fully adopt the use of filters. The connections between nets and filters were investigated by Bartle \cite{bartle} and by Bruns and Schmidt \cite{bruns}.
\subsection{Sequential spaces and Fr\'echet-Urysohn spaces}
We have considered sequential spaces. Similar to sequential spaces are the Fr\'echet-Urysohn spaces. A topological space $X$ is \textbf{Fr\'echet-Urysohn} if, for every $A \subseteq X$, the closure of $A$ is exactly the set of limits of sequences in $A$. They are also sometimes simply called Fr\'echet spaces, but this might cause confusion as there are other uses for the term \emph{Fr\'echet space}.
Any first countable space is Fr\'echet-Urysohn. This can be proven just like Proposition \ref{firstcountableseq}. The example from the proof of Proposition \ref{seqnotfirstcountable} shows that there is a Fr\'echet-Urysohn space which is not first countable. Any Fr\'echet-Urysohn space is sequential, as by definition every sequentially closed set is its own closure. Fr\'echet-Urysohn spaces are exactly those spaces of which every subspace is sequential. However not every subspace of a sequential space is sequential. Indeed, consider \[Y = \{(x,0) \ \ |\ \ x \in \mathbb{R} \setminus \{0\} \} \cup \{(0,1)\} \cup \left\{ \left(\frac{1}{n+1},1\right) \ \ |\ \ n \in \mathbb{N} \right\} \] as a subset of the real plane. Let $(\mathbb{R}, \tau_q)$ be the quotient of $Y$ obtained by projecting onto the first coordinate. Then $(\mathbb{R}, \tau_q)$ is sequential as quotient of a metric space (Corollary \ref{seqquotientmetric}). But the subspace $\mathbb{R} \setminus \{ \frac{1}{n+1} \ \ |\ \ n \in \mathbb{N} \}$ has a sequentially open set $\{0\}$ which is not open. So $(\mathbb{R}, \tau_q)$ is a sequential space that is not Fr\'echet-Urysohn.
Sequential spaces and Fr\'echet-Urysohn spaces were most intensively studied in the 1960s. Most of the results mentioned here were obtained by S.~P.~Franklin in \cite{franklin} and \cite{franklin2}. Sequential spaces and Fr\'echet-Urysohn spaces are also covered in Engelking's book \cite{engelking}.
\end{document} |
\begin{document}
\title{Flat approximations of hypersurfaces along curves}
\author{Irina Markina} \address{Department of Mathematics\\ University of Bergen\\ 5020 Bergen\\ Norway} \email{[email protected]}
\author{Matteo Raffaelli} \address{DTU Compute\\ Technical University of Denmark\\ 2800 Kongens Lyngby\\ Denmark} \email{[email protected]}
\date{\today}
\begin{abstract} Given a smooth curve $\gamma$ in some $m$-dimensional surface $M$ in $\mathbb{R}^{m+1}$, we study existence and uniqueness of a flat surface $H$ having the same field of normal vectors as $M$ along $\gamma$, which we call a flat approximation of $M$ along $\gamma$. In particular, the well-known characterisation of flat surfaces as torses (ruled surfaces with tangent plane stable along the rulings) allows us to give an explicit parametric construction of such approximation. \end{abstract} \maketitle
\section{Introduction and Main Result} \noindent Developable, or flat, hypersurfaces in $\mathbb{R}^{m+1}$, where $m\geq 2$, are classical objects in Riemannian geometry. They are characterised by being foliated by open subsets of $(m-1)$-dimensional planes, called rulings, along which the tangent space remains stable \cite[Theorem~1]{ushakov1999}. Here we are concerned with the problem of existence and uniqueness---as well as with the explicit construction---of flat approximations of hypersurfaces along curves. Let $M^{m}$ be a (possibly curved) Euclidean hypersurface and $\gamma$ a curve in $M^{m}$. A hypersurface $H$ is called an \textit{approximation of} $M^{m}$ \textit{along} $\gamma$ if the two manifolds have common tangent space at every point of $\gamma$.
In dimension $2$, the question of existence has been settled for a long time. A constructive proof, under suitable assumptions, is already present in Do Carmo's textbook \cite[p.~195--197]{docarmo1976}. It turns out that the existence of a flat approximation of $M^{2}$ along $\gamma$ implies the existence of a rolling, in Nomizu's sense, of $M^{2}$ on the tangent space $T_{\gamma(0)}M^{2}$ along the given curve -- see \cite{nomizu1978} and \cite{raffaelli2016}. More recently, Izumiya and Otani have shown uniqueness \cite[Corollary~6.2]{izumiya2015}.
In this paper, we extend the result in \cite{docarmo1976} to any curve in $M^{m}$. More precisely, we shall present a constructive proof of the following
\begin{theorem} \label{mainresult} Let $\gamma \colon I \to M^{m}$ be a smooth curve in a hypersurface $M^{m}$ in $\mathbb{R}^{m+1}$. If the curve is never parallel to an asymptotic direction of $M^{m}$, then there exists a flat approximation $H$ of $M^{m}$ along $\gamma$. Such a hypersurface is unique in the following sense: if $H_{1}$ and $H_{2}$ are two flat approximations of $M^{m}$ along $\gamma$, then they agree on an open set containing $\gamma(I)$. \end{theorem}
The strategy to prove this result involves looking for $(m-1)$-tuples of linearly independent vector fields $(X_{1},\dotsc, X_{m-1})$ along $\gamma$ satisfying $\dot{\gamma}(t) \notin \Span(X_{j}(t))_{j\,=\,1}^{m-1}$ for all $t$ and having zero \emph{normal} derivative (normal projection of Euclidean covariant derivative). Indeed, such conditions guarantee that the image of the map $\gamma + \Span(X_{j})_{j\,=\,1}^{m-1}$ is a flat hypersurface of $\mathbb{R}^{m+1}$ in a neighbourhood of $\gamma$. The main difficulty resides in getting around the many-to-one correspondence between tuples of vector fields and rank-$(m-1)$ distributions along $\gamma$.
It is worth pointing out that the solution depends on the original hypersurface $M^{m}$ only through its distribution of tangent planes along $\gamma$. Thus, when $m=2$, our problem is nothing but the classical Bj\"{o}rling problem---to find all minimal surfaces passing through a given curve with prescribed tangent planes---addressed to a different class of surfaces. In this respect, the present work joins several other recent studies aimed at solving Bj\"{o}rling-type questions; see \cite{brander2018,brander2013} and references therein.
The paper is organised as follows. The next two sections present some preliminaries, mostly for the sake of introducing relevant notation and terminology. In Section $4$ we derive a simple condition for discerning when a parametrised ruled hypersurface has a flat metric. Such condition is then used in Section $5$ to prove the main theorem. Finally, in Section $6$ we give some general remarks about the construction of the approximation.
\section{Vector Cross Products} \noindent Let $V$ be an $n$-dimensional, real vector space equipped with a positive definite inner product $\langle\cdot{,}\cdot\rangle$. In the following, $V^{k}$ will indicate the $k$-th Cartesian power of $V$, and $L^{k}(V)$ the set of all multilinear maps from $V^{k}$ to $V$. Note that, under pointwise addition and scalar multiplication, $L^{k}(V)$ is a \emph{finite dimensional} vector space, in that it is naturally isomorphic to the space $T^{(1,k)}(V)$ of tensors on $V$ of type $(1,k)$ -- see for example \cite[Lemma~2.1]{lee1997}. Thus, $\dim L^{k}(V) = n^{k+1}$.
A $k$-\textit{fold vector cross product on} $V$, $1 \leq k \leq n$, is an element of $L^{k}(V)$---i.e., a multilinear map $ X \colon V^{k} \to V$---satisfying the following two axioms: \begin{align*} &\langle X(v_{1}, \dotsc, v_{k}), v_{i} \rangle = 0 \, , \quad 1 \leq i \leq k \, . \\ &\langle X(v_{1}, \dotsc, v_{k}), X(v_{1}, \dotsc, v_{k}) \rangle = \det (\langle v_{i}, v_{j} \rangle) \, . \end{align*} We emphasize that the second axiom implies that any such $X$ is alternating.
In particular, in the case $V$ carries an orientation $\mathcal{O}$, we say that an $(n-1)$-fold vector cross product $X$ is \textit{positively oriented} if the following condition holds for all $(n-1)$-tuples of linearly independent vectors $v_{1}, \dotsc, v_{n-1}$: \begin{equation*} \left(v_{1}, \dotsc, v_{n-1}, X(v_{1}, \dotsc, v_{n-1})\right) \in \mathcal{O} \, . \end{equation*} Analogously, a \textit{negatively oriented} vector cross product satisfies the same relation with $-\mathcal{O}$ in place of $\mathcal{O}$.
In \cite{brown1967}, Brown and Gray proved the following theorem: \begin{theorem} \label{VCP-TH1} Let $V$ be an oriented finite dimensional inner product space, of dimension $n$. There exists a unique positively oriented $(n-1)$-fold vector cross product $X = \cdot \times \dotsb \times \cdot$ on $V$. It is given by: \begin{equation*} v_{1} \times \dotsb \times v_{n-1} = \star (v_{1} \wedge \dotsb \wedge v_{n-1}) \, \end{equation*} where $\star$ is the Hodge star operator on $V$. \end{theorem}
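As a quick sanity check of Theorem \ref{VCP-TH1} in the familiar case $n = 3$ (included here only as an illustration), the unique positively oriented $2$-fold vector cross product on $\mathbb{R}^{3}$, with its standard inner product and orientation, is the classical cross product. For the standard basis $(e_{1}, e_{2}, e_{3})$ we have \begin{equation*} e_{1} \times e_{2} = \star (e_{1} \wedge e_{2}) = e_{3} \, , \end{equation*} and both axioms are immediate: $e_{3}$ is orthogonal to $e_{1}$ and $e_{2}$, and $\langle e_{3}, e_{3} \rangle = 1 = \det (\langle e_{i}, e_{j} \rangle)_{1 \leq i,j \leq 2}$.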
We now turn our attention to manifolds. If $M$ is a smooth Riemannian manifold of dimension $m$, let $L^{k}\mathit{TM}$ be the disjoint union of all the vector spaces $L^{k}(T_{p}M)$: $$ L^{k}\mathit{TM} = \bigsqcup_{p \in M} L^{k}(T_{p}M) \, .$$
Clearly, since $L^{k}(T_{p}M) \cong T^{(1,k)}(T_{p}M)$, the set $L^{k}\mathit{TM}$ has a canonical choice of topology and smooth structure turning it into a smooth vector bundle of rank $m^{k+1}$ over $M$. We define a $k$-\textit{fold vector cross product on} $M$, where $1 \leq k \leq m$, to be a smooth section $X$ of $L^{k}\mathit{TM}$ such that, for every point $p \in M$, the map $X_{p}$ is a $k$-fold vector cross product on $T_{p}M$.
We thus have the following corollary of Theorem \ref{VCP-TH1}: \begin{corollary} Let $M$ be a smooth oriented $m$-dimensional Riemannian manifold. There exists a unique $(m-1)$-fold positively oriented vector cross product on $M$. It acts on $(m-1)$-tuples of vector fields $X_{1}, \dotsc, X_{m-1}$ on $M$ by \begin{equation*} X_{1} \times \dotsb \times X_{m-1} = \star (X_{1} \wedge \dotsb \wedge X_{m-1}) \,. \end{equation*} \end{corollary}
\section{Frames Along Curves} \noindent In this section we review some basic facts about Euclidean submanifolds and orthonormal frames along curves.
Let us start with some notation. If $m\geq2$, let $M$ be an $m$-dimensional embedded submanifold of $\mathbb{R}^{d}$, and $\gamma \colon I=[0,\alpha] \to M$ a smooth regular curve in $M$. Throughout this paper, $\mathbb{R}^{d}$ will always be equipped with the standard Euclidean metric $\overline{g}$, typically indicated by a dot ``$\,\cdot\,$'', and standard orientation. Thus, there is a natural choice of Riemannian metric on $M$: the induced metric $\iota^{\ast}\overline{g}$, i.e., the pullback of $\overline{g}$ by the inclusion $\iota \colon M \hookrightarrow \mathbb{R}^{d}$.
Working with submanifolds, it is customary to identify each tangent space $T_{p}M$ with its image under the differential of $\iota$. In so doing, the ambient tangent space $T_{p}\mathbb{R}^{d}$ splits as the orthogonal direct sum $T_{p}M \oplus N_{p}M$, where $N_{p}M$ is the normal space of $M$ at $p$. Thus, the set $\mathfrak{X}(M)$ of tangent vector fields \emph{on} $M$ becomes a proper subset of the set of vector fields \emph{along} $M$, which we denote by $\overline{\mathfrak{X}}(M)$. If $X \in \mathfrak{X}(M)$ and $\varUpsilon \in \overline{\mathfrak{X}}(M)$, \begin{equation*} \overline{\nabla}_{X}\varUpsilon = (\overline{\nabla}_{X}\varUpsilon)^{\top} + (\overline{\nabla}_{X}\varUpsilon)^{\perp}\,, \end{equation*} where $\overline{\nabla}$ is the Euclidean connection, $\top$ and $\perp$ are the orthogonal projections onto the tangent and normal bundle of $M$, and where the vector fields $X$ and $\varUpsilon$ are extended arbitrarily to $\mathbb{R}^{d}$. It turns out that the map $\mathfrak{X}(M) \times \mathfrak{X}(M) \to \mathfrak{X}(M)$ defined by \begin{equation*} (X,Y) \mapsto (\overline{\nabla}_{X}Y)^{\top} \end{equation*} is a linear connection on $M$, called the tangential connection. In fact, it is none other than the (intrinsic) Levi-Civita connection $\nabla$ of $(M,\iota^{\ast}\overline{g})$.
Similarly, indicating by $\mathfrak{X}(M)^{\perp}$ the set of normal vector fields along $M$, we define the normal connection on $M$ as the map $\mathfrak{X}(M) \times \mathfrak{X}(M)^{\perp} \to \mathfrak{X}(M)^{\perp}$ given by \begin{equation*} (X,N) \mapsto (\overline{\nabla}_{X}N)^{\perp}\,. \end{equation*}
Let us recall that an orthonormal frame along $\gamma$ is an $m$-tuple of smooth vector fields $(E_{i})_{i\,=\,1}^{m}$ along $\gamma$ such that $(E_{i}(t))_{i\,=\,1}^{m}$ is an orthonormal basis of $T_{\gamma(t)}M$ for all $t$. In particular, an orthonormal frame $(W_{1}, \dotsc, W_{d})$ along a curve $\iota \circ \gamma$ in $\mathbb{R}^{d}$ is said to be $M$-\textit{adapted} if $(W_{i})_{i\,=\,1}^{m}$ spans the tangent bundle of $M$ along $\gamma$.
In the remainder of this section, we assume that $M$ has codimension one in $\mathbb{R}^{d}$, i.e., that $d=m+1$. Under such hypothesis, given any orthonormal frame $(E_{i})_{i\,=\,1}^{m}$ along $\gamma$, we can construct an associated $M$-adapted orthonormal frame along $\iota \circ \gamma$ as follows. For $k=1, \dotsc, m$, let $W_{k} = E_{k}$; then, for $k=m+1$, \begin{equation*} W_{m+1} = E_{1} \times \dotsb \times E_{m}\,, \end{equation*} so that $(W_{1}, \dotsc, W_{m+1})$ is the unique extension of $(E_{i}(t))_{i\,=\,1}^{m}$ to a positively oriented, orthonormal frame along $\iota\circ\gamma$.
Denoting by $D_{t}$ and $\overline{D}_{t}$ the covariant derivative operators determined by $\nabla$ and $\overline{\nabla}$, respectively, we may write \begin{equation} \label{FAC-EQ1} \overline{D}_{t} E_{i} = D_{t} E_{i} +\tau_{i}W_{m+1} \,, \end{equation} for some smooth function $\tau_{i} \colon I \to \mathbb{R}$. Clearly, should $M$ be orientable, $\tau_{i} = \pm h(E_{1},E_{i})$, where $h$ is the (scalar) second fundamental form of $M$ determined by a choice of unit normal vector field. Moreover, it easily follows from orthonormality that \begin{equation*} \overline{D}_{t} W_{m+1} = -\tau_{1}E_{1} - \dotsb -\tau_{m}E_{m}\,. \end{equation*}
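For completeness, let us sketch how the last formula follows from orthonormality. Differentiating the relations $W_{m+1} \cdot W_{m+1} = 1$ and $W_{m+1} \cdot E_{i} = 0$ along $\gamma$ gives \begin{equation*} \overline{D}_{t} W_{m+1} \cdot W_{m+1} = 0 \, , \qquad \overline{D}_{t} W_{m+1} \cdot E_{i} = -W_{m+1} \cdot \overline{D}_{t} E_{i} = -\tau_{i} \, , \end{equation*} where the last equality uses \eqref{FAC-EQ1} together with the fact that $D_{t}E_{i}$ is tangent to $M$; expanding $\overline{D}_{t} W_{m+1}$ in the frame $(W_{1}, \dotsc, W_{m+1})$ then yields the displayed expression.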
\section{Developable Surfaces} \noindent The main purpose of this section is to generalize to higher dimensions the following well-known fact about ruled surfaces in $\mathbb{R}^{3}$ -- see for example \cite[p.~194]{docarmo1976}: \begin{lemma} \label{DH-LM1} Let $I$, $J$ be open intervals. Further, let $\gamma$ and $X$ be curves $I \to \mathbb{R}^{3}$ such that the map $\sigma \colon I \times J \to \mathbb{R}^{3}$ given by \begin{equation*} \sigma(t,u) = \gamma(t)+ u X(t) \end{equation*} is a smooth injective immersion. Then the Gauss curvature of $\sigma(I \times J)$ is zero precisely when $\gamma$ and $X$ satisfy $\dot{\gamma} \cdot \dot{X} \times X = 0$. \end{lemma}
We shall begin with some definitions extending the classical notions of ruled and torse surface to arbitrary dimension, yet keeping the codimension fixed to $1$. If $m \geq 2$, let $H$ be a hypersurface in $\mathbb{R}^{m+1}$, as always smooth and embedded. \begin{definition} \label{DH-DEF2} We say that $H$ is a \textit{ruled} surface if \begin{enumerate} \item \label{cond1} $H$ is free of planar points, that is, there exists no point of $H$ where the second fundamental form vanishes; \item there exists a \textit{ruled structure on} $H$, that is, a foliation of $H$ by open subsets of $(m-1)$-dimensional affine subspaces of $\mathbb{R}^{m+1}$, called \textit{rulings}. \end{enumerate} In particular, a ruled surface $H$ is said to be a \textit{torse surface} if, for every pair of points $(p,q)$ on the same ruling, we have $T_{p}H = T_{q}H$, i.e., if all tangent spaces of $H$ along a fixed ruling can be canonically identified with the same linear subspace of $\mathbb{R}^{m+1}$. \end{definition}
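For orientation, the familiar examples from $\mathbb{R}^{3}$ (i.e., $m=2$) fit this definition as expected: away from their planar points, cylinders, cones and tangent developables of space curves are torse surfaces, whereas the helicoid is ruled but not a torse, since its tangent plane rotates along each ruling.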
\begin{remark} \label{DS-RMK3} Although condition \ref{cond1} in Definition \ref{DH-DEF2} may seem overly restrictive, it gives any ruled surface $H$ a desirable property. Namely, it ensures the existence of a \emph{smooth} ruled parametrisation of $H$ \cite{ushakov1996}. On the other hand, we will also need to work with the broader class of \textit{generalised ruled hypersurfaces} obtained by relaxing such condition. It is well known that every generalised torse with planar points is made up of both standard torses and pieces of $m$-planes, always glued along a well-defined ruling. \end{remark}
Recall that any $d$-dimensional Riemannian manifold locally isometric to $\mathbb{R}^{d}$ is said to be \textit{flat}. For hypersurfaces, the classical term is \emph{developable}; see \cite[Section~1]{ushakov1999} for a detailed discussion on terminology. Remarkably, it turns out that
\begin{theorem}[{\cite[Theorem~1]{ushakov1999}}] $H$ is a torse surface if and only if it is free of planar points and, when equipped with the induced metric $\iota^{\ast}\overline{g}$, $H$ becomes a flat Riemannian manifold. \end{theorem}
\begin{corollary} \label{DS-COR} $H$ is a generalised torse surface if and only if the induced metric on $H$ is flat. \end{corollary}
Given a curve $\gamma$ in $\mathbb{R}^{m+1}$, the following result is key for constructing ruled surfaces containing $\gamma$. Note that in its statement we use the canonical isomorphism between $\mathbb{R}^{m+1}$ and any of its tangent spaces to identify the vector fields $X_{1}, \dotsc, X_{m-1}$ along $\gamma$ with curves in $\mathbb{R}^{m+1}$.
\begin{lemma} Let $I$ be a closed interval. Let $\gamma \colon I \to \mathbb{R}^{m+1}$ be a smooth injective immersion. Let $(X_{1}, \dotsc, X_{m-1})$ be a smooth, linearly independent $(m-1)$-tuple of vector fields along $\gamma$ such that $\dot{\gamma}(t) \times X_{1}(t) \times \dotsb \times X_{m-1}(t) \neq 0$ for all $t \in I$. Then there exists an open box $V$ in $\mathbb{R}^{m-1}$ containing the origin such that the restriction to $I \times V$ of the map $\sigma \, \colon I \times \mathbb{R}^{m-1} \to \mathbb{R}^{m+1}$ defined by \begin{equation*} \sigma(t, u) = \gamma(t) + \sum\nolimits_{j} u^{j} X_{j}(t)
\end{equation*} is a smooth embedding. \end{lemma} \begin{proof} To show that $\sigma$ restricts to an embedding, we first prove the existence of an open box $V_{1}$ such that $\sigma \rvert_{I\times V_{1}}$ is a smooth immersion. Essentially, the statement will then follow by compactness of $I$.
Obviously, $\sigma$ is immersive at $(t,u)$ if and only if the length $\ell \,\colon I \times \mathbb{R}^{m-1} \to \mathbb{R}$ of the cross product of the partial derivatives of $\sigma$ is non-zero at $(t,u)$. Thus, define $W_{t}$ to be the subset of $\{t\} \times \mathbb{R}^{m-1}$ where $\sigma$ is immersive. It is an open subset in $\mathbb{R}^{m-1}$ because it is the inverse image of an open set under a continuous map, $W_{t} = \ell(t,\cdot)^{-1}(\mathbb{R} \setminus \{0\})$; it contains $0$ by assumption. Thence, there exists an $\epsilon_{t} >0$ such that the open ball $B(\epsilon_{t},0) \subset \mathbb{R}^{m-1}$ is completely contained in $W_{t}$. Taking each $\epsilon_{t}$ maximal with this property (truncated at $1$, say), the function $t \mapsto \epsilon_{t}$ is positive and lower semicontinuous, because $\{\ell \neq 0\}$ is open; hence $\epsilon_{1} = \inf_{t \, \in \, I}(\epsilon_{t})>0$ by compactness of $I$, and we can conclude that the restriction of $\sigma$ to the box $I\times (-\epsilon_{1}/2,\epsilon_{1}/2)^{m-1}$ is a smooth immersion.
Now, since $\sigma$ is a smooth immersion on $I \times V_{1}$, it follows that every point of $I \times V_{1}$ has a neighbourhood on which $\sigma$ is a smooth embedding. Let then $W_{t}'$ be the subset of $W_{t}$ where this is the case. It is open in $\mathbb{R}^{m-1}$, and it contains the origin because $\gamma$ is a smooth injective immersion of a compact manifold, hence an embedding. From here we may proceed as before.
\end{proof}
Thus, for suitably chosen $(X_{1}, \dotsc, X_{m-1})$ and $V \subset \mathbb{R}^{m-1}$, we have verified that $H_{\sigma} = \Ima\sigma\rvert_{I\times V}$ is a hypersurface in $\mathbb{R}^{m+1}$, and $\mathscr{F}_{\sigma} = \{ \sigma(t,V) \}_{t \, \in \, I}$ a ruled structure on it. Under these hypotheses, let us assume $H_{\sigma}$ is orientable (this we can do, possibly limiting the analysis to an open subset). Then, we may pick out a smooth unit normal vector field $N$ along $H_{\sigma}$ by means of the $m$-fold cross product on $\mathbb{R}^{m+1}$, as follows. Letting \begin{equation} \label{DH-EQ1} Z = \frac{\partial \sigma}{\partial t} \times \frac{\partial \sigma}{\partial u^{1}} \times \dotsb \times \frac{\partial \sigma}{\partial u^{m-1}} \, , \end{equation} define $\widehat{N} = Z \lvert Z \rvert^{-1}$, and so $N = \widehat{N} \circ \sigma^{-1}$. In this situation, assuming there are no planar points, $H_{\sigma}$ being a torse surface is equivalent to $N$ being constant along each of the rulings. Thus, indicating by $\overline{\nabla}$ the Euclidean connection on $\mathbb{R}^{m+1}$, $(H_{\sigma},\iota^{\ast}\overline{g})$ is flat if and only if, for all vector fields $X$ tangent to $\mathscr{F}_{\sigma}$ on $H_{\sigma}$: \begin{equation} \label{DH-EQ2}
\overline{\nabla}_{X}N = 0\, . \end{equation} In fact, by linearity -- and writing $\partial_{j}$ as a shorthand for $\frac{\partial}{\partial u^{j}}$ -- it suffices that \eqref{DH-EQ2} holds for the vector fields $\sigma_{\ast}(\partial_{1}), \dotsc, \sigma_{\ast}(\partial_{m-1})$ spanning the distribution corresponding to $\mathscr{F}_{\sigma}$. We may thereby express the developability condition for $(H_{\sigma},\iota^{\ast}\overline{g})$ simply as \begin{equation} \label{DH-EQ3} \partial_{1}\widehat{N} = \dotsb = \partial_{m-1}\widehat{N} = 0\,, \end{equation} where we understand $\partial_{j}$ as acting on the coordinate functions $\widehat{N}^{1},\dotsc, \widehat{N}^{m+1}$ of $\widehat{N}$ in the standard coordinate frame of $T \mathbb{R}^{m+1}$.
The next lemma finally translates \eqref{DH-EQ3} into $m-1$ conditions involving the vector fields $X_{1}, \dotsc, X_{m-1}$ along $\gamma$, and represents the sought generalization of Lemma \ref{DH-LM1}. It says that $\iota^{\ast}\overline{g}$ is a flat Riemannian metric precisely when $\overline{D}_{t}X_{j} = D_{t}X_{j}$ for every $j$, or equivalently when each of the normal projections $(\overline{D}_{t}X_{1})^{\perp}, \dotsc, (\overline{D}_{t}X_{m-1})^{\perp}$ vanishes identically. \begin{lemma} Assume $\sigma\rvert_{I\times V}$ is a smooth embedding. The hypersurface $H_{\sigma}$ is a generalised torse surface if and only if the following equations hold: \begin{align} \label{DH-EQ4} \begin{split} \dot{\gamma} \cdot \partial_{1} Z \equiv \dot{\gamma} \cdot \overline{D}_{t}X_{1} \times X_{1} \times \dotsb \times X_{m-1} &= 0 \\ &\mathrel{\makebox[\widthof{=}]{\vdots}} \\ \dot{\gamma} \cdot \partial_{m-1} Z \equiv \dot{\gamma} \cdot \overline{D}_{t}X_{m-1} \times X_{1} \times \dotsb \times X_{m-1} &= 0 \end{split} \end{align} \end{lemma} \begin{proof} Computing the partial derivatives of $\sigma$ and substituting them into the expression \eqref{DH-EQ1} for $Z$, we get: \begin{equation*} Z(t,u)= \{ \dot{\gamma}(t) + u^{i} \overline{D}_{t}X_{i}(t) \} \times X_{1}(t) \times \dotsb \times X_{m-1}(t)\,, \end{equation*} from which the identity $\partial_{j}Z \equiv \overline{D}_{t}X_{j} \times X_{1} \times \dotsb \times X_{m-1}$ clearly follows. Thus, we need to prove that $\partial_{1}\widehat{N} = \dotsb = \partial_{m-1}\widehat{N} = 0$ if and only if $\partial_{1}Z \cdot \dot{\gamma} = \dotsb = \partial_{m-1}Z \cdot \dot{\gamma} = 0$. In fact, since $\partial_{j}Z$ is orthogonal to $X_{1}, \dotsc, X_{m-1}$, it is enough to check that $\partial_{1}\widehat{N} = \dotsb = \partial_{m-1}\widehat{N} = 0$ if and only if $(\partial_{1}Z )^{\top}= \dotsb = (\partial_{m-1}Z)^{\top}= 0$. First, assume $\partial_{j}\widehat{N} = 0$. Since $\widehat{N} = Z \lvert Z \rvert^{-1}$, it follows by linearity of the tangential projection that \begin{equation*} \lvert Z \rvert (\partial_{j}Z)^{\top} - Z^{\top} \partial_{j} \lvert Z \rvert = 0\,, \end{equation*} which is true exactly when $(\partial_{j}Z)^{\top} = 0$, as desired. To verify the converse, note that $(\partial_{j}\widehat{N})^{\perp} = 0$ because $\widehat{N}$ has unit length. Thus, again by linearity of $\top$, \begin{equation*} \partial_{j}\widehat{N}= \frac {(\partial_{j}Z)^{\top}\lvert Z\rvert - Z^{\top}\partial_{j}\lvert Z \rvert}{\lvert Z\rvert^{2}}\,. \end{equation*} Since $Z^{\top}=0$, the claim follows. \end{proof}
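For instance, in the lowest-dimensional case $m=2$ the system \eqref{DH-EQ4} consists of the single equation \begin{equation*} \dot{\gamma} \cdot \overline{D}_{t}X_{1} \times X_{1} = 0\,, \end{equation*} and since $\overline{D}_{t}X_{1}$ is simply the componentwise derivative $\dot{X}_{1}$, this is exactly the condition $\dot{\gamma} \cdot \dot{X} \times X = 0$ of Lemma \ref{DH-LM1}.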
\section{Proof of the Main Result}
\noindent Here we prove our main result, stated in Theorem \ref{mainresult} in the Introduction. The proof is constructive and is based on the fact that a Euclidean hypersurface without planar points has a flat induced metric precisely when it is a torse surface (Theorem 1.3). Let $M$ be a hypersurface in $\mathbb{R}^{m+1}$ and $\gamma$ a smooth curve in $M$, as defined at the beginning of Section $3$. Denoting by $\mathfrak{X}(\gamma)$ the set of smooth, non-vanishing vector fields along $\gamma$, define an equivalence relation on the $n$-th Cartesian power $\mathfrak{X}(\gamma)^{n}$ of $\mathfrak{X}(\gamma)$ by the following rule: \begin{equation*} \{(X_{1},\dotsc,X_{n}) \sim (Y_{1},\dotsc,Y_{n})\} \Leftrightarrow \{\Span (X_{1},\dotsc,X_{n}) =\Span (Y_{1},\dotsc,Y_{n})\}. \end{equation*} Let us indicate an element of the quotient $\mathfrak{X}(\gamma)^{n}/{\sim}$, that is, an element of $\mathfrak{X}(\gamma)^{n}$ up to equivalence, by $[X_{1},\dotsc,X_{n}]$. We wish to find $[X_{1}, \dotsc, X_{m-1}]$ such that, for every $t \in I$ and integer $j$ with $1 \leq j \leq m-1$, both the conditions \begin{align} &\dot{\gamma} \cdot \overline{D}_{t}X_{j} \times X_{1} \times \dotsb \times X_{m-1} = 0 \label{PMR-EQ1} \\ &\dot{\gamma}(t) \times X_{1}(t) \times \dotsb \times X_{m-1}(t) \neq 0 \label{PMR-EQ2} \end{align} are satisfied. Beware that, throughout this section, we will extensively use the Einstein summation convention: every time the same index appears twice in any monomial expression, once as an upper index and once as a lower index, summation over all possible values of that index is understood.
Once and for all, let us choose a $\gamma$\textit{-adapted} orthonormal frame $(E_{1}, \dotsc, E_{m})$ along $\gamma$: this is just an orthonormal frame along $\gamma$ whose first element coincides with the tangent vector $\dot{\gamma}$. The first step is to rewrite \eqref{PMR-EQ1} as an equation involving the $m(m-1)$ coordinate functions $X_{j}^{i}$ of $X_{1},\dotsc,X_{m-1}$ with respect to $(E_{1}, \dotsc, E_{m})$. Differentiating covariantly $X_{j} = X_{j}^{i}E_{i}$ and substituting, we obtain \begin{equation} \label{PMR-EQ3} E_{1} \cdot (\overline{D}_{t}X_{j}^{i}E_{i} + X_{j}^{i} \overline{D}_{t}E_{i}) \times X_{1}^{i}E_{i} \times \dotsb \times X_{m-1}^{i}E_{i} = 0 \,, \end{equation} whereas, from \eqref{FAC-EQ1}, \begin{align*} \sum_{i\, =\, 1}^{m} \overline{D}_{t}E_{i}&=\sum_{i \, =\, 1}^{m} D_{t}E_{i}+ E_{m+1}\sum_{i \,=\, 1}^{m}\tau_{i} \\ &= \sum_{i \, =\, 1}^{m} \left\{ (D_{t}E_{i} \cdot E_{1}) E_{1} + \dotsb + (D_{t}E_{i} \cdot E_{m}) E_{m}\right\} + E_{m+1}\sum_{i \,=\, 1}^{m}\tau_{i} \,. \end{align*} Here $E_{m+1}$ denotes the unit normal $W_{m+1} = E_{1} \times \dotsb \times E_{m}$ constructed above. Now, given any ordered $m$-tuple $(i_{1},\dotsc, i_{m})$ of integers with $1 \leq i_{1} \leq m+1$ and $1 \leq i_{k} \leq m$ for $k = 2,\dotsc,m$, a necessary condition for the $m$-fold cross product $E_{i_{1}}\times \dotsb \times E_{i_{m}}$ to give either $E_{1}$ or $-E_{1}$ is that $i_{1} = m+1$ and $i_{k} \neq 1$. It follows that \eqref{PMR-EQ3} is equivalent to \begin{equation} \label{PMR-EQ4} E_{1} \cdot X_{j}^{i}\tau_{i}E_{m+1} \times (X_{1}^{2}E_{2} + \dotsb + X_{1}^{m}E_{m}) \times \dotsb \times (X_{m-1}^{2}E_{2} + \dotsb + X_{m-1}^{m}E_{m}) = 0\,. \end{equation} In fact, $E_{i_{1}}\times \dotsb \times E_{i_{m}} = \pm E_{1}$ if and only if $i_{1} = m+1$ and the $(m-1)$-tuple $(i_{2},\dotsc, i_{m})$ is a permutation of $(2,\dotsc,m)$. In particular, if it is an \emph{even} permutation, then the basis $(E_{m+1},E_{i_{2}},\dotsc,E_{i_{m}},E_{1})$ is \emph{negatively} oriented, for transposing $E_{m+1}$ and $E_{1}$ must give a positive basis, and so $E_{i_{1}}\times \dotsb \times E_{i_{m}} = -E_{1}$. Thence, denoting by $S_{m}^{2}$ the group of permutations $\sigma$ of $(2,\dotsc,m)$, we may write \eqref{PMR-EQ4} simply as \begin{equation*} -X_{j}^{i}\tau_{i} \sum_{\sigma \,\in\, S_{m}^{2}} \Sign(\sigma) X_{1}^{\sigma(2)} \dotsm X_{m-1}^{\sigma(m)} = 0\,. \end{equation*} On the other hand, a similar computation would reveal that condition \eqref{PMR-EQ2} is satisfied for every $t$ if and only if the summation term above (the term independent of $j$) never vanishes. We may thereby conclude that, under the assumption of \eqref{PMR-EQ2} being true, condition \eqref{PMR-EQ1} is equivalent to $X_{j}^{i}\tau_{i} = 0$.
Next, consider the set $\mathscr{Z} \subset \mathfrak{X}(\gamma)$ of smooth vector fields $Z$ along $\gamma$ such that $Z^{1}(t) = Z \cdot E_{1}(t) \neq0$ for every $t$. We establish a bijection between its quotient $\mathscr{Z}/{\sim}$ by $\sim$ and the subset of $\mathfrak{X}(\gamma)^{m-1}/{\sim}$ where \eqref{PMR-EQ2} holds. For every $j$, let \begin{equation} \label{PMR-EQ5} X_{j}(Z) = Z \times E_{2} \times \dotsb \times \widetilde{E}_{m-j+1} \times \dotsb \times E_{m}\,, \end{equation} where the tilde indicates that $E_{m-j+1}$ is omitted, so that the cross product is $(m-1)$-fold. For example, when $j=1$, we omit the last vector field $E_{m}$; when $j=2$ the second to last, and so on, until dropping $E_{2}$ for $j=m-1$. Linear independence of $E_{1},X_{1}(Z), \dotsc, X_{m-1}(Z)$ is easily seen, as by definition $Z$ is never in the span of $E_{2},\dotsc,E_{m}$. Since the normal projection $Z \mapsto Z^{\perp}$ induces a bijection between $\mathscr{Z}/{\sim}$ and the set of smooth $(m-1)$-distributions along $\gamma$ nowhere parallel to $E_{1}$, it follows that the map $[Z] \mapsto [X_{1}(Z),\dotsc,X_{m-1}(Z)]$ between classes of equivalence is indeed a valid parametrisation of the solution set of \eqref{PMR-EQ2}.
We then compute the coordinates of the cross product in \eqref{PMR-EQ5} with respect to the frame $(E_{1},\dotsc,E_{m})$. Substituting $Z = Z^{i}E_{i}$, all but the terms $Z^{1}E_{1}$ and $Z^{m-j+1}E_{m-j+1}$ will not give any contribution. In particular, $E_{1} \times \dotsb \times \widetilde{E}_{m-j+1} \times \dotsb \times E_{m} = \pm E_{m-j+1}$ depending on whether $(E_{1}, \dotsc, \widetilde{E}_{m-j+1}, \dotsc, E_{m}, E_{m-j+1})$ is positively or negatively oriented. Since the corresponding permutation of $(1,\dotsc, m)$ has sign $(-1)^{j-1}$, we conclude that $X_{j}^{m-j+1}(Z) = (-1)^{j-1}Z^{1}$. An analogous argument would show that $X_{j}^{1}(Z) = (-1)^{j}Z^{m-j+1}$.
Summing up, solving the original problem on $\mathfrak{X}(\gamma)^{m-1}/{\sim}$ essentially amounts to finding $[Z] \in \mathscr{Z}/{\sim}$ such that $X_{j}^{i}(Z)\tau_{i} = 0$ for every $j$. Moreover, by the previous computation, \begin{equation*} X_{j}^{i}(Z)\tau_{i} = (-1)^{j}Z^{m-j+1}\tau_{1}+(-1)^{j-1}Z^{1}\tau_{m-j+1}\,. \end{equation*}
Thus, denoting again by $\sim$ the equivalence relation on $C^{\infty}(I)^{m} = C^{\infty}(I;\mathbb{R}^{m})$ naturally induced from the one on $\mathfrak{X}(\gamma)$, we need to look for $(Z^{1}, \dotsc,Z^{m})$, up to equivalence, satisfying the following system of $m-1$ linear equations on $C^{\infty}(I;\mathbb{R}_{\neq0}) \times C^{\infty}(I)^{m-1}$: \begin{align} \begin{split} \label{PMR-EQ6} Z^{m}\tau_{1}-Z^{1}\tau_{m} &=0\\ Z^{m-1}\tau_{1}-Z^{1}\tau_{m-1} &=0\\ &\mathrel{\makebox[\widthof{=}]{\vdots}} \\ Z^{3}\tau_{1}-Z^{1}\tau_{3} &=0\\ Z^{2}\tau_{1}-Z^{1}\tau_{2} &=0\,. \end{split} \end{align} Assume $\tau_{1}(t) \neq0$ for all $t$. Then, for any given $Z^{1}$ (remember $Z^{1}$ is non-vanishing by definition), the system has solution \begin{equation*} \frac{Z^{1}}{\tau_{1}}\left(\tau_{1},\dotsc,\tau_{m}\right). \end{equation*} However, it is easy to see that all solutions are in one and the same equivalence class. Indeed, if $f$ and $g$ are two distinct values of $Z^{1}$, then \begin{equation*} \frac{\tau_{i}}{\tau_{1}}f = \frac{f}{g}\frac{\tau_{i}}{\tau_{1}}g\,. \end{equation*}
In particular, letting $Z^{1}=\tau_{1}$, we obtain $Z^{i} = \tau_{i}$ for every $i=1,\dotsc,m$, and the solution of the original problem on $\mathfrak{X}(\gamma)^{m-1}/{\sim}$ is given by \begin{align*}
X_{1} &= -\tau_{m}E_{1} + \tau_{1}E_{m}\\ X_{2} &= \tau_{m-1}E_{1} - \tau_{1}E_{m-1}\\ &\mathrel{\makebox[\widthof{=}]{\vdots}} \\ X_{m-2} &= (-1)^{m-2} \tau_{3}E_{1} + (-1)^{m-3}\tau_{1}E_{3}\\ X_{m-1} &= (-1)^{m-1} \tau_{2}E_{1} + (-1)^{m-2}\tau_{1}E_{2}\,.
\end{align*}
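As a quick consistency check (ours, and assuming $M$ orientable, so that $\tau_{i} = \pm h(E_{1},E_{i})$ as noted after \eqref{FAC-EQ1}), each of the vector fields above, $X_{j} = (-1)^{j}\tau_{m-j+1}E_{1} + (-1)^{j-1}\tau_{1}E_{m-j+1}$, is conjugate to $\dot{\gamma}$ with respect to the second fundamental form: \begin{equation*} h(E_{1},X_{j}) = (-1)^{j}\tau_{m-j+1}\,h(E_{1},E_{1}) + (-1)^{j-1}\tau_{1}\,h(E_{1},E_{m-j+1}) = \pm\bigl((-1)^{j} + (-1)^{j-1}\bigr)\tau_{1}\tau_{m-j+1} = 0\,. \end{equation*} For $m=2$ this is consistent with the classical picture of the osculating developable of a surface along a curve, whose rulings point in the direction conjugate to $\dot{\gamma}$.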
As for uniqueness, in view of Remark \ref{DS-RMK3}, it is sufficient to show that the condition $\tau_{1}(t) \neq 0$ for all $t$ implies that any flat approximation $H$ of $M^{m}$ along $\gamma$ is free of planar points, i.e., is a torse surface. To see this, let $N_{H}$ and $N_{M}$ be smooth unit normal vector fields along $H$ and $M^{m}$, respectively, defined in a neighbourhood of $\gamma(t)$. Then, $\overline{D}_{t}N_{H} =\overline{D}_{t}N_{M}$. Since $H$ is a generalised torse surface by Corollary \ref{DS-COR}, the claim easily follows.
\section{Construction of an Adapted Frame} \noindent As seen in the last section, the construction of the flat approximation of $M$ along $\gamma$ requires choosing some $\gamma$-adapted orthonormal frame $(E_{i})_{i \,= \,1}^{m}$ along $\gamma$. We emphasize that such a choice is completely arbitrary. If the curve in question satisfies some (rather strong) conditions on its derivatives, then a natural generalization of the classical Frenet--Serret frame is available. The reader may find details on this construction in \cite{spivak1999} or \cite{kuhnel2015}. Here we briefly review an alternative approach, one that does not require any initial assumption on the curve. This approach is due to Bishop \cite{bishop1975}.
First of all, since the problem is local, we are free to assume that $\gamma$ is a smooth embedding. Thus, for any point $p \in S = \gamma(I)$, there exist slice coordinates $(x_{1},\dotsc,x_{m})$ in a neighbourhood $U$ of $p$. It follows that $(\partial_{1}\lvert_{p}, \dotsc, \partial_{m}\lvert_{p})$ is a $\gamma$-adapted basis of $T_{p}M$, i.e., it satisfies $T_{p}S = \Span \partial_{1}\lvert_{p}$, while $(\partial_{2}\lvert_{p}, \dotsc, \partial_{m}\lvert_{p})$ spans a complement of $T_{p}S$ in $T_{p}M$. By applying the Gram--Schmidt process to these vectors, one obtains an orthonormal basis of $T_{p}M$ whose last $m-1$ elements form an orthonormal basis $(n_{j})$ of $N_{p}S$. Although this basis is by no means canonical, the normal connection $\nabla^{\perp}$ of $S$ provides an obvious means for extending it to a frame for the normal bundle of $S$: for each $j$, let $\varUpsilon_{j}$ be the unique normal parallel vector field along $\gamma$ such that $\varUpsilon_{j}\lvert_{p} = n_{j}$ -- see \cite[p.~119]{oneill1983}. Because normal parallel translation is an isometry, the frame $(\dot{\gamma}, \varUpsilon_{1}, \dotsc, \varUpsilon_{m-1})$ is an orthonormal adapted frame along $\gamma$, as desired.
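Purely as an illustration (it is not part of the construction above), the following sketch, in Python with NumPy and with all names ours, approximates a relatively parallel (Bishop) frame in the model case of a curve sampled in flat ambient space $\mathbb{R}^{m}$: at each step the previous normal vectors are projected onto the new normal space and re-orthonormalised, a first-order discretisation of normal parallel transport.
\begin{verbatim}
import numpy as np

def bishop_frame(points):
    """points: (N, m) samples of a regular curve in R^m.
    Returns unit tangents T and, for every sample, an (m-1, m) array
    of approximately normal-parallel vectors."""
    T = np.gradient(points, axis=0)
    T /= np.linalg.norm(T, axis=1, keepdims=True)
    N, m = points.shape

    # orthonormal normal basis at the first sample (Gram-Schmidt)
    basis = [T[0]]
    for v in np.eye(m):
        w = v - sum(np.dot(v, u) * u for u in basis)
        if np.linalg.norm(w) > 1e-10:
            basis.append(w / np.linalg.norm(w))
    normals = np.array(basis[1:])

    frames = [normals.copy()]
    for k in range(1, N):
        new = []
        for n in normals:
            w = n - np.dot(n, T[k]) * T[k]       # project onto normal space
            for u in new:                        # re-orthonormalise
                w -= np.dot(w, u) * u
            new.append(w / np.linalg.norm(w))
        normals = np.array(new)
        frames.append(normals.copy())
    return T, np.array(frames)

# example: a helix in R^3
t = np.linspace(0.0, 4 * np.pi, 400)
T, frames = bishop_frame(np.stack([np.cos(t), np.sin(t), 0.3 * t], axis=1))
\end{verbatim}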
\end{document} |
\begin{document}
\maketitle
\begin{abstract} In the paper \cite{renato} Renato Targino shows that the bi-Lipschitz type of a plane curve is determined by the local ambient topological properties of the curve. Here we show that this is no longer true in higher dimensions. However, we show that the bi-Lipschitz type of a space curve is determined by the number of its singular points and by the local ambient topological type of a generic projection of the curve into the affine plane. \end{abstract}
\section{Introduction} Let $\Lambda, \Gamma\subset \field{C}^n$ be two algebraic curves. In general, if the germs $(\Lambda, p)$ and $(\Gamma, q)$ are {\it ambient topologically equivalent}, in the sense that there exists a germ of homeomorphism $\varphi\colon (\field{C}^n,p)\rightarrow (\field{C}^n,q)$ such that $\varphi (\Lambda,p)=(\Gamma,q)$, this does not imply that the germs $(\Lambda, p)$ and $(\Gamma, q)$ are {\it bi-Lipschitz equivalent}, in the sense that there exists a germ of bi-Lipschitz homeomorphism $\varphi\colon (\Lambda,p)\rightarrow (\Gamma,q)$ (with respect to the outer metric). Moreover, we even have the following global result:
\begin{ex} Let $n>2$. For every irreducible singular algebraic curve $\Gamma\subset \field{C}^n$, there is an algebraic curve $\Lambda$ such that the curves $\Gamma$ and $\Lambda$ are not bi-Lipschitz equivalent but are topologically equivalent.
{\rm Indeed, let $\Lambda$ be the normalization of $\Gamma$. Hence $\Lambda$ is an algebraic curve and can still be embedded into $\field{C}^n$ as a smooth algebraic curve. But $\Lambda$ has no singularities, so by \cite{Bir}, \cite{Sam} the curves $\Gamma, \Lambda$ are not bi-Lipschitz equivalent. We have a canonical semi-algebraic mapping $\phi\colon \Lambda\to\Gamma,$ which by the assumption is a homeomorphism. Now, by Theorem 6.6 in \cite{jel} we can extend the mapping $\phi\colon \Lambda\to \Gamma$ to a global semi-algebraic homeomorphism $\Phi\colon \field{C}^n\to\field{C}^n.$} \end{ex}
The situation is different in dimension $n=2.$ In this dimension two germs are ambient topologically equivalent if and only if they are outer bi-Lipschitz equivalent (see \cite{p-t}, \cite{fer}, \cite{n-p}). In fact, in the paper \cite{renato}, Renato Targino classifies plane algebraic curves, up to global bi-Lipschitz homeomorphisms, in terms of local ambient topological properties of the projective closure of those curves. More precisely, two algebraic curves $\Lambda, \Gamma\subset \field{C}^n$ are said to be {\it bi-Lipschitz equivalent} if there exists a bi-Lipschitz homeomorphism $\varphi\colon\Lambda\rightarrow\Gamma$ (with respect to the outer Euclidean metric induced from $\field{C}^n$). In the paper \cite{renato}, Renato Targino proves Theorem \ref{renato} below.
\noindent{{\bf Notation.} Consider $\field{C}^2$ embedded into $\Bbb {CP}^2$. Given an algebraic plane curve $X\subset\field{C}^2$, let $\overline{X}\subset \Bbb{C P}^2$ be the respective projective closure of $X$ and let $\pi_{\infty}$ denote the line at infinity in $\Bbb{C P}^2$.}
\begin{theo}[\cite{renato}, Theorem 1.6]\label{renato}
Let $\Lambda, \Gamma\subset \field{C}^2$ be two algebraic plane curves with irreducible components $\Lambda = \bigcup_{i\in I} \Lambda_i$ and $\Gamma = \bigcup_{j\in J} \Gamma_j$. The following statements are mutually equivalent. \begin{enumerate}
\item The curves $\Lambda$ and $\Gamma$ are bi-Lipschitz equivalent.
\item There are bijections $\sigma \colon I\rightarrow J$ and $\rho$ between the set of singular points of $\overline{\Lambda}\cup\pi_{\infty}$ and the set of singular points of $\overline{\Gamma}\cup\pi_{\infty}$ such that, for every singular point $p$ of $\overline{\Lambda}\cup\pi_{\infty}$: $\rho(p)\in\pi_{\infty}$ if and only if $p\in\pi_{\infty}$; $(\overline{\Lambda}\cup\pi_{\infty}, p)$ is topologically equivalent to $(\overline{\Gamma}\cup\pi_{\infty},\rho(p))$; and $(\overline{\Lambda}_i\cup\pi_{\infty}, p)$ is topologically equivalent to $(\overline{\Gamma}_{\sigma(i)}\cup\pi_{\infty},\rho(p))$ for all $i\in I$. \end{enumerate}
\end{theo}
In this note we address the global classification problem given by bi-Lipschitz equivalence of algebraic space curves, i.e., algebraic curves in $\field{C}^n$ ($n>2$). As in the local case, we obtain a characterization of the bi-Lipschitz equivalence classes of algebraic space curves by looking at their generic plane projections. In this direction, we point out that, given an algebraic curve $\Lambda$ in $\field{C}^n$, any two generic plane projections are bi-Lipschitz equivalent, and this is why we can refer to a generic plane projection of $\Lambda$ as {\it the generic plane projection of} $\Lambda$. We are ready to state the main results of the paper.
\begin{theo}\label{main1} Two irreducible algebraic curves in $\field{C}^n$ are bi-Lipschitz equivalent if and only if they have the same number of singular points and their generic plane projections are bi-Lipschitz equivalent. \end{theo}
As a direct consequence of Theorem \ref{main1} and Theorem \ref{renato} (Theorem 1.6 of \cite{renato}) we get a characterization of the bi-Lipschitz equivalence classes of irreducible algebraic space curves in terms of the local ambient topology of their generic plane projections.
\begin{co}\label{co}Let $\Lambda, \Gamma\subset \field{C}^n $ ($n>2$) be two irreducible algebraic curves. Let $X_{\Lambda}$ and $X_{\Gamma}$ denote their generic plane projections (respectively). The following statements are mutually equivalent:
\begin{enumerate}
\item The curves $\Lambda$ and $\Gamma$ are bi-Lipschitz equivalent.
\item The curves $\Lambda$ and $\Gamma$ have the same number of singular points and there is a bijection $\rho$ between the set of singular points of $\overline{X}_{\Lambda}\cup\pi_{\infty}$ and the set of singular points of $\overline{X}_{\Gamma}\cup\pi_{\infty}$ such that, for every singular point $p$ of $\overline{X}_{\Lambda}\cup\pi_{\infty}$, $\rho(p)\in\pi_{\infty}$ if and only if $p\in\pi_{\infty}$ and $(\overline{X}_{\Lambda}\cup\pi_{\infty}, p)$ is topologically equivalent to $(\overline{X}_{\Gamma}\cup\pi_{\infty},\rho(p))$.
\end{enumerate} \end{co}
\section{Main Result} Let $B^k(R)\subset \field{C}^k$ denote the $2k$-dimensional (real) Euclidean ball of radius $R$ centered at $0.$
\begin{theo}\label{infinity} Let $n>2$. If $X\subset \field{C}^n$ is a closed algebraic curve, then there are a real number $r>0$ and a proper projection $\pi: X\to\field{C}^2$ such that $\pi: X\setminus \pi^{-1}(B^2(r))\to \field{C}^2\setminus B^2(r)$ is a bi-Lipschitz embedding. \end{theo}
\begin{proof} Of course it is enough to prove our theorem for a projection $\pi: X\to\field{C}^{n-1}$ and then use induction on the number $n$. Consider $\field{C}^n$ embedded into $\Bbb {CP}^n$. Let $X'$ be the projective closure of $X$ in $\Bbb {CP}^n.$
Let $Z:=X'\setminus X=\{ z_1,...,z_r\}.$ For $i\not=j$, let us denote by $L_{ij}$ the line $\overline{z_i,z_j}$ and let us denote by $\pi_\infty$ the hyperplane at infinity of $\field{C}^n.$ Thus $\pi_\infty\cong \Bbb {C P}^{n-1}$ is a projective space of dimension $n-1.$ For a non-zero vector $v\in \field{C}^n$, let $[v]$ denote the corresponding point in $\pi_\infty.$
Let $\Delta =\{ (x,y)\in {X}\times {X} : x=y\}.$ Consider the mapping $$A: {X}\times {X}\setminus \Delta \ni (x,y)\to [x-y]\in \pi_\infty.$$
Let $\Gamma$ be the graph of $A$ in $X'\times X'\times {\pi_\infty}$ and take $\Gamma':=\overline{\Gamma}$ (we take this closure in $X'\times X' \times {\pi_\infty}$). Let $p:\Gamma' \to \pi_\infty$ and $q:\Gamma' \to X'\times X'$ be the canonical projections. Note that, for $z_i\in Z$, the set $q^{-1}(z_i,z_i)=Z_{i}$ is an algebraic set of dimension at most $1$. Let $Z_i'=p(Z_i).$ The set $W:=\bigcup Z_i'$ is a closed subset of $\pi_\infty$ of dimension at most $1$, hence $\pi_\infty\setminus W \not=\emptyset.$ Let $Q\in \pi_\infty\setminus (W \cup \bigcup L_{ij})$. Since $W\cup \bigcup L_{ij}$ is closed, there exists a small ball $B_1\subset \pi_\infty$ with center at $Q$ such that $B_1\cap (W\cup \bigcup L_{ij})=\emptyset.$
Let $B^n(R)$ be a large ball in $\field{C}^n$, take $V(R)=(X'\setminus B^n(R))\times (X'\setminus B^n(R))$ and let $O_R=q^{-1}(V(R)).$ Hence $O_R$ is a neighborhood of $W$ in $\pi_\infty$. We show that, for $R$ sufficiently large, if $x,y\in X\setminus B^n(R)$ and $x\not=y$, then $A(x,y)\not\in B_1.$ Indeed, suppose otherwise and take $R_k=k \to \infty.$ Then for every $k\in \Bbb N$ there are points $x_k,y_k\in X\setminus B^n(R_k)$, $x_k\not=y_k$, such that $A(x_k,y_k)\in B_1.$ But then $x_k,y_k\to \infty$, so passing to subsequences we may assume that $x_k\to z_i$ and $y_k\to z_j.$ If $z_i=z_j$, then $\lim A(x_k,y_k)=\lim p((x_k,y_k, [x_k-y_k]))\in p(Z_i)=Z_i'$, a contradiction. If $z_i\not=z_j$, then $\overline{x_k,y_k}\to\overline{z_i,z_j}=L_{ij}$ and this means that $L_{ij}\cap B_1\not=\emptyset$, a contradiction again.
Hence, there is a number $R$ sufficiently large such that if $x,y\in X\setminus B^n(R)$, $x\not=y$, then $A(x,y)\not\in B_1.$ Let $\Sigma=A\bigl(((X\setminus B^n(R))\times (X\setminus B^n(R)))\setminus \Delta\bigr).$ Then $Q\not\in \overline{\Sigma}.$ Take a hyperplane $H\subset \field{C}^n$ such that $Q\not\in \overline{H}.$ Of course $H\cong \field{C}^{n-1}.$ Let $\pi:\field{C}^n\to H$ be the projection with center $Q$ and let $K=\pi(X\cap B^n(R)).$ It is a compact set, hence there exists a ball $B^{n-1}(r)$ such that $K\subset B^{n-1}(r).$
Consider the proper mapping $\pi: X\setminus \pi^{-1}(B^{n-1}(r))\to H\setminus B^{n-1}(r)$. We show that the projection $\pi$ is a bi-Lipschitz embedding. Indeed, since a complex linear isomorphism is a bi-Lipschitz mapping, we can assume that
$Q=(0:0:\dots:0:1)$ and $H=\{x_n=0\}.$ Of course $||\pi(x)-\pi(y)||\le ||x-y||.$ Assume that $\pi$ is not bi-Lipschitz, i.e., there is a sequence of points $x_j,y_j\in X\setminus \pi^{-1}(B^{n-1}(r))$ such that $$\frac{||\pi(x_j)-\pi(y_j)||}{||x_j-y_j||}\to 0$$ as $j\to \infty.$ Let $x_j-y_j=(a_1(j),\dots,a_{n-1}(j),b(j))$ and denote by $P_j$ the corresponding point $(a_1(j):\dots:a_{n-1}(j):b(j))$ in $\Bbb {CP}^{n-1}.$ Hence $P_j$ is represented by the unit vector $$\frac{(a_1(j),\dots,a_{n-1}(j),b(j))}{||x_j-y_j||}.$$ Since $$\frac{(a_1(j),\dots,a_{n-1}(j))}{||x_j-y_j||}=\frac{\pi(x_j)-\pi(y_j)}{||x_j-y_j||}\to 0,$$ we have that $P_j\to Q.$ This is a contradiction, because $P_j=A(x_j,y_j)\in\Sigma$ while $Q\not\in\overline{\Sigma}.$
\end{proof}
In the sequel we will use the following theorem of Jean-Pierre Serre (see \cite{mil}, p. 85):
\begin{theo}\label{serre} If $\Gamma$ is an irreducible curve of degree $d$ and genus $g$ in the complex projective plane, then $$\frac{1}{2} (d-1)(d-2)= g + \sum_{z\in Sing(\Gamma)} {\delta}_z,$$ where $\delta_z$ denotes the delta invariant of a point $z$. \end{theo}
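For instance, for the nodal cubic $\Gamma = \{y^{2}z = x^{2}(x+z)\}\subset\Bbb{CP}^2$ we have $d=3$, $g=0$ and a single singular point, an ordinary node with $\delta = 1$, so that both sides of the formula equal $1$.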
Before starting to prove Theorem \ref{main1}, let us introduce the notion of Euclidean subsets being bi-Lipschitz equivalent at infinity.
\begin{defi}
Two subsets $X\subset\field{C}^n$ and $Y\subset\field{C}^m$ are called {\bf bi-Lipschitz equivalent at infinity} if there exist compact subsets $K_1\subset\field{C}^n$ and $K_2\subset\field{C}^m$ and a bi-Lipschitz homeomorphism $X\setminus K_1\rightarrow Y\setminus K_2.$ \end{defi}
\begin{re}\label{remark} {\rm In order to prove that two algebraic plane curves $X$ and $Y$ are bi-Lipschitz equivalent, Renato Targino (see \cite{renato}) showed that it is enough to verify the following two conditions: \begin{enumerate}
\item There is a bijection $\varphi\colon Sing(X)\rightarrow Sing(Y)$ such that $(X,p)$ is bi-Lipschitz equivalent to $(Y,\varphi(p))$ as germs, $\forall p\in Sing(X)$;
\item $X$ and $Y$ are bi-Lipschitz equivalent at infinity. \end{enumerate} Note that the proof of Renato Targino still works in the case where $X$ and $Y$ are algebraic curves in $\field{C}^n$ (not necessarily $n=2$).} \end{re}
Now we can prove Theorem \ref{main1}.
\begin{proof}[Proof of Theorem \ref{main1}] Let us suppose that $\Lambda$ and $\Gamma$ are bi-Lipschitz equivalent; in particular, they have the same genus and they are bi-Lipschitz equivalent at infinity. We are going to prove that their generic plane projections $\Lambda'$ and $\Gamma'$, respectively, satisfy conditions 1) and 2) of Remark \ref{remark}. By using Theorem \ref{infinity}, we see that $\Lambda$ and $\Gamma$ are bi-Lipschitz equivalent at infinity to $\Lambda'$ and $\Gamma'$, respectively. Since $\Lambda$ and $\Gamma$ are bi-Lipschitz equivalent, it follows that $\Lambda'$ and $\Gamma'$ are bi-Lipschitz equivalent at infinity as well ($\Lambda'$ and $\Gamma'$ satisfy item 2) of Remark \ref{remark}).
Before starting to show that $\Lambda'$ and $\Gamma'$ satisfy item 1) of Remark \ref{remark}, let us make some remarks about the singularities of a generic plane projection $X'$ of a space curve $X$ in $\field{C}^n$. We have a partition of the singular subset $Sing(X')$ into two types of singularities: singularities that come from singularities of $X$ via the associated linear generic projection $X\subset\field{C}^n\rightarrow X'\subset\field{C}^2$ (let us denote the set of such singularities by $S_1(X')$) and the so-called {\bf new nodes}, which are singularities that come from double points of the associated linear generic projection $X\subset\field{C}^n\rightarrow X'\subset\field{C}^2$ (let us denote the set of new nodes by $S_2(X')$).
We resume our proof that $\Lambda'$ and $\Gamma'$ satisfy item 1) of Remark \ref{remark}. It is clear that the local composition of the bi-Lipschitz homeomorphism $\Lambda\rightarrow\Gamma$ and the linear generic projections $\Lambda\subset\field{C}^n\rightarrow \Lambda'\subset\field{C}^2$ and $\Gamma\subset\field{C}^n\rightarrow \Gamma'\subset\field{C}^2$ gives a natural bijection $\varphi\colon S_1(\Lambda')\rightarrow S_1(\Gamma')$ such that $(\Lambda',p)$ is bi-Lipschitz equivalent to $(\Gamma',\varphi(p))$ as germs, $\forall p\in S_1(\Lambda')$. Next, we are going to extend $\varphi$ to the set of new nodes. Notice that, since $\Lambda$ and $\Gamma$ have the same number of singular points and the same genus, we can deduce by Theorem \ref{serre} that the number of new nodes which appear in $\Lambda'$ and $\Gamma'$ is the same in both cases. Indeed, since $\Lambda'$ and $\Gamma'$ are bi-Lipschitz equivalent at infinity, they have the same degree (see Corollary 3.2 in \cite{bfs}) and they have topologically equivalent germs at infinity (as stated in Theorem 1.5 of \cite{renato}). Moreover, $\Gamma'$ and $\Lambda'$ also have the same genus. Now, by Serre's Formula (Theorem \ref{serre}) we see that the number of new nodes must be the same in both cases. Since any two nodes are bi-Lipschitz equivalent as germs, we can consider $\varphi\colon S_2(\Lambda')\rightarrow S_2(\Gamma')$ as being any bijection such that $(\Lambda',p)$ is bi-Lipschitz equivalent to $(\Gamma',\varphi(p))$ as germs, $\forall p\in S_2(\Lambda')$. In other words, according to Remark \ref{remark}, we have proved that $\Lambda'$ and $\Gamma'$ are bi-Lipschitz equivalent.
On the other hand, let us suppose that the generic plane projections $\Lambda'$ (of $\Lambda $) and $\Gamma'$ (of $\Gamma$) are bi-Lipschitz equivalent. Thus, by using Theorem \ref{infinity}, we see that $\Lambda$ and $\Gamma$ are bi-Lipschitz equivalent at infinity, i.e., they satisfy item 2) of Remark \ref{remark}. Concerning item 1) of Remark \ref{remark}, we have natural bijections $\varphi_{\Lambda}\colon Sing(\Lambda)\rightarrow S_1(\Lambda')$ and $\varphi_{\Gamma}\colon Sing(\Gamma)\rightarrow S_1(\Gamma')$ such that $(\Lambda,p)$ (respectively $(\Gamma,q)$) is bi-Lipschitz equivalent to $(\Lambda',\varphi_{\Lambda}(p))$ (respectively $(\Gamma',\varphi_{\Gamma}(q))$) as germs. Now, since the linear generic projections $\Lambda\subset\field{C}^n\rightarrow \Lambda'\subset\field{C}^2$ and $\Gamma\subset\field{C}^n\rightarrow \Gamma'\subset\field{C}^2$ are local bi-Lipschitz homeomorphisms, the bi-Lipschitz homeomorphism between $\Lambda'$ and $\Gamma'$ gives a natural bijection $\varphi'\colon S_1(\Lambda')\rightarrow S_1(\Gamma')$ such that $(\Lambda',p)$ is bi-Lipschitz equivalent to $(\Gamma',\varphi'(p))$ as germs, $\forall p\in S_1(\Lambda')$. Finally, by the composite mapping $\varphi_{\Gamma}^{-1}\circ\varphi'\circ\varphi_{\Lambda}$, we conclude that $\Lambda$ and $\Gamma$ satisfy item 1) of Remark \ref{remark}. \end{proof}
\end{document} |
\begin{document}
\title{Demonstration of universal control between non-interacting qubits using the Quantum Zeno effect}
\author{E. Blumenthal$^1$} \author{C. Mor$^1$} \author{A. A. Diringer$^1$} \author{L. S. Martin$^2$} \author{P. Lewalle$^{3,4}$} \author{D. Burgarth$^5$} \author{K. B. Whaley$^{3,4}$} \author{S. Hacohen-Gourgy$^1$} \affiliation{$^1$Department of Physics, Technion - Israel Institute of Technology, Haifa 32000, Israel\\ $^2$Department of Physics, Harvard University, Cambridge, Massachusetts 02138, USA\\ $^3$Department of Chemistry, University of California, Berkeley, California 94720 USA\\ $^4$Berkeley Center for Quantum Information and Computation, Berkeley, California 94720 USA\\ $^5$Center for Engineered Quantum Systems, Dept. of Physics \& Astronomy, Macquarie University, 2109 NSW, Australia}
\begin{abstract} The Zeno effect occurs in quantum systems when a very strong measurement is applied, which can alter the dynamics in non-trivial ways. Despite being dissipative, the dynamics stay coherent within any degenerate subspaces of the measurement. Here we show that such a measurement can turn a single-qubit operation into a two- or multi-qubit entangling gate, even in a non-interacting system. We demonstrate this gate between two effectively non-interacting transmon qubits. Our Zeno gate works by imparting a geometric phase on the system, conditioned on it lying within a particular non-local subspace. These results show how universality can be generated not only by coherent interactions as is typically employed in quantum information platforms, but also by Zeno measurements. \end{abstract}
\maketitle
Control of quantum systems can be divided into two distinct schemes, coherent and incoherent control. Coherent control is achieved by application of control Hamiltonians to evoke deterministic time evolution. In contrast, incoherent control is based on non-deterministic measurement outcomes to prepare the system in a desired state. The two schemes may complement each other to enrich quantum control~\cite{Felix2015,martin2015deterministic,thomsen2002continuous,JacobsPurification2003,Riste2013,NKatz2008,Vool2016,SHGReview}. On the boundary between the two schemes lies the quantum Zeno effect, in which frequent measurements effectively freeze the system dynamics, holding the system at an eigenstate of the measurement observable. A more precise description shows that measurements divide the Hilbert space into subspaces with distinct eigenvalues of the measured observable, and give rise to `Zeno dynamics' within each~\cite{facchi2008QZD}. Transitions between subspaces are suppressed by measurement, but the evolution inside each subspace is completely coherent. In particular, previous work has shown that Zeno dynamics can theoretically transform a trivial (e.g., non-interacting with local control only) quantum system into one with universal control within the Zeno subspace~\cite{burgarth2014exponential}, and several state-entangling schemes have been proposed~\cite{wang2008quantum, shao2009onestep,zhang2011robust}.
\begin{figure}
\caption{Experiment schematic. (a) Two transmons coupled to electromagnetic-mode of a superconducting cavity. (b) Qubit-qutrit energy level diagram, where the energy levels of each element are labeled g (ground), e (excited) and f (second excited level). The colored domain is the subspace defined by the projector $P=\mathbb{1}-\Ket{fe}\Bra{fe}$. The $\Ket{e}\leftrightarrow\Ket{f}$ transition of the qutrit q1 is Rabi driven with frequency $\Omega_R$. Dotted and solid lines are blocked and allowed transitions, respectively. (c) Cavity spectra conditioned on transmon state (red) and the applied driving tones (blue).
}
\label{fig:gateSetup}
\end{figure}
In this letter we show an explicit construction of such universal control, and demonstrate it in a circuit-QED system~\cite{blais2021CQED}. Our unique construction performs, in a single operation, an N-Control-phase gate on N qubits, where the last qubit is required to have only one extra level, i.e., it is a qutrit. We refer to this as a Zeno gate. Specifically, we demonstrate the gate between two non-interacting transmon qubits~\cite{Koch2007}. This work is distinct from other measurement-based methods that prepare entangled states~\cite{Roch2014,Riste2013,shankar2013autonomously}; the major novelty is that the dynamics here are coherent, deterministic, and allow for universality.
Technically, our experimental system has a resonator-induced interaction, which can yield a high-fidelity gate (RIP-gate)~\cite{RIPgate,RIPgateEXP}. We actively cancel this interaction to make our system effectively non-interacting. We can then demonstrate dynamics due to the Zeno effect alone.
Our purpose is to show how universality can be switched on and off
just by looking at a single level within a quantum system.
\begin{figure}
\caption{$\Ket{gg}$ population after starting in $\Ket{gg}$ and Rabi driving the qutrit $\Ket{g}\leftrightarrow\Ket{e}$ transition for half of an oscillation, while simultaneously Zeno driving the cavity at $\omega_{eg}$, as a function of the Zeno drive amplitude $\varepsilon$.
Circles are experimental results, squares are numerically simulated results and triangles are an ideal simulation assuming the cavity is a Markovian bath, Eq.~\ref{eq:idealMasterEq} (the solid lines are provided to guide the eye).}
\label{fig:zenoBlock}
\end{figure}
\begin{figure*}
\caption{(a) Pulse sequence for the Zeno dynamics. (b) The qutrit-qubit density matrix at different times with $\varepsilon / 2 \pi = 2$ MHz, starting with an initial $\Ket{++}$ state. Black squares are partially filled to represent the amplitude, where a full square stands for an amplitude of $0.4$, and the color of the filling represents the phase according to the color bar.
Experimental results (top row), numerical simulation (bottom row).}
\label{fig:timeEvo}
\end{figure*}
The Zeno dynamics we explore here rely on local operations together with non-local projections. Locally driving one transition for a full $2\pi$ rotation imparts a geometric phase of $\pi$ on the initial state. Adding rapid projections blocks transitions between the Zeno subspaces defined by the projector, and allows phase accumulation only for certain states. Choosing an appropriate non-local projector conditions the resulting phase on the state of both qubits and thereby leads to entanglement. This process is similar to entangling operations based on Rydberg blockade with neutral atoms~\cite{isenhower2010demonstration, levine2019rydberg} in the sense that a certain non-local state cannot be reached by the system. The main difference is that while the Rydberg blockade is a result of strong coherent interactions~\cite{jaksch2000fast}, we use incoherent measurements to perform the Zeno block.
Consider first an ideal qubit-qutrit system and infinitely rapid projections, where the qutrit $\Ket{f}$ level is an auxiliary state. We apply a Rabi drive of frequency $\Omega_R$ between $\Ket{e}$ and $\Ket{f}$, and at the same time apply rapid projective measurements of the projector $P=\mathbb{1}-\Ket{fe} \Bra{fe}$ (as depicted in Fig.\ref{fig:gateSetup}b). In the limit of infinitely rapid projections the Hamiltonian reads \cite{facchi2008QZD}
\begin{equation}
\begin{split}
&H_{\textrm{Zeno}} = PHP \\ &=i\hbar\dfrac{\Omega_R}{2}P\left(\Ket{e} \Bra{f} - \Ket{f} \Bra{e}\right)\otimes \left(\Ket{g} \Bra{g} + \Ket{e} \Bra{e}\right) P \\
&= i\hbar\dfrac{\Omega_R}{2}(\Ket{eg} \Bra{fg} - \Ket{fg} \Bra{eg})
\end{split}
\label{idealHamilt} \end{equation}
where $H = \tfrac{1}{2}i\hbar\,\Omega_R\left(\Ket{e} \Bra{f} - \Ket{f} \Bra{e}\right)\otimes \mathbb{1} $ is the Rabi oscillation Hamiltonian without projections.
The $\Ket{eg}\leftrightarrow\Ket{fg}$ transition is allowed and the $\Ket{ee}\leftrightarrow\Ket{fe}$ transition is blocked and does not appear in Eq.~\ref{idealHamilt}, as shown in Fig.~\ref{fig:gateSetup}b with solid and dotted arrows, respectively. Assuming the system started in the subspace defined by $P$, it will remain there and undergo coherent evolution governed by $U_{\mathrm{Zeno}}=\exp(-iH_{\mathrm{Zeno}}t/\hbar)$. Applying the operation for a time $t=2\pi /\Omega_R$, one full oscillation, the $\Ket{eg}$ state acquires a $\pi$ phase. Thus, our operation is equivalent to a Control-phase gate up to local operations. This scheme can be expanded to entangle multiple qubits and one qutrit by measuring the projector $P=\mathbb{1}-\Ket{fee...e}\Bra{fee...e}$. A $\pi$ phase will be acquired by the states $\Ket{exx..x}$, except for $\Ket{ee..e}$, where $x\in \{e,g\}$. This operation is equivalent to an N-Control-phase gate.
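Explicitly, restricting $U_{\mathrm{Zeno}}$ at $t=2\pi/\Omega_R$ to the computational subspace and ordering the basis as $(\Ket{gg},\Ket{ge},\Ket{eg},\Ket{ee})$, the two-qubit case reads \begin{equation*} U_{\mathrm{Zeno}} = \mathrm{diag}(1,\,1,\,-1,\,1)\,, \end{equation*} which differs from the standard Control-phase gate $\mathrm{diag}(1,1,1,-1)$ only by a bit flip on the second qubit applied before and after.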
The key experimental requirement is the ability to apply the projector $P$. In a realistic setup, the projection application rate is not infinite, and the system may be described either by a sequence of projections with a finite time interval between them, or by a continuous measurement~\cite{facchi2008QZD}. We focus on the latter case as it fits our experimental circuit-QED scheme. Continuous measurements of the projector $P$ at a rate of $\Gamma$ can be modeled by the master equation
\begin{align}
\frac{d\rho}{dt}=-i[H,\rho]+\Gamma\mathcal{D}[P]\rho
\label{eq:idealMasterEq} \end{align}
where $\mathcal{D}[\cdot]$ is the standard Lindblad dissipator that models coupling to a Markovian bath. The finite measurement rate introduces a chance for the system to escape the Zeno subspace. The corresponding gate error in diamond norm \cite{watrous} can be bounded as $\mathcal{E}_\diamond < 38\;\Omega_R/\Gamma$~\cite{Suppmat}.
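As an illustration of Eq.~\ref{eq:idealMasterEq} only (it does not model the circuit-QED implementation described below), the following Python/QuTiP sketch, with illustrative values of $\Omega_R$ and $\Gamma$ chosen by us, integrates the master equation for the qutrit-qubit system and checks that after one full Rabi period the $\Ket{eg}$ component has acquired a $\pi$ phase while the remaining computational states are essentially unaffected.
\begin{verbatim}
import numpy as np
from qutip import basis, tensor, qeye, mesolve

Omega = 2 * np.pi * 1.0        # Rabi frequency (illustrative units)
Gamma = 2 * np.pi * 50.0       # measurement rate, Gamma >> Omega

g, e, f = (basis(3, k) for k in range(3))   # qutrit levels g, e, f
qg, qe = basis(2, 0), basis(2, 1)           # qubit levels g, e

# Rabi drive on the qutrit e<->f transition, identity on the qubit
H = 1j * (Omega / 2) * tensor(e * f.dag() - f * e.dag(), qeye(2))

# continuous measurement of the projector P = 1 - |fe><fe|
fe = tensor(f, qe)
P = tensor(qeye(3), qeye(2)) - fe * fe.dag()
c_ops = [np.sqrt(Gamma) * P]

# start in |++> and evolve for one full Rabi period t = 2*pi/Omega
psi0 = tensor((g + e).unit(), (qg + qe).unit())
tlist = np.linspace(0.0, 2 * np.pi / Omega, 201)
rho = mesolve(H, psi0, tlist, c_ops=c_ops).states[-1].full()

# basis order of tensor(qutrit, qubit): gg, ge, eg, ee, fg, fe
print("<eg|rho|gg> =", rho[2, 0])   # ~ -0.25: |eg> has picked up a pi phase
print("<ee|rho|ge> =", rho[3, 1])   # ~ +0.25, up to small leakage out of P
\end{verbatim}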
Eq. \ref{eq:idealMasterEq} describes the system in the Markovian regime where the bath ``loses its memory'' faster than the system evolution rate. This timescale puts an upper bound on the Rabi frequency $\Omega_R$. Beyond this frequency, in the non-Markovian regime, the system cannot be described by the simple form of Eq. \ref{eq:idealMasterEq}. In our system this time scale is given by the cavity linewidth $\kappa$. However, to maximize our gate fidelity, we perform the gate at a rate faster than the system decoherence, and show that the Zeno dynamics are qualitatively the same, differing only in a reduced blocking ability. This is in line with a recently predicted unification of Zeno physics arising through a wide range of mechanisms \cite{unity1,unity2}.
We implement the Zeno gate on a circuit-QED system composed of two transmons \cite{Koch2007} dispersively coupled to a superconducting 3D cavity, Fig.~\ref{fig:gateSetup}a. The system was designed to optimize implementation of the non-local measurement $P$, while minimizing qubit-qutrit interactions. The transmons were fabricated with far-detuned transition frequencies of $\omega_{q1}/2\pi=3.28$ GHz, $\omega_{q2}/2\pi=6.24$ GHz and anharmonicities of $\alpha_1/2\pi=-175$ MHz and $\alpha_2/2\pi=-225$ MHz, respectively. We use $q1$ as the qutrit. The cavity mode frequency was $\omega_c/2\pi=7.32$ GHz. The linewidth $\kappa/2\pi = 0.15$ MHz was predominantly set by the strongly coupled port. The transmon-cavity dispersive couplings were $\chi_1/2\pi=-4.25$ MHz and $\chi_2/2\pi=-4.35$ MHz, and the dispersive shift of the $\Ket{f}$ state was $\chi_f/2\pi=-10$ MHz. The system--cavity interaction is well described by the dispersive Hamiltonian in the interaction picture \cite{blais2021CQED}
\begin{equation}
\begin{split}
H_{\text{disp}}/\hbar&=\left(\chi_1\Ket{e_1}\Bra{e_1} + \chi_2\Ket{e_2}\Bra{e_2} +\chi_f\Ket{f}\Bra{f}\right)a^\dagger a\\
&+\alpha_1\Ket{f}\Bra{f},
\end{split}
\label{eq:dispHamilt} \end{equation}
where $a^\dagger$ and $a$ are the creation and annihilation operators of photons in the cavity, and subscripts in the kets label the qubits. We omitted the residual direct qubit-qutrit interaction, which was measured using Ramsey interferometry between the $\Ket{ge}$ and $\Ket{ee}$ states to be $\sim 30$ kHz, negligible on the timescales of our experiment.
Eq. \ref{eq:dispHamilt} shows that the cavity acquires a frequency shift that depends on the qubit-qutrit state. In the $\chi\gg\kappa$ regime, the cavity resonance frequencies for each state of the qubits are well separated, Fig.~\ref{fig:gateSetup}c. Probing the cavity resonance frequency allows us to deduce the qubit-qutrit state. We do this by driving the cavity through the weakly coupled port, and monitoring the output through the strongly coupled port. We continuously measure the projector $P$ by driving the cavity at a frequency of $\omega_{fe} = \omega_{gg}+\chi_f+\chi_2$, which is the resonance frequency when the system is in $\Ket{fe}$. We refer to such a measurement as a ``Zeno drive''. The output signal is amplified using a flux-pumped Josephson Parametric Amplifier (JPA), with design as in~\cite{hacohen2016dynamics}. Changing the pumping frequency, we sequentially amplify signals of different frequencies. We amplify the Zeno drive signal at $\omega_{fe}$ first, followed by the readout signal at $\omega_{gg}$. The former enables us to detect whether the system escaped the Zeno subspace during the gate operation; the latter is used for tomography. We note that for the Zeno block to occur, the measurement may be performed by the ``environment''. High quantum efficiency is not required to implement the gate; indeed, it is not even necessary to observe the measurement outcome. However, high quantum efficiency is important for high-fidelity post-selection.
Before proceeding to the entangling dynamics, we first characterize the Zeno block probability as a function of the drive amplitude $\varepsilon$. We demonstrate this here on the two lowest states of the qutrit $q1$. We apply a Zeno drive at $\omega_{eg}$ and Rabi drive the $\Ket{g}\leftrightarrow\Ket{e}$ transition for $t=\pi/\Omega_R$. We measure the probability to stay in $|gg\rangle$ as a function of the Zeno drive amplitude for three different Rabi frequencies; see Fig.~\ref{fig:zenoBlock}. This procedure resembles that in \cite{ZenoSlichter}, with slight differences because that experiment was conducted using a quantum trajectory approach in the steady state. Furthermore, Ref.~\cite{ZenoSlichter} operated in the $\Omega_R < \kappa$ regime, meaning the cavity could be modeled as a Markovian bath and the textbook jump rate value of $P_{\text{jump}}=\Omega_R^2/2 \Gamma$~\cite{Misra1977,facchi2008QZD} was observed. Here we show that even beyond this regime, the Zeno effect still blocks, albeit with a reduced effectiveness.
Fig.~\ref{fig:zenoBlock} shows the expected qualitative behaviour where the blocking probability increases with the drive amplitude, and decreases with increasing Rabi frequency. Quantitatively the data agree with the numerical simulation of the master equation of the full qubit-qutrit-cavity system~\cite{Suppmat}. However, our system can be simplified to Eq. \ref{eq:idealMasterEq} only in the limit $\Omega_R \ll \kappa$. In that limit, $\Gamma = 4\varepsilon^2 / \kappa$ \cite{Gambetta2008}. Even at $\Omega_R/2 \pi=0.1~\mathrm{MHz} = 2\kappa/3$ we can still see a deviation from Eq.\ref{eq:idealMasterEq} (red symbols), with a reduced blocking probability relative to that expected for this value of $\Gamma$. Recent experiments have probed pertinent regimes in greater detail \cite{Szombati2020, koolstra2021monitoring}, and begun to illustrate how lag in the cavity state ``following'' a qubit on a timescale $\kappa^{-1}$ impacts subsequent measurement mediated by the cavity. While a slower Rabi frequency is better in terms of realizing the Zeno effect, our gate time needs to be significantly shorter than the system coherence times ($T_1^{e\rightarrow g}=52$ $\mu$s, $T_1^{f\rightarrow e}=12.9$ $\mu$s, $T_2^{* e\leftrightarrow g}=22.2$ $\mu$s, $T_2^{* f\leftrightarrow e}=5.8$ $\mu$s for the qutrit, and $T_1=18.9$ $\mu$s, $T_2^*=15.7$ $\mu$s for the qubit). We set $\Omega_R/2 \pi=1$ MHz (blue in Fig.~\ref{fig:zenoBlock}).
Until now we have discussed only the effect of the Zeno drive on the transition that we wish to block. However, residual effects on the rest of the states also emerge. Due to the non-zero cavity linewidth, driving at $\omega_{fe}$ will create a small coherent displacement even if the system is not in $\Ket{fe}$. In the frame rotating with the drive frequency, at a \textit{steady state} this coherent state is $\alpha_{ij} = \dfrac{\varepsilon}{i\Delta_{ij}+\kappa/2}$, where $\varepsilon$ is the drive amplitude, and $\Delta_{ij}$ is the detuning between the cavity resonance frequency $\omega_{ij}$ and the drive frequency when the qubits are in $\Ket{ij}$. In our $\chi\gg\kappa$ regime, we can write $\rho_{ij,k\ell}(t)=e^{i\mu_{ij,k\ell}t}\rho_{ij,k\ell}(0)$, where $\mu_{ij,k\ell}=(\omega_{ij}-\omega_{k\ell})\alpha_{k\ell}^*\alpha_{ij}$ such that a phase will be acquired between each pair of states at a rate of $\mathrm{Re}[{\mu}_{ij,k\ell}]=\dfrac{(\omega_{ij}-\omega_{k\ell})|\varepsilon|^2}{\Delta_{ij}\Delta_{k\ell}}$ and coherence will be lost due to measurement-induced dephasing at a rate of $\mathrm{Im}[{\mu}_{ij,k\ell}]=\dfrac{(\omega_{ij}-\omega_{k\ell})^2|\varepsilon|^2\kappa}{2\Delta_{ij}^2\Delta_{k\ell}^2}$. This is the RIP-gate, where conditional phase accumulation leads to entanglement of the qubits \cite{RIPgate, RIPgateEXP}. To demonstrate the entanglement caused \textit{only} by Zeno dynamics, we negate this effect by applying an additional drive. It is applied to the cavity, at a frequency that is symmetric to the Zeno measurement drive frequency with respect to $\omega_{eg}$ and $\omega_{ge}$, so that $\omega_{sym}=\omega_c+(\chi_f+\chi_2)-(\chi_1+\chi_2)=\omega_c+\chi_f-\chi_1$, as depicted in Fig.~\ref{fig:gateSetup}. This symmetric drive balances the phase accumulation, such that this no longer generates entanglement. We note that while the phase accumulation is given above for the steady state, a cancellation of the phase by the symmetric drive should also occur in the transient regime. We confirmed this by numerical simulation as well as by Ramsey interferometry between $\Ket{gg}\leftrightarrow \Ket{eg}$ and $\Ket{ge}\leftrightarrow \Ket{ee}$ while applying both the Zeno and the symmetric drives to the cavity~\cite{Suppmat}.
\begin{figure}
\caption{Gate fidelity (blue) and concurrence (red) versus the amplitude of the Zeno drive (a), and as a function of the post-selection percentage for a Zeno drive amplitude of $2$ MHz (b). The fidelity is calculated with respect to an ideal state, obtained by applying $\mathbb{1}-2\Ket{eg}\Bra{eg}$ to our initial state $\Ket{++}$. Circles are experimental results and squares are numerical results (lines are a guide to the eye).}
\label{fig:fdltyVSEpsilon}
\end{figure}
The driven system Hamiltonian with both the Zeno drive and the symmetric drive, in the frame rotating at $\omega_{gg}$ reads \begin{equation}
\begin{split}
H_{\text{driven}}/\hbar = H_{\text{disp}}/\hbar &+ i\varepsilon\left(ae^{-i(\chi_f+\chi_2)t}-a^\dagger e^{i(\chi_f+\chi_2)t}\right) \\
&+ i\varepsilon\left(ae^{-i(\chi_2-\chi_f)t}-a^\dagger e^{i(\chi_2-\chi_f)t}\right).
\label{eq:fullHamilt} \end{split} \end{equation} To perform the Zeno gate we turn on the above drives and then initialize the system in the $(\Ket{g_1}+\Ket{e_1})(\Ket{g_2}+\Ket{e_2})$ state, see Fig.~\ref{fig:timeEvo}a (for other initial states see~\cite{Suppmat}). We then apply the Rabi drive $\Ket{e_1}\leftrightarrow\Ket{f}$ at the Stark shifted frequency for a time of $2\pi/\Omega_R$. Finally, we apply a set of tomography pulses~\cite{Suppmat}, and apply a readout pulse.
We sample the time evolution of the system, as shown in Fig.~\ref{fig:timeEvo}b. We see that the final state, after $1$ $\mu$s, is entangled since $\Ket{eg}$ has acquired a phase of $\pi$. The main discrepancy between the experiment and simulation is the population of $\Ket{fe}$, which is much smaller in the experiment than in the simulation. This is most likely due to the Zeno drive populating the cavity with a large coherent state once an escape occurs, thus shifting the qutrit resonance frequency and preventing the tomography pulse from correctly mapping $\Ket{fe}$~\cite{Suppmat}. The lost $\Ket{fe}$ population is then translated to a completely mixed state, therefore increasing the computational subspace population, which can cause a calculated fidelity increase, as discussed below.
In addition, the relaxation rate may be increased during the gate due to the large Zeno drive amplitude~\cite{DDephasingSlichter, sank2016dressed, hanai2021intrinsic, Lescanne2019escape}.
We performed this procedure with varying Zeno drive amplitudes and calculated the fidelity and concurrence of the final state, as shown in Fig.~\ref{fig:fdltyVSEpsilon}a. Since we start with the state $(\Ket{g_1}+\Ket{e_1})(\Ket{g_2}+\Ket{e_2})$ it is reasonable to use the fidelity of the final state as a proxy for the gate fidelity. As a check, we applied the gate to other initial states, obtaining similar quality results~\cite{Suppmat}. Concurrence is a measure of entanglement between qubits that is non-zero only for entangled states~\cite{Wooters1998Entanglement}. We calculate this on the states in the computational subspace. Increasing the Zeno drive amplitude increases the measurement rate, leading to a higher blocking probability and therefore higher fidelity and concurrence; on the other hand, this also leads to an increased dephasing rate $\mathrm{Im}[\mu_{ij,k\ell}]$, due to the finite $\kappa$. This causes the reduced fidelity and concurrence observed at higher drive amplitudes $\varepsilon$. Furthermore, we can see that the experimental results consistently achieve higher fidelity than the simulated results, while the experimental concurrence does not. This small discrepancy in gate fidelity between the experimental results and the numerical simulation is caused primarily by the incorrect mapping of $\Ket{fe}$ in the tomography process, as explained above.
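For reference, a minimal sketch of the Wootters concurrence \cite{Wooters1998Entanglement} used as the entanglement measure here; the example state below is the ideal target state described in the caption of Fig.~\ref{fig:fdltyVSEpsilon}.
\begin{verbatim}
import numpy as np

def concurrence(rho):
    """Wootters concurrence of a 4x4 two-qubit density matrix."""
    sy = np.array([[0, -1j], [1j, 0]])
    yy = np.kron(sy, sy)
    rho_tilde = yy @ rho.conj() @ yy                   # spin-flipped state
    evals = np.linalg.eigvals(rho @ rho_tilde)
    lam = np.sqrt(np.abs(np.sort(evals.real)[::-1]))   # decreasing order
    return max(0.0, lam[0] - lam[1] - lam[2] - lam[3])

# ideal target state (|gg> + |ge> - |eg> + |ee>)/2, basis order gg, ge, eg, ee
psi = np.array([1, 1, -1, 1]) / 2
print(concurrence(np.outer(psi, psi.conj())))   # -> 1.0 (maximally entangled)
\end{verbatim}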
The main source of infidelity for the gate is escapes from the Zeno subspace, which can be detected using the JPA. This capability allows us to perform the gate probabilistically but with a higher chance of success by post-selecting on the JPA signal. To demonstrate this, we post-selected our tomography results based on the amplitude of the transmitted signal, for the case of $\varepsilon/2 \pi=2$ MHz. Fig.~\ref{fig:fdltyVSEpsilon}b shows an increase in both gate fidelity and concurrence with the post-selection percentage. The increase is limited by the fidelity of our error-detection, which was $\sim75\%$ although our single-shot readout fidelity was $\sim93\%$, due to the measurement time being limited by the gate time and by the increased relaxation rate from the state $\Ket{fe}$.
We have presented a system where universal control was turned on by a Zeno measurement alone. Although the measurement acts trivially in the computational subspace, it nevertheless has a non-trivial effect on the dynamics within that subspace. To demonstrate universality we performed an explicit gate on two qubits. The concept can be extended and works simultaneously on multiple qubits.
To create an effectively non-interacting system and observe dynamics due to the Zeno effect alone, we actively cancelled the RIP-gate mechanism. In our system the RIP-gate alone would yield better performance for computational purposes. However, if we consider a hypothetical system with no interactions between the qubits (possibly different types of qubits), where the measurement drive performs only the measurement with no additional entangling effect, then the Zeno effect would truly be the only coherent control mechanism.
Overall this experiment emphasizes the ability of the Zeno effect to turn the trivial dynamics of an apparently non-interacting system into universal control, providing proof-of-concept for an entirely novel control strategy.
\begin{acknowledgments}
This research was supported by ISF grant No. 1113/19, US-Israel BSF grant No. 2020166, and Technion's Helen Diller Quantum Center. P.L. and K.B.W. were partially supported by the U.S. Department of Energy, Office of Science, National Quantum Information Science Research Centers, Quantum Systems Accelerator. D.B. acknowledges funding by the ARC (project numbers FT190100106, DP210101367, CE170100009). \end{acknowledgments}
\section*{Contributions} D.B., L.S.M. and S.H.-G. conceived the study. The device was fabricated by C.M. and A.A.D. E.B. and A.A.D. constructed the experimental setup. The experiment and data analysis were done by E.B., assisted by A.A.D. Theoretical modelling was done by L.S.M., S.H.-G., D.B., E.B., and P.L. E.B. and S.H.-G. wrote the manuscript. All authors contributed to discussions and preparation of the manuscript. All work was carried out under the supervision of S.H.-G. and K.B.W.
\pagebreak
\widetext \begin{center} \textbf{\large Demonstration of an entangling gate between non-interacting qubits using the Quantum Zeno effect - Supplementary Information} \end{center}
\setcounter{equation}{0} \setcounter{figure}{0} \setcounter{table}{0} \setcounter{page}{1} \makeatletter \renewcommand{S\arabic{equation}}{S\arabic{equation}} \renewcommand{S\arabic{figure}}{S\arabic{figure}} \renewcommand{\bibnumfmt}[1]{[S#1]} \renewcommand{\citenumfont}[1]{S#1}
\onecolumngrid \subsection*{Device parameters} The superconducting 3D cavity was made of tin-plated copper, and sealed with indium. The cavity supported a $\text{TEM}_{101}$ mode at $\omega_c / 2\pi = 7.32$ GHz. The transition frequencies of the transmons were far detuned from each other and from the cavity mode in order to achieve dispersive coupling and suppress $2^{nd}$-order interactions (through the cavity mode) between the transmons. In order for the dispersive coupling constant $\chi$ to be roughly equal for both transmons, $q_1$ was fabricated with longer pads compared with $q_2$. This set the dipole couplings to $g_1\approx430$ MHz for $q_1$ and $g_2\approx110$ MHz for $q_2$. The transmons and JPAs were fabricated by aluminum deposition on resist patterns formed by electron beam lithography, with a layer of ZEON ZEP 520A resist on a layer of MicroChem 8.5 MMA EL11 resist on top of a silicon substrate. Development of the resist was done at room temperature for MMA and at $0^{\circ}$C for ZEP. The Al/AlOx/Al Josephson junctions were fabricated using a suspended-bridge fabrication process \cite{BridgeJJs} for the JPAs and a bridge-free process \cite{PlusMethod} for the transmons.
We used a JPA in phase-sensitive mode to amplify the Zeno drive signal at frequency $\omega_{fe}$ by $\sim12$ dB and our readout signal at frequency $\omega_{gg}$ by $\sim15$ dB. The frequencies are separated by $\sim14$ MHz, and amplification at each frequency was enabled by changing the flux-pump frequency. The pumps were applied to the system sequentially, with a $256$ ns delay between them, due to the finite JPA bandwidth. The JPA had a $\sim3.6$ MHz bandwidth, corresponding to a single-photon decay time of $1/\kappa_{JPA}\approx50$ ns.
The full system schematics are described in Fig.~\ref{fig:fullSystemSchematics}.
\subsection*{Escape chance due to finite measurement rate} Let us consider a GKLS equation \begin{equation}
\frac{d\rho }{dt} =-i\mathcal{H}\rho+\Gamma \mathcal{D}[P]\rho
\end{equation}
where $\mathcal{H}\rho\equiv [H,\rho]$, with $H=H^\dagger$ for the time being, and $P$ is an arbitrary operator satisfying $P^\dagger P =P$ (i.e., an orthogonal projector). The dissipator is explicitly given by
\begin{equation}
\mathcal{D}[P]\rho\equiv P\rho P^\dagger - (P^\dagger P \rho+\rho P^\dagger P )/2=P\rho P-(P\rho +\rho P)/2
\end{equation}
Using the notation of \cite{unity1}, $\mathcal{D}$ is already in spectral representation as $\mathcal{D}=d_0 \mathcal{P}_0+d_1 \mathcal{P}_1$ with eigenvalues $d_0=0,d_1=-\frac{1}{2}$ and spectral projectors $\mathcal{P}_1=-2\mathcal{D}$ and $\mathcal{P}_0=\textrm{id}-\mathcal{P}_1$. $\mathcal{P}_0$ is the only peripheral part (i.e., its corresponding eigenvalue lies on the imaginary axis), so $\mathcal{P}_\varphi =\mathcal{P}_0.$ The corresponding reduced resolvent is $S_0=-2\mathcal{P}_1$ and the Zeno generator $\mathcal{H}_Z=\mathcal{P}_0\mathcal{H}\mathcal{P}_0$. We are interested in the distance between the full evolution and the Zeno evolution. Let us first look at this on the peripheral (non-decaying) subspace, using Eq. (B.13) of \cite{unity1},
\begin{equation} \label{bound1}
e^{t(\Gamma \mathcal{D}-i\mathcal{H})}\mathcal{P}_0-e^{t(\Gamma \mathcal{D}-i\mathcal{H}_Z)}\mathcal{P}_0=
\frac{1}{\Gamma} \left( iS_0\mathcal{H}\mathcal{P}_0e^{-it \mathcal{H}_z}-ie^{t(\Gamma \mathcal{D}-i\mathcal{H})}S_0\mathcal{H}\mathcal{P}_0+
\int_0^t ds e^{(t-s)(\Gamma \mathcal{D}-i\mathcal{H})}[\mathcal{H},S_0\mathcal{H}\mathcal{P}_0]\mathcal{P}_0e^{-is \mathcal{H}_z} \right) \end{equation}
It is both convenient and operationally meaningful to bound this expression in the diamond norm $\|\cdot\|_\diamond$ in which CPTP maps are contractions \cite{watrous}. $e^{t(\Gamma \mathcal{D}-i\mathcal{H})}$, $\mathcal{P}_0 e^{-it \mathcal{H}_z}$ and $\mathcal{P}_\varphi=\mathcal{P}_0$ are automatically CPTP. Note that $\mathcal{P}_1$ is not necessarily CPTP, but we have $\|\mathcal{P}_1\|_\diamond=\|\textrm{id}-\mathcal{P}_0\|_\diamond\le 2.$ We can therefore bound the right hand side of Eq. (\ref{bound1}) by
\begin{equation}
\left\|\left(e^{t(\Gamma \mathcal{D}-i\mathcal{H})}-e^{t(\Gamma \mathcal{D}-i\mathcal{H}_Z)}\right)\mathcal{P}_0\right\|_\diamond \le\frac{8}{\Gamma}(\|\mathcal{H}\|_\diamond+t \|\mathcal{H}\|_\diamond^2) \end{equation}
where we used the triangle inequality and submultiplicativity of the diamond norm. Let us now look at the decaying part $\mathcal{P}_1$. We can compute it using an integral representation as
\begin{equation}\label{bound2}
\left( e^{t(\Gamma \mathcal{D}-i\mathcal{H})}-e^{t(\Gamma \mathcal{D}-i\mathcal{H}_Z)}\right)\mathcal{P}_1=
-i\int _0^t ds e^{(t-s)(\Gamma \mathcal{D}-i\mathcal{H})}\mathcal{H}e^{-s\Gamma /2}\mathcal{P}_1 \end{equation}
This is bounded as \begin{equation}
\left\| \left( e^{t(\Gamma \mathcal{D}-i\mathcal{H})}-e^{t(\Gamma \mathcal{D}-i\mathcal{H}_Z)}\right)\mathcal{P}_1\right\|_\diamond \le 4\|\mathcal{H}\|_\diamond\frac{1-e^{-t\Gamma/2}}{\Gamma} \end{equation}
We can furthermore bound $\|\mathcal{H}\|_\diamond\le 2\|H\|_\infty$. In total, using $\mathcal{P}_0+\mathcal{P}_1=\textrm{id}$ we obtain
\begin{equation}\label{bound22}
\|e^{t(\Gamma \mathcal{D}-i\mathcal{H})}-e^{t(\Gamma \mathcal{D}-i\mathcal{H}_z)}\|_\diamond\le \frac{16\|H\|_\infty}{\Gamma}\left(1+t\|H\|_\infty +\frac{1-e^{-t\Gamma/2}}{2}\right) \end{equation}
Since $\|e^{t(\Gamma \mathcal{D}-i\mathcal{H})}\mathcal{P}_1\|_\diamond \le e^{-t\Gamma/2}$, we can finally write \begin{equation}\label{bound33}
\mathcal{E}_\diamond \equiv\|e^{t(\Gamma \mathcal{D}-i\mathcal{H})}-e^{-it\mathcal{H}_z}\mathcal{P}_0\|_\diamond\le\frac{16\|H\|_\infty}{\Gamma}\left(1+t\|H\|_\infty +\frac{1-e^{-t\Gamma/2}}{2}\right)+e^{-t\Gamma/2} \end{equation}
This bound is completely general and improves the bounds given in \cite{unity1} through a better choice of norm. In our experiment, $\|H\|_\infty =\frac{\Omega_R}{2}$ and gate time $t=\frac{2\pi}{\Omega_R}$, and we obtain
\begin{equation}
\mathcal{E}_\diamond\le \frac{8 \Omega_R}{\Gamma}(1+\pi +\frac{1-e^{-\pi\Gamma/\Omega_R }}{2})+e^{-\pi\Gamma/\Omega_R } \end{equation} This bound becomes non-trivial (that is, smaller than $2$) only when $\frac{\Omega_R}{\Gamma }<0.06,$ and can be linearly loosened to obtain $\mathcal{E}_\diamond \le 38\frac{\Omega_R}{\Gamma}$ as mentioned in the main text.
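The following short numerical sketch evaluates this bound as a function of the single parameter $\Omega_R/\Gamma$ and compares it with the loosened linear form $38\,\Omega_R/\Gamma$.
\begin{verbatim}
import numpy as np

def bound(x):   # x = Omega_R / Gamma
    return 8 * x * (1 + np.pi + (1 - np.exp(-np.pi / x)) / 2) + np.exp(-np.pi / x)

for x in [0.01, 0.02, 0.05, 0.1]:
    print(x, bound(x), 38 * x)   # full bound vs. loosened linear bound
\end{verbatim}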
\subsection*{Simulation details}
\begin{figure*}
\caption{The schematics of the full experimental system. We apply all the drives to our system using modulated signals, using single sideband modulators, which are controlled with Operator-X (OPX) by Quantum Machines. We use one signal generator to drive both qutrit transitions. We set it at a frequency of $100$ MHz above the $\Ket{g}\leftrightarrow\Ket{e}$ transition frequency and $75$ MHz below the $\Ket{e}\leftrightarrow\Ket{f}$ transition frequency, so we can generate these intermediate frequencies with the OPX. We use another signal generator to drive the qubit. The signal generator is set to $150$ MHz above the qubit's transition frequency. We use a third signal generator set to $125$ MHz above the cavity frequency, to drive the cavity, pump the JPA and down-convert the readout signal after it passes through the cavity. The pumping signal is frequency multiplied to drive the JPA at the doubled frequency of the cavity while maintaining the phase between the cavity drive and the pump.
}
\label{fig:fullSystemSchematics}
\end{figure*}
We simulated the full experimental system, including the transmons, the cavity and their interactions under the dispersive approximation, using the master equation
\begin{equation}
\dot{\rho}=-i[H,\rho] +\kappa\mathcal{D}[a]\rho +\sum_{i} \Gamma_i\mathcal{D}[\sigma_-^{(i)}]\rho +\sum_{i} \dfrac{\gamma_i}{2}\mathcal{D}[\sigma_z^{(i)}]\rho \end{equation}
where $\Gamma_i$, $\gamma_i$, $\sigma_-^{(i)}$ and $\sigma_z^{(i)}$ are the relaxation rate, dephasing rate, lowering operator and Pauli-z operator of the $i^{\text{th}}$ transition, respectively. The transitions are $i\in\{f\leftrightarrow e_1,\ e_1\leftrightarrow g_1,\ e_2\leftrightarrow g_2\}$. The master equation takes into account the natural dephasing and relaxation rates of the qutrit, the qubit and the cavity. The full Hamiltonian we used, written in the interaction picture, is
\begin{equation}
\begin{split}
H/\hbar&=\left(\chi_1\Ket{e_1}\Bra{e_1} + \chi_2\Ket{e_2}\Bra{e_2} +\chi_f\Ket{f}\Bra{f}\right)a^\dagger a\\
&+\dfrac{\alpha_c}{2}a^\dagger aa^\dagger a +\alpha_1\Ket{f}\Bra{f}\\
&+i\dfrac{\Omega_R}{2}\left(e^{-i(\alpha_1+\delta_{ef}(\varepsilon))t}\Ket{e_1}\Bra{f}-e^{i(\alpha_1+\delta_{ef}(\varepsilon))t}\Ket{f}\Bra{e_1}\right) \\
& + \frac{\delta _{g_1e_1}(\varepsilon)}{2}(\Ket{g_1}\Bra{g_1}-\Ket{e_1}\Bra{e_1}) \\
& +\frac{\delta_{g_2e_2}(\varepsilon)}{2}(\Ket{g_2}\Bra{g_2}-\Ket{e_2}\Bra{e_2})
\end{split}
\label{eq:simHamilt} \end{equation}
where $\alpha_c\approx-\dfrac{\chi_1^2(\omega_c-\omega_{q1})}{\alpha_1^2}-\dfrac{\chi_2^2(\omega_c-\omega_{q2})}{\alpha_2^2}\approx40$ kHz is the cavity self-Kerr \cite{Elliott_2018}, which had a negligible effect, and $\delta_{ij}(\varepsilon)$ is the Stark shift of the $i\leftrightarrow j$ transition frequency induced by both the Zeno drive and the symmetric drive with an amplitude of $\varepsilon$. Including $\delta_{ij}(\varepsilon)$ in the Hamiltonian corresponds to driving the Stark-shifted frequency in the experiment, as we have done. The Stark shift was calibrated beforehand by simulating a Ramsey experiment with the drives on and extracting the phase accumulation rate between all the relevant states.
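For completeness, the sketch below shows how a master equation of this dispersive form can be integrated numerically with QuTiP. It is deliberately simplified: the time-dependent Rabi and cavity drives of Eq.~\eqref{eq:simHamilt} and the pure-dephasing terms are omitted, and all numerical values are placeholders rather than the calibrated experimental parameters.
\begin{verbatim}
import numpy as np
from qutip import basis, destroy, qeye, tensor, mesolve

Nc = 10                                    # cavity Fock-space truncation
a  = tensor(destroy(Nc), qeye(3), qeye(2))                         # cavity
e1 = tensor(qeye(Nc), basis(3, 1) * basis(3, 1).dag(), qeye(2))    # |e_1><e_1|
f1 = tensor(qeye(Nc), basis(3, 2) * basis(3, 2).dag(), qeye(2))    # |f><f|
e2 = tensor(qeye(Nc), qeye(3), basis(2, 1) * basis(2, 1).dag())    # |e_2><e_2|

# dispersive shifts (rad/ns); placeholder numbers, not the calibrated ones
chi1, chi2, chif = 2 * np.pi * np.array([4e-3, 4e-3, 10e-3])
H = (chi1 * e1 + chi2 * e2 + chif * f1) * a.dag() * a

# relaxation only (dephasing omitted); placeholder rates in 1/ns
kappa, G1, G2 = 1e-3, 1e-5, 1e-5
sm1 = tensor(qeye(Nc), basis(3, 0) * basis(3, 1).dag(), qeye(2))   # qutrit e->g
sm2 = tensor(qeye(Nc), qeye(3), basis(2, 0) * basis(2, 1).dag())   # qubit  e->g
c_ops = [np.sqrt(kappa) * a, np.sqrt(G1) * sm1, np.sqrt(G2) * sm2]

psi0 = tensor(basis(Nc, 0),
              (basis(3, 0) + basis(3, 1)).unit(),
              (basis(2, 0) + basis(2, 1)).unit())
tlist = np.linspace(0, 1000, 201)                                  # ns
result = mesolve(H, psi0, tlist, c_ops, e_ops=[a.dag() * a])
print(result.expect[0][-1])              # final mean cavity photon number
\end{verbatim}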
\subsection*{Calibration of cavity drives} We calibrated the amplitude of both the Zeno drive and the symmetric drive using Ramsey interferometry on the first transition of the qutrit $q_1$. We measured the detuning of the Ramsey frequency due to the Stark shift induced by each drive separately for various set voltages. By fitting the data to
\begin{equation} \mathrm{Re}[\mu_{gg,eg}]=\dfrac{(\omega_{gg}-\omega_{eg})|\varepsilon|^2}{\Delta_{gg}\Delta_{eg}}, \end{equation} we were able to calibrate the drive amplitude $\varepsilon$ that is actually incident on the cavity. Following this calibration, we repeated the measurement with both drives applied simultaneously, once when the qubit $q_2$ was in the ground state and once when it was in the excited state (shown in Fig.~\ref{fig:RIPcancelation}). The induced Stark shift was almost equal in both cases, which means the symmetric drive does indeed cancel the entangling phase accumulation, as desired. We measured and simulated this shift for all the possible transitions and used the shifted frequencies to drive them when applying the Zeno gate.
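As an illustration of this calibration step, the sketch below fits synthetic detuning-versus-voltage data to the quadratic model above with \texttt{scipy.optimize.curve\_fit}; the detunings, the voltage-to-amplitude conversion factor and the data themselves are made-up placeholders used only to show the structure of the fit.
\begin{verbatim}
import numpy as np
from scipy.optimize import curve_fit

two_pi = 2 * np.pi
w_diff = -two_pi * 2.0e6   # omega_gg - omega_eg (rad/s), placeholder
d_gg   = two_pi * 12.0e6   # drive detuning with qubits in |gg>, placeholder
d_eg   = d_gg - w_diff     # drive detuning with qubits in |eg>

def model(volt, eps_per_volt):
    eps = eps_per_volt * volt                        # amplitude at the cavity
    return w_diff * eps**2 / (d_gg * d_eg) / two_pi  # Ramsey detuning in Hz

volts = np.linspace(0.05, 0.5, 10)
true_factor = two_pi * 8.0e6                         # rad/s per volt, placeholder
synthetic = model(volts, true_factor)                # stand-in for measured data

popt, _ = curve_fit(model, volts, synthetic, p0=[two_pi * 5e6])
print(popt[0] / two_pi)                              # recovered factor in Hz/V
\end{verbatim}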
\begin{figure}
\caption{The mean Ramsey frequency detuning from an artificial $5$ MHz detuning, caused by the Zeno drive and the symmetric drive, as a function of their amplitude $\varepsilon$. We measured oscillations between $\Ket{g}$ and $\Ket{e}$ of the qutrit when the qubit is in $\Ket{g}$ (blue) and $\Ket{e}$ (red). Circles represent experimental results and squares represent simulated results. The additional triangles represent results of a simulation without the symmetric drive, for reference. The simulated phase was linearly fitted to obtain the detuning, although it showed small oscillations around the linear trend. The amplitude of these oscillations increased with $\varepsilon$ and was negligible for small amplitudes ($\varepsilon<2.5$ MHz).}
\label{fig:RIPcancelation}
\end{figure}
\subsection*{Tomography details} To perform full state tomography we measure all the operators that span the operator space, a total of 36 operators for a qutrit and a qubit. We partially map each operator onto the measured projector $\Ket{gg}\Bra{gg}$, as was done in \cite{MattReedThesis}. We then acquire the expectation value of each of the 36 operators. To reconstruct the density matrix we use a method of maximum likelihood estimation (MLE), as described in \cite{SINGH20163051}, to find the most likely valid density matrix. If the trace of the density matrix is less than $1$, the MLE process will effectively add an appropriately scaled completely mixed state to achieve $\text{Tr}[\rho]=1$.
\subsection*{Different initial states and incorrect mapping error}
To better understand the effect of the Zeno gate on our system, we applied it to different states in the computational qubit-qubit subspace, and evaluated the resulting fidelity and concurrence. The states were $\Ket{gg},\Ket{ge},\Ket{eg},\Ket{ee},\Ket{++},\Ket{-+},\Ket{+-}$ and $\Ket{--}$. The results are shown in Fig.~\ref{fig:AllInits}. The error due to escape from the Zeno subspace is evident in the application of the gate to $\Ket{ee}$. In addition, a considerable deviation from the simulation is clear and is due to the incorrect mapping error. The simulation shows a large population in $\Ket{fe}$ but the experiment shows an almost completely mixed state. The pulses we used to control the system were Gaussian pulses with $\sigma\approx20$ ns, so that their width in Fourier space is $\Delta f\approx50$ MHz. As $\chi_f=10$ MHz, the pulse would not map states of the type $\Ket{fe}\otimes\Ket{n}$ with $n\gtrapprox5$ correctly. We used a toy model to examine this error. We reconstructed the simulated density matrix after truncating all the states with $n>5$. Fig.~\ref{fig:n-cut} shows the resulting density matrix compared with the experimental result and the full simulation. The truncated case fits the experiment better, which strengthens our assumption that this error is causing the discrepancy. A more accurate approach, such as including the tomography pulses in the simulation, could be used, but would not necessarily give more significant insight.
\begin{figure*}
\caption{Experimental tomography and simulation results of the Zeno gate applied to different initial states. The black squares are partially filled to represent the amplitude, and the color of the filling represents the phase, according to the color bar.}
\label{fig:AllInits}
\end{figure*}
\begin{figure}
\caption{A comparison between the experiment, the full simulation, and a truncated simulation keeping only states with $n<6$, for the Zeno gate applied to the initial state $\Ket{ee}$. The density matrix from the truncated simulation was reconstructed using the MLE method. Truncating states with high photon numbers is roughly similar to applying tomography pulses of finite bandwidth. Indeed, we see greater overlap between the experiment and the truncated simulation.}
\label{fig:n-cut}
\end{figure}
\subsection*{Post-selection procedure} We used an error-detection and post-selection procedure to improve the performance of the Zeno gate (Fig.~\ref{fig:fdltyVSEpsilon}b). High amplitude of the amplified signal that was transmitted through the cavity during the time of the gate indicates an escape to the blocked $\Ket{fe}$ state (as shown in Fig.~\ref{fig:PostSelectionData}). We post-selected our tomography data by ignoring measurements with amplitudes above a set threshold when reconstructing the density matrix. The post-selection percentage is the percentage of measurements we ignore, which was increased by decreasing the set threshold.
\begin{figure}
\caption{Histogram of the in-phase amplified transmitted signal amplitude at frequency $\omega_{fe}$. The red and blue lines are for reference, representing $\Ket{fe}$ (escape) and $\Ket{ee}$ (block), respectively. We prepared each state and then applied a readout pulse for a time that is equal to the gate time $t=\dfrac{2\pi}{\Omega_R}$. The gray line is the control measurement of the Zeno drive signal that we detected during the time of the gate. We used this control measurement for our post selection procedure.}
\label{fig:PostSelectionData}
\end{figure}
\subsection*{Zeno dynamics in the Markovian and non-Markovian regimes} In this experiment we worked in the non-Markovian regime, which cannot be described by the simple form of Eq.~\ref{eq:idealMasterEq} in the main text; nevertheless our system displayed Zeno dynamics leading to entanglement, and showed quantitative agreement with numerical simulations. It would be instructive to model the system in this non-Markovian regime and see if a closed form expression connecting the effective blocking rate and the applied drives can be derived. We leave this question to future theoretical work.
In terms of the effect itself, the Markovian regime can yield a much improved gate fidelity, since the effective measurement rate can be made much larger than the competing dephasing. This can be reached by increasing $\kappa$ and $\chi$, or by slowing down the gate. For example, if in our system the gate is slowed to $10\,\mu$s and coherence times are ignored, a fidelity of 90\% can be achieved (as shown in Fig.~\ref{fig:epsilon sweep long time}).
\begin{figure}
\caption{Results of a simulation of the Zeno gate with the same $\chi$'s and $\kappa$ as in the experimental setup but a different $\Omega_R/2\pi=0.1$ MHz and infinite coherence times. Gate fidelity (blue) and concurrence (red) versus the amplitude of the Zeno drive are shown. Lines are guides to the eye. We see that in the Markovian regime the gate fidelity is increased significantly.}
\label{fig:epsilon sweep long time}
\end{figure}
\end{document} |
\begin{document}
\title{Radius of Starlikeness for Bloch Functions}
\author[Somya Malik]{Somya Malik} \address{Department of Mathematics \\National Institute of Technology\\Tiruchirappalli-620015, India } \email{[email protected]}
\author{V. Ravichandran} \address{Department of Mathematics \\National Institute of Technology\\Tiruchirappalli-620015, India } \email{[email protected]; [email protected]}
\begin{abstract}
For normalised analytic functions $f$ defined on the open unit disc $\mathbb{D}$ satisfying the condition $\sup_{z\in \mathbb{D}}(1-|z^2|) |f'(z)|\leq 1$, known as Bloch functions, we determine various starlikeness radii. \end{abstract}
\subjclass[2010]{30C80, 30C45}
\thanks{The first author is supported by the UGC-JRF Scholarship.}
\maketitle
\section{Introduction}The class $\mathcal{A}$ consists of all analytic functions $f$ on the disc $\mathbb{D}:=\{z\in \mathbb{C}: |z|<1\}$ that are normalized by the conditions $f(0)=0$ and $ f'(0)=1$. The class $\mathcal{S}$ consists of all univalent functions $f\in \mathcal{A}$. The class $\mathcal{B}$ of Bloch functions consists of all functions $f\in\mathcal{A}$ satisfying $ \sup_{z\in \mathbb{D}}(1-|z|^2)|f'(z)|\leq 1$ (see \cite{Bonk}, \cite{Pom}). Bonk \cite{Bonk} has shown that the radius of starlikeness of the class $\mathcal{B}$ is the same as the radius of univalence and it equals $1/\sqrt{3}\approx 0.57735$. It also follows from his distortion inequalities that the radius of close-to-convexity (with respect to $z$) is also $1/\sqrt{3}$. We extend the radius of starlikeness result by finding various other starlikeness radii. An analytic function $f$ is subordinate to the analytic function $g$, denoted as $f\prec g$, if there exists a function $w:\mathbb{D}\rightarrow \mathbb{D}$ with $w(0)=0$ satisfying $f(z)=g(w(z))$. If $g$ is univalent, then $f\prec g$ if and only if $f(0)=g(0)$ and $f(\mathbb{D})\subseteq g(\mathbb{D})$. The subclass $\mathcal{S}^{*}$ of $\mathcal{S}$ of starlike functions is the collection of functions $f\in \mathcal{S}$ satisfying $\RE (zf'(z)/f(z))>0$ for $z \in \mathbb{D}$. The subclass $\mathcal{K}$ of convex functions consists of the functions in $\mathcal{S}$ with $\RE (1+zf''(z)/f'(z))>0$ for $z\in \mathbb{D}$. This gives a characterization of these classes in terms of the class $\mathcal{P}$ of Carath\'{e}odory functions, that is, functions with positive real part: analytic functions $p$ with $p(0)=1$ satisfying $\RE (p(z))>0$, or equivalently the subordination $p(z)\prec (1+z)/(1-z)$. Thus, the classes of starlike and convex functions consist of $f\in \mathcal{A}$ with $zf'(z)/f(z) \in \mathcal{P}$ and $1+zf''(z)/f'(z) \in \mathcal{P}$ respectively. Several subclasses of starlike and convex functions were defined using subordination of $zf'(z)/f(z)$ and $1+zf''(z)/f'(z)$ to some function in $\mathcal{P}$. Ma and Minda \cite{MaMinda} gave a unified treatment of growth, distortion, rotation and coefficient inequalities for functions in the classes $\mathcal{S}^{*}(\varphi)=\{f\in \mathcal{A}:zf'(z)/f(z) \prec \varphi (z) \}$ and $\mathcal{K}(\varphi)=\{f\in \mathcal{A}:1+zf''(z)/f'(z) \prec \varphi (z)\}$, where $\varphi \in \mathcal{P}$ is starlike with respect to $1$, symmetric about the real axis and satisfies $\varphi '(0)>0$. Numerous classes were defined for various choices of the function $\varphi$ such as $(1+Az)/(1+Bz),\ \mathit{e}^{z},\ z+\sqrt{1+z^2}$ and so on.
For any two subclasses $\mathcal{F}$ and $\mathcal{G}$ of $\mathcal{A}$, the $\mathcal{G}$-radius of the class $\mathcal{F}$, denoted by $R_{\mathcal{G}} (\mathcal{F})$, is the largest number $R_{\mathcal{G}} \in (0,1)$ such that $r^{-1}f(rz)\in \mathcal{G}$ for all $f\in \mathcal{F}$ and $0<r<R_{\mathcal{G}}$. We determine, for the class of Bloch functions, the radii of various subclasses of starlike functions, such as starlikeness associated with the exponential function, the lune, the cardioid and a particular rational function.
\section{Radius Problems}
In 2015, Mendiratta \emph{et al.} \cite{Exp} introduced a subclass $S^{*}_{\mathit{e}}$ of starlike functions associated with the exponential function. This class $S^{*}_{\mathit{e}}$ consists of all functions $f\in\mathcal{A}$ satisfying the subordination $ zf'(z)/f(z)\prec e^z$. This subordination is equivalent to the inequality $|\log (zf'(z)/f(z))| <1$. Our first theorem gives the $S^{*}_{\mathit{e}}$-radius of Bloch functions.
\begin{theorem}
The $S^{*}_{\mathit{e}}$-radius of the class $\mathcal{B}$ of Bloch functions is \[\mathcal{R}_{S^{*}_{\mathit{e}}}(\mathcal{B})= \frac{1}{4} \sqrt{3} \left(3-3 e+\sqrt{1-10 e+9 e^2}\right) \approx 0.517387.\] The obtained radius is sharp. \end{theorem} \begin{proof} For functions $f \in \mathcal{B}$, Bonk \cite{Bonk} proved the following inequality
\begin{equation}\label{eqn1}
\left|\dfrac{zf'(z)}{f(z)}-\frac{\sqrt{3}}{\sqrt{3}-r}\right|\leq \frac{\sqrt{3}r}{(\sqrt{3}-r)(\sqrt{3}-2r)},\ \ |z|=r<\frac{1}{\sqrt{3}}.
\end{equation} The function \[h(r):=\frac{\sqrt{3}}{\sqrt{3}-r}-\frac{\sqrt{3}r}{(\sqrt{3}-r)(\sqrt{3}-2r)}=\frac{3-3\sqrt{3}r}{(\sqrt{3}-r)(\sqrt{3}-2r)}\] is a decreasing function of $r$ for $0\leq r<1/\sqrt{3}=\mathcal{R}_{S^{*}}(\mathcal{B})$. The number $R=\mathcal{R}_{S^{*}_{\mathit{e}}}(\mathcal{B})<1/\sqrt{3}=\mathcal{R}_{S^{*}}(\mathcal{B})$ is the smallest positive root of the polynomial \begin{equation}\label{eqR}
2R^2+3\sqrt{3}(\mathit{e}-1)R+3(1-\mathit{e})=0 \end{equation} or
$h(R)=1/\mathit{e}$. Therefore, for $0\leq r< R$, it follows that $ 1/e=h(R)<h(r)$ and hence \begin{align}\label{eqn3} \frac{\sqrt{3}r}{(\sqrt{3}-r)(\sqrt{3}-2r)}<\frac{\sqrt{3}}{\sqrt{3}-r}-\frac{1}{\mathit{e}}. \end{align}
Thus \eqref{eqn1} and \eqref{eqn3} give \begin{align}\label{eqn3a} \left|\dfrac{zf'(z)}{f(z)}-\frac{\sqrt{3}}{\sqrt{3}-r}\right|< \frac{\sqrt{3}}{\sqrt{3}-r}-\frac{1}{\mathit{e}}, \quad |z|=r< R.\end{align}
The function $C(r)=\sqrt{3}/(\sqrt{3}-r )$ is an increasing function of $r$, so for $r\in [0,R)$,
it follows that $C(r) \in [1,C(R))\subseteq [1,C(0.6))\approx [1,1.53001)\subseteq (0.367879,1.54308)\approx (1/\mathit{e},(\mathit{e}+\mathit{e}^{-1})/2)$. By \cite[Lemma 2.2]{Exp}, for $1/\mathit{e}<c<\mathit{e}$, we have $\{w:\ \left|w-c\right|<r_c\}\subseteq \{w:\ \left|\log (w)\right|<1\}$ when $r_c$ is given by \begin{align}\label{eqn2} r_c &= \begin{dcases} c- \mathit{e}^{-1} & \text{ if }\ \mathit{e}^{-1}<c\leq \frac{\mathit{e} +\mathit{e}^{-1}}{2} ,\\ \mathit{e} -c & \text{ if }\ \frac{\mathit{e} +\mathit{e}^{-1}}{2}\leq c<\mathit{e}. \end{dcases} \end{align}
By \eqref{eqn3a}, we see that $w=zf'(z)/f(z)$, $|z|<R$, satisfies $ |w-c|<c-\mathit{e}^{-1}$ and hence it follows that $ \left|\log (w)\right|<1 $. This shows that $S^{*}_{\mathit{e}}$-radius of the class $\mathcal{B}$ is at least $R$.
We now show that $R$ is the exact $S^{*}_{\mathit{e}}$-radius of the class $\mathcal{B}$. The function $f:\mathbb{D}\to\mathbb{C}$ defined by \[f(z)=\dfrac{\sqrt{3}}{4}\left\{1-3\left(\dfrac{z-\sqrt{1/3}}{1-\sqrt{1/3}z}\right)^2\right\}\ =\ \dfrac{3z(3-2\sqrt{3}z)}{(3-\sqrt{3}z)^2}\] is an example of a function in the class $\mathcal{B}$ and it serves as an extremal function for the various problems. For this function, we have \[\dfrac{zf'(z)}{f(z)}=\dfrac{3\sqrt{3}-9z}{2\sqrt{3}z^2-9z+3\sqrt{3}}.\] Using the equation \eqref{eqR}, we get
$2\sqrt{3}R^2-9R+3\sqrt{3}=\mathit{e} (3\sqrt{3}-9R)$, thus, for $z=R$ \begin{align*}
\left|\log \left(\dfrac{zf'(z)}{f(z)}\right)\right| &=\left|\log \left(\dfrac{3\sqrt{3}-9z}{2\sqrt{3}z^2-9z+3\sqrt{3}}\right)\right|
= \left|\log \left(\dfrac{1}{\mathit{e}}\right)\right|=1. \end{align*} This proves that $R$ is the exact $S^{*}_{\mathit{e}}$-radius of the class $\mathcal{B}$. \end{proof}
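The radius in the theorem can be checked numerically; the following short script (not part of the proof) evaluates the closed form, the smallest positive root of \eqref{eqR}, and $h$ at that point.
\begin{verbatim}
import numpy as np

e, s3 = np.e, np.sqrt(3)
R_closed = 0.25 * s3 * (3 - 3 * e + np.sqrt(1 - 10 * e + 9 * e**2))
roots = np.roots([2, 3 * s3 * (e - 1), 3 * (1 - e)])        # polynomial (eqR)
R_poly = min(r.real for r in roots if r.real > 0)
h = lambda r: (3 - 3 * s3 * r) / ((s3 - r) * (s3 - 2 * r))
print(R_closed, R_poly, h(R_closed), 1 / e)  # ~0.517387, ~0.517387, ~1/e, 1/e
\end{verbatim}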
Sharma \emph{et al.} \cite{Cardioid} studied the class $\mathcal{S}^{*}_{c}=\mathcal{S}^{*} (\phi _c)= \mathcal{S}^{*} (1+(4/3)z+(2/3)z^2)$ and gave the following result \cite[Lemma 2.5]{Cardioid}: for $1/3<c<3,$ if \begin{align}\label{eqn4} r_c &= \begin{dcases} \frac{3c-1}{3} & \text{ if }\ \frac{1}{3}<c\leq \frac{5}{3}\\ 3-c & \text{ if } \ \frac{5}{3}\leq c<3 \end{dcases} \end{align}
then $\{w: |w-c|<r_c\} \subseteq \Omega _c$. Here $\Omega_c$ is the region bounded by the cardioid $\{x+i y: (9x^2+9y^2-18x+5)^2 -16(9x^2+9y^2-6x+1)=0\}.$
\begin{theorem} \
The $S^{*}_{c}$-radius of the class $\mathcal{B}$ is $\mathcal{R}_{S^{*}_{c}}(\mathcal{B})\approx 0.524423$, the smallest positive root of the equation $R^2+3\sqrt{3}R-3=0$. This radius is sharp. \end{theorem}
\begin{proof}
$R=\mathcal{R}_{S^{*}_{c}}$ is the smallest positive root of the equation \[R^2+3\sqrt{3}R-3=0.\]
The function \[h(r):=\frac{\sqrt{3}}{\sqrt{3}-r}-\frac{\sqrt{3}r}{(\sqrt{3}-r)(\sqrt{3}-2r)}=\frac{3-3\sqrt{3}r}{(\sqrt{3}-r)(\sqrt{3}-2r)}\]
is a decreasing function of $r$ for $0\leq r<1/\sqrt{3}=\mathcal{R}_{S^{*}}$ \cite[Corollary, p.~455]{Bonk}. Note that the class $\mathcal{S}^{*}_{c}$ is a subclass of the starlike class $\mathcal{S}^{*}$, and that $R=\mathcal{R}_{S^{*}_{c}}$ is the smallest positive root of the equation $h(r)=1/3$. For $0\leq r< R$, we have
\begin{align}\label{eqn5}
\frac{\sqrt{3}r}{(\sqrt{3}-r)(\sqrt{3}-2r)}<\frac{\sqrt{3}}{\sqrt{3}-r}-\frac{1}{3}
\end{align}
Thus \eqref{eqn1} and \eqref{eqn5} give \[\left|\dfrac{zf'(z)}{f(z)}-\dfrac{1}{1-ar}\right|< \frac{1}{1-ar}-\frac{1}{3}; \ |z|\leq r,\ a=\frac{1}{\sqrt{3}}.\]
The center $C(r)$ of \eqref{eqn1} is an increasing function of $r$, so for $r\in [0,R),\ C(r) \in [1,C(R))\subseteq [1,C(0.6))\approx [1,1.53001)\subseteq (1/3,5/3)$. Now, by \eqref{eqn4} we get that the disc $\{w: |w-c|<c-1/3\} \subseteq \Omega _c$, and hence $zf'(z)/f(z)\in \Omega_c$ for $|z|<R$. This shows that the $S^{*}_{c}$-radius of the class $\mathcal{B}$ is at least $R$.

For proving sharpness, consider the function \[f(z)=\dfrac{\sqrt{3}}{4}\left\{1-3\left(\dfrac{z-\sqrt{1/3}}{1-\sqrt{1/3}z}\right)^2\right\}.\]
For this function, $\dfrac{zf'(z)}{f(z)}=\dfrac{3\sqrt{3}-9z}{2\sqrt{3}z^2-9z+3\sqrt{3}}$,
and using the equation for $R$ we get
$2\sqrt{3}R^2-9R+3\sqrt{3}= 3(3\sqrt{3}-9R)$; thus, for $z=R$,
\begin{align*}
\dfrac{zf'(z)}{f(z)}
&= \dfrac{1}{3}\\
&= \phi _c (-1).
\end{align*} Since $\phi_c(-1)=1/3$ lies on the boundary of $\Omega_c$, this proves sharpness. \end{proof}
The class $\mathcal{S}^{*}_{\leftmoon}=\mathcal{S}^{*} (z+\sqrt{1+z^2})$ was introduced in 2015 by Raina and Sok\'{o}\l{} \cite{Sokol}, who proved that $f\in \mathcal{S}^{*}_{\leftmoon} \iff zf'(z)/f(z)$ lies in the lune region $\{w: |w^2-1|<2|w|\}$. Gandhi and Ravichandran \cite[Lemma 2.1]{Lune} proved that for $\sqrt{2}-1<c\leq \sqrt{2}+1,$
\{w: |w-c|<1-|\sqrt{2}-c|\}\subseteq \{w: |w^2-1|<2|w|\} \end{align}
\begin{theorem}
The $\mathcal{S}^{*}_{\leftmoon}$-radius of the class $\mathcal{B}$ is $\mathcal{R}_{\mathcal{S}^{*}_{\leftmoon}}(\mathcal{B}) = (2\sqrt{3}-\sqrt{6})/2 \approx 0.507306$. The radius is sharp. \end{theorem}
\begin{proof}
For $R=\mathcal{R}_{\mathcal{S}^{*}_{\leftmoon}}$, the center of \eqref{eqn1} satisfies $C(R)=\sqrt{2}$; since $C(r)$ is an increasing function of $r$, it follows that $1\leq C(r)<\sqrt{2}$ for $0\leq r< R$, that is, \[ \sqrt{2}-C(r)> 0 \quad \text{for}\ 0\leq r<R.\]
So, $R=\mathcal{R}_{\mathcal{S}^{*}_{\leftmoon}}$ is the smallest positive root of the equation \[(2-2\sqrt{2})R^2+\sqrt{3}(3\sqrt{2}-6)R+3(2-\sqrt{2})=0.\]
The function \[h(r):=\frac{\sqrt{3}}{\sqrt{3}-r}-\frac{\sqrt{3}r}{(\sqrt{3}-r)(\sqrt{3}-2r)}=\frac{3-3\sqrt{3}r}{(\sqrt{3}-r)(\sqrt{3}-2r)}\]
is a decreasing function of $r$ for $0\leq r<1/\sqrt{3}=\mathcal{R}_{S^{*}}$ \cite[Corollary, p.~455]{Bonk}. Note that the class $\mathcal{S}^{*}_{\leftmoon}$ is a subclass of the starlike class $\mathcal{S}^{*}$, and that $R=\mathcal{R}_{\mathcal{S}^{*}_{\leftmoon}}$ is the smallest positive root of the equation $h(r)=\sqrt{2}-1$. For $0\leq r< R$, we have
\begin{align}\label{eqn7}
\frac{\sqrt{3}r}{(\sqrt{3}-r)(\sqrt{3}-2r)}<1-\sqrt{2} +\frac{\sqrt{3}}{\sqrt{3}-r}=1-\left|\sqrt{2}-\frac{\sqrt{3}}{\sqrt{3}-r}\right|.
\end{align}
Thus \eqref{eqn1} and \eqref{eqn7} give \[\left|\dfrac{zf'(z)}{f(z)}-\dfrac{1}{1-ar}\right|<1-\left|\sqrt{2}-\frac{1}{1-ar}\right|; \ |z|\leq r,\ a=\frac{1}{\sqrt{3}}.\]
The center $C(r)$ of \eqref{eqn1} is an increasing function of $r$, so for $r\in [0,R),\ C(r) \in [1,C(R))\subseteq [1,C(0.6))\approx [1,1.53001)\subseteq (\sqrt{2}-1,\sqrt{2}+1)$. Now, by \eqref{eqn6} we get that $zf'(z)/f(z)$ lies in the lune for $|z|<R$, so $R$ is the required radius.

To prove sharpness, consider the function \[f(z)=\dfrac{\sqrt{3}}{4}\left\{1-3\left(\dfrac{z-\sqrt{1/3}}{1-\sqrt{1/3}z}\right)^2\right\}.\]
For this function, $\dfrac{zf'(z)}{f(z)}=\dfrac{3\sqrt{3}-9z}{2\sqrt{3}z^2-9z+3\sqrt{3}}$,
and we can easily see that for $z=\frac{1}{2}[2\sqrt{3}-\sqrt{6}],$
\[\left|\left(\dfrac{zf'(z)}{f(z)}\right)^2-1\right|=\ 2\left(\dfrac{zf'(z)}{f(z)}\right)\ =\ 2(\sqrt{2}-1).\] Thus, the result is sharp. \end{proof}
The next class that we consider is the class of starlike functions associated with a rational function. Kumar and Ravichandran \cite{Rational} introduced the class of starlike functions associated with the rational function $\psi (z)=1+ ((z^2+kz)/(k^2-kz))$ where $k=\sqrt{2}+1$, denoted by $\mathcal{S}^{*}_{R}=\mathcal{S}^{*}(\psi(z))$. They proved \cite[Lemma 2.2]{Rational} that for $2(\sqrt{2}-1)<c<2,$ if \begin{align}\label{eqn8} r_c &= \begin{dcases} c-2(\sqrt{2}-1)\ & \text{ if }\ 2(\sqrt{2}-1)<c\leq \sqrt{2}\\ 2-c\ & \text{ if }\ \sqrt{2}\leq c<2 \end{dcases} \end{align}
then $\{w: |w-c|<r_c\} \subseteq \psi (\mathbb{D})$.
\begin{theorem}
The $\mathcal{S}^{*}_{R}$-radius of the class $\mathcal{B}$ is the smallest positive root of the polynomial $4(1-\sqrt{2})r^2+3\sqrt{3}(2\sqrt{2}-3)r+3(3-2\sqrt{2})$, that is, $\mathcal{R}_{\mathcal{S}^{*}_{R}}(\mathcal{B}) \approx 0.349865$. The result is sharp. \end{theorem} \begin{proof}
$R=\mathcal{R}_{\mathcal{S}^{*}_{R}}$ is the smallest positive root of the equation \[4(1-\sqrt{2})R^2+3\sqrt{3}(2\sqrt{2}-3)R+3(3-2\sqrt{2})=0.\]
The function \[h(r):=\frac{\sqrt{3}}{\sqrt{3}-r}-\frac{\sqrt{3}r}{(\sqrt{3}-r)(\sqrt{3}-2r)}=\frac{3-3\sqrt{3}r}{(\sqrt{3}-r)(\sqrt{3}-2r)}\]
is a decreasing function of $r$ for $0\leq r<1/\sqrt{3}=\mathcal{R}_{S^{*}}$ \cite[Corollary, p.~455]{Bonk}. Note that the class $\mathcal{S}^{*}_{R}$ is a subclass of the starlike class $\mathcal{S}^{*}$, and that $R=\mathcal{R}_{\mathcal{S}^{*}_{R}}$ is the smallest positive root of the equation $h(r)=2(\sqrt{2}-1)$. For $0\leq r< R$, we have
\begin{align}\label{eqn9}
\frac{\sqrt{3}r}{(\sqrt{3}-r)(\sqrt{3}-2r)}<\frac{\sqrt{3}}{\sqrt{3}-r}-2(\sqrt{2}-1)
\end{align}
Thus \eqref{eqn1} and \eqref{eqn9} give \[\left|\dfrac{zf'(z)}{f(z)}-\dfrac{1}{1-ar}\right|< \frac{1}{1-ar}-2(\sqrt{2}-1); \ |z|\leq r,\ a=\frac{1}{\sqrt{3}}.\]
The center $C(r)$ of \eqref{eqn1} is an increasing function of $r$, so for $r\in [0,R),\ C(r) \in [1,C(R))\subseteq [1,C(0.4))\approx [1,1.30029)\subseteq (2(\sqrt{2}-1),\sqrt{2})$. Now, by \eqref{eqn8} we get that the disc $\{w: |w-c|<c-2(\sqrt{2}-1)\} \subseteq \psi (\mathbb{D})$, and hence $zf'(z)/f(z)\in \psi(\mathbb{D})$ for $|z|<R$.

To show that the result is sharp, consider the function \[f(z)=\dfrac{\sqrt{3}}{4}\left\{1-3\left(\dfrac{z-\sqrt{1/3}}{1-\sqrt{1/3}z}\right)^2\right\}.\]
For this function, $\dfrac{zf'(z)}{f(z)}=\dfrac{3\sqrt{3}-9z}{2\sqrt{3}z^2-9z+3\sqrt{3}}$,
and using the equation for $R$ we get
$3\sqrt{3}-9R=(2\sqrt{2}-2)(2\sqrt{3}R^2-9R+3\sqrt{3})$; thus, for $z=R$,
\begin{align*}
\dfrac{zf'(z)}{f(z)}
&= 2\sqrt{2}-2\\
&= \psi(-1).
\end{align*} \end{proof}
\begin{theorem}
For the class $\mathcal{B}$ the following results hold:
\begin{enumerate}
\item The Lemniscate starlike radius,\ $R_{\mathcal{S}^{*}_{L}}=\frac{2\sqrt{3}-\sqrt{6}}{4}\approx 0.253653.$
\item The starlike radius associated with the sine function,\ $R_{\mathcal{S}^{*}_{sin}}=\frac{\sqrt{3}\sin 1}{2+2\sin 1}\approx 0.395735.$
\item The nephroid radius, \ $R_{\mathcal{S}^{*}_{Ne}}=\frac{\sqrt{3}}{5}\approx 0.34641.$
\item The sigmoid radius,\ $R_{\mathcal{S}^{*}_{SG}}=\frac{\sqrt{3}(\mathit{e}-1)}{4\mathit{e}}\approx 0.273716.$
\end{enumerate}
\end{theorem}
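The four radii listed in the theorem evaluate numerically as follows (a quick check, not part of the proofs).
\begin{verbatim}
import numpy as np

s3, e = np.sqrt(3), np.e
print((2 * s3 - np.sqrt(6)) / 4)              # lemniscate   ~0.253653
print(s3 * np.sin(1) / (2 + 2 * np.sin(1)))   # sine         ~0.395735
print(s3 / 5)                                 # nephroid     ~0.346410
print(s3 * (e - 1) / (4 * e))                 # sigmoid (SG) ~0.273716
\end{verbatim}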
\end{document} |
\begin{document}
\title{Bijections in de Bruijn Graphs}
\begin{abstract} A T-net of order $m$ is a graph with $m$ nodes and $2m$ directed edges, where every node has indegree and outdegree equal to $2$. (Well-known examples of T-nets are de Bruijn graphs.) Given a T-net $N$ of order $m$, there is the so-called ``doubling'' process that creates a T-net $N^*$ from $N$ with $2m$ nodes and $4m$ edges. Let $\vert X\vert$ denote the number of Eulerian cycles in a graph $X$. It is known that $\vert N^*\vert=2^{m-1}\vert N\vert$. In this paper we present a new proof of this identity. Moreover, we prove that $\vert N\vert\leq 2^{m-1}$.\\ Let $\Theta(X)$ denote the set of all Eulerian cycles in a graph $X$ and $S(n)$ the set of all binary sequences of length $n$. Exploiting the new proof we construct a bijection $\Theta(N)\times S(m-1)\rightarrow \Theta(N^*)$, which allows us to solve one of Stanley's open questions: we find a bijection between de Bruijn sequences of order $n$ and $S(2^{n-1})$. \end{abstract}
\section{Introduction}
In 1894, A. de Rivi\`{e}re formulated a question about the existence of circular arrangements of $2^n$ zeros and ones such that every word of length $n$ appears exactly once \cite{RIVIERE}. Let $B_0(n)$ denote the set of all such arrangements. (We apply the convention that the elements of $B_0(n)$ are binary sequences that start with $n$ zeros.) The question was solved in the same year by C. Flye Sainte-Marie \cite{MARIE}, together with a formula for counting these arrangements: $\vert B_0(n)\vert =2^{2^{n-1}-n}$. However, the paper was then forgotten. The topic became well known through the paper of N.G. de Bruijn, who proved the same formula for the size of $B_0(n)$ \cite{DEBRUIJN}. Some time later, the paper of C. Flye Sainte-Marie was rediscovered by Stanley, and it turned out that both proofs were essentially the same \cite{DEBRUIJN2}.
The proof uses a relation between $B_0(n)$ and the set of Eulerian cycles in a certain type of T-nets: A T-net $N$ of order $m$ is defined as a graph with $m$ nodes and $2m$ directed edges, where every node has indegree and outdegree equal to $2$ (a T-net is often referred to as a balanced digraph with indegree and outdegree of nodes equal to $2$, see for example \cite{STANLEY_AC}). N.G. de Bruijn defined a doubled T-net $N^*$ of $N$. A doubled T-net $N^*$ of $N$ is a T-net such that: \begin{itemize} \item each node of $N^*$ corresponds to an edge of $N$ \item two nodes in $N^*$ are connected by an edge if their corresponding edges in $N$ are incident and the ending node of one edge is the starting node of the second edge. \end{itemize} \begin{remark} We call two edges incident if they share at least one common node; the orientation of edges does not matter. \end{remark} As a result $N^*$ has $2m$ nodes and $4m$ edges, see an example in Figure \ref{fg_doubling_simple}. (A doubled T-net of $N$ is also known as the line graph of $N$ \cite{KISHORE}.)
\begin{figure}
\caption{A doubling of a de Bruijn graph: $N$ and $N^*$}
\label{fg_doubling_simple}
\end{figure}
Let $\Theta(X)$ be the set of all Eulerian cycles in $X$ and let $\vert X \vert = \vert \Theta(X)\vert $ denote the number of Eulerian cycles in $X$, where $X$ is a graph. It was proved inductively that $\vert N^*\vert=2^{m-1}\vert N\vert$. Moreover, N.G. de Bruijn constructed a T-net (nowadays called a ``de Bruijn graph'') whose Eulerian cycles are in bijection with the elements of $B_0(n)$.
A de Bruijn graph $H_n$ of order $n$ is a T-net of order $2^{n-1}$, whose nodes correspond to the binary words of length $n-1$. A node $s_1s_2\dots s_{n-1}$ has two outgoing edges to the nodes $s_2\dots s_{n-1}0$ and $s_2\dots s_{n-1}1$. It follows that a node $s_1s_2\dots s_{n-1}$ has two incoming edges from the nodes $0s_1s_2\dots s_{n-2}$ and $1s_1s_2\dots s_{n-2}$. Given an edge $e$ going from the node $s_1s_2\dots s_{n-1}$ to the node $s_2\dots s_{n-1}s_n$, the edge $e$ corresponds to the word $s_1s_2\dots s_{n-1}s_n$ of length $n$, which gives the natural bijection between the Eulerian cycles $\Theta(H_n)$ and the binary sequences $B_0(n)$ \cite{DEBRUIJN}. That is why we will write $B_0(n)\equiv \Theta(H_n)$.
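The definition can be checked directly by constructing $H_n$ as an adjacency list; the following sketch builds $H_4$ and verifies that it has $2^{n-1}=8$ nodes, $2^n=16$ edges, and indegree and outdegree $2$ at every node.
\begin{verbatim}
from itertools import product

def de_bruijn_graph(n):
    nodes = [''.join(w) for w in product('01', repeat=n - 1)]
    edges = [(u, u[1:] + b) for u in nodes for b in '01']
    return nodes, edges

nodes, edges = de_bruijn_graph(4)
outdeg = {u: sum(1 for x, _ in edges if x == u) for u in nodes}
indeg  = {u: sum(1 for _, y in edges if y == u) for u in nodes}
print(len(nodes), len(edges))                            # 8 16
print(all(indeg[u] == outdeg[u] == 2 for u in nodes))    # True
\end{verbatim}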
De Bruijn graphs found several interesting applications, among others in networking, \cite{BAKER}, and bioinformatics, \cite{PAVEL}, \cite{ZERBINO}.
The important property of de Bruijn graphs is that a doubled T-net of a de Bruijn graph of order $n$ is a de Bruijn graph of order $n+1$, see an example in Figure \ref{fg_doubling_simple} of the de Bruijn graph of order $3$ ($H_3=N$) and of order $4$ ($H_4=N^*$). Since $\vert B_0(2)\vert=1$ ($B_0(2)=\{0011\}$), it has been derived that $\vert B_0(n)\vert =2^{2^{n-1}-n}$ \cite{BAKER}, \cite{DEBRUIJN}, \cite{DEBRUIJN2}.
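For small $n$ the formula can be verified by brute force: the script below counts the cyclic binary sequences of length $2^n$ that start with $n$ zeros and contain every word of length $n$ exactly once.
\begin{verbatim}
from itertools import product

def count_B0(n):
    L, count = 2 ** n, 0
    for tail in product('01', repeat=L - n):
        s = '0' * n + ''.join(tail)
        windows = {(s + s)[i:i + n] for i in range(L)}   # cyclic length-n windows
        if len(windows) == L:                            # every word appears once
            count += 1
    return count

for n in [2, 3, 4]:
    print(n, count_B0(n), 2 ** (2 ** (n - 1) - n))       # counts match the formula
\end{verbatim}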
There is also another proof using matrix representation of graphs, \cite{STANLEY_AC}. Yet it was an open question of Stanley, \cite{STANLEY_OP}, \cite{STANLEY_AC}, if there was a bijective proof: \begin{quote} Let $B(n)$ be the set of all binary de Bruijn sequences of order $n$, and let $S(n)$ be the set of all binary sequences of length $n$. Find an explicit bijection $B(n) \times B(n)\rightarrow S(2^n)$. \end{quote} This open question was solved in 2009, \cite{KISHORE}, \cite{STANLEY_AC}.
\begin{remark} In the open question of Stanley, $B(n)$ denotes the de Bruijn sequences that do not necessarily start with $n$ zeros like in the case of $B_0$. $B(n)$ contains all $2^n$ "circular rotations" of all sequences from $B_0(n)$; formally, given $s=s_1s_2\dots s_{2^n}\in B_0(n) $, then $s_is_{i+1}\dots s_{2^n}s_1s_2\dots s_{i-1}\in B(n)$, where $1\leq i \leq 2^n$. It is easy to see that all these $2^n$ "circular rotations" are distinct binary sequences. It follows that $\vert B(n)\vert=2^n\vert B_0(n)\vert$. Hence it is enough to find a bijection $B_0(n)\rightarrow S(2^{n-1}-n)$ to solve this open question. \end{remark}
In this paper we present a new proof of the identity $\vert N^*\vert=2^{m-1}\vert N\vert$, which allows us to prove that $\vert N\vert\leq 2^{m-1}$ and to construct a bijection $\nu : \Theta(N)\times S(m-1)\rightarrow \Theta(N^*)$ and consequently to present another solution to the Stanley's open question: We define $\rho_2(\epsilon)=0011$ (recall that $B_0(2)=\{0011\}$) and let $\rho_{n} : S(2^{n-1}-n) \rightarrow B_0(n)$ be a map defined as $\rho_n(s)=\nu(\rho_{n-1}(\dot s),\ddot s)$, where $\epsilon$ is the binary sequence of length $0$,
$n>2$, $s=\dot s\ddot s$, $\dot s \in S(2^{n-2}-(n-1))$, and $\ddot s \in S(2^{n-2}-1)$. \begin{proposition} The map $\rho_n$ is a bijection. \end{proposition} \begin{proof} Note that $\dot s \in S(2^{n-2}-(n-1))$ and that the domain of $\rho_{n-1}$ is $S(2^{(n-1)-1}-(n-1))=S(2^{n-2}-(n-1))$; thus $\dot s$ is a valid input for the function $\rho_{n-1}$ and $\rho_{n-1}(\dot s)\in B_0(n-1)\equiv \Theta(H_{n-1})$. In addition, $H_{n-1}$ has $m=2^{n-2}$ nodes and $\ddot s\in S(2^{n-2}-1)$ has length $m-1$, hence it makes sense to define $\rho_n(s)=\nu(\rho_{n-1}(\dot s),\ddot s)$. Because $\nu$ is a bijection, see Proposition \ref{pr_bij_doublig_tnet}, it is easy to see by induction on $n$ that $\rho_n$ is a bijection as well. \end{proof} \begin{remark} Less formally, the bijection $\rho_n(s)$ splits the binary sequence $s$ into two subsequences $\dot s$ and $\ddot s$. Then the bijection $\rho_{n-1}$ is applied to $\dot s$, the result of which is a de Bruijn sequence $p$ from $B_0(n-1)$ (and thus an Eulerian cycle in $H_{n-1}$). Then the bijection $\nu$ is applied to $p$ and $\ddot s$. The result is a de Bruijn sequence from $B_0(n)$. \end{remark}
\section{A double and quadruple of a T-net}
\noindent Let $Y$ be a set of graphs; we define $\Theta(Y)=\bigcup_{X\in Y}\Theta(X)$ (the union of sets of Eulerian cycles in graphs from $Y$) and $\vert Y\vert =\sum_{X\in Y}\vert X\vert$ (the sum of the numbers of Eulerian cycles). Let $U(X)$ denote the set of nodes of a graph $X$.
\begin{figure}
\caption{Replacing a node by $4$ nodes and $4$ edges}
\label{fg_node_replace}
\end{figure}
We present a new way of constructing a doubled T-net, which will enable us to show a new non-inductive proof of the identity $\vert N^{*}\vert=2^{m-1}\vert N\vert$ and to prove $\vert N\vert\leq 2^{m-1}$. \begin{figure}
\caption{Removing black edges and fusing nodes}
\label{fg_node_fusion}
\end{figure}
We introduce a quadruple of $N$ denoted by $\hat N$: The quadruple $\hat N$ arises from $N$ by replacing every node $a\in U(N)$ by 4 nodes and 4 edges as depicted in Figure \ref{fg_node_replace}. Let $\Gamma(a)$ denote the set of these 4 nodes and $\Pi(a)$ denote the set of these 4 edges that have replaced the node $a$. The edges from $\Pi(a)$ are shown in blue in the figures and we will distinguish blue and black edges as follows: In a graph containing at least one blue edge, we define an Eulerian cycle to be a cycle that traverses all blue edges exactly once and all black edges exactly twice, see Figure \ref{fg_doubling}.
\begin{figure}
\caption{An example of $N$, $\hat N$, and $N^*$}
\label{fg_doubling}
\end{figure}
\begin{remark} Note that a quadruple $\hat N$ is not a T-net, since the indegree and outdegree are not always equal to $2$. But since the black edges can be traversed twice, we can consider them as parallel edges (two edges that are incident to the same two nodes). Then it would be possible to regard $\hat N$ as a T-net. \end{remark}
By removing the black edges and ``fusing'' their incident nodes into one node in $\hat N$ (as depicted in Figure \ref{fg_node_fusion}), we obtain a doubled T-net $N^{*}$ of $N$. The reverse process yields $\hat N$ from $N^*$: turn all edges from black to blue and then replace every node by two nodes connected by one black edge, where one node has two outgoing blue edges and one incoming black edge and the second node has two incoming blue edges and one outgoing black edge. Thus we have a natural bijection between Eulerian cycles in $\hat N$ and $N^*$. See an example in Figure \ref{fg_doubling}. \begin{remark} If all edges in a graph are in one color, then it makes no difference whether they are black or blue. An Eulerian cycle in that case traverses every edge exactly once. \end{remark}
\begin{figure}
\caption{Edges replacement. Case I}
\label{fg_edges_repl_a}
\end{figure} \begin{figure}
\caption{Edges replacement. Case II}
\label{fg_edges_repl_b}
\end{figure}
Fix an order on nodes $U(N)$. As a result we have a bijection $\phi: \{1,2,\dots ,m\} \rightarrow U(N)$. Given $i\in \{1,2,\dots ,m\}$, let us denote the edges from $\Pi(\phi(i))$ by $t,u,v,z$, in such a way that $t$ and $v$ are not incident edges; it follows that $u$ and $z$ are not incident as well.
Let $W_0=\{\hat N\}$, we define $W_i=\{\dot w,\ddot w\mid w\in W_{i-1}\}$, where $i\in\{1,2,\dots ,m\}$ and $\dot w$, $\ddot w$ are defined as follows: We construct the graph $\dot w$ by removing edges $t,v$ from $w$ and by changing the color of $u,z$ from blue to black (thus allowing the edges $u,z$ to be traversed twice). Similarly we construct $\ddot w$ from $w$ by removing edges $u,z$ and by changing the color of $t,v$ from blue to black, where $t,u,v,z\in \Pi(\phi(i))$.
The crucial observation is: \begin{proposition} \label{pr_half_of_euler_paths} Let $w\in W_i$, where $i\in\{0,1,\dots ,m-2\}$. Then $\vert w \vert= 2\vert \dot w\vert + 2\vert \ddot w\vert$. \end{proposition} \begin{remark} The following proof is almost identical to the one in \cite{DEBRUIJN}, where the author constructed two graphs $d_1, d_2$ from a graph $d$ and proved that $\vert d\vert = 2\vert d_1\vert + 2\vert d_2\vert$. \end{remark} \begin{proof}
Given an Eulerian cycle $g$ in $w$, split $g$ into four paths $A,B,C,D$ and the edges $t,u,v,z \in \Pi(\phi(i))$. We will count the number of Eulerian cycles in $\dot w, \ddot w$ that are composed from all four paths $A,B,C,D$ and that differ only in their connections on the edges $t,u,v,z$. Exploiting N.G. de Bruijn's notation, all possible cases are depicted in Figures \ref{fg_edges_repl_a} and \ref{fg_edges_repl_b}. \begin{itemize} \item In case I, the graph $w$ contains $4$ Eulerian cycles: AtBzDuCv, AtCuBzDv, AtCvDuBz, AzDuBtCv; whereas the graphs $\dot w$ and $\ddot w$ together have $2$ Eulerian cycles: AzDuCuBz and AtBtCvDv. Thus $\vert w\vert=4$ and $\vert \dot w\vert$+$\vert \ddot w\vert=2$. \item In case II, the graph $w$ contains $4$ Eulerian cycles: AtCuDvBz, AtDuCvBz, AzBtCuDv, AzBtDuCv; whereas the graph $\ddot w$ has $2$ Eulerian cycles: AtCvBtDv, AtDvBtCv. The graph $\dot w$ is disconnected and therefore $\dot w$ has $0$ Eulerian cycles. Thus $\vert w\vert=4$ and $\vert \dot w\vert$+$\vert \ddot w\vert=2$. In case II, it is possible that $A=B$ or $C=D$. In such a case, $\vert w\vert=2$ and $\vert \dot w\vert$+$\vert \ddot w\vert=1$. \end{itemize} This ends the proof. \end{proof}
\noindent We define $\Delta = \{w \mid w\in W_m \mbox{ and $w$ is connected}\}$. Figure \ref{fg_tree} shows an example of all iterations and the construction of the graphs in $\Delta$ from the graph $\hat N$, where $N$ is the de Bruijn graph of order $3$. The order of the nodes of $N$ is $00<10<01<11$. Most of the disconnected graphs are omitted.
\begin{figure}
\caption{Constructing the set $\Delta$ from $\hat N$}
\label{fg_tree}
\end{figure}
\begin{remark} In the previous proof, in case II, it can happen that $A=B$ or $C=D$. Note that in the iteration step $i=m$ (when constructing $W_m$ from $W_{m-1}$) it holds that $A=B$ and $C=D$, because all nodes have indegree and outdegree equal to $1$ with the exception of the nodes $\Gamma(\phi(m-1))$. Hence $\vert W_{m-1}\vert = \vert W_{m}\vert$. It follows as well that every connected graph $w\in W_{m-1}$ has exactly one Eulerian cycle. That is why in Proposition \ref{pr_half_of_euler_paths} we consider $i\in\{0,1,\dots ,m-2\}$. \end{remark}
\begin{corollary} \label{cor_rel_iterated_sets} $\vert W_{i-1}\vert = 2\vert W_i\vert$ and $\vert W_{m-1}\vert = \vert W_m\vert$, where $i\in \{1,2,\dots ,m-1\}$. \end{corollary}
\begin{proposition} $2^{m-1}\vert \Delta \vert=\vert N^{*}\vert = \vert \hat N \vert$. \end{proposition} \begin{proof} The only graphs in $W_m$ that contain an Eulerian cycle are the connected ones, that is, the graphs from $\Delta$. On the other hand, every graph $w\in \Delta$ contains exactly one Eulerian cycle, since every node has indegree and outdegree equal to $1$. The proposition then follows from Corollary \ref{cor_rel_iterated_sets}, because $\vert \hat N\vert=\vert W_0\vert$ (recall that $W_0=\{\hat N\}$). \end{proof}
\begin{proposition} \label{pr_bij_n_delta} There are bijections between $\Theta(N)$ and $\Theta(\Delta)$, and between $\Theta(W_{m-1})$ and $\Theta(W_{m})$. \end{proposition} \begin{proof} Given a connected graph $w\in W_{m-1}$, exactly one of the graphs $\dot w$ and $\ddot w$ is connected. Let us say it is $\dot w$. Recall that there is exactly one Eulerian cycle $AtCuCvAz$ in $w$ ($A=B$ and $C=D$, see Figure \ref{fg_edges_repl_b}). Then $AtCv$ is the only Eulerian cycle in $\dot w\in \Delta \subset W_{m}$. This shows a bijection between $\Theta(W_{m-1})$ and $\Theta(W_{m})=\Theta(\Delta)$.
Let $\bar p = p_1p_2\dots p_{4m}$ be the only Eulerian cycle in $w \in \Delta$, where $p_i$ are edges of $w$. Without loss of generality suppose that $p_1\in \Pi(a)$ for some $a\in U(N)$ (this means that $p_1$ is a blue edge in $\hat N$). It follows that all $p_i$ with $i$ odd are blue edges in $\hat N$ and all $p_i$ with $i$ even are edges from $N$ (they are black edges in $\hat N$); in consequence the path $p=p_2p_4\dots p_{4m}$ is an Eulerian cycle in $N$. The conversion of the Eulerian cycle in $w$ into the Eulerian cycle $p$ in $N$ is schematically depicted in Figure \ref{fg_node_replacement_to_simple}. Thus we have a bijection between $\Theta(N)$ and $\Theta(\Delta)$. This ends the proof. \end{proof}
\begin{figure}
\caption{Converting an Eulerian cycle from $\Delta$ into an Eulerian cycle in $N$}
\label{fg_node_replacement_to_simple}
\end{figure}
\begin{corollary} Let $N$ be a T-net of order $m$. Then $N$ has at most $2^{m-1}$ Eulerian cycles, that is, $\vert N\vert\leq 2^{m-1}$. \end{corollary} \begin{proof} The set $W_{m-1}$ contains $2^{m-1}$ graphs, and recall that every connected graph $w\in W_{m-1}$ has exactly one Eulerian cycle. The result then follows from $\vert W_{m-1}\vert=\vert W_{m}\vert$ and $\Delta\subseteq W_{m}$. \end{proof}
\section{Bijection of binary sequences and de Bruijn sequences}
Given $i\in \{1,2,\dots ,m\}$, in the previous section we agreed that the edges from $\Pi(\phi(i))$ are denoted by $t,u,v,z$ in such a way that $t$ and $v$ are not incident (and consequently $u$ and $z$ are not incident either). For this section we need these edges to be ordered, hence let us suppose that $t<u<v<z$. This will allow us to identify the edges ``uniquely''.
Let us look again at Figure \ref{fg_edges_repl_a}. We can identify the path $A$ as the path between the incident nodes of the edge $z$ that does not contain the edges $t,u,v$. In a similar way we can identify $B,C,D$.
In Figure \ref{fg_edges_repl_b} we cannot distinguish $A$ from $B$ and $C$ from $D$ only by the edges $t,u,v,z$. If $A\not =B$, then let $\delta$ be the first node where $A$ and $B$ differ. The node $\delta$ has two outgoing blue edges, let us say they are $t,z$. We use this difference to distinguish $A$ and $B$. Let us define $A$ to be the path that follows the edge $t$ from $\delta$ and $B$ the path that follows the edge $z$ from $\delta$. Again, in a similar way we can distinguish $C$ from $D$. Hence let us suppose we have an ``algorithm'' that splits an Eulerian cycle $p\in \Theta(W_i)$ into the paths $A,B,C,D$ and edges $t,u,v,z \in \Pi(\phi(i))$ for given $N,i$ (recall that the nodes of $N$ are ordered and thus $i$ determines the node $\phi(i)\in U(N)$). We introduce the function $\omega_{N,i}: (p,\alpha)\rightarrow \Theta(W_{i-1})$, where \begin{itemize} \item $N$ is a T-net of order $m$ \item $i\in \{1,\dots,m-1\}$ \item $p\in \Theta(W_i)$ \item $\alpha\in \{0,1\}$ \end{itemize}
\begin{remark} Less formally, the function $\omega$ transforms an Eulerian cycle $p\in \Theta(W_{i})$ into an Eulerian cycle $\bar p\in \Theta(W_{i-1})$ for given $N,i,\alpha$. \end{remark}
\noindent Given $N$ and $i$, we define for the case I (Figure \ref{fg_edges_repl_a}):\\ $\omega_{N,i}(AzDuCuBz,0)=AtBzDuCv$\\ $\omega_{N,i}(AzDuCuBz,1)=AtCuBzDv$\\ $\omega_{N,i}(AtBtCvDv,0)=AtCvDuBz$\\ $\omega_{N,i}(AtBtCvDv,1)=AzDuBtCv$\\ For the case II (Figure \ref{fg_edges_repl_b}), where $A\not=B$ and $C\not =D$:\\ $\omega_{N,i}(AtCvBtDv,0)=AtCuDvBz$\\ $\omega_{N,i}(AtCvBtDv,1)=AzBtCuDv$\\ $\omega_{N,i}(AtDvBtCv,0)=AtDuCvBz$\\ $\omega_{N,i}(AtDvBtCv,1)=AzBtDuCv$\\ For the case II where $A=B$ and $C\not =D$:\\ $\omega_{N,i}(AtCvAtDv,0)=AtCuDvAz$\\ $\omega_{N,i}(AtCvAtDv,1)=AtDuCvAz$\\ For the case II where $A\not =B$ and $C=D$:\\ $\omega_{N,i}(AtCvBtCv,0)=AtCuCvBz$\\ $\omega_{N,i}(AtCvBtCv,1)=AzBtCuCv$\\ Now, when we fixed an order on edges at the beginning of this section, it is necessary to distinguish another possibility in the case II, namely the paths $A,B$ can be paths between incident nodes of the edge $t$ that do not contain edges $u,v,z$ and $C,D$ can be paths between incident nodes of the edge $v$ that do not contain edges $t,u,z$. Obviously, in this case it is possible to define $\omega$ in a similar way. To save some space we do not present an explicit definition.
\begin{remark} The previous definition of $\omega_{N,i}(p,\alpha)$ can be modified according to the reader's needs, including the way the paths $A,B,C,D$ are recognized. It only matters that $\omega_{N,i}$ is injective. Our definition is just one possible way. \end{remark}
\begin{remark} To understand the definition of $\omega$ correctly, recall that when comparing two Eulerian cycles it does not matter which edge is written first. For example, the sequences $AtCuDvAz$ and $AzAtCuDv$ represent the same Eulerian cycle. \end{remark}
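To make this identification of rotated edge sequences concrete, here is a small Python check (our own illustration; encoding a cycle as a string of path and edge labels is done only for this example):
\begin{verbatim}
def same_cycle(p, q):
    # Two edge sequences describe the same Eulerian cycle exactly when
    # one is a cyclic rotation of the other.
    return len(p) == len(q) and any(
        (q + q)[i:i + len(p)] == p for i in range(len(q)))

# AzAtCuDv is a rotation of AtCuDvAz, so both stand for the same cycle:
print(same_cycle("AtCuDvAz", "AzAtCuDv"))   # True
\end{verbatim}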
Let $S(n)$ denote the set of all binary sequences of length $n$.
\begin{proposition} \label{pr_bij_doublig_tnet} Let $N$ be a T-net of order $m$, $s=s_1s_2\dots s_{m-1} \in S(m-1)$ be a binary sequence, and $p\in \Theta(N)$. We define $p=p^{m-1}$ and $p^{i-1}=\omega_{N,i}(p^{i},s_i)$, where $i \in \{1,2,\dots ,m-1\}$. Then the map $\nu : \Theta(N)\times S(m-1)\rightarrow \Theta(N^*)$ defined as $\nu(p,s)=p^{0}$ is a bijection. \end{proposition} \begin{proof} Recall that there is a bijection between $\Theta(N)$ and $\Theta(W_{m-1})$, see Proposition \ref{pr_bij_n_delta}; hence we can suppose that $p\in \Theta(W_{m-1})$.\\ The definition of the function $\omega$ implies that $\omega_{N,i}(p, \alpha)=\omega_{N,i}(\bar p, \bar \alpha)$ if and only if $p=\bar p$ and $\alpha=\bar \alpha$. It follows that $\nu$ is injective. In addition we proved that $\vert N\vert=\vert W_{m-1}\vert$ and that $2^{m-1}\vert N\vert=\vert \hat N\vert=\vert W_0\vert$. Consequently, $\nu$ is surjective and thus bijective. \end{proof}
\end{document} |
\begin{document}
\title{Excellence in Prime Characteristic}
\centerline{\it{To Professor Lawrence Ein on the occasion of his sixtieth birthday.}}
\begin{abstract} Fix any field $K$ of characteristic $p$ such that $[K:K^p]$ is finite. We discuss excellence for domains whose fraction field is $K$, showing, for example, that such a domain $R$ is excellent if and only if the Frobenius map is finite on $R$. Furthermore, we show $R$ is excellent if and only if it admits some non-zero $p^{-e}$-linear map for $R$ (in the language of \cite{BlicBock}), or equivalently, that the Frobenius map $R \rightarrow F_*R$ defines a {\bf solid} $R$-algebra structure on $F_*R$ (in the language of \cite{HochSolid}). In particular, this means that generically $F$-finite, Frobenius split Noetherian domains are always excellent. We also show that non-excellent rings are abundant and easy to construct {in prime characteristic}, even within the world of regular local rings of dimension one inside function fields. \end{abstract}
\section{Introduction}
The notion of {\bf excellence} for a commutative ring was introduced by Grothendieck. A Noetherian ring is excellent, essentially, if the ring satisfies a list of axioms that ensures it behaves much like a finitely generated algebra over a field; see Definition \ref{definition of excellence}. An arbitrary Noetherian ring can be quite pathological, but the class of excellent rings is supposed to be the most general setting to which one can expect the deep ideas of algebraic geometry, such as resolution of singularities, to extend.
Although excellence is a common hypothesis in the literature, excellent rings are not widely understood. They are often dismissed with sentences like the following, quoted from Wikipedia: {\it ``Essentially all Noetherian rings that occur naturally in algebraic geometry or number theory are excellent; in fact it is quite hard to construct examples of Noetherian rings that are not excellent''} \cite{wiki:exc}.
In this paper we show that on the contrary, non-excellent rings are quite easy to construct and are abundant, even among regular local rings of dimension one. Our setting is prime characteristic since Dedekind domains of characteristic zero are always excellent \cite[Cor 8.2.40]{Liu06}. The examples we construct, moreover, are {\it generically F-finite}, unlike other known examples such as {Nagata's} $k\otimes_{k^p}{k^p}[[t]]$ (whenever $[k:k^p] = \infty$), whose completion map is purely inseparable.
Excellence in prime characteristic is closely connected to another common hypothesis, that of {\it F-finiteness.} A ring of characteristic $p$ is $F$-finite if the Frobenius (or $p$-th power) map is finite. A well-known theorem of Kunz ensures that $F$-finite rings of characteristic $p$ are excellent \cite[Thm 2.5]{Kunz76}. The converse is also true under the additional hypothesis that the fraction field is $F$-finite. Put differently, a domain is $F$-finite if and only if it is excellent and generically $F$-finite. For example, for any domain $R$ whose fraction field is the function field of some algebraic variety over a perfect (e.g. algebraically closed) field, $R$ is $F$-finite if and only if $R$ is excellent. Because this fact does not seem to be well-known, we show in Section 2 how this statement follows easily from known facts in the literature.
In Section 3, we turn toward the issue of $p^{-e}$-linear maps.
For a ring $R$ of prime characteristic $p$, a $p^{-e}$-linear map is a map $R\overset{\phi}\rightarrow R$ of the underlying abelian group that satisfies $\phi(r^{p^e}s) = r\phi(s)$ for all $r, s \in R$. Any splitting of the Frobenius map is such a $p^{-1}$-linear map. In Section 3, we show that for {Noetherian} domains with F-finite fraction field, there are no non-zero $p^{-e}$-linear maps at all unless $R$ is excellent! Put differently, using the language of Hochster \cite{HochSolid}, we show that {a generically $F$-finite Noetherian domain is a solid algebra via Frobenius if and only if it is excellent. In particular, if a generically $F$-finite Noetherian domain is Frobenius split, it must be excellent.}
The study of $p^{-e}$-linear maps, or equivalently, elements of ${\text{Hom}}_R(F_*^eR,R)$, was formalized by Manuel Blickle and later used by Karl Schwede to give an alternate and more global approach to {\it test ideals,} an important topic in characteristic $p$ commutative algebra. Our results show that Schwede's approach to test ideals relies much more heavily {on} $F$-finiteness than previously understood. Test ideals can be viewed as ``prime characteristic analogs" of multiplier ideals due to the results in \cite{Smi00} and \cite{Har01} (see also \cite{HarYos03} and \cite{HarTag04}). While they have attracted great interest in birational algebraic geometry, our results in Section 4 offer a cautionary tale about the limits of this approach.
In Section 4, we consider excellence in the setting of {\it discrete valuation rings} of a function field of characteristic $p$. Excellence in this case is equivalent to the DVR being {\it divisorial,} a topic explored in \cite{DatSmi16}. We show here how this makes it easy to write down explicit examples of non-excellent discrete valuation rings. Moreover, a simple countability argument shows that among domains whose fraction field is, say, the function field of $\mathbb P^2$, {\bf non-excellent} regular local rings of dimension one are far more abundant than the excellent ones.
This paper is largely expository, drawing heavily on the work in \cite{DatSmi16} (and the corrections in \cite{DatSmi17}) where most of the harder proofs of the results discussed here appear. However, we are emphasizing a different aspect of the subject than in that paper, drawing conclusions not explicit there.
\noindent \textbf{Acknowledgments:} We are honored to help celebrate the birthday of Lawrence Ein, who has been a tremendous inspiration and support for the second author, both mathematically and personally. {We thank the referee for their careful comments, and, in particular, for suggesting a generalization of an earlier version of Proposition \ref{prop2}.}
\section{Excellence} An arbitrary Noetherian ring can exhibit pathological behavior. For instance, the integral closure of a Noetherian domain in a finite extension of its fraction field can fail to be Noetherian, and Noetherian rings can have saturated chains of prime ideals of different lengths. Excellent rings were introduced by Grothendieck in \cite[expos\'e IV]{Groth} to rule out such pathologies. We review Grothendieck's definition and some other relevant properties of excellent rings. Another good source is \cite[Chapter 32]{Mat80}.
\begin{definition} \label{definition of excellence} \cite[D\'ef 7.8.2]{Groth} A Noetherian ring $A$ is \textbf{excellent} if it satisfies the following properties: \begin{enumerate} \item $A$ is \emph{universally catenary}. This means that every finitely generated $A$-algebra has the property that for any two prime ideals $p \subseteq q$, all saturated chains of prime ideals from $p$ to $q$ have the same length.\footnote{{The first example of a non-catenary Noetherian ring was given by Nagata in \cite{Nag56}.}} \item All {\it formal fibers } of $A$ are {\it geometrically regular. } This means that for every $p \in \operatorname{Spec}(A)$, the fibers of the natural map $\operatorname{Spec}(\widehat{A_p}) \rightarrow \operatorname{Spec}(A_p)$ induced by completion along $p$ are geometrically regular in the sense that for each $x \in \operatorname{Spec}(A_p)$, the ring $\widehat{A_p} \otimes_{A_p} K$ is regular for any finite field extension $K$ of the residue field $\kappa(x)$.
\item For every finitely generated $A$-algebra $B$, the regular locus of $ \operatorname{Spec}(B)$ is open; that is, the set $$\{q \in \operatorname{Spec}(B)\, : B_q {\text{ is a regular local ring}}\}$$ is open in $\operatorname{Spec}(B)$. \end{enumerate} \end{definition}
The class of excellent rings is closed under homomorphic image, localization, and finite-type algebra extensions. Since every field is excellent, it follows that nearly every ring one is likely to encounter in classical algebraic geometry is excellent. Likewise, because the ring of integers is excellent and all complete local rings are excellent, familiar rings likely to arise in number theory are excellent as well.
{Among the many properties of excellent rings, the following, sometimes called the {\textbf{Japanese}} or {\textbf{N2}} property, will be important for us later.}
\begin{proposition} \label{Japanese} \cite[expos\'e IV, 7.8.3 (vi)]{Groth}. Let $A$ be a Noetherian excellent domain.
The integral closure of $A$ in any finite extension of its fraction field is {\it finite} as an $A$-module. \end{proposition}
{We construct discrete valuation rings of characteristic $p$ which fail to be Japanese in Subsection \ref{examples}.}
\subsection{Excellence in prime characteristic} Fix a commutative ring $R$ of prime characteristic $p$. The Frobenius map is the ring homomorphism $R \overset{F}\to R $ sending each element to its $p$-th power. It is convenient to denote the target copy of $R$ by $F_*R$. Thus the notation $F_*R$ denotes the ring $R$ but viewed as an $R$-module via the Frobenius map: an element $r \in R$ acts on $x \in F_*R$ to produce $ r^px$. Similarly, iterating the Frobenius map, $F_*^eR$ denotes the $R$-algebra defined by the $e$-th iterate of Frobenius
$R \overset{F^e}\longrightarrow F_*^eR$ sending $r\mapsto r^{p^e}$.
\begin{definition} \label{definition of F-finite} The ring $R$ is \textbf{F-finite} if the Frobenius map is finite; that is, $R$ is F-finite if $F_*R$ is a finitely generated $R$-module. \end{definition}
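A standard first example may help to fix ideas (it is included here only for orientation). Let $R = \mathbb F_p[x]$. Since $g(x)^p = g(x^p)$ for every $g \in \mathbb F_p[x]$, each $f \in R$ can be written uniquely as
$$ f \; = \; \sum_{i=0}^{p-1} g_i^p \, x^i, \qquad g_i \in R. $$
Hence $F_*R$ is a free $R$-module with basis $F_*1, F_*x, \dots, F_*x^{p-1}$, and in particular $R$ is F-finite.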
F-finite rings are ubiquitous. For example, it is easy to check that every perfect field is F-finite, and that a finitely generated algebra over an F-finite ring is F-finite. Furthermore, F-finiteness is preserved under homomorphic images, localization and completion, similar to excellence. Indeed, the two notions are closely related:
\begin{theorem} \label{ExcF-finite} Let $R$ be a Noetherian domain whose fraction field $K$ satisfies $[K:K^p] < \infty$. Then $R$ is excellent if and only if $R$ is F-finite. \end{theorem}
\begin{proof} One direction of Theorem \ref{ExcF-finite} is the following famous {result} of Kunz: \begin{theorem} \cite[Thm 2.5]{Kunz76} \label{Kunz-excellent} Let $R$ be any Noetherian ring of prime characteristic. If the Frobenius map $R\rightarrow F_*R$ is finite, then $R$ is excellent. \end{theorem}
So to prove Theorem \ref{ExcF-finite}, we only need to prove the converse under the hypothesis that $R$ is a domain with F-finite fraction field. {The ring $R^p$, with fraction field $K^p$, is excellent because it is isomorphic to $R$ via the Frobenius map}. Since $K^p\hookrightarrow K$ is finite by assumption, the integral closure $S$ of $R^p$ in $K$ is a finitely generated $R^p$-module by Proposition \ref{Japanese}. But clearly every element of $R$ is integral over $R^p$, as each $r\in R$ satisfies the integral polynomial $x^p-r^p$ over $R^p$. This means that $R$ is an $R^p$-submodule of the Noetherian $R^p$-module $S$, hence $R$ itself is a Noetherian $R^p$-module. That is, $R$ is finitely generated as an $R^p$-module, and the Frobenius map is finite. In other words, $R$ is F-finite. \end{proof}
\begin{corollary} \label{Excellent-finite-reduced} Let $R$ be a reduced, Noetherian ring of characteristic $p$ whose total quotient ring $K$ is F-finite. Then $R$ is excellent if and only if $R$ is F-finite. \end{corollary} \begin{proof} The backward implication is again a consequence of Kunz's Theorem \ref{Kunz-excellent}. So assume that $R$ is excellent. Let $q_1, \dots, q_n$ be the minimal primes of $R$. We denote the corresponding minimal primes of $R^p$ by $q^p_i$. Let $K_i$ be the fraction field of $R/q_i$, so that $K^p_i$ is the fraction field of $R^p/q^p_i$. Then we have a commutative diagram
$$ \xymatrix{ R \ar@{^{(}->}[r] &{R/q_1 \times \dots \times R/q_n} \ar@{^{(}->}[r] & {K_1 \times \dots \times K_n \cong K}\\ R^p \ar@{^{(}->}[u]\ar@{^{(}->}[r] &{R^p/q^p_1 \times \dots \times R^p/q^p_n}\ar@{^{(}->}[u] \ar@{^{(}->}[r] &{K^p_1 \times \dots \times K^p_n \cong K^p} \ar@{^{(}->}[u]} $$ where all rings involved are $R^p$-modules, and the horizontal maps are injections because $R$ is reduced. Since $R$ is excellent, so is each quotient $R/q_i$, and F-finiteness of $K$ implies that each $K_i$ is also a finitely generated $K^p_i$-module. Thus, Theorem \ref{ExcF-finite} implies that each $R/q_i$ is F-finite, that is, $R/q_i$ is a finitely generated $(R/q_i)^p = R^p/q^p_i$-module. As a consequence, $$R^p/q^p_1 \times \dots \times R^p/q^p_n \hookrightarrow R/q_1 \times \dots \times R/q_n$$ is a finite map, and so is the map $R^p \hookrightarrow R^p/q^p_1 \times \dots \times R^p/q^p_n$. This shows that $R/q_1 \times \dots \times R/q_n$ is a finitely generated $R^p$-module, and being a submodule of the Noetherian $R^p$-module $R/q_1 \times \dots \times R/q_n$, $R$ is also a finitely generated $R^p$-module. Thus, $R$ is F-finite. \end{proof}
Theorem \ref{ExcF-finite} offers a simple way to think about excellence in prime characteristic, at least for domains in function fields. In Section \ref{examplesnon-excellent}, we use Theorem \ref{ExcF-finite} to easily construct many nice examples of non-excellent rings.
{In the spirit of Theorem \ref{ExcF-finite}, there is also an equivalence of excellence and F-finiteness in a slightly different context:}
\begin{theorem} \cite[Corollary 2.6]{Kunz76} \label{Local equiv of ExcF-finite} Let $(R, \mathfrak m)$ be a Noetherian local ring with F-finite residue field. Then $R$ is excellent if and only if $R$ is F-finite. \end{theorem}
It is worth pointing out that Theorem \ref{Local equiv of ExcF-finite} is closely related to Theorem \ref{ExcF-finite}. {Indeed the backward implication follows from Theorem \ref{ExcF-finite}. Moreover, the hypothesis of Theorem \ref{Local equiv of ExcF-finite} ensures that the completion $\widehat{R}$ is $F$-finite, because Cohen's structure theorem shows that a complete Noetherian local ring of equal characteristic $ p > 0 $ is F-finite if and only if the residue field is F-finite. The new implication in Theorem \ref{Local equiv of ExcF-finite} then says that if $R$ is excellent, that is, when the completion map $R \rightarrow \widehat{R}$ is well-behaved, $F$-finiteness descends from $\widehat{R}$ to $R$.}
\subsection{Frobenius splitting vs. F-purity} The hypothesis of F-finiteness is often seen in contexts where Frobenius splitting is discussed. We recall the definitions of Frobenius splitting and the closely related notion of F-purity, which is sometimes confused with it. These notions were originally defined in \cite{MehRam85} and \cite{HR1}, respectively.
\begin{definition} \label{basic char p notions} Let $R$ be an arbitrary commutative ring of prime characteristic $p$. \begin{enumerate} \item[(a)] The ring $R$ is \textbf{Frobenius split} if the map $R \overset{F}\rightarrow F_*R$ splits as a map of $R$-modules, that is, there exists an $R$-module map $F_*R \rightarrow R$ such that the composition $$R \xrightarrow{F} F_*R \rightarrow R$$ is the identity map. \item[(b)] The ring $R$ is \textbf{F-pure} if $R \overset{F}\rightarrow F_*R$ is a pure map of $R$-modules; this means that the map remains injective after tensoring with any $R$-module $M$. \end{enumerate} \end{definition}
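Continuing the illustration of $R = \mathbb F_p[x]$ given after Definition \ref{definition of F-finite} (again a standard computation, included only for orientation): the projection $F_*R \rightarrow R$ sending $\sum_{i=0}^{p-1} g_i^p x^i \mapsto g_0$ is $R$-linear, because $r \cdot F_*f = F_*(r^p f)$ replaces each $g_i$ by $rg_i$, and it sends $1$ to $1$. Hence the composition $R \xrightarrow{F} F_*R \rightarrow R$ is the identity, so $\mathbb F_p[x]$ is Frobenius split.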
It is easy to see that Frobenius split rings are always F-pure. It is also well-known that in the presence of F-finiteness, a Noetherian ring is Frobenius split if and only if it is F-pure \cite[Corollary 5.2]{HR1}. However, the relationship between F-purity and Frobenius splitting for a general excellent ring is less understood. Corollary \ref{Excellent-finite-reduced} clarifies that, at least in a large and important setting,
there is little difference between the F-finite and excellent settings {for the question of comparing splitting versus purity}:
\begin{corollary}\label{equivInExcellent} For an excellent Noetherian reduced ring whose total quotient ring is F-finite, Frobenius splitting is equivalent to F-purity.
For an excellent local Noetherian ring whose residue field is F-finite, Frobenius splitting is equivalent to F-purity. \end{corollary}
\begin{proof}[Proof of Corollary] It easily follows from the definitions that a split map is pure, so Frobenius splitting always implies F-purity. Our hypotheses in both statements imply F-finiteness (from Corollary \ref{Excellent-finite-reduced} and Theorem \ref{Local equiv of ExcF-finite}, respectively), so splitting and purity are equivalent by \cite[Corollary 5.2]{HR1}. \end{proof}
\begin{remark} We do not know any example of an {\it excellent } F-pure ring that is not F-split. As we see in Section \ref{examplesnon-excellent}, there are plenty of non-excellent examples {even among regular local rings}. \end{remark}
\section{Maps inverse to Frobenius} Test ideals are an important technical tool in both commutative algebra and birational geometry. The original test ideal of Hochster and Huneke is the ideal generated by all the {\it test elements} for tight closure; they show such test elements exist for excellent local rings in \cite{HochHun94}. Many recent authors have taken the point of view that a slightly smaller ideal, sometimes called the non-finitistic test ideal, is the more natural object; this ideal is known to be the same as Hochster and Huneke's test ideal in many cases and conjectured to be the same quite generally. See the surveys \cite{SchweTuck12} or \cite{SmiZha15} for more information on this history.
An important insight of Schwede is that (under appropriate hypotheses) the test ideal can be defined independently of tight closure. \begin{definition} Fix an F-finite ring $R$. An ideal $J$ is said to be uniformly $F$-compatible if for all $e$ and all $\phi \in {\text{Hom}}_R(F_*^eR, R)$, we have $\phi(F^e_*(J))\subset J$. \end{definition}
It is not at all obvious that non-trivial uniformly $F$-compatible ideals exist. Schwede shows, however, using a deep theorem of Hochster and Huneke \cite[Theorem 5.10]{HochHun94}, that there is in fact a unique smallest non-zero such ideal \cite{Schw11}. This is the (non-finitistic) {\bf test ideal}.
The point we want to emphasize is that the modules $ {\text{Hom}}_R(F_*^eR, R)$ play a crucial role in this approach to test ideals. Note also that a splitting of Frobenius is a particular element of $ {\text{Hom}}_R(F_*^1R, R)$, namely a map $F_*R\overset{\phi}\longrightarrow R$ satisfying $\phi(1) = 1$.
Our next theorem shows, however, that there is little hope to use this approach beyond the F-finite case.
\begin{theorem}\label{F-finiteExcellent} Let $R$ be a Noetherian domain of characteristic $p$ whose fraction field is $F$-finite. Then the following are equivalent: \begin{enumerate} \item $R$ is excellent. \item The Frobenius endomorphism $R\overset{F} \longrightarrow F_* R $ is finite. \item The module ${\operatorname{Hom}}_R(F_*R, R)$ is non-zero. \item For all $e > 0$, the module ${\operatorname{Hom}}_R(F^e_*R, R)$ is non-zero. \item There exists $e > 0$ such that ${\operatorname{Hom}}_R(F^e_*R, R)$ is not the trivial module. \end{enumerate} \end{theorem}
Conditions (3)-(5) in Theorem \ref{F-finiteExcellent} can be stated using Hochster's notion of a {\bf solid algebra}.
\begin{definition} An $R$-algebra $A$ is {\bf solid} if there exists a non-trivial $R$-module map $A\rightarrow R$. \end{definition}
Thus condition (3) above precisely states that $F_*R$ is a solid $R$-algebra via Frobenius, or equivalently, that $R$ is a solid $R^p$-algebra. Similarly conditions (4) and (5) deal with the solidity of $R$ over $R^{p^e}$. The theorem states that if $R$ is a domain whose fraction field is F-finite, then $R$ is a solid algebra via Frobenius if and only if $R$ is excellent.
\begin{remark} It is worth emphasizing that the generic F-finiteness assumption in Theorem \ref{F-finiteExcellent} is essential. Fix a field $k$ of characteristic $p$ such that $ [k : k^p ] = \infty$. Then $R = k[x_1, \dots , x_n] $ is an excellent domain that is not F-finite; in this case $F_*^eR$ is a free $R$-module, so there are many non-zero maps in $\operatorname{Hom}_R(F^e_*R, R)$. \end{remark}
\begin{remark} There are many applications of the module $ {\text{Hom}}_R(F_*^eR, R)$ which motivate its study more generally. Schwede was the first to apply it to the test ideal in \cite{Schw09} and \cite{Schw10}, but the $R$-module $ {\text{Hom}}_R(F_*^eR, R)$ plays a role in many related stories in birational geometry in characteristic $p$. For example, under suitable hypothesis including F-finiteness, the module ${\text{Hom}}_R(F_*^eR, R)$ can be identified with the global sections of the sheaf $F_*^e\mathcal O_X( (1 - p^e)K_X )$ on $X = $ Spec $R$. Each section of this sheaf can be identified with a $\mathbb Q$-divisor $\Delta$ on Spec $R$ such that $K_X+\Delta$ is $\mathbb Q$-Cartier. This idea is applied to understanding log-Fano varieties in prime characteristic in \cite{SchwSmit10}. Many other applications are described in \cite{SchweTuck12} and \cite{BlickSchw13}.
It is also worth pointing out for the experts in tight closure that for a local F-finite ring $R$, the uniformly F-compatible ideals defined in terms of the module $ {\text{Hom}}_R(F_*^eR, R)$ can be interpreted as dual to the $\mathcal F(E)$-submodules of $E$ (where $E$ is an injective hull of the residue field and $\mathcal F(E)$ is the ring of all Frobenius operators on it) studied in \cite{LyuSmi01}, the largest of which is the tight closure of zero. The dual characterization used by Schwede to define the test ideal was first carried out in the Gorenstein case in \cite{Smi95} and \cite{Smi97}. \end{remark}
The proof of Theorem \ref{F-finiteExcellent} requires the following lemma, which is independent of the characteristic.
\begin{lemma}\label{lem1}
Let $R \overset{f}\rightarrow S$ be an injective ring homomorphism of Noetherian domains such that the induced map of fraction fields $\operatorname{Frac}(R) \hookrightarrow \operatorname{Frac}(S)$ is finite. If the canonical map $$S \rightarrow {\operatorname{Hom}}_R({\operatorname{Hom}}_R(S,R),R)$$ is injective, then f is also a finite map.
\end{lemma}
\begin{proof} Note that if $M$ is a finitely generated $R$-module, then also ${\text{Hom}}_R(M,R)$ is finitely generated. Thus, the lemma follows by Noetherianity if we can show that ${\text{Hom}}_R(S,R)$ is a finitely generated $R$-module. Let $n$ be the degree of the field extension $\operatorname{Frac}(S)/\operatorname{Frac}(R). $ Then there exists a basis $x_1, \dots, x_n$ of $ \operatorname{Frac}(S) $ over $\operatorname{Frac}(R)$ such that $x_i \in S$ \cite[5.1.7]{AM69}.
Let $ T$ be the free $ R$-submodule of $S$ generated by the $x_i$. It is clear that $S/T$ is a torsion $R$-module. Then applying ${\text{Hom}}_R(-,R)$ to the short exact sequence $$0 \rightarrow T \rightarrow S \rightarrow S/T \rightarrow 0 $$ we get the exact sequence
$$0 \rightarrow {\text{Hom}}_R(S/T, R) \rightarrow {\text{Hom}}_R(S, R) \rightarrow {\text{Hom}}_R(T, R).$$ Since $S/T$ is a torsion $ R$-module and $R$ is a domain, ${\text{Hom}}_R(S/T,R) = 0. $ Thus, ${\text{Hom}}_R(S,R)$ is a submodule of ${\text{Hom}}_R(T,R),$ which is free of rank $n$. But $R$ is a Noetherian ring, and so ${\text{Hom}}_R(S,R)$ is also finitely generated. \end{proof}
A necessary condition for the injectivity of $S \rightarrow \operatorname{Hom}_R(\operatorname{Hom}_R(S,R),R) $ in the situation of the previous lemma is for the module $\operatorname{Hom}_R(S,R)$ to be non-trivial. If only the non-triviality of this module is assumed, injectivity of $S \rightarrow \operatorname{Hom}_R(\operatorname{Hom}_R(S,R),R) $ follows for a large class of examples as shown in the following result:
\begin{proposition} \label{prop2} {Let $ R \overset{f}\hookrightarrow S$ be an injective ring homomorphism of arbitrary domains such that the induced map $\operatorname{Frac}(R) \hookrightarrow \operatorname{Frac}(S)$ is algebraic. If $S$ is a solid $R$-algebra, then the canonical map $S \rightarrow \operatorname{Hom}_R(\operatorname{Hom}_R(S,R),R)$ is injective. If, in addition, $R$ and $S$ are Noetherian and $f$ is generically finite, then $f$ is a finite map.}
\end{proposition}
\begin{proof} { By non-triviality of $\operatorname{Hom}_R(S,R)$, there exists an $R$-linear map $S\overset{\phi} \rightarrow R$ such that $\phi(1) \neq 0$, and so, for all non-zero $r \in R$, $\phi(r) = r\phi(1) \neq 0$. For the injectivity of $$S \rightarrow \operatorname{Hom}_R(\operatorname{Hom}_R(S,R),R),$$ it suffices to show that for each non-zero $s \in S$, there exists $\varphi \in \operatorname{Hom}_R(S, R)$ such that $\varphi(s)\neq 0$. Now since $s$ is algebraic over $\operatorname{Frac}(R)$, there exists $\sum_{i = 0}^n a_iT^i \in R[T]$ such that $a_0 \neq 0$, and $$a_ns^n + a_{n-1}s^{n-1} + \dots + a_1s + a_0 = (a_ns^{n-1} + a_{n-1}s^{n-2} + \dots + a_1)s + a_0 = 0.$$ Let $\ell_{\lambda}$ denote left multiplication by $\lambda$, where $\lambda := a_ns^{n-1} + a_{n-1}s^{n-2} + \dots + a_1 \in S$. Then $\phi \circ \ell_{\lambda} \in \operatorname{Hom}_R(S, R)$, and $$\phi \circ \ell_{\lambda}(s) = \phi (-a_0) = -a_0\phi(1) \neq 0,$$ which proves injectivity of $S \rightarrow \operatorname{Hom}_R(\operatorname{Hom}_R(S,R),R)$.}
{If $R \overset{f} \rightarrow S$ is a generically finite map of Noetherian domains, then $f$ is a finite map by Lemma \ref{lem1} and what we just proved.} \end{proof}
\begin{remark} As a special case of Proposition \ref{prop2}, we obtain the following result: Let $R$ be any domain and $K$ be any field containing $R$. If the integral closure $\overline{R}$ of $R$ in $K$ is a solid $R$-algebra, then the canonical map $\overline{R} \rightarrow \operatorname{Hom}_R(\operatorname{Hom}_R(\overline{R}, R), R)$ is injective. In particular, a Noetherian domain $R$ is Japanese precisely when the integral closure of $R$ in any finite extension of its fraction field is a solid $R$-algebra. \end{remark}
\begin{proof}[Proof of Theorem \ref{F-finiteExcellent}] We already know (1) and (2) are equivalent from Theorem \ref{ExcF-finite}.
{For (2) implies (3), assume $F_*R$ is a finitely generated $R$-module. Let $K$ be the fraction field of $R$, and denote by $F_*K$ the fraction field of $F_*R$, again emphasizing the K-vector space structure via Frobenius. Note $F_*K = F_*R \otimes_R K$. Since $$\operatorname{Hom}_R(F_*R, R) \otimes_R K \cong \operatorname{Hom}_K(F_*K, K) \neq 0,$$ it follows that $\operatorname{Hom}_R(F_*R, R) \neq 0$.}
We now show (3) implies (4). If $\operatorname{Hom}_R(F_*R, R)$ is non-trivial, then there exists $\phi: F_*R \rightarrow R$ such that $$\phi(1) = c \neq 0.$$ By induction, suppose there exists $\varphi \in \operatorname{Hom}_R(F^{e-1}_*R, R)$ such that $\varphi(1) \neq 0$. Then the $p^{-e}$-linear map $$F^e_*R \xrightarrow{F^{e-1}_*(\phi)} F^{e-1}_*R \xrightarrow{\varphi} R$$ maps $c^{(p^{e-1} -1)p} \mapsto c\varphi(1) \neq 0$, showing that $\operatorname{Hom}_R(F^e_*R, R)$ is non-trivial.
Obviously, (4) implies (5). We finish the proof by proving that (5) implies (2). By assumption, $F^e_*K$ is a finite extension of $K$. We now apply Proposition \ref{prop2}, taking $S = F^e_*R$ and $f = F^e$. The proposition implies that $F^e$ is a finite map. Thus, also $F$ is a finite map, and we have proved (5) implies (2). \end{proof}
\begin{corollary} If $R$ is a non-excellent domain of characteristic $p > 0$ which is generically $F$-finite, then $\operatorname{Hom}_R(F_*^eR, R) = 0$ for all $e \in \mathbb{N}$. \end{corollary}
Basically, this corollary means that we cannot expect to develop a theory of test ideals for non-excellent rings, at least not a theory that uses the ideas of uniform $F$-compatibility.
\section{Examples of non-excellent rings} \label{examplesnon-excellent}
Given that the class of excellent rings is so large, it is natural to wonder how one can possibly find natural classes of examples of non-excellent rings. The next theorem gives one source.
\begin{theorem}\label{exDivisorial} Let $K$ be a field of characteristic $p$ such that $[K:K^p] < \infty.$ For any discrete valuation ring $V$ of $K$, the following are equivalent: \begin{enumerate} \item $V$ is excellent;
\item $V$ is F-finite;
\item $V$ is Frobenius split. \end{enumerate}
{Moreover, if $K$ is a function field over a ground field $k$, and $V$ is a discrete valuation ring of $K/k$, then (1)-(3) are equivalent to $V$ being a divisorial valuation ring of $K/k$.} \end{theorem}
Recall that a {\bf divisorial valuation ring} of $K$ is one that is obtained as the local ring along some prime divisor of a normal model of $K/k$. In particular, if $K/k$ is a function field of transcendence degree $d$ over $k$, then any divisorial valuation ring of $K$ has residue field of transcendence degree $d-1$ over $k$.
\begin{proof} {The equivalence of (1) and (2) is a straightforward consequence of Theorem \ref{ExcF-finite}. For the proof of (2) $\Rightarrow$ (3), we use the fact that Frobenius is flat for a regular local ring \cite[Theorem 2.1]{Kunz69}. In particular, when $V$ is F-finite, flatness implies that $F_*V$ is a free $V$-module, which gives a splitting of the Frobenius map. Conversely, a splitting of Frobenius gives the non-triviality of $\operatorname{Hom}_V(F_*V, V)$. Then $V$ is F-finite by Theorem \ref{F-finiteExcellent}.}
{Finally, (1)-(3) is equivalent to $V$ being divisorial when it is a discrete valuation ring of a function field $K/k$ by \cite[Corollary 6.6.3]{DatSmi16}.} \end{proof}
\begin{remark} The paper \cite{DatSmi16}, with corrections in \cite{DatSmi17}, shows more generally that a (not necessarily Noetherian) valuation ring with F-finite function field will {\it always} be divisorial if it is F-finite; see \cite[Thm 0.1]{DatSmi17}. Thus, in the class of valuation rings of an F-finite function field, F-finiteness implies Noetherian. \end{remark}
\subsection{Some Non-excellent DVRs} \label{examples} Let $k$ be the algebraic closure of the finite field $\mathbb F_p,$ and fix
$K = k(x, y)$, the function field of $\mathbb P^2_{k}$. For concreteness, we consider discrete valuations of $K$ centered at the origin (the point defined by the ideal $(x,y)$). The reader will immediately observe that our technique generalizes to any function field over $k$.
Choose any non-unit power series $p(t) \in k[[t]]$ which is {\it not algebraic } over the subfield $ k(t) $ of $k((t))$. Note that such $p(t)$ are abundant: the field $k(t)$ is countable, hence so is its algebraic closure, whereas $k[[t]]$ is uncountable (consisting of all infinite sequences of elements in $k$). Thus, there are uncountably many different choices of non-unit power series $p(t)$ non-algebraic over $k(t)$.
Consider the ring homomorphism $$ k[x, y]\hookrightarrow k[[t]] $$ obtained by sending $x\mapsto t$ and $y \mapsto p(t)$. Our assumption on $p(t)$ ensures this map is injective. Consider the induced inclusion of fraction fields $$
k(x, y)\hookrightarrow k((t)). $$ The standard $t$-adic valuation on $ k((t))$ restricts to some discrete valuation on $ k(x, y)$ which takes the value 1 on $x$. Its valuation ring is $V_p = k [[t]] \cap k(x, y)$, whose maximal ideal is generated by any element of minimal non-zero value, such as $x$. We have a local map of local rings $$ V_p \hookrightarrow k [[t]] $$ in which the maximal ideal of $ k [[t]] $ obviously contracts to the maximal ideal of $V_p.$ In particular, the residue field of $V_p$ satisfies $$ k \hookrightarrow V_p/\frak{m}_{V_p} \hookrightarrow k [[t]]/(t) \cong k. $$ Hence, the residue field of $V_p$ is $k$, which has transcendence degree zero over $k$. This means that the {discrete} valuation ring $V_p$ {of $k(x, y)/k$} is {\it{not divisorial}}.
Moreover, by Theorem \ref{exDivisorial}, because $V_p$ is not divisorial, it is neither excellent nor F-finite. This gives us examples of non-excellent regular local rings of dimension 1, whose fraction field is $k(x,y)$.
\begin{remark} Since one can similarly embed $k(x_1, \dots, x_n)$ in $k((t))$ for any $n \geq 2$, our method easily generalizes to produce examples of non-divisorial DVRs in the function field of $\mathbb P^n_k$, for all $n \geq 2$ and $k$ of characteristic $p$. These can be extended to non-divisorial DVRs on the function field of any variety over $k$. \end{remark}
{The above} construction shows that there are many more non-excellent DVRs than excellent ones. For example, among DVRs of the function field of $\mathbb P^2_k$, we have:
\begin{corollary} Let $K= k(x, y)$, where $k $ is the algebraic closure of $\mathbb F_p$. The set of all discrete valuation rings of $K/k$ is an uncountable set, with the excellent ones among them forming a countable subset. \end{corollary}
\begin{proof} We first show that our construction above already gives {\it uncountably many} non-excellent valuation rings in $k(x,y)$ over $k$. We have already observed that there are uncountably many different choices of the power series $p(t)$ {giving a homomorphism of fields $$k(x, y) \hookrightarrow k((t))$$ that maps $x \mapsto t$ and $y \mapsto p(t)$. Each such homomorphism then gives a discrete valuation \begin{equation} \label{def of trans. valuation} v_{p(t)}: k(x,y)^{\times} \hookrightarrow k((t))^{\times} \xrightarrow{t\text{-adic}} \mathbb Z, \end{equation} whose associated valuation ring is a non-excellent discrete valuation ring. We now claim that each choice of $p(t)$ yields a {\it different} valuation ring of $k(x, y)/k$.
Let $p(t) = \Sigma_{n \geq 0} a_nt^n$ and $q(t) = \Sigma_{n \geq 0} b_nt^n$ be two different power series, and let $i \in \mathbb N \cup \{0\}$ be the smallest integer such that $a_i \neq b_i$. From the definitions of $v_{p(t)}$ and $v_{q(t)}$ (see \ref{def of trans. valuation}) we get $$v_{p(t)}(y - (a_0 + a_1x + \dots + a_ix^i)) > i \hspace{2mm} \operatorname{and} \hspace{2mm} v_{q(t)}(y - (a_0 + a_1x + \dots + a_ix^i)) = i.$$ Thus the fraction $$ \frac{x^i}{y-(a_0 + a_1x + \dots + a_ix^i)} $$ is in the valuation ring for the valuation $v_{q(t)}$ but not the one for $v_{p(t)}$, showing that each choice of power series $p(t)$ gives rise to a distinct valuation ring of $k(x, y)$. This completes the proof that $k(x,y)/k$ has uncountably many non-excellent DVRs. }
{On the other hand, let us show more generally that for any countable algebraically closed field $k$ of characteristic $p$, and any function field $K$ over $k$, the set of divisorial valuation rings of $K/k$ is countable. Note that any such valuation ring is the localization of a finitely generated, normal $k$-subalgebra $R$ of $K$ at a height $1$ prime. Observe that being a finitely generated field extension of a countable field, $K$ itself is also countable. Thus the collection of all finitely generated $k$-subalgebras $R$ of $K$ is countable. Any such $R$ is itself countable, and since every ideal of $R$ is finitely generated, the set of ideals of $R$ is countable. This clearly implies countability of the collection $S$ of pairs $(R, p)$, where $R$ is a finitely generated, normal $k$-subalgebra of $K$ with fraction field $K$ and $p$ is a height one prime of $R$. Since every divisorial valuation ring of $K/k$ arises as the localization $R_p$ for some pair $(R,p) \in S$, the divisorial valuation rings of $K/k$ indeed form a countable set, completing the proof.}
\end{proof}
To summarize: randomly choosing a discrete valuation ring in $k(x,y)/k$, we expect it to be {\it non-excellent} since there are only countably many excellent valuation rings. Equivalently, there are only countably many F-finite {discrete} valuation rings in $k(x,y)/k$, namely the same ones which are excellent.
\begin{remark} See also \cite[Chapter VI]{ZarSam}, \cite{Ben73} and \cite[Example 8.2.31]{Liu06} where these types of rings are discussed. In particular, \cite{Liu06} gives a different argument for the failure of the Japanese property in a specific case. \end{remark}
\end{document} |
\begin{document}
\title{Random spatial growth \\ with paralyzing obstacles}
\author{J. van den Berg\footnote{Research funded in part by the Dutch BSIK/BRICKS project.}, \, Y. Peres\footnote{Research supported in part by NSF grant DMS-0605166.}, \, V. Sidoravicius\footnote{Partially supported by CNPq, Brazil}\, and M.E. Vares \footnote{Partially supported by CNPq, Brazil}
\\ {\small CWI and VUA, Microsoft and UC Berkeley, IMPA and CBPF} \\ {\footnotesize email: [email protected]; [email protected]; [email protected]; [email protected]} } \date{} \maketitle
\begin{abstract} We study models of spatial growth processes where initially there are sources of growth (indicated by the colour green) and sources of a growth-stopping (paralyzing) substance (indicated by red). The green sources expand and may merge with others (there is no `inter-green' competition). The red substance remains passive as long as it is isolated. However, when a green cluster comes in touch with the red substance, it is immediately invaded by the latter, stops growing and starts to act as red substance itself. In our main model space is represented by a graph, of which initially each vertex is randomly green, red or white (vacant), and the growth of the green clusters is similar to that in first-passage percolation. The main issues we investigate are whether the model is well-defined on an infinite graph (e.g. the $d$-dimensional cubic lattice), and what can be said about the distribution of the size of a green cluster just before it is paralyzed. We show that, if the initial density of red vertices is positive, and that of white vertices is sufficiently small, the model is indeed well-defined and the above distribution has an exponential tail. In fact, we believe this to be true whenever the initial density of red is positive. \\ This research also led to a relation between invasion percolation and critical Bernoulli percolation which seems to be of independent interest. \end{abstract}
\noindent {\it 2000 MSC:} primary 60K35, secondary 60K37, 82B43.
\noindent {\it Key words and phrases:} Growth process, percolation, invasion percolation.
\begin{section}{Introduction} \begin{subsection}{Description of the model and the main problems} Consider the following model where different `objects' (or `populations') grow simultaneously until they hit a paralyzing substance, in which case they stop growing and become paralyzing themselves: Each vertex of a connected, finite (or countably infinite, locally finite) graph $G= (V,E)$ is initially, independently of the other vertices, white, red or green with probabilities $p_w$, $p_r$ and $p_g$ respectively. Each edge of $G$ is initially closed. By a green cluster we will mean a maximal connected subgraph of $G$ of which all vertices are green and all edges are open. We denote the green cluster containing $v$ at time $t$ by $C_g(v,t)$. (If $v$ is not green at time $t$, then $C_g(v,t)$ is empty). It is clear from the above that initially the only green clusters are single green vertices. These green clusters can grow, merge with other green clusters and finally become paralyzed (red) as follows. \\ Whenever an edge $e = \langle v,w \rangle$ is closed and has at least one green end-vertex, say $v$, it becomes open at rate $1$. Moreover, immediately after it gets open the following action takes place instantaneously: If exactly one end-vertex, say $v$, is green and the other, $w$, is white, $w$ becomes green (and we say, informally, that the green cluster of $v$ grows by absorbing $w$). If $w$ is red, then each vertex in the green cluster of $v$ becomes red (and we say that the green cluster of $v$ becomes paralyzed). Finally, if both vertices are green, no extra action takes place. (Note that in this case the two vertices may have been in two different green clusters right before the opening of $e$, but are now in the same green cluster).
Note that once an edge is open it remains open, that once a vertex is green it never turns white (but may become red), and once a vertex is red it remains red.
\noindent Let us first consider the case where the graph $G$ is finite. In that case the above process is clearly well-defined and has some obvious properties, which we will state after introducing the following terminology. By a configuration (or `site-bond configuration') we mean an element of $\{0,1\}^E \, \times \, \{\mbox{ green, red, white } \}^V$, where $0$ and $1$ denote `open' and `closed' respectively. An `open-bond cluster' (with respect to a configuration) is a maximal connected subgraph of $G$ of which all edges are open (for that configuration). We say that it is non-trivial if it has at least one edge. Note that the earlier defined `green cluster' is an open-bond cluster of which each vertex is green. A `red cluster' is defined similarly. We call a configuration admissible if each non-trivial open-bond cluster is either a red cluster or a green cluster. Now we are ready to state the announced simple properties and observations: If $G$ is finite, the process is a Markov chain on the set of admissible configurations. The admissible configurations where no vertices are green or all vertices are green are absorbing, and the chain will with probability 1 end in one of those configurations. In particular, if initially there was at least one red vertex, then every green vertex will eventually become red. Moreover (because initially all edges were closed) at any time, every non-empty red cluster $\mathcal{C}$ contains exactly one vertex $v$ that was originally red. We say that this vertex $v$ is `responsible for' the other vertices in $\mathcal{C}$ becoming red (or, that the vertices in $\mathcal{C}$ became red `due to' $v$).
\noindent If $G$ is {\it infinite}, for instance the $d$-dimensional cubic lattice, the situation is much more problematic, due to the fact that the range of the interaction is not bounded: an entire cluster, no matter how large, can change colour instantaneously. The main questions we address in this paper concerning the above process, and some other related models, are:
\begin{itemize} \item {\bf 1.} Does the dynamics exist? This is a nontrivial issue for such interacting processes on infinite graphs: See for instance, Aldous' frozen percolation process (\cite{A}), which was shown by Benjamini and Schramm (1999, private communication) not to exist in $\Z^2$. For related matters on the non-existence of that process, see also Remark (i) in Section 3 of \cite{BeT} and the example due to Antal J\'arai (1999, private communication) which follows it. A crucial difference between Aldous' model and ours is that in Aldous' model, clusters freeze only when they are infinite, while we believe that in our model, due to the positive density of initially red vertices, the green clusters do not become infinite (see the next item). A model which has more in common with ours is the forest-fire model studied in \cite{D}. But again there is a major difference: in that model there is a uniform lower bound for the probability that a cluster of interest is `destroyed' before growing further, and this uniform bound is a crucial ingredient in the existence proof in \cite{D}. In our model there seems to be no
analog of such a property.
\item{\bf 2.} Is a green cluster
always finite at the moment it becomes red? Does the distribution of its radius (and of its volume)
have an exponential tail? \item {\bf 3.} Let $w$ be an originally red vertex. Is the set of originally green vertices $v$ with the property that $w$ is responsible for $v$ becoming red, finite? Does the distribution of its volume have an exponential tail? \end{itemize}
The organization of the paper is as follows. In Subsection 1.2 we give a partial answer to the questions listed above. In particular, Theorem \ref{mainthm} states that, for $G = \Z^d$ and $p_w$ sufficiently small, the answers to the above questions are positive. Our research also led to a new result for invasion percolation (see Theorem \ref{uni-bound} and Proposition \ref{inv-perc}). In Subsection 1.3 we explain the notion of `autonomous region' which plays an important role in this paper. In Subsection 1.4 we briefly discuss some alternative versions of the model. In Section 2 we give a proof of the main result for the special case where $p_w = 0$. It turns out that that case can be dealt with in a very elegant and transparent way. It serves as an introduction to the proof of the more complicated case where $p_w$ is small but positive, which is given in Section 3. At the end of Section 3 we come briefly back to the alternative versions of the model discussed in Subsection 1.4. \end{subsection}
\begin{subsection}{Statement of the main results} Let $G$ be a connected, countably infinite graph of bounded degree, and consider the model presented in Subsection 1.1, with parameters $p_w$, $p_g$ and $p_r$. Our main result, Theorem \ref{mainthm} below, states, among other things, that under certain conditions the dynamics is well-defined. The formulation of the condition requires some additional notation and terminology: By the distance $d(v,w)$ between two vertices $v$ and $w$ of $G$ we mean the length (i.e. number of edges) of the shortest path from $v$ to $w$. The diameter of a set of vertices $W$ of $G$ is defined as $\max_{v, w \in W} d(v,w)$, and $\partial W$ will denote the set of all vertices that are not in $W$ but have an edge with some vertex in $W$. The number of elements of a set $W$ will be denoted by
$|W|$. For a finite graph $H$, denote by $|H|$ the number of vertices in $H$. Let $D$ denote the maximal degree in $G$.
For each vertex $v$ of $G$ and $p \in (0,1)$, let $\xi_v(p)$ denote the expectation of the volume (i.e. number of vertices) of the occupied cluster of $v$ in site percolation on $G$ with parameter $p$. Further, define $$\xi(p) = \sup_v \xi_v(p).$$ Recall the definition of $C_g(v,t)$ in Subsection 1.1. We are now ready to state our main results. \begin{thm}\label{mainthm} Suppose that \begin{equation} \label{key} (D-1) \xi(p_w) < p_r \,. \end{equation} We have
\item
{(a)} The dynamics on $G$ is well-defined. With probability 1, at any time, each red cluster has a unique initially red vertex.
\item (b) For any originally green vertex $v$, let $C_g(v) = \cup_{t \geq 0} C_g(v,t)$ be the green cluster of $v$ just before it becomes red. Let $|C_g(v)|$ be the number of vertices of $C_g(v)$. Then, with probability $1$,
$|C_g(v)|$ is finite for each such $v$. Moreover, the distribution of
$|C_g(v)|$ has an exponential tail.
\item (c) If $G$ is a Cayley graph and $w$ is an originally red vertex in $G$, then the set $D(w)$ consisting of all green vertices that become red due to $w$ is finite; moreover, the diameter of $D(w)$ has an exponential tail. (Here, extending the definition given before in the case of finite $G$, if $v$ is an originally green vertex and $w$ is the (unique a.s.) originally red vertex in the red clusters that eventually contain $v$, we say that $v$ becomes red due to $w$.)
\item (d) If $G$ is the $d$-dimensional cubic lattice, then the distribution of
$|D(w)|$ also has an exponential tail.
\end{thm}
Note that in the case $p_w = 0$, condition~\eqref{key} of Theorem \ref{mainthm} is satisfied for every positive $p_r$. For this case we have, in addition to Theorem \ref{mainthm}, considerably stronger results. In particular, the following theorem holds, where we fix $p_w = 0$ and then vary the parameter $p_r$. In this theorem and its proof, $P_p$ denotes the ordinary (Bernoulli) bond percolation measure with parameter $p$ and $P_{cr}$ stands for $P_{p_c}$, where $p_c$ denotes the critical probability for this percolation model. By $B(n)$ we denote the set of all vertices at (graph) distance $\leq n$ from some specified vertex $O$. The event that there is an open path from $O$ to $\partial B(n)$ is denoted by $\{O \leftrightarrow \partial B(n)\}$. Further, the symbol $\approx$ denotes logarithmic equivalence, i.e., we say for two positive functions $g(n)$ and $h(n)$ that $g(n) \approx h(n)$ as $n \rightarrow \infty,$ if
$$\frac{\log h(n)}{\log g(n)} \rightarrow 1, \,\,\, n \rightarrow \infty.$$ Let $W$ be a set of vertices in a graph $G$ with a distinguished vertex $O$. By the {\em radius} of $W$ we mean the maximal distance from $O$ to a vertex of $W$. We are now ready to state the following theorem.
\begin{thm} \label{uni-bound} Let $C_g(\cdot)$ be as in part (b) of Theorem \ref{mainthm}. If $G$ is the square lattice in two dimensions (or the triangular or the hexagonal lattice), and $p_w = 0$, then $$P(\mbox{The radius of } C_g(O) \mbox{ is at least } n) \,\, \uparrow f(n), \,\, \mbox{ as } p_r \downarrow 0,$$ where $f$ is a function satisfying
$$f(n) \approx P_{cr}(O \leftrightarrow \partial B(n)).$$
\end{thm}
Theorem \ref{uni-bound} follows easily from the following Proposition concerning invasion percolation on the lattices considered in the theorem. Before we state it, we briefly recall the invasion percolation model (on these lattices) and some of its basic properties. (Invasion percolation was introduced by Wilkinson and Willemsen, see \cite{WW}. For a detailed study of this process see \cite{LPS}, or the earlier works \cite{CCN}, \cite{Ale} and \cite{J2}). To each edge $e$ we assign, independent of the other edges, a random variable $\tau_e$, uniformly distributed in the interval $(0,1)$. We construct, recursively, a growing tree. Initially the tree consists only of one vertex, say $O$. At each step we consider all edges that have exactly one endpoint in the tree that has been created so far. From these edges we select the one with smallest $\tau$ value and add it (and its `external' endpoint) to the tree. Let $\tau(n)$ be the $\tau$ value of the $n$th edge invaded by this procedure.
For any infinite transitive graph $G$, it is proved in \cite{HPS} that \begin{equation} \label{hpseq} \limsup_{n \rightarrow \infty} \tau(n) = p_c, \end{equation} where $p_c$ is the critical probability for bond percolation. Further, note that, if all $\tau(n) < p_c$, then $O$ belongs to an infinite cluster on which all $\tau$ values are smaller than $p_c$. For the graphs in the statement of Theorem \ref{uni-bound} this latter event has probability $0$. (See \cite{G} for this classical result and references). Hence, for these lattices, (a.s.) there is an $n$ with $\tau(n) > p_c$. This, together with \eqref{hpseq}, implies that (a.s.) $\tau(n)$ achieves its maximum (and that this maximum is larger than $p_c$). The following proposition is about the invaded region at the step where this maximum is achieved. Although this and related regions have been under consideration before in the literature (see the subsection `Ponds and outlets' in Stein and Newman (1995)), this result is, as far as we know, new. \\ {\bf Remark:} The {\em invasion basin\/} of $O$ is defined similarly to the invasion tree, except that at every step, the edge of minimal $\tau$-value among the edges outside the current invasion basin that have {\em at least\/} one endpoint in the basin is added to the basin. The invasion basin is typically not a tree. It is easy to see that each edge $e$ in the invasion tree is in the invasion basin, and the set of sites in the invasion basin immediately before such an edge $e$ is added to it is the same as the set of vertices in the invasion tree immediately before $e$ is added.
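For readers who wish to experiment, the following Python sketch (our own illustration, not taken from \cite{WW} or the other works cited above) grows the invasion tree started at the origin inside a finite box of the square lattice; the box size and the lazy sampling of the uniform edge values are implementation conveniences, and the finite box only approximates the process on the full lattice. The returned values $\tau(1),\tau(2),\dots$ can, for instance, be compared with \eqref{hpseq} and with the value $p_c = 1/2$ for bond percolation on the square lattice.
\begin{verbatim}
import heapq, random

def invasion_tree(n, steps):
    # Invasion tree started at the origin inside the box [-n, n]^2 of the
    # square lattice; returns the values tau(1), ..., tau(steps) of the
    # invaded edges.  (A finite box only approximates the process on Z^2.)
    def neighbours(v):
        x, y = v
        for w in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
            if abs(w[0]) <= n and abs(w[1]) <= n:
                yield w

    tau = {}                      # edge values, sampled lazily
    def edge_value(v, w):
        e = (min(v, w), max(v, w))
        if e not in tau:
            tau[e] = random.random()
        return tau[e]

    tree = {(0, 0)}
    frontier = []                 # heap of (tau_e, endpoint outside the tree)
    for w in neighbours((0, 0)):
        heapq.heappush(frontier, (edge_value((0, 0), w), w))

    values = []
    while len(values) < steps and frontier:
        t, w = heapq.heappop(frontier)
        if w in tree:             # both endpoints invaded by now: not a candidate
            continue
        values.append(t)          # this is tau(len(values))
        tree.add(w)
        for u in neighbours(w):
            if u not in tree:
                heapq.heappush(frontier, (edge_value(w, u), u))
    return values

# e.g. max(invasion_tree(100, 5000)) can be compared with p_c = 1/2
\end{verbatim}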
\begin{prop} \label{inv-perc} Consider invasion percolation on the square lattice (or the triangular or the hexagonal lattice) with edge values $\tau_e$. Let $\hat e$ be the edge with maximal $\tau$ value in the invasion basin (as explained above). Let $\hat R$ be the radius of the region that has been invaded up to the step where $\hat e$ is invaded. We have:
\begin{enumerate}
\item[(a)] \begin{equation*}
P(\hat R > n) \ge P_{cr}(O \leftrightarrow \partial B(n)) ; \end{equation*}
\item[(b)] \begin{equation} \label{invperc} P(\hat R > n) \approx P_{cr}(O \leftrightarrow \partial B(n)), \,\,\, n \rightarrow \infty. \end{equation}
\end{enumerate}
\end{prop}
\noindent{\bf Remarks:} \\ (a) Proposition \ref{inv-perc} has triggered further research on the comparison of ponds and critical percolation clusters: see recent refinements and generalizations in \cite{BJV}. \\ (b) The value $\hat R$ above can also be described in the following, somewhat informal, way. Suppose each edge $e$ is closed at time $0$ and becomes open at time $\tau_e$ (after which it remains open). The open cluster of $O$ grows in time. Up to time $p_c$ it is finite, but at some time larger than $p_c$ it will become infinite (a.s). The radius of this cluster just before it becomes infinite is $\hat R$.
\end{subsection}
\begin{subsection}{Description of the model in terms of passage times. Autonomous regions} Consider the description of the dynamics in the beginning of this section, and assume for the moment that the graph is finite. Recall that an open edge remains open and that a closed edge with at least one green end-vertex becomes open at rate $1$. This means that if we assign to each edge $e$ an exponentially distributed (mean $1$) random variable $\tau(e)$, independent of the other edges (and of the initial colours of the vertices), the time evolution of the process can be completely described in terms of the initial colours of the vertices and the $\tau-$ variables of the edges: Each edge $e$ remains closed until the time $t$ at which $L_t(e)$ (defined below) has Lebesgue measure $\tau_e$. (If no such time exists, the edge remains closed forever). Here $L_t$ is defined by \begin{equation} \label{Ldef} L_t(e) = \{s < t \, : \, e \mbox{ has at least one green end-vertex at time } s\}. \end{equation} (Since, once a vertex is green it can change colour only one more time, $L_t(e)$ is clearly an interval or union of two intervals). When $e$ becomes open and one of its end-vertices is white or red, the appropriate action in the description in Section 1.1 is carried out instantaneously.
In the following this equivalent description of the process turns out to be very convenient. To illustrate it and to emphasize the difference with one of the modified models that will be discussed in Subsection 1.4, we give the following example:
\begin{example} \label{dynex1} Consider the graph with vertices denoted by $\{1, 2, 3, 4, 5\}$ and edges $\langle i, i+1 \rangle$, $1 \leq i \leq 4$. Suppose that the initial colours of the vertices $1, \cdots, 5$ are red, green, white, green, red respectively, and that the $\tau$ values of the edges $\langle 1, 2 \rangle, \cdots \langle 4, 5 \rangle$ are $6$, $3$, $4$ and $2$ respectively. As one can check by following the above description, the initially green vertex $2$ becomes red at time $5$ due to vertex $5$. \end{example}
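The finite-graph dynamics described above is easy to simulate. The following Python sketch (our own illustration; the function and variable names are not from the paper) keeps track, for every closed edge, of the amount of `green time' it still needs before opening and processes the openings one at a time; the data at the end reproduce Example \ref{dynex1}.
\begin{verbatim}
def simulate(edges, tau, colour):
    # edges  : list of vertex pairs (v, w)
    # tau    : dict, tau[(v, w)] = opening time tau_e of the edge (v, w)
    # colour : dict, initial colour ('green', 'red' or 'white') of each vertex
    colour = dict(colour)
    remaining = dict(tau)        # green time an edge still needs before it opens
    open_edges = set()
    turned_red = {}              # time at which a vertex turned red
    t = 0.0

    def green_cluster(v):
        # vertices reachable from v through open edges and green vertices only
        comp, stack = {v}, [v]
        while stack:
            x = stack.pop()
            for (a, b) in open_edges:
                if x == a:
                    y = b
                elif x == b:
                    y = a
                else:
                    continue
                if colour[y] == 'green' and y not in comp:
                    comp.add(y)
                    stack.append(y)
        return comp

    while True:
        # closed edges that currently have at least one green end-vertex
        active = [e for e in edges if e not in open_edges
                  and 'green' in (colour[e[0]], colour[e[1]])]
        if not active:
            return colour, turned_red
        e = min(active, key=lambda f: remaining[f])
        dt = remaining[e]
        t += dt
        for f in active:         # every active edge accrues the elapsed green time
            remaining[f] -= dt
        open_edges.add(e)
        v, w = e
        if colour[v] != 'green':
            v, w = w, v          # make sure v names a green end-vertex
        if colour[w] == 'white':
            colour[w] = 'green'  # the green cluster of v absorbs w
        elif colour[w] == 'red':
            for x in green_cluster(v):
                colour[x] = 'red'        # the whole green cluster is paralyzed
                turned_red[x] = t
        # if w is green too, two green clusters have simply merged

# The data of the example above:
edges = [(1, 2), (2, 3), (3, 4), (4, 5)]
tau = {(1, 2): 6, (2, 3): 3, (3, 4): 4, (4, 5): 2}
colour = {1: 'red', 2: 'green', 3: 'white', 4: 'green', 5: 'red'}
final, turned_red = simulate(edges, tau, colour)
print(turned_red[2])             # 5.0: vertex 2 turns red at time 5, due to vertex 5
\end{verbatim}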
\noindent Now suppose some finite, but possibly large, graph $G$ is given, together with initial colours $c(v), v \in V$ and `opening times' $\tau(e), e \in E$. Further suppose we are only interested in the time evolution in a small subgraph of $G$, for instance just one initially green vertex $v$. Do we need to `follow' the process in the whole graph to reconstruct what happens at $v$? Often this is not the case. An instructive example is when $v$ is incident to three edges, $e$, $e'$ and $e''$ with the properties that $\tau(e)$ is smaller than $\tau(e')$ and $\tau(e'')$, and that the other end-vertex of $e$, which we denote by $w$, is red. In that case we know that $v$ is green until time $\tau(e)$ and from then on is red (which would also happen in the `isolated' graph consisting only of the vertices $v$ and $w$ and the edge $e$). This holds no matter what the initial colours of the vertices in $V \setminus \{v,w\}$ and the $\tau$-values of the edges in $E \setminus \{e, e', e''\}$ are. Note that this still holds when we extend $G$ to a bigger graph (with $c$ and $\tau$-variables) as long as we don't add extra edges to $v$.
This brings us to the notion of {\it autonomous set}: Let $H=(V(H),E(H))$ be a finite subgraph of a graph $G$, and let $\bar E$ be a finite set of external edges of $H$, i.e. edges of $G$ that have exactly one end-vertex in $V(H)$. Assume that we are given an initial colour assignment $c(v)$ for all $v \in V(H)$ and opening times $\tau(e)$ for all $e \in E(H) \cup \bar E$. Let $\bar H$ be the minimal graph that contains $H$ as a subgraph and satisfies $\bar E \subseteq E(\bar H)$. We say that $(H,\bar E)$ is {\bf autonomous} (with respect to $\tau$ and $c$) if the following holds: for every finite subgraph $G_0$ of $G$ that has $\bar H$ as a subgraph, the growth process on $G_0$, started from any colour pattern and opening times extending the given $c$'s and $\tau$'s, has, restricted to $H$, the same time evolution as it would have with $G_0=\bar H$; in particular this evolution does not depend on the colours of the vertices in $\bar H$ that are not in $H$. In the simple example considered in the previous paragraph, the graph with vertices $v$ and $w$ and edge $e$, together with the set of external edges $\bar E = \{e', e''\}$, is autonomous.
Often, when the identity of $\bar E$ is obvious and the choice of the $c$- and $\tau$-variables is considered to be known, we simply say that $H$ is autonomous. For this reason we sometimes refer to the autonomous set as an ``autonomous subgraph''.
\noindent Now suppose we have an infinite graph $G$ with given $\tau$- and $c$-variables. If every vertex (and every edge) is contained in a finite autonomous subgraph of $G$, the infinite-volume time evolution on $G$ can be defined in an obvious way. The key step in the proof of Theorem \ref{mainthm} is to show that, under the condition in the theorem, these autonomous subgraphs exist with probability $1$; that is, for almost all initial colour patterns and almost all $\tau$-values, each vertex and edge is contained in a finite autonomous region.
\end{subsection}
\begin{subsection}{Some alternative versions of the model} There are many modifications or generalizations of our model (which we will sometimes call the {\it basic model} to distinguish it from these modified versions). Below we mention four of them.
\noindent (i) In the basic model the $\tau$ variables are exponentially distributed. It is easy to see that if the initial colours of the vertices are given, and none of them is white, the time evolution is essentially determined by the order statistics of the $\tau$ variables. It is also easy to see that in that case each edge $e$ becomes open at time $\tau_e$ or remains closed forever. From such observations it easily follows that, if $p_w = 0$, replacing the exponential distribution of the $\tau$ variables by some other continuous distribution leaves the law of the process unchanged, apart from an obvious time change. This is not true if $p_w > 0$. However, as one can easily see from its proof, Theorem \ref{mainthm} remains valid under such a replacement of the distribution.
\noindent (ii) Recall that in our basic model an edge $e$ becomes open at the smallest time $t$ with the property that the subset of times $s < t$ at which $e$ has at least one green end-vertex has Lebesgue measure $\tau_e$. A natural modification of this rule is the one where $e=\langle v, w \rangle$ becomes open at the smallest time $t$ with the property that $v$ is green throughout the interval $[t - \tau_e, t)$ or $w$ is green throughout the interval $[t - \tau_e, t)$. To illustrate the difference between the rules, consider again the graph with $\tau$ values and initial colours in Example \ref{dynex1}. As can be easily checked, under the modified rule the vertex $2$ will no longer become red due to vertex $5$ but due to vertex $1$ (and at time $6$ instead of $5$). It turns out that Theorem \ref{mainthm} remains valid for this modified model and that its proof only needs some small modifications.
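\noindent To spell this out, unfolding the modified rule on the data of Example \ref{dynex1} gives the following (this is only the claim above with the intermediate steps made explicit): edge $\langle 4, 5 \rangle$ opens at time $2$, since vertex $4$ is green throughout $[0,2)$, and vertex $4$ then becomes red; edge $\langle 2, 3 \rangle$ opens at time $3$ and vertex $3$ becomes green; edge $\langle 3, 4 \rangle$ now needs one of its end-vertices to be green throughout an interval of length $4$ and hence cannot open before time $3 + 4 = 7$; but edge $\langle 1, 2 \rangle$ already opens at time $6$, since vertex $2$ is green throughout $[0,6)$, and at that moment vertex $1$ paralyzes the green cluster of vertex $2$.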
\noindent (iii) The third modification is the following model in continuous space. Consider two homogeneous Poisson point processes $\zeta_G$, $\zeta_R$ on $\mathbb R^d$, with intensities $\lambda_G=1$, $\lambda_R \equiv \lambda \in (0, + \infty) $ respectively. The points of $\zeta_G$ ({\it green}) are interpreted as sources of growth, and those of $\zeta_R$ ({\it red}) as sources of ``paralyzing poison''. All other elements of $\R^d$ are uncoloured. From each source in $\zeta_G$ at time zero a green Euclidean sphere begins to grow with constant speed 1 (of its radius). When two or more green spheres intersect, they keep growing in the same manner, but we say that they have become connected (are in the same connected green component). If a growing green sphere hits a red region, its {\it entire} connected green component (note that this is a union of spheres) instantaneously gets red and stops growing. Analogs of the questions for our basic model in Subsection 1.1, in particular the existence question, arise naturally, but so far we have made very little progress. Although at first sight there is some resemblance with the model studied in \cite{HaM}, the arguments used there seem not to work here.
\noindent (iv) Consider the following change of rule of the previous model (model (iii) above): When a green sphere hits a red region, {\it only} the centers of all the spheres of its connected green component become red; the remaining parts of the spheres disappear (become uncoloured). This change makes the model much easier to handle (using an invasion procedure resembling the one we will use in Section 2 for the case $p_w =0$ of our basic model), but also considerably less natural, and we will not discuss it in more detail.
\end{subsection}
\end{section}
\begin{section}{Proofs for the case $p_w = 0$} \begin{subsection}{General properties for the case $p_w = 0$} The case where $p_w = 0$ is considerably easier than the general case and serves as a good introduction to the latter. We start with some deterministic observations and claims. Let us first restrict to a finite graph $G$, with given $\tau$-values and $c$-values. We assume that at least one vertex has initial colour red, at least one vertex has initial colour green, and no vertex has initial colour white. Let $x$ be a vertex with initial colour green, and let $t(x)$ denote the time at which $x$ becomes red. Let $\Pi$ denote the set of all paths of which the starting point is $x$ and the end-vertex has initial colour red. It is easy to see that
\begin{equation} \label{lowerbd} t(x) \geq \min_{\pi \in \Pi} \max_{e \in \pi} \tau(e). \end{equation}
Indeed, for each $t$ smaller than the r.h.s. of \eqref{lowerbd} there is a `cut set' of edges that are still closed at time $t$ and `shield' $x$ from all initially red vertices. It is also quite easy to see that equality holds in \eqref{lowerbd}. The algorithmic (and inductive) argument below is not the most direct one but has the advantage that it gives more, namely an elegant and suitable construction of an autonomous region. This particular construction will almost immediately lead to a proof of parts (a) and (b) of Theorem \ref{mainthm} for the case $p_w = 0$. The `algorithm' is a modification (`stopped' version) of the standard invasion percolation procedure (starting at $x$) described a few lines above Proposition \ref{inv-perc}. At each stage of the procedure we have a tree which is a subgraph of $G$. Initially this tree consists only of the vertex $x$. At each step we consider all edges that have exactly one end-vertex in the tree, also called the {\it external edges} of the tree. Among these edges we select the one with minimal $\tau$-value and add it (and its external end-vertex) to the tree. The procedure is stopped as soon as an initially red vertex is added to the tree. Let us denote this vertex by $R$, and the final tree given by this procedure by $T(x)$. Let $\tau^*$ be the maximal $\tau$ value on this tree, and $e^*$ the edge where this maximum is attained. Removing this edge from the tree $T(x)$ `splits' the tree in two parts. Let $T_1^*(x)$ denote the part containing $x$.
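\noindent Before stating the corresponding claim, we note that the stopped procedure is easy to implement; the following sketch (our own code, with a toy graph and invented $\tau$-values at the end, only for illustration) returns the stopped tree $T(x)$, the red vertex $R$ and the value $\tau^*$.
\begin{verbatim}
# A minimal sketch (our own code) of the stopped invasion procedure of this
# subsection, for the case p_w = 0.
import heapq

def stopped_invasion(adj, colours, x):
    """adj: dict v -> dict w -> tau of edge <v,w> (symmetric); colours: 'green'/'red';
    x: an initially green vertex.  Returns (tree_edges, R, tau_star), where R is the
    first initially red vertex that is invaded and tau_star equals t(x), cf. the
    Claim below."""
    in_tree = {x}
    tree_edges = []
    heap = [(tau, x, w) for w, tau in adj[x].items()]
    heapq.heapify(heap)
    while heap:
        tau, v, w = heapq.heappop(heap)   # external edge with minimal tau-value
        if w in in_tree:
            continue                      # the edge has become internal; skip it
        in_tree.add(w)
        tree_edges.append((v, w, tau))
        if colours[w] == 'red':           # stop as soon as a red vertex is invaded
            tau_star = max(t for _, _, t in tree_edges)
            return tree_edges, w, tau_star
        for u, t_edge in adj[w].items():
            if u not in in_tree:
                heapq.heappush(heap, (t_edge, w, u))
    raise ValueError('no initially red vertex is reachable from x')

# toy example; the tau-values are invented for illustration only
adj = {'x': {'a': 0.7, 'b': 0.2},
       'a': {'x': 0.7, 'b': 0.4, 'r': 0.9},
       'b': {'x': 0.2, 'a': 0.4, 'r': 0.5},
       'r': {'a': 0.9, 'b': 0.5}}
colours = {'x': 'green', 'a': 'green', 'b': 'green', 'r': 'red'}
print(stopped_invasion(adj, colours, 'x'))   # tau_star = t('x') = 0.5 here
\end{verbatim}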
\noindent \begin{claim} \label{claimpw0}
\noindent (i) The vertex $R$ is responsible for $x$ becoming red. \\ (ii) $x$ becomes red at time $\tau^*$. That is, $t(x) = \tau^*$. Moreover, $C_g(x)$ (defined in part (b) of the Theorem) is the set of vertices of $T_1^*(x)$. \\ (iii) Let $\bar E$ denote the set of all edges of which one end-vertex is a vertex of $T(x)$, different from $R$, and one end-vertex is not in $T(x)$. Let $\widehat{T}(x)$ be the graph with the same vertices as $T(x)$ and with all edges that have both end-vertices in $T(x)$. Then $(\widehat{T}(x), \bar E)$ is autonomous (with respect to $c$ and $\tau$). \end{claim}
\noindent \begin{proof} (of Claim) The proof of the Claim is by induction on the number of steps in the above invasion procedure. If the number of steps is $1$ we are in the situation that the edge incident to $x$ with minimal $\tau$-value has a red end-vertex, and the above Claim follows easily. (Note that this case corresponds to the example in the second paragraph of Subsection 1.3). Now suppose the number of steps is larger than $1$. Consider the edge $e^*$ defined above. Let $E^*$ denote the set of external edges, except $e^*$ itself, at the stage of the procedure immediately before $e^*$ was added. From the definition of invasion percolation, all edges in $E^*$ have $\tau$-value larger than $\tau^*$. On the other hand, all edges that were added after that step have, by definition, $\tau$-value smaller than $\tau^*$. Therefore the edges in $E^*$ were never added to the tree. Hence, since $R$ was added after $e^*$ (and was the first red point added to the tree), it follows that every path in $G$ from $x$ to a red point contains $e^*$ or an edge in $E^*$. Therefore, by \eqref{lowerbd} we get that $$t(x) \geq \tau^*.$$
To get the reversed inequality, note the following. Let $y$ denote the external end-vertex of $e^*$ when $e^*$ was added to the tree. We already remarked that removing $e^*$ from $T(x)$ `splits' $T(x)$ into two separate trees, and we denoted the part containing $x$ by $T_1^*(x)$. Let $T_2^*(x)$ denote the other part. It follows from the above that $T_2^*(x)$ contains $y$ and $R$. We will assume that the initial colour of $y$ is green (otherwise the Claim follows easily). It is easy to see from the above that a similar invasion procedure as before, but now starting at $y$ instead of $x$, has as its final tree the tree $T_2^*(x)$. By the induction hypothesis we have that $y$ becomes red at the time which is equal to the maximal edge value in $T_2^*(x)$ and hence before time $\tau^*$, and that $R$ is responsible for $y$ becoming red. Also note that, from the earlier observations, just before time $\tau^*$ there is an open path from $x$ to the end-vertex $\neq y$ of $e^*$. Since $e^*$ becomes open at time $\tau^*$ it follows that $x$ becomes red at time $\tau^*$. Moreover, since $R$ is responsible for $y$ becoming red, it is also responsible for $x$ becoming red. This (and the observation made earlier that all external edges $\neq e^*$ of $T_1^*(x)$ have $\tau$ value larger than $\tau^*$) completes parts (i) and (ii) of the proof of Claim~\ref{claimpw0}. Similar arguments show part (iii). \end{proof}
\noindent Now we are ready to handle the case where $G$ is infinite. If $G$ is infinite and $p_r > 0$, it is not a priori clear that the process described in Subsection 1.1 is well-defined. However, the above invasion procedure and the corresponding Claim now give us the instrument to define it and to give a proof of Theorem \ref{mainthm} in this particular case. \end{subsection}
\begin{subsection}{Proof of Theorem \ref{mainthm} for the case $p_w = 0$} For each green vertex $x$ simply run the invasion procedure starting from $x$. Since the initial colours and the $\tau$ variables are independent, we have, at each step in the invasion from $x$, probability $p_r$ of hitting a red vertex (independently of the previous steps in this invasion). Hence the invasion procedure starting at $x$ stops with probability $1$, and (by part
(iii) of Claim~\ref{claimpw0}) yields an autonomous region containing $x$. Since the graph has countably many vertices, this yields a construction of the process on $G$ and completes the proof of part (a) of the theorem. Moreover, it shows that Claim~\ref{claimpw0} also holds (a.s.) for $G$. Further, the number of steps in the invasion procedure from an initially green vertex clearly has a geometric distribution: the probability that it is larger than $n$ is $(1-p_r)^n$. Since (by part (ii) of Claim~\ref{claimpw0}) $|C_g(v)|$ is at most the number of steps in the invasion procedure, part (b) of the theorem follows. \\ {\it Proof of part (c)}: For each pair of vertices $x,y$, let $I(x,y)$ denote the event that $x$ is initially green and that $y$ is initially red and responsible for $x$ becoming red. It follows immediately from the above that for all vertices $x$ and all $m$ \begin{equation} \label{Rbd} \sum_{y : d(x,y) \geq m} P(I(x,y)) \, = \, P\left(d(x, R(x)) \geq m \right) \le (1-p_r)^m. \end{equation} Further, using that $G$ is a Cayley graph, the `mass transport principle' (see e.g. Section 7.1 in \cite{LyP} or \cite{HPS}) gives: $$ P\left(D(w) \mbox{ has radius } \geq m\right) \leq \sum_{v \,: \, d(v,w) \geq m} \! \! P(I(v,w)) = \sum_{v \,: \, d(v,w) \geq m} \! \! P(I(w,v)),$$ which by \eqref{Rbd} is at most $(1-p_r)^m$. This completes the proof of part (c) of the theorem.
\noindent {\it Proof of part (d)}. As we will see, this follows from earlier observations, together with a block argument which is quite similar to one in percolation theory, due to Kesten (see \cite{K}). Below we denote the $d$-dimensional cubic lattice simply by $\Z^d$.
Let, as before, $T(x)$ denote the tree produced by the invasion procedure starting at $x$. We want to prove exponential decay for
$P(|D(v)| > n)$, where $v$ is an initially red point. Without loss of generality we take $v = {\bf 0}$. We say that a finite set $W$ of vertices containing $\bf 0$ is a lattice animal (abbreviated as l.a.) if for all $w \in W$ there is a path in $\Z^d$ from $\bf 0$ to $w$ of which every vertex is in $W$. From the definitions (and since, as we saw in (c), $D(\bf 0)$ is a.s. finite), it is clear that $D(\bf 0)$ is a lattice animal. Let $L$ be an even integer and consider the partition of $\mathbb{Z}^d$ into cubes $Q_L (x) := [-L/2, L/2 )^d + L x$, $ x \in \Z^d$. We say that $x \in \Z^d$ is {\it fine} if $Q_L(x) \cap D({\bf 0}) \neq \emptyset$. Let $V_F$ denote the set of all vertices that are fine. Since $D({\bf 0})$ is a lattice animal, $V_F$ is also a lattice animal. Further, we say that $x \in \Z^d$ is {\it proper} if $Q_L(x)$ contains a vertex $y$
with $|T(y)| > L/4$, and write $I( x \mbox{ is proper })$ for the indicator function of the corresponding event. (Here $T(\cdot)$ is as defined in the invasion procedure earlier in this Section). Finally, a subset of $\Z^d$ is proper if every element in the set is proper. It is clear that for every $x \neq \bf 0$, if $x$ is fine, then $x$ is proper. It is also clear that if $D(\bf 0)$ contains vertices outside $Q_L(\bf 0)$, then $\bf 0$ is also proper. Recall from Claim~\ref{claimpw0}(iii) that for each tree $T$ in $\Z^d$ and each vertex $y$, the event $\{T(y) = T\}$ depends only on the $c$ values of the vertices of $T$ and the $\tau$ values of the edges that have at least one end-vertex in $T$. From this it easily follows that the process $\left(I( x \mbox{ is proper }), \, x \in \Z^d\right)$ is $2$-dependent (see e.g. \cite{G} for this notion). Let $\varepsilon=\varepsilon(L) = \varepsilon(L,d)$ be the probability that a given vertex is proper. Since, for each $y$, the distribution of
$|T(y)|$ is geometric (and $|Q_L(y)|$ is polynomially bounded in $L$) it is clear that for fixed $d$ $$\varepsilon(L,d) \rightarrow 0 \mbox{ as } L \rightarrow \infty.$$ The above mentioned $2$-dependence gives that there is a constant $C_1 = C_1(d)$ such that for every set $W \subset \Z^d$
\begin{equation}
\label{1dp} P(W \mbox{ is proper }) \leq \varepsilon^{\frac{|W|}{C_1}}. \end{equation}
Finally, we use that there is a constant $C_2 = C_2(d)$ such that the number of lattice animals of size $m$ is at most $C_2^m$, see~\cite{G}. Together, the above gives (noting that each l.a. of size $\geq m$ contains a l.a. of size $m$) that for $n$ large enough (depending on $L$),
\begin{eqnarray}
P(|D({\bf 0})| > n) & \leq & P\left(\exists \mbox{ a proper l.a. of size } \lceil\tfrac{n}{|Q_L|}\rceil \right) \nonumber \\
& \leq & C_2^{\frac{n}{|Q_L|}+1} \, \varepsilon(L)^{\frac{n}{|Q_L| C_1}} \nonumber \\
& = & C_2 \, \left[ \left(C_2 \,\,\varepsilon(L)^{\frac{1}{C_1}}\right)^{1/|Q_L|}\right]^n.
\end{eqnarray}
Taking $L$ so large that $C_2 \,\, \varepsilon(L)^{1/C_1} < 1$ completes the proof of part (d). This completes the proof of Theorem \ref{mainthm} for the special case where $p_w = 0$. \end{subsection}
\begin{subsection}{Proof of Proposition \ref{inv-perc} and Theorem \ref{uni-bound}} We first prove Proposition \ref{inv-perc}. We say that an edge is $p$-open if $\tau_e < p$. Define $p$-open paths and $p$-open clusters in the obvious way. To prove the Proposition we will derive suitable lower and upper bounds for the l.h.s. of \eqref{invperc} in terms of an expression of the form of its r.h.s. \\ The lower bound is very easy: Since $\tau_{\hat e} > p_c$ (see the paragraph below \eqref{hpseq}), it follows immediately that (a.s) the region which is already invaded at the step where $\hat e$ is invaded, contains all the vertices of the $p_c$-open cluster of $O$. Hence the l.h.s of \eqref{invperc} is larger than or equal to the r.h.s.\\ The upper bound is more complicated. We use the standard percolation notation $\theta(p)$ for the probability that $O$ is in an infinite $p$-open cluster.\\ Define, for each $p$ and $n$, the following two events:
\begin{eqnarray*}
A_{n,p} & = & \{\exists \mbox{ a } p\mbox{-closed circuit with diameter} \geq n \mbox{ in the dual lattice} \\
& & \;\,\mbox{that contains } O \mbox{ in its interior}\}.
\end{eqnarray*}
$$D_p = \{O \mbox{ belongs to an infinite } p \mbox{-open cluster }\}.$$ Note that $P(D_p) = \theta(p)$ and that if $p_1 < p_2$, then $D_{p_1} \subset D_{p_2}$ and $A_{n, p_2} \subset A_{n, p_1}$.
Let $\hat \tau = \tau_{\hat e}$. Let $p'$ be some number between $p_c$ and $1$. The following observation is straightforward.
\noindent {\it Observation}\\ (a) If $\hat \tau > p'$ and $\hat R \geq n$, then there is a $p > p'$ such that the event $A_{n,p}$ occurs. \\ (b) If $\hat \tau <p'$, then there is a $p<p'$ such that $D_p$ occurs.
\noindent Let, for $p > p_c$, $L(p)$ be the correlation length ($= L(p,\varepsilon_0)$) as defined in Section 1 in the paper by Kesten (1987) on scaling relations. (See \cite{K2}). That is, $L(p)$ is the smallest $n$ such that the probability that there is a $p$-open horizontal crossing of a given $n \times n$ box is larger than $1 - \varepsilon_0$. Here $\varepsilon_0$ is an appropriately (sufficiently small) chosen positive constant. (From this definition it is clear that $L(p)$ is non-increasing in $p$ on the interval $(p_c,1]$.) It is well-known (see (2.25) in \cite{K2} and the references preceding that equation) that there are constants $C_1 > 0$ and $C_2 > 0$ such that for all $p > p_c$ and all $n$,
\begin{equation} \label{ke1} P_p(A_{n,p}) \leq C_1 \, \exp\left(- \frac{C_2 n}{L(p)}\right). \end{equation}
Further, Theorem 2 in \cite{K2} says that there is a constant $C_3 > 0$ such that, for all $p > p_c$,
\begin{equation} \label{ke2} \theta(p) \leq C_3 P_{cr}\left(O \leftrightarrow \partial B(L(p))\right). \end{equation}
Now take, for $p'$, the supremum of those $p$ for which $L(p) > n/(C_4 \log n)$, where $C_4$ is a positive constant that will be appropriately chosen later. Obviously, \begin{equation} \label{obv-eq} P(\hat R \geq n) \leq P(\hat R \geq n, \, \hat \tau > p') + P(\hat \tau < p'). \end{equation} The first term in the r.h.s of \eqref{obv-eq} is, by Observation (a) above and the `nesting' property of the events $A_{n,p}$ (stated in the sentence below the definition of these events), smaller than or equal to \begin{equation} \label{obv1} \lim_{p \downarrow p'} P(A_{n,p}) \leq \limsup_{p \downarrow p'} C_1 \exp(- \frac{C_2 n}{L(p)}) \leq C_1 \exp(- C_2 C_4 \log n), \end{equation} where the first inequality follows from \eqref{ke1} and the second inequality from the definition of $p'$.
The second term of \eqref{obv-eq} is, by Observation (b) and the `nesting' property of the events $D_p$, smaller than or equal to \begin{equation} \label{obv2} \lim_{p \uparrow p'} \theta(p) \leq \limsup_{p \uparrow p'} C_3 P_{cr}\left(O \leftrightarrow \partial B(L(p))\right) \leq C_3 P_{cr}\left(O \leftrightarrow \partial B(\frac{n}{C_4 \log n})\right), \end{equation} where the first inequality follows from \eqref{ke2} and the second follows by (again) using the definition of $p'$. Putting \eqref{obv-eq}, \eqref{obv1} and \eqref{obv2} together we have
\begin{equation} \label{kec} P(\hat R \geq n) \leq C_3 P_{cr}\left(O \leftrightarrow \partial B(\frac{n}{C_4 \log n})\right) + C_1 \, \exp(- C_2 C_4 \log n). \end{equation}
It is believed that $P_{cr}(O \leftrightarrow \partial B(n))$ has a power law behaviour. This has only been proved for site percolation on the triangular lattice. However, for the percolation models we are considering, we do know that this function of $n$ has power-law lower and upper bounds. As a consequence we can choose $C_4$ so large that the second term in the r.h.s. of \eqref{kec} is (for all large enough $n$) smaller than the first term. Finally, it follows quite easily from RSW arguments (see e.g. Sections 11.7 and 11.8 in \cite{G}) that $P_{cr}\left(O \leftrightarrow \partial B(n/(C_4\log n))\right) \approx P_{cr}\left(O \leftrightarrow \partial B(n)\right)$. This completes the proof of Proposition \ref{inv-perc}. $\Box$
\noindent Now we are ready to prove Theorem \ref{uni-bound}. The invasion procedure in Subsection 2.1, which was used in the proof of Theorem \ref{mainthm}, differs from the `ordinary' invasion percolation model (described in the paragraphs preceding Proposition \ref{inv-perc}) in that it stops as soon as the growing tree `hits' a red vertex. There is, strictly speaking, another difference: the $\tau$ values in the former case were exponentially distributed and those in the latter case were uniformly distributed on $(0,1)$. However, that difference clearly doesn't matter, and in the rest of this proof we assume the $\tau$ variables to be uniformly distributed on $(0,1)$. Let us call the former procedure a `stopped' invasion procedure (with parameter $p_r$), and the latter an ordinary invasion procedure. All these procedures (the stopped procedures with $p_r$ varying between $0$ and $1$, and the ordinary procedure) can be coupled in the following natural way: Assign to each vertex $v$, independently of the others (and of the $\tau$ variables), a random variable $\rho(v)$, uniformly distributed on the interval $(0,1)$. When we now do invasion percolation (w.r.t. the $\tau$ variables) and stop when we hit a vertex with $\rho$ value smaller than $p_r$, this corresponds exactly to the above-mentioned stopped invasion with parameter $p_r$. In this coupled setting, the set $C_g(O)$ for the stopped model with parameter $p_r$ is clearly non-increasing in $p_r$, and the union of these sets over all the values $p_r>0$ is exactly the region mentioned in Proposition \ref{inv-perc}. Theorem \ref{uni-bound} now follows from this proposition. $\Box$
\end{subsection}
\end{section}
\begin{section}{Proof for the case $p_w > 0$} In this section we prove Theorem \ref{mainthm} for the case $p_w>0$. Recall that in the special case where there are no white vertices (see Section 2) there was an elegant invasion procedure which produced, with probability $1$, a finite autonomous set containing a given vertex or edge. This is much more complicated in the general case, when there are white vertices. We still have a procedure which, if it stops, gives an autonomous set containing, say, a given vertex $x$. This algorithm starts as before, with one invasion tree, which initially consists only of the vertex $x$, and which grows by invading the edge with minimal $\tau$ value. However, when we hit a `fresh' white vertex $y$ we have to investigate the `space-time paths from outside' that have possibly influenced $y$. This is done by starting new invasion trees in the green vertices on the boundary of the white cluster of $y$. As before, an invasion tree stops when it invades a red vertex. In the situation in the previous Section this also marked the end of the algorithm. But in the current situation it only marks the end of one invasion tree, while the others keep growing and creating new invasion trees. In this way the algorithm might go on forever. However, we show that under the condition in Theorem \ref{mainthm} the algorithm, which is described more precisely below, does end.
\noindent The input is a connected graph $G = (V,E)$, the initial colours $c(v), v \in V$ and the opening times $\tau(e), e \in E$, and the vertex $x$ or edge $e$ for which we want to find an autonomous region. Here we only handle the case concerning a vertex $x$ and we assume that $x$ is green; the other cases can be done in a very similar way. For the moment it suffices to restrict to finite graphs. The algorithm will produce an autonomous subgraph $H$ and, for some vertices $v$ of $H$, non-negative numbers $t_g(v)$ and $t_r(v)$, and for some edges $e$ of $H$ a positive number $t(e)$. Here $t_g(v)$ and $t_r(v)$ will denote the time at which $v$ becomes green and red, respectively. The value $t(e)$ will be the time when $e$ becomes open. It will be clear from the description below that, at each stage of the algorithm the edges to which a $t$-value has been assigned form a collection of disjoint trees. Each tree in this collection has one of two labels: `active' or `paralyzing'. How these labels are assigned is described in Subsection \ref{Desc} below. The collection of active trees is denoted by $\mathcal{T}_a$ and the collection of paralyzing trees by $\mathcal{T}_p$. As we will see, new active or paralyzing trees are `created' during the algorithm, and active trees can merge with each other or with a paralyzing tree. In the former case the new tree is active, in the latter case it is paralyzing.
The set of edges which have at least one end-vertex in an active tree (and not both end-vertices in the same active tree) is denoted by $\mathcal{E}$. With some abuse of terminology we say that a vertex is in $\mathcal{T}_a$ if it is a vertex of some tree in $\mathcal{T}_a$. A similar remark holds w.r.t. $\mathcal{T}_p$.
Apart from the above, we need the following auxiliary variables and structures, which will be assigned during the algorithm.
The first auxiliary structure we mention here is a set $S$, which can be interpreted as the set of all initially white vertices that `have been seen until the current stage' in the algorithm. We say that a vertex `is registered' if it is in $\mathcal{T}_p$, $\mathcal{T}_a$ or $S$. Further, to each edge $e \in \mathcal{E}$ (as introduced above) a value $t_1(e)$ will be assigned, which can be interpreted as a tentative, possible value for $t(e)$.
Finally, the following definition will be important: The {\it white cluster} $C_w(v)$ of a vertex $v$ is defined as the maximal connected subset of $G$ that contains $v$ and of which all vertices $y$ have initial colour $c(y) = $ white. (Note that this notion, in contrast with the notion of green clusters (defined in Section 1), does not involve the state (open/closed) of the edges.) The boundary of the white cluster of $v$, denoted by $\partial C_w(v)$, is the set of all vertices that are not in $C_w(v)$ but have an edge to some vertex in $C_w(v)$. If $c(v)$ is not white, then $C_w(v)$ and $\partial C_w(v)$ are empty.
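\noindent For concreteness, computing $C_w(v)$ and $\partial C_w(v)$ (as needed in action 6 below) amounts to a breadth-first search among the initially white vertices; the following helper is our own sketch and is not part of the formal description of the algorithm.
\begin{verbatim}
# A small helper (our own code): the white cluster of v and its boundary.
from collections import deque

def white_cluster_and_boundary(adj, colours, v):
    """adj: dict vertex -> iterable of neighbours; colours: dict vertex -> initial colour.
    Returns (C_w(v), boundary of C_w(v)); both are empty when c(v) is not white."""
    if colours[v] != 'white':
        return set(), set()
    cluster, boundary = {v}, set()
    queue = deque([v])
    while queue:
        u = queue.popleft()
        for w in adj[u]:
            if colours[w] == 'white':
                if w not in cluster:
                    cluster.add(w)
                    queue.append(w)
            else:
                boundary.add(w)        # non-white neighbour of the white cluster
    return cluster, boundary
\end{verbatim}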
\begin{subsection}{Description of the algorithm} \label{Desc} Using the notions above we are now ready to describe the algorithm. It starts with action 1 below, followed by an iteration of (some of) the other actions. Recall that $c(x)$ is green.
\noindent {\bf 1.} {\bf Initialization of some of the variables and structures.} \\ Set $\mathcal{T}_p = \emptyset$, $\mathcal{T}_a = \{\{x\}\}$, and $S = \emptyset$. \\ Set $t_g(x) = 0$, $\mathcal{E} $ as the set of all edges incident to $x$, and $t_1(e) = \tau(e)$ for all edges $e\in \mathcal{E}$.
\noindent {\bf 2.} {\bf Selection of minimal external edge.} \\ Remove from $\mathcal{E}$ all edges of which both endpoints are in the same tree of $\mathcal{T}_a$. \\ {\it Comment: such edges can have resulted from some of the actions below.} \\ If $\mathcal{E} = \emptyset$, stop. Otherwise, let $e$ be the edge in $\mathcal{E}$ with minimal $t_1$-value. \\ Write $e = \langle v,y \rangle$ with $v$ in $\mathcal{T}_a$. (This way of writing is of course not unique if both end-vertices of $e$ are in $\mathcal{T}_a$, but that doesn't matter). Let $T$ denote the tree in $\mathcal{T}_a$ of which $v$ is a vertex. \\ If $y$ is not in $\mathcal{T}_a$, $\mathcal{T}_p$ or $S$ (that is, $y$ is `fresh') go to 2a, else go to 2b.
\noindent {\bf 2a. Fresh vertex.} \\ Determine $c(y)$. \\ If $c(y) =$ red, set $t(e) = t_1(e)$ and go to 3a. \\ If $c(y) =$ green, set $t(e) = t_1(e)$ and go to 4. \\ If $c(y) =$ white, go to 6.
\noindent {\bf 2b. Registered vertex.} \\ Set $t(e) = t_1(e)$. \\ If $y$ is in $\mathcal{T}_p$ go to 3b. \\ If $y$ is in $\mathcal{T}_a$ go to 5. \\ Else go to 7.
\noindent {\bf 3a. Fresh red.} \\ {\it Comment: This case can be handled in almost the same way as 3b below and therefore, with an `administrative trick', we simply turn it into the latter case:} \\ Set $t_r(y) = 0$. Add to $\mathcal{T}_p$ the tree which consists only of the vertex $y$. \\ Go to 3b.
\noindent {\bf 3b. Active tree $T$ becomes paralyzed.} Set $t_r(z) = t(e)$ for all vertices $z$ of $T$. \\ Remove from $\mathcal{E}$ all edges of which one end-vertex is in $T$ and the other end-vertex is not in $\mathcal{T}_a$. Let $T'$ be the tree in $\mathcal{T}_p$ of which $y$ is a vertex. Replace, in $\mathcal{T}_p$, the tree $T'$ by that obtained from `glueing together' $T$ and $T'$ via the edge $e$. Remove $T$ from $\mathcal{T}_a$. \\ Go to 2.
\noindent {\bf 4. Fresh green. } \\ Set $t_g(y) = 0$. For each edge $e'$ incident to $y$ that was not yet in $\mathcal{E}$: add $e'$ to $\mathcal{E}$ and set $t_1(e') = \tau(e')$. Replace, in $\mathcal{T}_a$, the tree $T$ by a new tree obtained from glueing $y$ to $T$ by the edge $e$. \\
Go to 2.
\noindent {\bf 5. Two active trees join.} \\ Let $T' \in \mathcal{T}_a$ be the active tree of which $y$ is a vertex. Replace, in $\mathcal{T}_a$, the trees $T$ and $T'$ by a new tree obtained from `glueing together' $T$ and $T'$ with the edge $e$. \\
Go to 2.
\noindent {\bf 6. Fresh white.} \\ Add every vertex of $C_w(y)$ to $S$. \\ For each vertex $z$ in $\partial C_w(y)$ that has $c(z) =$ green and is not in $\mathcal{T}_a$ or $\mathcal{T}_p$, do the following: \\ Set $t_g(z) = 0$; add the tree $\{z\}$ to $\mathcal{T}_a$; add to $\mathcal{E}$ each edge $e'$ incident to $z$ that is not yet in $\mathcal{E}$, and set $t_1(e') = \tau(e')$.
\noindent For each vertex $z$ in $\partial C_w(y)$ that has $c(z) =$ red and is not in $\mathcal{T}_p$, set $t_r(z) = 0$ and add the tree $\{z\}$ to $\mathcal{T}_p$.\\ Go to 2.
\noindent {\bf 7. Registered white.} \\ Set $t_g(y) = t(e)$. Replace, in $\mathcal{T}_a$, the tree $T$ by the tree obtained from $T$ by `glueing' the vertex $y$ to it by the edge $e$. For each edge $e' = \langle y, z \rangle$ of $y$ that is not in $\mathcal{E}$, add it to $\mathcal{E}$ and set $t_1(e')$ as follows: \\ If $z$ is in $\mathcal{T}_p$ but $c(z) \neq$ red, set \begin{equation} \label{adjust} t_1(e') = t(e) + \tau(e') -(t_r(z) - t_g(z)), \end{equation} else set $$t_1(e') = t(e) + \tau(e').$$ {\it Comment: The subtracted term in \eqref{adjust} accounts for the time that $e'$ already had a green end-vertex. See also the Remark at the end of Subsection 3.2} \\ Go to 2.
\noindent {\bf Remark:} \\ Note that initially there is only one active tree and that new active trees are only formed in part 6 of the algorithm. Also note that initially there are no paralyzing trees; these can be formed in part 6 and in part 3a. Moreover, 3a always leads, via 3b, to the elimination of an active tree. Now consider the case that $G$ has no vertices with initial colour white. Then the algorithm never enters part 6 (nor part 7), so that throughout the algorithm there is one active tree until a red vertex is `hit'. From such considerations it is easily seen that in this case the algorithm reduces to the one described in Section 2. \end{subsection}
\begin{subsection}{Correctness of the algorithm} If $G$ is finite the above algorithm will clearly stop. Moreover, we claim that if $G$ has at least one vertex with initial colour red, we have the following situation at the end of the algorithm: The set of active trees $\mathcal{T}_a$ is empty. The set $\mathcal{T}_p$ contains one or more trees, and the vertex $x$ is in one of them. Each of these trees has exactly one vertex with initial colour red, and this vertex is `responsible' for the other vertices in that tree becoming red. The following pair, $(H, \bar E)$, is autonomous: The vertices of $H$ are the vertices in $\mathcal{T}_p$ together with all vertices in $S$. The edges of $H$ are all edges of which both end-vertices are in the above set. The set $\bar E$ is the set of all edges of which one end-vertex is a vertex $v$ of $H$ with $c(v) \neq$ red, and the other end-vertex is not in $H$. Further, each initially green vertex $v$ of $H$ becomes red at time $t_r(v)$.
The `correctness' of the above algorithm (that is, the above claim) can, in principle, be proved by induction, e.g. on the number of edges. Instead of giving a full proof (which would be extremely tedious) we present the key ideas/observations ((a) - (d) below) to be used in such proof.
\noindent (a) As in many induction proofs it is useful, or even necessary (for carrying out the induction step), to generalize the statement one wants to prove. In the current situation this generalization is as follows: In the above algorithm, information is stored in the administration when the vertices involved are `encountered' by the algorithm. In particular, in action 6 a white cluster and its boundary are `stored' because a vertex of the white cluster had been encountered (as endpoint of the edge selected in action 2). The same algorithm still works if at one or more stages of the algorithm such information about a white cluster (and its boundary) is stored `spontaneously' (that is, without this cluster having been encountered in the sense above).
\noindent (b) The main observation for doing induction on the number of edges is the following: Let, among all edges with at least one initially green endpoint, $\hat e$ be the one with minimal $\tau$ value. Let $\hat x$ and $\hat y$ denote its endpoints. We may assume that $\hat x$ is initially green. It is clear that the first thing that happens in the `real' growth process is the opening of $\hat e$ (namely, at time $\tau(\hat e)$). It is also clear that from that moment on the growth process behaves as if starting on a graph with one vertex less, namely the graph obtained by `identifying' (or glueing together) $\hat x$ and $\hat y$ (with an obviously assigned colour: green if $c(\hat y)$ is white or green; red if $c(\hat y)$ is red).
\noindent (c) To carry out the induction step it has to be shown that the algorithm has a property analogous to that for the real process described in (b) above. That this is indeed the case, can be seen as follows: As long as $\hat x$ and $\hat y$ are not `registered' in the algorithm, the algorithm behaves the same as it would behave for the graph obtained after the identification described in (b). Moreover, one can easily see from the description of the algorithm that immediately after one of these vertices is registered, the other one also is, and that they are immediately `attached to each other' (by the edge $\hat e$) in the same tree.
\noindent (d) The following side remark must be added to (c) above: Suppose that $\hat y \in C_w(y)$ in action 6 at some stage of the algorithm. This cluster $C_w(y)$ could be larger than that in the graph obtained by identifying $\hat y$ and $\hat x$. This means that in that step `more information is collected' than in the situation where $\hat x$ and $\hat y$ would be identified from the beginning. It is exactly for this issue that the generalized algorithm (and claim) in (a) was given. \end{subsection}
\begin{subsection}{Proof of Theorem \ref{mainthm}} \begin{proof}
It follows, in the same way as in the case $p_w = 0$, that on an infinite graph the dynamics is well-defined provided the algorithm stops with probability $1$. We will show that, under the condition \eqref{key} in the statement of the Theorem, the algorithm indeed stops. In fact, the arguments we use will give something stronger, namely Proposition \ref{prop1} below, from which not only part (a) of Theorem~\ref{mainthm} follows, but which we will also use to prove part (b), (c) and (d). \begin{prop}\label{prop1} Under the condition of Theorem~\ref{mainthm}, we have that, for each $x$, the above mentioned algorithm stops, and, moreover, the distributions of the volume and the diameter of the graph $H$ defined above have an exponential tail. \end{prop} \begin{proof} By the $k$th step of the algorithm we mean everything done by the algorithm between the $k$th and $k+1$th time the algorithm `enters' part 2a in the description in Subsection 3.1. Recall that we say that a vertex is registered if it is in $\mathcal{T}_a$, $\mathcal{T}_p$ or $S$. Let $\nu_k$ be the number of registered vertices at the beginning of step $k$. (In particular, $\nu_1 = 1$.) If the algorithm is already terminated during step $j$ for some $j < k$, we set $\nu_k$ equal to the number of registered vertices at the moment of termination. Further, let $y_k$ denote the `fresh' vertex (i.e. the vertex $y$ in part 2a of the description in Subsection 3.1) treated in step $k$ of the algorithm. (In particular, $y_1$ is the end-vertex of the edge incident to $x$ with minimal $\tau$ value). Let $\eta_k = \nu_{k+1} - \nu_k$.
Further, let $\alpha_k$ denote the net increase of the number of active trees during step $k$ of the algorithm. If the algorithm is terminated during step $k$, we set $\alpha_k = -1$. (This choice is somewhat arbitrary; it is simply a suitable choice to ensure that certain statements below hold for all $k$).
Note that the initial colours of the vertices are independent random variables, each being white, red or green with probability $p_w$, $p_r$ and $p_g$ respectively. It is clear from the algorithm that we may consider the colour of a vertex as `hidden' until the moment the vertex becomes registered. Let $\mathcal{F}_k$ be all information obtained by the algorithm until the beginning of step $k$ (including the identity but not the colour of $y_k$).\\ Let $N = \min\{n \, : \, 1 + \sum_{k=1}^n \alpha_k = 0\}$. It is easy to see that if $N$ is finite the algorithm stops during or before step $N$, and the number of vertices in the above defined graph $H$ is \begin{equation} \label{Hbd} 1 + \sum_{k=1}^N \eta_k. \end{equation}
Note that if $c(y_k)$ is white, the procedure is sent to part 6, and the newly registered vertices in step $k$ of the algorithm are exactly the vertices of $C_w(y_k)$ and the not yet registered vertices of $\partial C_w(y_k)$; moreover, $|\mathcal{T}_a|$ increases during this step by at most the number of green vertices in $\partial C_w(y_k)$. We write {\it at most}, because during the remainder of step $k$ no new active trees are created but already present active trees may disappear (which happens if the algorithm enters part 3b before it enters part 2a again).
Similarly, if $c(y_k)$ is red or green, then the only newly registered vertex is $y_k$ itself; moreover, in the former case
$|\mathcal{T}_a|$ goes down during step $k$ by at least $1$, while in the latter case it goes down or doesn't change. \\
For every connected set $W$ of vertices with $|W| \ge 2$, the number of vertices in the boundary of $W$ is at most $(D-1) |W|$; hence, we have
\begin{equation} \label{etbd}
\eta_k \leq D |C_w(y_k)| + \I_{\{c(y_k) \mbox{ not white}\}}. \end{equation} \begin{equation} \label{albd}
\alpha_k \leq (D-1) |C_w(y_k)| - \I_{\{c(y_k) \mbox{ is red}\}}. \end{equation}
Note that (since $y_k$ is `fresh') the conditional probability that $c(y_k)$ is red, white or green, given $\mathcal{F}_k$, is $p_r$, $p_w$ and $p_g$ respectively. Also note that, by the condition in the Theorem, $p_w < 1/(D-1)$ and hence (as is well-known and easy to check) there is a $q <1$ such that for all $n$ and all vertices $v$,
\begin{equation} \label{expcw}
P(|C_w(v)| \geq n) \leq q^n. \end{equation}
Moreover, it is easy to see that conditioned on $\mathcal{F}_k$, which includes the information that $y_k$ is a specific vertex, say $y$, the cluster size
$|C_w(y_k)|$ is stochastically smaller than $|C_w(y)|$. Hence the bound \eqref{expcw} also holds (a.s) if we replace its l.h.s. by $P(|C_w(y_k)| \ge n | \mathcal{F}_k)$. This, combined with \eqref{etbd}
immediately gives that there is a $\gamma < 1$ such that for all $k$ and $n$,
\begin{equation} \label{etdbd}
P(\eta_k \geq n | \mathcal{F}_k) \leq \gamma^n. \end{equation}
As to the $\alpha$'s, define (compare \eqref{albd}), for every vertex $v$, \begin{equation} \label{alvbd}
\alpha(v) = (D-1) |C_w(v)| - \I_{\{c(v) \mbox{ is red}\}}. \end{equation}
Let $\alpha'(v), \, v \in V$, be independent copies of the $\alpha(v), \, v \in V$. By a stochastic domination argument similar to the one that led to \eqref{etdbd}, we have, for all vertices $v$ and all positive integers $k$ and $n$,
\begin{equation} \label{aldbd}
P(\alpha_k \geq n | \mathcal{F}_k,\, y_k = v) \leq P(\alpha(v) \geq n) = P(\alpha'(v) \geq n). \end{equation}
And, again by \eqref{expcw}, there is a $\lambda < 1$ such that for all $n$ and $v$
\begin{equation} \label{alv-xbd} P(\alpha'(v) \geq n) = P(\alpha(v) \geq n) \leq \lambda^n. \end{equation}
Further note that, for each vertex $v$, we have $E(|C_w(v)|) = \xi_v(p_w)$. Hence, condition \eqref{key} in Theorem 1.1 says that there is an $\varepsilon > 0$ such that for all vertices $v$ we have
\begin{equation} \label{almbd} E(\alpha'(v)) = E(\alpha(v)) < -\varepsilon. \end{equation}
\noindent From \eqref{aldbd} and the definition of the random variables $\alpha'(v), \, v \in V$, it follows (from stochastic domination) that, for all positive integers $K$,
\begin{equation} \label{supbd} P\left(\sum_{k=1}^K \alpha_k \geq 0\right) \leq \sup^* P\left(\sum_{k=1}^K \alpha'(v_k) \geq 0\right), \end{equation} where we use '*' to indicate that the supremum is taken over all tuples of $K$ distinct vertices $v_1, v_2, ..., v_K$.
\noindent From \eqref{alv-xbd} and \eqref{almbd} it follows (by standard large-deviation upper bounds for independent random variables) that there is a $\beta < 1$ such that for all $K$ and all distinct vertices $v_1, v_2, \ldots, v_K$,
$$ P(\sum_{k=1}^K \alpha'(v_k) \geq 0) \leq \beta^K. $$
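Indeed, the standard computation behind this bound can be sketched as follows; the auxiliary quantities $\theta_0$, $\theta$ and $C$ below are introduced only for this sketch and play no role elsewhere. Fix $\theta_0 > 0$ so small that $e^{\theta_0} \lambda < 1$. Since $\alpha'(v) \geq -1$ and by \eqref{alv-xbd}, the quantity $$C := \frac{1}{2} \, \sup_{v \in V} E\left[\alpha'(v)^2\left(2 + e^{\theta_0 \alpha'(v)}\right)\right]$$ is finite. Using the elementary inequality $e^{y} \leq 1 + y + \frac{y^2}{2}\left(1 + e^{y}\right)$ (valid for all real $y$) and the fact that $e^{\theta \alpha'(v)} \leq 1 + e^{\theta_0 \alpha'(v)}$ for $0 < \theta \leq \theta_0$, we get from \eqref{almbd} that, for every vertex $v$ and every $0 < \theta \leq \theta_0$, $$E\left[e^{\theta \alpha'(v)}\right] \leq 1 + \theta \, E[\alpha'(v)] + C \theta^2 \leq 1 - \theta\varepsilon + C\theta^2.$$ Taking $\theta := \min(\theta_0, \varepsilon/(2C))$, Markov's inequality and the independence of the $\alpha'(v_k)$ give $$P\left(\sum_{k=1}^K \alpha'(v_k) \geq 0\right) \leq \prod_{k=1}^K E\left[e^{\theta \alpha'(v_k)}\right] \leq \left(1 - \frac{\theta\varepsilon}{2}\right)^K,$$ so one may take $\beta := 1 - \theta\varepsilon/2$.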
\noindent From this and \eqref{supbd} it follows that the distribution of $N$ has an exponential tail.
Putting this together with \eqref{etdbd} and \eqref{Hbd} we get that the number of vertices in $H$ has an exponential tail. Indeed, the event that $1+\sum_{k=1}^N \eta_k \ge n$ is contained in the union of the events $N \ge an$ and $\sum_{k=1}^{an} \eta_k \ge n$; the probabilities of these events decay exponentially in $n$ for a suitable constant $a > 0$.
This completes the proof of Proposition \ref{prop1}. (Note that the diameter of $H$ is at most its volume, since $H$ is a connected graph). \end{proof}
\noindent {\it Parts (a) and (b) of Theorem~\ref{mainthm}} follow immediately from Proposition~\ref{prop1} (noting that the vertices of $C_g(x)$ belong to $H$). \\ Using Proposition~\ref{prop1}, {\it Parts (c) and (d)} of the Theorem~\ref{mainthm} can now be derived in the same way as in the special case $p_w=0$ in Section 2. This completes the proof of Theorem \ref{mainthm}. \end{proof}
\noindent {\bf Remark:} For the alternative model (i) in Subsection 1.4, the proof of Theorem \ref{mainthm} is exactly the same. Note that the proof doesn't use that the $\tau$'s are exponentially distributed; it applies in the same manner to any continuous distribution. \\ For the alternative model (ii) the algorithm in Subsection 3.1 needs a few small adaptations. Apart from this the proof remains practically the same.
\end{subsection} \end{section} {\bf Acknowledgments.} Two of the authors (V.S. and M.E.V.) learned about the continuum model from E.J. Neves. We thank Antal J\'{a}rai for comments on Proposition \ref{inv-perc} and Chuck Newman for drawing our attention to the article \cite{StN}. We also thank Ron Peled and the referees for corrections in the first manuscript.
\end{document} |
\begin{document}
\title{\MakeUppercase{Bounded-Degree Planar Graphs Do Not Have Bounded-Degree Product Structure}}
\begin{abstract}
Product structure theorems are a collection of recent results that have been used to resolve a number of longstanding open problems on planar graphs and related graph classes. One particularly useful version states that every planar graph $G$ is contained in the strong product of a $3$-tree $H$, a path $P$, and a $3$-cycle $K_3$; written as $G\subsetcong H\boxtimes P\boxtimes K_3$. A number of researchers have asked if this theorem can be strengthened so that the maximum degree in $H$ can be bounded by a function of the maximum degree in $G$. We show that no such strengthening is possible. Specifically, we describe an infinite family $\mathcal{G}$ of planar graphs of maximum degree $5$ such that, if an $n$-vertex member $G$ of $\mathcal{G}$ is isomorphic to a subgraph of $H\boxtimes P\boxtimes K_c$ where $P$ is a path and $H$ is a graph of maximum degree $\Delta$ and treewidth $t$, then $t\Delta c \geqslant 2^{\Omega(\sqrt{\log\log n})}$. \end{abstract}
\section{Introduction}
Recently, product structure theorems have been a key tool in resolving a number of longstanding open problems on planar graphs. Roughly, a \defin{product structure theorem} for a graph family $\mathcal{G}$ states that every graph in $\mathcal{G}$ is isomorphic to a subgraph of the product of two or more ``simple'' graphs. As an example, there are a number of graph classes $\mathcal{G}$ for which there exist integers $t$ and $c$ such that, for each $G\in\mathcal{G}$, there is a graph $H$ of treewidth\footnote{A \defin{tree decomposition} of a graph $H$ is a collection $\mathcal{T}:=(B_x:x\in V(T))$ of subsets of $V(H)$ indexed by the nodes of some tree $T$ such that \begin{inparaenum}[(i)]
\item for each $v\in V(H)$, the induced subgraph $T[x\in V(T):v\in B_x]$ is connected; and
\item for each edge $vw\in E(H)$, there exists some $x\in V(T)$ with $\{v,w\}\subseteq B_x$. \end{inparaenum}
The \defin{width} of such a tree decomposition is $\max\{|B_x|:x\in V(T)\}-1$. The \defin{treewidth} of $H$ is the minimum width of any tree decomposition of $H$.} $t$ and a path $P$ such that $G$ is isomorphic to a subgraph of the strong product\footnote{The \defin{strong product} $G_1\boxtimes G_2$ of two graphs $G_1$ and $G_2$ is the graph with vertex-set $V(G_1\boxtimes G_2):=V(G_1)\times V(G_2)$ and that includes the edge with endpoints $(v,x)$ and $(w,y)$ if and only if \begin{inparaenum}[(i)]
\item $vw\in E(G_1)$ and $x=y$;
\item $v=w$ and $xy\in E(G_2)$; or
\item $vw\in E(G_1)$ and $xy\in E(G_2)$. \end{inparaenum} } of $H$, $P$, and a clique $K$ of order $c$. This is typically written as $G\subsetcong H\boxtimes P\boxtimes K_c$, where the notation $G_1\subsetcong G_2$ is used to mean that $G_1$ is isomorphic to some subgraph of $G_2$. See references \cite{dujmovic.joret.ea:planar,dujmovic.morin.ea:structure,krauthgamer.lee:intrinsic,ueckerdt.wood.ea:improved,bose.morin.ea:optimal,campbell.clinch.ea:product,illingworth.scott.ea:alon,distel.hickingbotham.ea:improved,hickingbotham.jungeblut.ea:product,hickingbotham.wood:shallow,wood:product} for examples.
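To make the definition concrete, the following small routine (our own illustration; it is not used anywhere in the proofs) builds the strong product of two graphs given by adjacency sets.
\begin{verbatim}
# A small illustration (our own code) of the strong product: (v,x)(w,y) is an edge
# exactly when each coordinate is equal or adjacent and the two pairs are distinct.
from itertools import product

def strong_product(adj1, adj2):
    """adj1, adj2: dict vertex -> set of neighbours.  Returns the adjacency dict
    of the strong product of the two graphs."""
    vertices = list(product(adj1, adj2))
    adj = {u: set() for u in vertices}
    for (v, x), (w, y) in product(vertices, repeat=2):
        if (v, x) == (w, y):
            continue
        if (v == w or w in adj1[v]) and (x == y or y in adj2[x]):
            adj[(v, x)].add((w, y))
    return adj

# example: the strong product of an edge with itself is the complete graph K_4
P2 = {0: {1}, 1: {0}}
K4 = strong_product(P2, P2)
assert all(len(nbrs) == 3 for nbrs in K4.values())
\end{verbatim}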
In some applications of product structure theorems, it is helpful if, in addition to having treewidth $t$, the graph $H$ has additional properties, possibly inherited from $G$. For example, one very useful version of the planar graph product structure theorem states that for every planar graph $G$ there exists a \emph{planar} graph $H$ of treewidth $3$ and a path $P$ such that $G\subsetcong H\boxtimes P\boxtimes K_3$ \cite[Theorem~36(b)]{dujmovic.joret.ea:planar}. The planarity of $H$ in this result has been leveraged to obtain better constants and even asymptotic improvements for graph colouring and layout problems, including queue number \cite{dujmovic.joret.ea:planar}, $p$-centered colouring \cite{debski.felsner.ea:improved}, and $\ell$-vertex ranking \cite{bose.dujmovic.ea:asymptotically}.
In this vein, the authors have been repeatedly asked if $H$ can have bounded degree whenever $G$ does; that is: \begin{quote}
For each $\Delta\in\N$, let $\mathcal{G}_\Delta$ be the family of planar graphs of maximum degree $\Delta$. Do there exist functions $t:\N\to\N$, $d:\N\to\N$, and $c:\N\to\N$ such that, for each $\Delta\in\N$ and each $G\in\mathcal{G}_\Delta$ there exists a graph $H$ of treewidth at most $t(\Delta)$ and maximum degree $d(\Delta)$ such that $G\subsetcong H\boxtimes P\boxtimes K_{c(\Delta)}$? \end{quote} In the current paper we show that the answer to this question is no, even when $\Delta=5$.
\begin{thm}\label{main_thm}
For infinitely many integers $n\geqslant 1$, there exists an $n$-vertex planar graph $G$ of maximum degree $5$ such that, for every graph $H$ of treewidth $t$ and maximum degree $\Delta$, every path $P$, and every integer $c$, if $G\subsetcong H\boxtimes P\boxtimes K_c$ then $t\Delta c \geqslant 2^{\Omega(\sqrt{\log\log n})}$. \end{thm}
The graph family $\mathcal{G}:=\{G_h:h\in\N\}$ that establishes \cref{main_thm} consists of complete binary trees of height $h$ augmented with edges to form, for each $i\in\{1,\ldots,h\}$, a path that contains all vertices of depth $i$. See \cref{G_5}.
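For concreteness, the following short routine (our own code; the vertex encoding is ours, and we take each depth-$i$ path in the left-to-right order of the tree, the natural choice for keeping $G_h$ planar) constructs $G_h$ and confirms that $G_5$ has $63$ vertices and maximum degree $5$.
\begin{verbatim}
# A minimal sketch (our own code) of the construction of G_h: a complete binary tree
# of height h together with, for each depth i in {1,...,h}, a path through all
# vertices of depth i (taken here in left-to-right order).
def build_G(h):
    """Vertices are pairs (depth, index); the children of (i, j) are (i+1, 2j)
    and (i+1, 2j+1).  Returns an adjacency dict for G_h."""
    adj = {}
    def add_edge(u, v):
        adj.setdefault(u, set()).add(v)
        adj.setdefault(v, set()).add(u)
    for i in range(h):
        for j in range(2 ** i):
            add_edge((i, j), (i + 1, 2 * j))       # tree edges to the two children
            add_edge((i, j), (i + 1, 2 * j + 1))
    for i in range(1, h + 1):
        for j in range(2 ** i - 1):
            add_edge((i, j), (i, j + 1))           # path through the depth-i vertices
    return adj

G5 = build_G(5)
print(len(G5), max(len(nbrs) for nbrs in G5.values()))   # prints: 63 5
\end{verbatim}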
\begin{figure}
\caption{The graph $G_5$ from the graph family $\{G_h:h\in\N\}$ that establishes \cref{main_thm}.}
\label{G_5}
\end{figure}
\section{Proof of \cref{main_thm}}
Throughout this paper, all graphs $G$ are simple and undirected with vertex-set $V(G)$ and edge-set $E(G)$. For a set $S$, $G[S]$ denotes the subgraph of $G$ induced by $S\cap V(G)$ and $G-S:=G[V(G)\setminus S]$. For every $v\in V(G)$, let $N_G(v):=\{w:vw\in E(G)\}$ and for every $S\subseteq V(G)$, let $N_G(S):=\bigcup_{v\in S} N_G(v)\setminus S$. We write $G_1\cong G_2$ if $G_1$ and $G_2$ are isomorphic and $G_1\subsetcong G_2$ if $G_1$ is isomorphic to some subgraph of $G_2$.
\subsection{Partitions}
Let $G$ and $H$ be graphs. An \defin{$H$-partition} $\mathcal{H}:=\{B_x:x\in V(H)\}$ of $G$ is a partition of $V(G)$ whose parts are indexed by the vertices of $H$ with the property that, if $vw$ is an edge of $G$ with $v\in B_x$ and $w\in B_y$ then $x=y$ or $xy\in E(H)$. The \defin{width} of $\mathcal{H}$ is the size of its largest part; that is, $\max\{|B_x|:x\in V(H)\}$. If $H$ is in a class $\mathcal{G}$ of graphs then we may call $\mathcal{H}$ a $\mathcal{G}$-partition of $G$. Specifically, if $H$ is a tree, then $\mathcal{H}$ is a \defin{tree-partition} of $G$ and if $H$ is a path, then $\mathcal{H}$ is a \defin{path-partition} of $G$. A path-partition $\mathcal{P}:=\{P_x:x\in V(P)\}$ of $G$ is also referred to as a \defin{layering} of $G$ and the parts of $\mathcal{P}$ are referred to as \defin{layers}. A set of layers $\{P_{x_1},\ldots,P_{x_q}\}\subseteq\mathcal{P}$ is \defin{consecutive} if $P[\{x_1,\ldots,x_q\}]$ is connected.
As in previous works, we make use of the following relationship between $H$-partitions and strong products, which follows immediately from the preceding definitions.
\begin{obs}\label{partitions_vs_products}
For every integer $c\geqslant 1$, and every graphs $G$, $H$, and $J$, $G\subsetcong H\boxtimes J\boxtimes K_c$ if and only if $G$ has an $H$-partition $\mathcal{H}:=\{B_x:x\in V(H)\}$ and a $J$-partition $\mathcal{J}:=\{C_y:y\in V(J)\}$ such that $|B_x\cap C_y|\leqslant c$, for each $(x,y)\in V(H)\times V(J)$. \end{obs}
The following important result of \citet{ding.oporowski:some} (also see \cite{wood:on,distel.wood:tree_partitions}) allows us to focus on the case where the first factor in our product is a tree.
\begin{thm}[\citet{ding.oporowski:some}]\label{dingy}
If $H$ is a graph with maximum degree $\Delta$ and treewidth $t$,
then $H$ has a tree-partition of width at most $24\Delta(t+1)$. \end{thm}
\begin{cor}\label{ding_translation}
If $G\subsetcong H\boxtimes P\boxtimes K_c$ where $H$ has treewidth $t$ and maximum degree $\Delta$ then there exists a tree $T$ such that $G\subsetcong T\boxtimes P\boxtimes K_{24c\Delta (t+1)}$. \end{cor}
\begin{proof}
By \cref{dingy}, $H$ has a tree-partition $\mathcal{T}:=\{B_x:x\in V(T)\}$ of width at most $24\Delta (t+1)$. By \cref{partitions_vs_products}, $H \subsetcong T\boxtimes K_{24\Delta (t+1)}$. Therefore, $G\subsetcong T\boxtimes K_{24\Delta (t+1)}\boxtimes P\boxtimes K_c \cong T\boxtimes P\boxtimes K_{24c\Delta (t+1)}$. \end{proof}
The \defin{length} of a path is the number of edges in it. Given two vertices $v,w\in V(G)$, $\dist_G(v,w)$ denotes the minimum length of a path in $G$ that contains $v$ and $w$, or $\dist_G(v,w)$ is infinite if $v$ and $w$ are in different connected components of $G$. For any $R\subseteq V(G)$, the \defin{diameter} of $R$ in $G$ is $\diam_G(R):=\max\{\dist_G(v,w):v,w\in R\}$.
\begin{obs}\label{diameter_spread}
Let $G$ be a graph, let $R\subseteq V(G)$, and let $\mathcal{L}$ be a layering of $G$. Then there exists a layer $L\in\mathcal{L}$ such that $|R\cap L|\geqslant |R|/(\diam_G(R)+1)$. \end{obs}
\begin{proof}
By the definition of layering, the vertices in $R$ are contained in a set of at most $\diam_G(R)+1$ consecutive layers of $\mathcal{L}$. The result then follows from the Pigeonhole Principle. \end{proof}
We also make use of the following basic fact about tree-partitions:
\begin{obs}\label{tree_thingy}
Let $G$ be a graph, let $\mathcal{T}:=(B_x:x\in V(T))$ be a tree-partition of $G$, let $x\in V(T)$, and let $v,w\in N_G(B_x)$ be in the same component of $G-B_x$. Then $T$ contains an edge $xy$ with $v,w\in B_y$. \end{obs}
\begin{proof}
Suppose that $v\in B_y$ and $w\in B_z$ for some $y,z\in V(T)$. Since $v,w\in N_G(B_x)$, $T$ contains the edges $xy$ and $xz$. All that remains is to show that $y=z$. For the purpose of contradiction, assume $y\neq z$. Since $v$ and $w$ are in the same component of $G-B_x$, $G$ contains a path from $v$ to $w$ that avoids all vertices in $B_x$, which implies that $T$ contains a path $P_{yz}$ from $y$ to $z$ that does not include $x$. This is a contradiction since then $P_{yz}$ and the edges $xy$ and $xz$ form a cycle in $T$, but $T$ is a tree. \end{proof}
\subsection{Percolation in Binary Trees}
The \defin{depth} of a vertex $v$ in a rooted tree $T$ is the length of the path $P_T(v)$ from $v$ to the root of $T$. Each vertex $a\in V(P_T(v))$ is an \defin{ancestor} of $v$, and $v$ is a \defin{descendant} of each vertex in $V(P_T(v))$. We say that a set $B\subseteq V(T)$ is \defin{unrelated} if no vertex of $B$ is an ancestor of any other vertex in $B$.
For each $h\in\N$, let $T_h$ denote the complete binary tree of height $h$; that is, the rooted ordered tree with $2^h$ leaves, each having depth $h$ and in which each non-leaf vertex has exactly two children, one \defin{left child} and one \defin{right child}. Note that the ordering of $T_h$ induces an ordering on every unrelated set $B\subseteq V(T_h)$, which we refer to as the \defin{left-to-right ordering}. Specifically, $v\in B$ appears before $w\in B$ in the left-to-right ordering of $B$ if and only if there exists a common ancestor $a$ of both $v$ and $w$ such that the path from $a$ to $v$ contains the left child of $a$ and the path from $a$ to $w$ contains the right child of $a$.
We use the following two percolation-type results for $T_h$.
\begin{lem}\label{one_path}
Let $h\geqslant 1$
and let $S\subseteq V(T_h)$ with $1\leqslant |S|< 2^h$. Then there exists a vertex $v$ of $T_h$ such that
\begin{compactenum}[(i)]
\item the depth of $v$ is at most $\log_2|S|+1$;
\item the parent of $v$ is in $S\cup\{r\}$, where $r$ is the root of $T_h$; and
\item $T_h-S$ contains a path from $v$ to a leaf of $T_h$.
\end{compactenum} \end{lem}
\begin{proof}
The proof is by induction on $h$. When $h=1$, $|S|\leqslant 1$. In particular, at least one child $v$ of $r$ is not in $S$. The depth of $v$ is $1\leqslant \log_2|S|+1$, so $v$ satisfies (i). The parent of $v$ is $r\in S\cup\{r\}$, so $v$ satisfies (ii). $T_1-S$ contains a length-$0$ path from $v$ to itself (a leaf of $T_1$), so $v$ satisfies (iii).
For $h\geqslant 2$, let $\ell$ be the maximum integer such that $S\cup\{r\}$ contains all $2^\ell$ vertices of depth $\ell$. Observe that $2^\ell \leqslant |S|$, so $\ell \leqslant \log_2 |S| < h$. Let $L$ be the set of $2^{\ell+1}$ depth-$(\ell+1)$ vertices in $T_h$. By the Pigeonhole Principle some vertex $r'\in L$ is the root of a complete binary tree $T'$ with root $r'$ of height $h-\ell-1$ with $|S\cap V(T')| \leqslant |S|/2^{\ell+1} < 2^{h-\ell-1}$.
If $V(T')\cap S=\emptyset$ then choosing $v:=r'$ satisfies the requirements of the lemma. Otherwise, by applying induction on $T'$ and $S':=S\cap V(T')$ we obtain a vertex $v'$ of depth at most $\ell+1+\log_2(|S'|)+1 \leqslant \log_2|S|+1$ whose parent is in $S\cup\{r'\}$, and such that $T_h-S$ contains a path from $v'$ to a leaf of $T_h$. Thus $v'$ satisfies requirements (i) and (iii). If the parent of $v'$ is in $S$ then $v'$ also satisfies requirement (ii) and the lemma is proven, with $v:=v'$. Otherwise, the parent of $v'$ is $r'$, in which case $r'$ satisfies requirements (i)--(iii) and we are done, with $v:=r'$. \end{proof}
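As a small illustration of \cref{one_path} (not needed in what follows), take $h=2$ and let $S$ consist of both depth-$1$ vertices, so $|S|=2$. Any leaf $v$ of $T_2$ satisfies the conclusion: its depth is $2=\log_2|S|+1$, its parent lies in $S$, and the length-$0$ path from $v$ to itself avoids $S$.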
\begin{lem}\label{two_paths}
Let $h\geqslant 2$
and let $S\subseteq V(T_h)$ with $1\leqslant |S|< 2^{h-1}$. Then there exist two unrelated vertices $v_1$ and $v_2$ of $T_h$ such that, for each $i\in\{1,2\}$:
\begin{compactenum}[(i)]
\item the depth of $v_i$ is at most $\log_2|S|+2$;
\item the parent of $v_i$ is in $S\cup\{r\}$, where $r$ is the root of $T_h$; and
\item $T_h-S$ contains a path from $v_i$ to a leaf of $T_h$.
\end{compactenum} \end{lem}
\begin{proof}
Let $T_1$ and $T_2$ be the two maximal subtrees of $T_h$ rooted at the children $r_1$ and $r_2$, respectively of $r$. (Each of $T_1$ and $T_2$ is a complete binary tree of height $h-1$.) For each $i\in\{1,2\}$, let $S_i:=S\cap V(T_i)$. If $S_i=\emptyset$ then we choose $v_i=r_i$ and this satisfies requirements (i)--(iii). If $S_i\neq\emptyset$ then, since $|S_i|\leqslant |S|< 2^{h-1}$, we can apply \cref{one_path} to $T_i$ and $S_i$ to obtain a vertex $v_i'\in V(T_i)$ of depth at most $1+\log_2|S_i|+1 \leqslant \log_2 |S| + 2$ and such that $T_h-S$ contains a path from $v_i'$ to a leaf of $T_h$. Therefore, $v_i'$ satisfies (i) and (iii). Furthermore, the parent of $v_i'$ is in $S\cup\{r_i\}$. If the parent of $v_i'$ is in $S$, then $v_i'$ also satisfies (ii), so we set $v_i:=v_i'$. If the parent of $v_i'$ is not in $S$, then the parent of $v_i'$ is $r_i\not\in S$ and $r_i$ satisfies (i)--(iii), so we set $v_i:=r_i$. Finally, since $v_1\in V(T_1)$ and $v_2\in V(T_2)$, $v_1$ and $v_2$ are unrelated. \end{proof}
\subsection{A Connectivity Lemma}
The \defin{$x\times y$ grid} $G_{x\times y}$ is the graph with vertex-set $V(G_{x\times y}):=\{1,\ldots,x\}\times\{1,\ldots,y\}$ and that contains an edge with endpoints $(x_1,y_1)$ and $(x_2,y_2)$ if and only if $|x_1-x_2|+|y_1-y_2|=1$. An edge of $G_{x\times y}$ is \defin{horizontal} if its two endpoints agree in the second (y) coordinate. For each $i\in\{1,\ldots,x\}$, the vertex-set $\{i\}\times\{1,\ldots,y\}$ is called \defin{column $i$} of $G_{x\times y}$. A set $C$ of columns is \defin{consecutive} if $G_{x\times y}[\cup C]$ is connected.
\begin{lem}\label{grid_connectivity}
Let $x,y,p\geqslant 1$ be integers, let $G$ be a graph obtained by subdividing horizontal edges of $G_{x\times y}$, and let $S\subseteq V(G)\setminus V(G_{x\times y})$ be a set of subdivision vertices of size $|S|< py$. Then some component of $G-S$ contains at least $x/p$ consecutive columns of $G_{x\times y}$. \end{lem}
\begin{proof}
For each $i\in\{1,\ldots,x-1\}$, in order to separate column $i$ from column $i+1$, $S$ must contain at least $y$ subdivision vertices on the horizontal edges between columns $i$ and $i+1$. Since $|S|< py$, this implies that there are at most $p-1$ values of $i\in\{1,\ldots,x-1\}$ for which columns $i$ and $i+1$ are in different components of $G-S$. These at most $p-1$ values of $i$ partition $\{1,\ldots,x\}$ into at most $p$ intervals, at least one of which contains at least $x/p$ consecutive columns that are contained in a single component of $G-S$. \end{proof}
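As a numerical illustration, take $x=12$, $y=4$ and $p=3$, so $|S|\leqslant 11$: separating two consecutive columns requires at least $4$ subdivision vertices, so at most two of the eleven gaps between consecutive columns can be separated, and the resulting at most three intervals of columns include one with at least $12/3=4$ consecutive columns lying in a single component of $G-S$.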
\subsection{The Proof}
Recall that, for each $h\in\N$, $G_{h}$ is the planar supergraph of the complete binary tree $T_h$ of height $h$ obtained by adding the edges of a path $P_i$ that contains all vertices of depth $i$, in left-to-right order, for each $i\in\{1,\ldots,h\}$. Since $T_h$ is a spanning subgraph of $G_h$, the \defin{depth} of a vertex $v$ in $G_h$ refers to the depth of $v$ in $T_h$. The \defin{height} of a depth-$d$ vertex of $T_h$ is $h-d$.
We are now ready to prove the following result that, combined with \cref{ding_translation}, is sufficient to prove \cref{main_thm}:
\begin{thm}\label{main_thm_tree}
For every $h\in\N$, every tree $T$, and every path $P$, if $G_{h}\subsetcong T\boxtimes P\boxtimes K_c$ then $c\geqslant 2^{\Omega(\sqrt{\log h})}$. \end{thm}
It is worth noting that, unlike \cref{main_thm}, there is no restriction on the maximum degree of the tree $T$.
Before diving into technical details we first sketch our strategy for proving \cref{main_thm_tree}. We may assume that $c \leqslant 2^{\sqrt{\log h}}$, since otherwise there is nothing to prove. Recall \cref{partitions_vs_products}, which states that if $G_h\subsetcong T\boxtimes P\boxtimes K_c$ then $G_h$ has a tree-partition $\mathcal{T}:=\{B_x:x\in V(T)\}$ and a path-partition (that is, layering) $\mathcal{P}:=\{P_y:y\in V(P)\}$ with $|B_x\cap P_y|\leqslant c$ for each $(x,y)\in V(T)\times V(P)$. Since $G_h$ has diameter $2h$, \cref{diameter_spread} then implies that $|B_x|\leqslant c(2h+1)$. This upper bound on $|B_x|$ is used to establish all of the results described in the following paragraph.
\begin{figure}
\caption{The proof of \cref{main_thm_tree}.}
\label{sketch}
\end{figure}
Refer to \cref{sketch}. We will construct a sequence of sets $\mathcal{R}_1,\ldots,\mathcal{R}_{t+1}$ and a sequence of nodes $x_1,\ldots,x_{t+1}$ of $T$, where each $\mathcal{R}_i$ is a family of unrelated sets such that $\cup\mathcal{R}_i\subseteq B_{x_i}$. The first family $\mathcal{R}_1$ has size $q_1\geqslant h/(25c)$ and, for each $i\in\{2,\ldots,t+1\}$, $\mathcal{R}_i$ has size $q_i\geqslant q_1/(10c)^{i-1}-3$. For each $\mathcal{R}_i:=\{R_{i,1},\ldots,R_{i,q_i}\}$, each $R_{i,j}\subseteq V(T)$ is an unrelated set of size $2^{i-1}$ that has a common ancestor $a_{i,j}$ of height at least $h/5$ that is at distance at most $(i-1)(\log_2(ch)+2)$ from every element in $R_{i,j}$. Furthermore, $\{a_{i,1},\ldots,a_{i,q_{i}}\}$ is an unrelated set. These properties imply that $\cup\mathcal{R}_i$ is also an unrelated set.
We do this for some appropriately chosen integer $t\in\Theta(\sqrt{\log h})$ in order to ensure that $q_{t+1}\geqslant 1$, so $\mathcal{R}_{t+1}$ contains at least one part $R$ of size $2^t$.
By \cref{diameter_spread}, there exists some $y \in V(P)$ such that \[
|R\cap P_y| \geqslant \frac{|R|}{\diam_{G_h}(R)+1} \geqslant \frac{2^t}{2t(\log(ch)+2)+1}
= 2^{t-\log_2(t\log(ch))-O(1)} = 2^{\Omega(t)} = 2^{\Omega(\sqrt{\log h})} \enspace . \]
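(To see the middle step concretely: since $c\leqslant 2^{\sqrt{\log h}}$ and $t\in O(\sqrt{\log h})$, we have $2t(\log(ch)+2)+1\in O((\log h)^{3/2})$, so its logarithm is in $O(\log\log h)\subseteq o(t)$.)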
Since $R\subseteq B_{x_{t+1}}$, $|B_{x_{t+1}}\cap P_y|\geqslant 2^{\Omega(\sqrt{\log h})}$. Since $c\geqslant \max\{|B_x\cap P_y|:(x,y)\in V(T)\times V(P)\}$, the assumption that $c\leqslant 2^{\sqrt{\log h}}$ therefore leads to the conclusion that $c\geqslant 2^{\Omega(\sqrt{\log h})}$, which establishes \cref{main_thm_tree}.
We now proceed with the details of the proof outlined above. The next two lemmas will be used to obtain the set $\mathcal{R}_1$ that allows us to start the argument. Informally, the first lemma says that every balanced separator $S$ of $G_h$ must contain a vertex of depth $i$ for each $i\in\{i_0,\ldots,h\}$, where $i_0\in O(\log|S|)$.
\begin{lem}\label{small_depth_separator}
Let $h\in\N$ with $h\geqslant 1$, let $S\subseteq V(G_h)$, $S\neq\emptyset$. If $G_h-S$ has no component with more than $|V(G_h)|/2$ vertices then $S\cap V(P_i)\neq\emptyset$, for each $i\in\{i_0,\ldots,h\}$, where $i_0:=\ceil{\max\{2\log|S|+2, \log(1+(h+2)|S|)-1\}}$. \end{lem}
\begin{proof}
Let $C$ be the vertex-set of a component of $G_h-S$ that maximizes $|C\cap V(P_h)|$. For each $i\in\{0,\ldots,h\}$, let $C_i:=C\cap V(P_i)$ and let $S_i:=S\cap V(P_i)$. We will show that, for each $i\geqslant i_0$, $C_i$ is non-empty but does not contain all $2^i$ vertices in $P_i$. Therefore $S_i\supseteq N_{G_h}(C_i)\cap V(P_i)\neq\emptyset$ for each $i\in\{i_0,\ldots,h\}$.
For each $i\in\{0,\ldots,h-1\}$, the vertices in $C_{i+1}$ are adjacent to at least $|C_{i+1}|/2$ vertices of $P_{i}$, so $|C_i|\geqslant |N_{G_h}(C_{i+1})\cap V(P_i)\setminus S_i| \geqslant |C_{i+1}|/2 - |S_i|$. Iterating this inequality $h-i$ times gives $|C_i|\geqslant |C_h|/2^{h-i}-\sum_{j=i}^{h-1}|S_j|/2^{j-i}\geqslant |C_h|/2^{h-i}-|S|$. The vertices in $S$ partition $V(P_h)\setminus S$ into at most $|S|+1$ connected components. Since $C$ is chosen to maximize $|C_h|$, $|C_h| \geqslant (2^h-|S|)/(|S|+1) > 2^{h}/(|S|+1) - 1$. Therefore,
\begin{equation}
|C_i|\geqslant \frac{|C_h|}{2^{h-i}} - |S| > \frac{2^h/(|S|+1)-1}{2^{h-i}} - |S|
\geqslant 2^{i-\log(|S|+1)}-|S|-1 \geqslant 0
\end{equation}
for $i\geqslant 2\log |S|+2$. Since $i_0\geqslant 2\log |S|+2$, this establishes that $C_i$ is non-empty for each $i\in\{i_0,\ldots,h\}$.
For each $i\in\{0,\ldots,h-1\}$, the vertices in $C_i$ are adjacent to at least $2|C_i|$ vertices of $P_{i+1}$, so $|C_{i+1}|\geqslant 2|C_i|-|S_{i+1}|$. Iterating this $h-i$ times gives:
\begin{equation}
|C_h| \geqslant 2^{h-i}|C_i| - \sum_{j=i+1}^h 2^{h-j}|S_j| \geqslant 2^{h-i}|C_i| - 2^{h-i-1}|S| \enspace . \label{ch_lower_bound}
\end{equation}
Suppose that $|C_{i^*}|=2^{i^*}$ for some $i^*\in\{0,\ldots,h\}$. Then \cref{ch_lower_bound} implies that $|C_h|\geqslant 2^h-2^{h-i^*-1}|S|$. Therefore,
\begin{equation}
|C|
=\sum_{i=0}^h |C_i| \geqslant \sum_{i=0}^h\left(\frac{|C_h|}{2^{h-i}} - |S|\right)
> 2|C_h| - 1 -(h+1)|S|
\geqslant 2^{h+1}-2^{h-i^*}|S|-1 - (h+1)|S| \enspace . \label{crux}
\end{equation}
However, $2^h > |V(G_h)|/2 \geqslant |C|$, and combining this with \cref{crux} gives $2^h > 2^{h+1} - 2^{h-i^*}|S|-1-(h+1)|S|$.
Rewriting this inequality, we get
\begin{equation}
2^h < 2^{h-i^*}|S|+ 1 + (h+1)|S| \enspace . \label{cruxi}
\end{equation}
Multiplying each side of \cref{cruxi} by $2^{i^*-h}$ then gives:
\begin{align*}
2^{i^*} & < |S| + 2^{i^*-h}(1+(h+1)|S|) \\
& \leqslant |S| + 1 + (h+1)|S| & \text{(since $i^*\leqslant h$, so $2^{i^*-h}\leqslant 1$)} \\
& = 1+ (h+2)|S| \enspace .
\end{align*}
Taking the logarithm of each side then gives $i^* < \log_2(1+(h+2)|S|)\leqslant i_0$. This establishes that $|C_i|< 2^i$ for each $i\in\{i_0,\ldots,h\}$ and completes the proof. \end{proof}
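For concreteness, if $|S|=4$ and $h=10$ then $i_0=\ceil{\max\{2\log_2 4+2,\ \log_2(1+12\cdot 4)-1\}}=\ceil{\max\{6,\ \log_2 49-1\}}=6$, so such a set $S$ must intersect $V(P_i)$ for each $i\in\{6,\ldots,10\}$.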
The following lemma shows that every tree-partition of $G_h$ must have a part with a large unrelated set that is far from the leaves of $T_h$ and will be used to obtain our first set $\mathcal{R}_1$.
\begin{lem}\label{startup}
For every $\alpha\in(0,1/4)$, there exists $h_0$ such that the following is true, for all integers $h\geqslant h_0$ and all $c\in[1,h]$. If $\mathcal{T}:=\{B_x:x\in V(T)\}$ is a tree-partition of $G_h$ of width less than $ch$ then there exists a node $x\in V(T)$ and a subset $R\subseteq B_x$ such that
\begin{compactenum}[(i)]
\item $R$ is unrelated;
\item $|R|\geqslant \alpha^2 h/c$; and
\item Each vertex in $R$ has height at least $\alpha h$.
\end{compactenum} \end{lem}
\begin{proof}
It is well-known and easy to show that there exists a node $x$ of $T$ such that $G_h-B_x$ has no component with more than $|V(G_h)|/2$ vertices \cite[(2.6)]{robertson.seymour:graph}. Let $Y$ be the set of vertices in $B_x$ that have height at least $h/4$. By \cref{small_depth_separator}, $|Y|\geqslant 3h/4 - O(\log (ch+1))$.
Let $T_Y$ be the minimal (connected) subtree of $T_h$ that spans $Y$, and let $L$ be the set of leaves of $T_Y$. Observe that $L\subseteq Y$ is an unrelated set. Therefore, $L$ satisfies (i) and, by definition, each vertex in $L$ has height at least $h/4 > \alpha h$, so $L$ satisfies (iii). If $|L|\geqslant \alpha h \geqslant \alpha^2 h/c$ then $L$ also satisfies (ii). In this case, we can take $R:=L$ and we are done. We now assume that $|L|< \alpha h$.
Let $Z$ consist of all vertices in $V(T_h)\setminus V(T_Y)$ whose parents are in $Y\setminus L$. Observe that $Z$ is an unrelated set of vertices each having height at least $h/4$. For each vertex $v$ of $T_Y$, let $d_v$ denote the number of children of $v$ in $T_Y$. Then,
\[
\sum_{v\in Y\setminus L} (d_v-1)
\leqslant \sum_{v\in V(T_Y)\setminus L} (d_v-1)
= |L|-1 \enspace ,
\]
where the equality is a standard fact about rooted trees.
Rewriting this, we get $\sum_{v\in Y\setminus L} {d_v} < |Y\setminus L| + |L| = |Y|$. On the other hand, each $v\in Y\setminus L$ contributes $2-d_v$ vertices to $Z$, so
\[
|Z| = \sum_{v\in Y\setminus L} (2-d_v) \enspace .
\]
Combining these two formulas, we obtain
\[
|Z| \geqslant 2|Y\setminus L| - |Y| = |Y| - 2|L|
\geqslant 3h/4-O(\log(ch+1)) - 2\alpha h
\geqslant h/4-O(\log(ch+1)) \enspace .
\]
Refer to \cref{paths}. For each $r\in Z$, \cref{one_path} applied to the subtree of $T_h$ rooted at $r$ with $S=B_x$ implies that $r$ has a descendant $v$ such that
\begin{inparaenum}[(a)]
\item the parent of $v$ is in $B_x\cup\{r\}$;
\item the height of $v$ is at least $h/4-O(\log(ch+1))$; and
\item $T_h-B_x$ contains a path $P_{v}$ from $v$ to a leaf of $T_h$.
\end{inparaenum}
Form the set $Z'$ using the following rule for each $r\in Z$: If the vertex $v$ described in the preceding paragraph is a child of $r$ then place $r$ in $Z'$, otherwise place $v$ in $Z'$. Since each $r\in Z$ is a child of some vertex in $Y\subseteq B_x$, this ensures that the parent of $v$ is in $B_x$ for each $v\in Z'$. Since $Z$ is an unrelated set and $Z'$ is obtained by replacing each vertex in $Z$ with one of its descendants, $Z'$ is an unrelated set. Since $\alpha < 1/4$, for sufficiently large $h$, $|Z'|\geqslant h/4 - O(\log(ch+1)) \geqslant \alpha h$ and each vertex in $Z'$ has height at least $h/4 - O(\log(ch+1)) \geqslant \alpha h$.
\begin{figure}
\caption{A step in the proof of \cref{startup}.}
\label{paths}
\end{figure}
Now observe that the union of the paths in $\{P_{v}:v\in Z'\}$ and the paths $P_{h-\lceil\alpha h\rceil+1},\ldots,P_{h}$ contains a subgraph $G'$ isomorphic to a graph that can be obtained from the grid $G_{\lceil \alpha h\rceil\times\lceil \alpha h\rceil}$ by subdividing horizontal edges. Since $B_x$ does not contain any vertex of $P_{v}$ for any $v\in Z'$, $B_x\cap V(G')$ contains only vertices corresponding to subdivision vertices. Therefore, by \cref{grid_connectivity}, some component of $G_h-B_x$ contains a subset $R\subseteq Z'$ of size at least $\alpha^2 h/c$. Each element in $R$ has a parent in $B_x$. By \cref{tree_thingy} some neighbour $y$ of $x$ in $T$ has a bag $B_y$ that contains all of $R$. This completes the proof. \end{proof}
A set $\mathcal{R}:=\{R_1,\ldots,R_q\}$ of subsets of $V(T_h)$ is \defin{$(k,\ell,m)$-compact} if it has the following properties:
\begin{compactenum}
\item For each $i\in\{1,\ldots,q\}$, $R_i$ is unrelated and $|R_i|\geqslant k$.
\item For each $i\in\{1,\ldots,q\}$ there exists a common ancestor $a_i$ of $R_i$ such that $\dist_{T_h}(v,a_i)\leqslant\ell$ for each $v\in R_i$.
\item $a_1,\ldots,a_q$ are unrelated and each has height at least $m$. \end{compactenum}
This definition has the following implications: \begin{inparaenum}[(i)]
\item $\cup \mathcal{R}$ is an unrelated set; and
\item If $a_i$ precedes $a_j$ in the left-to-right ordering of $\{a_1,\ldots,a_q\}$ then every element of $R_i$ precedes every element of $R_j$ in the left-to-right order of $\cup\mathcal{R}$. \end{inparaenum} We say that a vertex $v$ of $T_h$ is \defin{compatible} with $S\subseteq V(T_h)$ if the parent of $v$ is in $S$ and $T_h-S$ contains a path from $v$ to a leaf of $T_h$. A $(k,\ell,m)$-compact set $\mathcal{R}$ is \defin{compatible} with $S$ if each vertex in $\cup\mathcal{R}$ is compatible with $S$.
\begin{lem}\label{compatible_set}
Let $\mathcal{R}:=\{R_1,\ldots,R_q\}$ be a $(k,\ell,m)$-compact set, and let $S\supseteq \cup\mathcal{R}$ have size $1\leqslant |S|< 2^{m-\ell-2}$. Then, there exists a $(2k,\ell + \log_2|S|+2,m)$-compact set $\mathcal{R}':=\{R_1',\ldots,R_q'\}$ that is compatible with $S$. \end{lem}
\begin{proof}
For each $i\in\{1,\ldots,q\}$ and each $r\in R_i$, replace $r$ with the descendants $v_1$ and $v_2$ of $r$ described in \cref{two_paths} (applied to the subtree of $T_h$ rooted at $r$) and call the resulting set $R_i'$. Then $|R_i'|=2|R_i|\geqslant 2k$ and $\dist_{T_h}(v,a_i)\leqslant \ell+\log_2|S|+2$ for each $v\in R_i'$, where $a_i$ is the common ancestor of $R_i$ in the definition of $(k,\ell,m)$-compact. Moreover, by \cref{two_paths}, the parent of each such descendant lies in $S$ (since $r\in\cup\mathcal{R}\subseteq S$) and $T_h-S$ contains a path from it to a leaf of $T_h$, so each vertex of $\cup\mathcal{R}'$ is compatible with $S$. Therefore $\mathcal{R}':=\{R_1',\ldots,R_q'\}$ is a $(2k,\ell + \log_2|S|+2,m)$-compact set that is compatible with $S$. \end{proof}
The next lemma is the last ingredient in the proof of \cref{main_thm}. \begin{lem}\label{big_lemma}
Let $\mathcal{T}:=\{B_x:x\in V(T)\}$ be a tree-partition of $G_h$ and let $\mathcal{P}:=\{P_y:y\in V(P)\}$ be a path-partition of $G_h$. Then there exists $(x,y)\in V(T)\times V(P)$ such that $|B_x\cap P_y| \geqslant 2^{\Omega(\sqrt{\log h})}$. \end{lem}
\begin{proof}
Let $x\in V(T)$ be a node that maximizes $|B_x|$. Then $\diam_{G_h}(B_x)\leqslant\diam_{T_h}(B_x) \leqslant 2h$ so, by \cref{diameter_spread}, $|B_x\cap P_y|\geqslant |B_x|/(2h+1)$ for some $y\in V(P)$. If $|B_x|\geqslant h2^{\sqrt{\log_2 h}}$ then there is nothing more to prove, so we may assume that $|B_x| < ch$ where $c:= 2^{\sqrt{\log_2 h}}$. Note that $c\geqslant 1$ for every $h\geqslant 1$.
By \cref{startup}, with $\alpha:= 1/5$, $T$ contains a node $x_1$ such that $B_{x_1}$ contains an unrelated set $R$ of size $q_1:=|R|\geqslant h/(25c)$, where each vertex in $R$ has height at least $m:=m_1:=h/5$. Let $\mathcal{R}_1:=\{\{v\}:v\in R\}$. By definition $\mathcal{R}_1$ is a $(1,0,m)$-compact set. $\mathcal{R}_1$ will be the first in a sequence of sets $\mathcal{R}_1,\ldots,\mathcal{R}_{t+1}$, where $t$ will be fixed below. For each $i\in\{1,\ldots,t+1\}$, $\mathcal{R}_i$ will satisfy the following properties:
\begin{enumerate}[(a)]
\item $\mathcal{R}_i$ is a $(2^{i-1},(i-1)(\log_2(ch)+2),m)$ compact set. \label{ri_compact}
\item $q_i:=|\mathcal{R}_i|$, with $q_i > q_{i-1}/(10c) - 2$ if $i\geqslant 2$. \label{ri_size}
\item There exists $x_i\in V(T)$ such that $\cup\mathcal{R}_i\subseteq B_{x_i}$. \label{ri_containment}
\end{enumerate}
Note that, by a simple inductive argument, one can show that
\[
q_i > q_1/(10c)^{i-1} - 3 \enspace .
\]
Indeed, the base case $i=1$ holds trivially, and for the inductive case ($i\geqslant 2$) we have $q_i > q_{i-1}/(10c) - 2 > (q_1/(10c)^{i-2} - 3)/(10c) -2 > q_1/(10c)^{i-1} - 3$.
It is straightforward to verify that $\mathcal{R}_1$ satisfies (\ref{ri_compact})--(\ref{ri_containment}).
Let $t:= \min\{t_1,t_2\}$ where $t_1:=\floor{\log_{10c}(q_1/3)}$ and $t_2:=\floor{h/(10(\sqrt{\log_2 h} + \log_2 h) + 2)}$. Observe that, since $c=2^{\sqrt{\log_2 h}}$, $t_1\geqslant \log_{10c}(h/(75c))-1\in\Omega(\log_c h)\subseteq \Omega(\sqrt{\log h})$ and that $t_2\geqslant h/(10(\sqrt{\log_2 h} + \log_2 h) + 2)-1\in\Omega(h/\log h)$. Therefore $t\in\Omega(\sqrt{\log h})$. These specific values of $t_1$ and $t_2$ are chosen for the following reasons:
\begin{compactenum}[(i)]
\item Since $t\leqslant t_1$, $q_{t+1} > q_1/(10c)^{t_1} - 3 \geqslant 0$, so $q_{t+1}\geqslant 1$.
\item Since $t\leqslant t_2$, $m_i \geqslant h/5-t_2(\floor{\log_2(ch)}+2) \geqslant h/10$ for each $i\in\{2,\ldots,t+1\}$.
\end{compactenum}
We now describe how to obtain $\mathcal{R}_{i+1}$ from $\mathcal{R}_i$ for each $i\in\{1,\ldots,t\}$. By \cref{compatible_set} (applied to $\mathcal{R}:=\mathcal{R}_i$ and $S:=B_{x_i}$), $T_h$ contains a $(2^i,i(\log_2(ch)+2),m)$-compact set $\mathcal{R}_{i+1}^+$ of size $q_i$ that is compatible with $B_{x_i}$. For each $v\in\cup\mathcal{R}_{i+1}^+$, $v$ has height at least $m_{i+1}:=m_i-(\floor{\log_2(ch)}+2)\geqslant h/5-i(\floor{\log_2(ch)}+2)$. Therefore $\mathcal{R}_{i+1}^+$ satisfies (\ref{ri_compact}),
but does not necessarily satisfy (\ref{ri_containment}). Next we show how to extract $\mathcal{R}_{i+1}\subseteq\mathcal{R}_{i+1}^+$ that also satisfies (\ref{ri_size}) and (\ref{ri_containment}).
For each $v\in \cup\mathcal{R}_{i+1}^+$, $T_h-B_{x_i}$ contains a path $P_v$ from $v$ to a leaf of $T_h$. The union of the paths in $P_{h-m_{i+1}},\ldots,P_{h}$ and the paths in $\mathcal{C}_i:=\{P_v:v\in\cup\mathcal{R}_{i+1}^+\}$ contains a subgraph $G'$ isomorphic to a graph that can be obtained from $G_{2^{i}q_i\times m_{i+1}}$ by subdividing horizontal edges. By \cref{grid_connectivity} applied to $G:=G'$ with $S:=B_{x_i}$ and $p:=ch/m_{i+1}$, some component $X'$ of $G'-B_{x_i}$ contains $q_i'\geqslant 2^{i}q_im_{i+1}/(ch)\geqslant 2^iq_i/(10c)$ consecutive columns $C_1,\ldots,C_{q_i'}$ of $G'$. The component $X'$ is contained in some component $X$ of $G_h-B_{x_i}$.
Since $\cup\mathcal{R}_{i+1}^+$ is unrelated, it has a left-to-right order. This order defines a total order $\prec$ on the paths in $\mathcal{C}_i$, in which $P_v\prec P_w$ if and only if $v$ precedes $w$ in left-to-right order. The resulting total order $(\prec,\mathcal{C}_i)$ corresponds to the order of the columns in $G'$ and each part in $\mathcal{R}_{i+1}^+$ corresponds to $2^i$ consecutive columns of $G'$. There are at most two parts $R\in\mathcal{R}_{i+1}^+$ such that $0 < |R\cap (C_1\cup\cdots\cup C_{q_i'})| < |R|$. These two parts account for at most $2(2^{i}-1)$ of the columns in $C_1,\ldots, C_{q_i'}$. Therefore, the number of parts of $\mathcal{R}_{i+1}^+$ completely contained in $C_1\cup\cdots\cup C_{q_i'}$ is at least
\begin{align*}
(q_i'-(2^{i+1}-2))/2^i & > q_i/(10c) - 2 \enspace .
\end{align*}
We define $\mathcal{R}_{i+1}\subseteq\mathcal{R}_{i+1}^+$ as the set of parts in $\mathcal{R}_{i+1}^+$ that are completely contained in $C_1\cup\cdots\cup C_{q_i'}$. The preceding calculation shows that $\mathcal{R}_{i+1}$ satisfies (\ref{ri_size}). Since $\cup\mathcal{R}_{i+1}$ is contained in a single component $X$ of $G_h-B_{x_i}$ and each vertex in $\cup\mathcal{R}_{i+1}$ has a neighbour (its parent in $T_h$) in $B_{x_i}$, \cref{tree_thingy} implies that $T$ contains an edge $x_{i}x_{i+1}$ with $\cup\mathcal{R}_{i+1}\subseteq B_{x_{i+1}}$. Therefore $\mathcal{R}_{i+1}$ satisfies (\ref{ri_containment}).
This completes the definition of $\mathcal{R}_1,\ldots,\mathcal{R}_{t+1}$. Properties~(\ref{ri_compact})--(\ref{ri_containment}) imply that $\mathcal{R}_{t+1}$ is a $(2^t,t(\log_2(ch)+2),m)$-compact set of size $q_{t+1} \geqslant 1$ and $\cup\mathcal{R}_{t+1}\subseteq B_{x_{t+1}}$ for some $x_{t+1}\in V(T)$.
Let $R$ be one of the sets in $\mathcal{R}_{t+1}$. Since $\mathcal{R}_{t+1}$ is $(2^t,t(\log_2(ch)+2),m)$-compact, $|R|\geqslant 2^t$ and all vertices in $R$ have a common ancestor $a$ whose distance to each element of $R$ is at most $t(\log_2(ch)+2)$. Therefore, $\diam_{G_h}(R)\leqslant\diam_{T_h}(R)\leqslant 2t(\log_2(ch)+2)$. By \cref{diameter_spread}, there exists some $P_y\in\mathcal{P}$ with
\begin{align*}
|B_{x_{t+1}}\cap P_y|
& \geqslant |R\cap P_y| \\
& \geqslant \frac{|R|}{\diam_{G_h}(R)+1} \\
& \geqslant \frac{2^t}{2t(\log_2(ch)+2)+1} \\
& = 2^{t-O(\log\log h)} \\
& = 2^{\Omega(\sqrt{\log h})-O(\log\log h)}
= 2^{\Omega(\sqrt{\log h})} \enspace . \qedhere
\end{align*} \end{proof}
\begin{proof}[Proof of \cref{main_thm_tree}]
Suppose $G_h\subsetcong T\boxtimes P\boxtimes K_c$ for some tree $T$ and some path $P$. By \cref{partitions_vs_products}, $G_h$ has a $T$-partition $\mathcal{T}:=\{B_x:x\in V(T)\}$ and a path-partition $\mathcal{P}:=\{P_y:y\in V(P)\}$ such that $|B_x\cap P_y|\leqslant c$ for each $(x,y)\in V(T)\times V(P)$. By \cref{big_lemma}, there exists $(x,y)\in V(T)\times V(P)$, such that $|B_x\cap P_y| \geqslant 2^{\Omega(\sqrt{\log h})}$. Combining these upper and lower bounds on $|B_x\cap P_y|$ implies that $c\geqslant 2^{\Omega(\sqrt{\log h})}$. \end{proof}
\begin{proof}[Proof of \cref{main_thm}]
Let $n:=|V(G_h)|=2^{h+1}-1$. Suppose that $G_h\subsetcong H\boxtimes P\boxtimes K_c$ for some graph $H$ of treewidth $t$ and maximum degree $\Delta$, some path $P$ and some integer $c$. Then, by \cref{dingy,partitions_vs_products}, $G_h\subsetcong T\boxtimes P\boxtimes K_{24 c \Delta (t+1)}$ for some tree $T$. By \cref{main_thm_tree}, $24 c \Delta (t+1) \in \Omega(2^{\sqrt{\log h}})$, so $c\Delta t \geqslant 2^{\Omega(\sqrt{\log h})} = 2^{\Omega(\sqrt{\log\log n})}$. \end{proof}
\section{Open Problems}
We know that every planar graph $G$ is contained in a product of the form $H\boxtimes P\boxtimes K_3$ where $\tw(H)\leqslant 3$ \cite{dujmovic.joret.ea:planar}. \cref{main_thm_tree} implies that, for every $c$, there exists a planar graph of maximum degree $5$ (namely $G_h$ for sufficiently large $h$) that is not contained in any product of the form $T\boxtimes P\boxtimes K_c$ where $T$ is a tree and $P$ is a path. This leaves the following open problem:
\begin{quote}
Is every planar graph $G$ of maximum degree $\Delta$ contained in a product of the form $H\boxtimes P\boxtimes K_c$ where the treewidth of $H$ is $2$, $P$ is a path, and $c$ is some function of $\Delta$? \end{quote}
\cref{two_tree} and \cref{partitions_vs_products} show that $G_h$ is a subgraph of $H\boxtimes P$ where $H$ has treewidth $2$ (and is even outerplanar) and $P$ is a path. Our proof breaks down in this case because, unlike tree-partitions, outerplanar-partitions do not satisfy \cref{tree_thingy}. Indeed, the outerplanar-partition illustrated in \cref{two_tree} contains a part $B_x$ and a component $X$ of $G_h-B_x$ with $|N_{G_h}(B_x)\cap V(X)|=h$. In a tree-partition this would imply that $|N_{G_h}(B_x)\cap V(X)\cap B_y|=h$ for some other part $B_y$ of the partition. In contrast, for the outerplanar-partition shown in \cref{two_tree}, $|N_{G_h}(B_x)\cap V(X)\cap B_y|\leqslant 1$ for each $y\in V(H)$.
\begin{figure}
\caption{An outerplanar-partition $\mathcal{H}$ and a path-partition $\mathcal{P}$ of $G_5$ for which $|B\cap P|\leqslant 1$ for each $B\in\mathcal{H}$ and $P\in\mathcal{P}$.}
\label{two_tree}
\end{figure}
\end{document} |
\begin{document}
\title{Propagation of spatially entangled qudits through free space}
\date{\today}
\author{G. Lima} \author{Leonardo Neves} \author{Ivan F. Santos} \affiliation{Departamento de F\'{\i}sica, Universidade Federal de Minas Gerais, Caixa Postal 702, Belo~Horizonte,~MG 30123-970, Brazil.}
\author{J. G. Aguirre G\'omez} \affiliation{Center for Quantum Optics and Quantum Information, Departamento de Fisica, Universidad de Concepci\'on, Casilla 160-C, Concepci\'on, Chile.}
\author{C. Saavedra} \affiliation{Center for Quantum Optics and Quantum Information, Departamento de Fisica, Universidad de Concepci\'on, Casilla 160-C, Concepci\'on, Chile.}
\author{S. P\'adua} \email{[email protected]} \affiliation{Departamento de F\'{\i}sica, Universidade Federal de Minas Gerais, Caixa Postal 702, Belo~Horizonte,~MG 30123-970, Brazil.}
\pacs{03.67.Mn, 03.67.Hk}
\begin{abstract}
We show the propagation of entangled states of high-dimensional quantum systems. The qudit states were generated using the transverse correlation of the twin photons produced by spontaneous parametric down-conversion. Their free-space distribution was performed at the laboratory scale, and the propagated states maintained a high fidelity to their original form. The use of entangled qudits allows an increase in the quantity of information that can be transmitted and may also guarantee more privacy for the communicating parties. Therefore, studies of the propagation of entangled qudit states are important for the effort of building quantum communication networks.
\end{abstract}
\maketitle
\section{Introduction}
Most applications in quantum communication, such as teleportation \cite{Bennett} and quantum cryptography \cite{Ekert}, rely on the assumption that the communicating parties are capable of transmitting entangled particles between themselves. Because of the practical potential of implementing these applications over distant locations, the propagation of entangled states of qubits has been the theme of recent studies. The first remarkable work used optical-fiber links to send energy-time entangled qubits to receivers separated by more than $10 \, km$ \cite{Gisin1}. A test of a Bell inequality \cite{Bell} showed that the two-photon state was still entangled, and it was the first evidence that quantum correlations could be maintained over significant distances. Another interesting work concerned the free-space distribution of polarization-entangled qubits through the atmosphere \cite{Zeilinger1}. As it was emphasized in that paper, ``... one of the benefits of a free-space distribution of quantum entanglement is the possibility of bridging large distances by additional use of space infrastructure...". The observers were separated by $600 \, m$, and the quality of the entanglement of the propagated state was guaranteed by a violation of Bell's inequality by more than four standard deviations.
Even though promising new experiments have succeeded in propagating entangled qubits over longer distances \cite{Gisin2,Zeilinger2}, it has been demonstrated theoretically by E. Waks \emph{et al.} \cite{Waks} that, due to channel losses and dark counts, the communication length cannot surpass the order of $100 \, km$ when using entangled photons and joint measurements. For this reason we believe that mastering the techniques to create and transmit entangled photons lying in a $D$-dimensional ($D \geq 3$) Hilbert space will be a crucial step in the near future. Such photons allow an increase in the quantity of information that can be transmitted per pair shared and will therefore require less effort from quantum repeaters when transmitting information on a global scale. Another advantage is that the use of entangled qudits may increase the security of entanglement-based quantum cryptography protocols against certain types of eavesdropping attacks \cite{Durt}.
In this work, we report the experimental free-space propagation of two entangled 4-dimensional qudits, or ququarts. Following the studies developed in Refs. \cite{Leonardo,Glima,Boyd}, the entangled ququart state was generated by using the transverse spatial correlation of the photon pairs (\emph{biphotons}) produced by spontaneous parametric down-conversion (SPDC) and two four-slit apertures through which they were transmitted. The propagation was performed at the laboratory scale, and the observed propagated state had a high fidelity to its original form. The presence of interference when the two photons are detected in coincidence is used as an experimental signature showing that the state of the propagated ququarts is entangled, and evidence of the good quality of the entanglement is discussed.
\section{Theory}
In Ref. \cite{Leonardo}, it was shown that the state of the biphotons when they are transmitted through generic apertures is
\begin{equation} \label{psislits} \ket{\Psi}=\itg{q_{1}}\itg{q_{2}} \,\,\,
\ce{F}(q_{1},q_{2})\ket{1q_{1}}\ket{1q_{2}}, \end{equation} with the biphoton amplitude given by
\begin{eqnarray} \label{F} \ce{F}(q_{1},q_{2}) & \propto \!\!\!\! & \itg{x_{1}}\itg{x_{2}} \,\,\, e^{i\frac{k}{8z_{A}}(x_{2}-x_{1})^{2}} e^{-i(q_{1}x_{1} + q_{2}x_{2})} \nonumber \\[2mm] & & \times A_{1}(x_{1}) A_{2}(x_{2}) W\!\bm{(}{\scriptstyle\frac{1}{2}}(x_{1}+x_{2});z_{A}\bm{)}, \end{eqnarray} where $q_{j}$ and $x_{j}$ are the wave vector and position transverse components, respectively, of the down-converted photons in modes $j=1,2$. $A_{j}(x_{j})$ is the transmission function of the aperture in mode $j$ and $W(\xi ;z_{A})$ is the pump beam transverse field profile at the plane of the apertures ($z=z_{A}$).
We consider the apertures through which the twin photons are sent to be two identical four-slits. The separation between two consecutive slits is $d$ and $a$ is the slit half-width. If the pump beam transverse profile, $W(\xi ;z_{A})$, is non-vanishing only in a small region of space, it can be experimentally demonstrated that Eq.~(\ref{psislits}) becomes \cite{Glima}
\begin{equation} \label{qudits} \ket{\Psi} = \frac{1}{2} \sum_{l=-\frac{3}{2}}^{\frac{3}{2}}
e^{ik\frac{d^{2}l^{2}}{2z_{A}}} \;
\ket{l}\lsub{1} \otimes \ket{-l}\lsub{2}, \end{equation} where
\begin{equation} \label{base} \ket{l}\lsub{j} \equiv \sqrt{\frac{a}{\pi}}
\itgf{q_{j}} e^{-iq_{j}ld}\ensuremath{\mbox{\hspace{1.3pt}sinc}\,}(q_{j}a)\ket{1q_{j}}. \end{equation} The $\{\,\ket{l}\lsub{j}\}$ states represent the photon in mode $j$ transmitted by the slit $l$ and satisfy $\lsub{j}\braket{l}{l'}\lsub{j}=\delta_{ll'}$. The two-photon state in Eq.~(\ref{qudits}) is a maximally entangled state of two ququarts, where we can see that, when the photon in mode 1 is transmitted by the slit $l$ the photon in mode 2 will pass through the symmetrically opposite slit $-l$.
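For concreteness, writing out the sum in Eq.~(\ref{qudits}) term by term gives
\begin{eqnarray}
\ket{\Psi} & = & \frac{1}{2}\, e^{ik\frac{9d^{2}}{8z_{A}}} \left( \ket{\scriptstyle -\frac{3}{2}}\lsub{1} \otimes \ket{\scriptstyle \frac{3}{2}}\lsub{2} + \ket{\scriptstyle \frac{3}{2}}\lsub{1} \otimes \ket{\scriptstyle -\frac{3}{2}}\lsub{2} \right) \nonumber \\
& & +\, \frac{1}{2}\, e^{ik\frac{d^{2}}{8z_{A}}} \left( \ket{\scriptstyle -\frac{1}{2}}\lsub{1} \otimes \ket{\scriptstyle \frac{1}{2}}\lsub{2} + \ket{\scriptstyle \frac{1}{2}}\lsub{1} \otimes \ket{\scriptstyle -\frac{1}{2}}\lsub{2} \right), \nonumber
\end{eqnarray}
so that, up to a global phase, only the relative phase $e^{ikd^{2}/z_{A}}$ between the $|l|=\frac{3}{2}$ and $|l|=\frac{1}{2}$ terms is physically relevant.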
Now we want to show that the state of Eq.~(\ref{qudits}) can be propagated through free space. The biphotons propagate through two independent channels containing distinct lenses with focal lengths $f_1$ and $f_2$ (Fig.~(\ref{fig:channels})). These lenses are placed at a distance $z_{L_i}$ from their respective apertures. We calculated the two-photon state transmitted by generic apertures and propagated through these channels to the planes of image formation ($z_{I_i}$).
\begin{figure}
\caption{ Channels for the free-space propagation. $A_1$ and $A_2$ are generic apertures. $L_1$ and $L_2$ are convergent lenses with focal lengths $f_1$ and $f_2$, respectively. $A'_1$ and $A'_2$ are the images of the apertures.}
\label{fig:channels}
\end{figure}
For simplicity, the conditions used for image formation are $z_{I_i} - z_{L_i} = z_{L_i} - z_{A} = 2 f_i$. To obtain the image state, a general form for it must be assumed
\begin{equation} \label{psiimage} \ket{\Psi}_{Im}=\itg{q_{1}}\itg{q_{2}} \,\,\,
\ce{I}(q_{1},q_{2})\ket{1q_{1}}\ket{1q_{2}}. \end{equation}
By calculating the coincidence detection amplitude \cite{Mandel} of the biphotons at the planes of image formation using two different methods, we could establish the form of $\ce{I}(q_{1},q_{2})$. The first calculation considered the state of Eq.~(\ref{psislits}) and the electric-field operators describing the evolution of the photons through their channels. The second method used the state of Eq.~(\ref{psiimage}) and the expression for the electric-field operator at the point of image formation. Matching their results, we obtained
\begin{eqnarray} \label{psiimage2} \ce{I}(q_{1},q_{2})&\propto & \itg{q'_{1}}\itg{q'_{2}}\,\,\,
\ce{F}(q'_{1},q'_{2}) \nonumber \\
& &\times e^{-if_{1}(q_{1}+q'_{1})^{2}/{2k}} e^{-if_{2}(q_{2}+q'_{2})^{2}/{2k}}. \end{eqnarray}
When the apertures through which the twin photons are transmitted are the two identical four-slits described above, the function $\ce{F}(q_{1},q_{2})$ is given by \cite{Leonardo}
\begin{equation} \ce{F}(q_{1},q_{2}) \propto \sum_{l=-\frac{3}{2}}^{\frac{3}{2}} e^{ik\frac{d^{2}l^{2}}{2z_{A}}} e^{-iq_{1}ld}\ensuremath{\mbox{\hspace{1.3pt}sinc}\,} (q_{1}a) e^{iq_{2}ld}\ensuremath{\mbox{\hspace{1.3pt}sinc}\,} (q_{2}a). \label{eq:fend} \end{equation}
Thus, inserting Eq.~(\ref{eq:fend}) into Eqs.~(\ref{psiimage2}) and (\ref{psiimage}) will give the state of the propagated ququarts
\begin{equation} \label{Imqudits} \ket{\Psi}_{Im} = \frac{1}{2} \sum_{l=-\frac{3}{2}}^{\frac{3}{2}}
e^{ik\frac{d^{2}l^{2}}{2z_{A}}} \;
\ket{-l}\lsub{1} \otimes \ket{l}\lsub{2}, \end{equation} which has the same form as the two-photon state at the plane of the four-slits, Eq.~(\ref{qudits}), showing that this state can be propagated through free space. A more important result can be obtained by using Eq.~(\ref{psiimage}) and Eq.~(\ref{psiimage2}) to show that the general state of the twin photons after transmission through generic apertures (see Eq.~(\ref{psislits})) will always be reconstructed at the planes of their images. It is worth mentioning that the theory does not require the use of identical lenses, which means that the receivers of the entangled qudits can be at different distances from the apertures (the source).
\section{Experimental setup and results}
The setup represented in Fig.~\ref{fig:setup} was used to experimentally demonstrate the free-space propagation of the entangled ququart state described by Eq.~(\ref{qudits}). A 5-mm-long $\beta$-barium borate crystal is used to generate type-II parametric down-conversion luminescence when pumped by a 100~mW pulsed laser beam. Down-converted photons with the same wavelength ($\lambda = 826$~nm) are selected using interference filters. Two identical four-slits ($A_{1}$ and $A_{2}$) are placed in their exit paths at the same distance $z_{A}= 200$~mm from the crystal ($z=0$). The slit width is $2a\approx0.09$~mm and the distance between two consecutive slits is $d\approx0.17$~mm. To guarantee that the function $W(\xi ;z_{A})$ is non-vanishing only in a small region of space, the pump beam transverse profile was focused at the plane of these apertures. After being transmitted by the four-slits, the biphotons propagate through free space and through two identical lenses ($L_{1}$ and $L_{2}$), with focal length $f = 150$~mm, placed at a distance $z_{L}= 500$~mm from the crystal. At the plane of image formation of the apertures ($z_{I}= 800$~mm), avalanche photodiode detectors ($D_{1}$ and $D_{2}$) are placed. Single and coincidence counts are measured by the detectors, and in front of each detector there is a single slit of width 0.1~mm (parallel to the four-slits).
\begin{figure}
\caption{ Outline of the experimental setup. $A_{1}$ and $A_{2}$ are four-slit apertures, $L_{j}$ lens, $D_{j}$ a detector and C is a coincidence counter.}
\label{fig:setup}
\end{figure}
Coincidence-selective measurements onto the basis $\{\ket{l}\lsub{1}\ket{l'}\lsub{2}\}$ are performed to determine the two-photon image state. Detector $D_{1}$ is fixed at a region in space where the image of slit $l$ of the four-slit in channel 1 is formed, while detector $D_{2}$ scans, in the $x$ direction, the entire region where the image of the other four-slit is formed. Four measurements of this kind, with detector $D_{1}$ going from the image of the slit for which $l=-\frac{3}{2}$ to the image of the slit with $l=\frac{3}{2}$, determine the probabilities associated with the sixteen basis states $\{\ket{l}\lsub{1}\ket{l'}\lsub{2}\}$.
If the theoretical result of Eq.~(\ref{Imqudits}) for the state of the twin photons at the plane of image formation is correct, coincidence peaks will occur only when detector $D_{2}$ passes the image of the slit for which $l'=-l$. However, the classically correlated state given by
\begin{equation} \label{classico} \rho_{\text{cc}} = \frac{1}{2} \sum_{l=-\frac{3}{2}}^{\frac{3}{2}}
\ket{l}\lsub{1\,1\!}\!\bra{l} \otimes
\ket{-l}\lsub{2\,2\!\!}\bra{-l}, \end{equation} predicts the same experimental result. Therefore, to guarantee that the image state is indeed given by a coherent superposition (Eq.~(\ref{Imqudits})), the detectors are moved to a distance of $200$~mm from the image formation plane and conditional fourth-order interference patterns \cite{Greenberger,Fonseca} are measured. As demonstrated in Ref. \cite{Glima}, when dealing with the spatial correlations of two photons, the observation of a fourth-order interference pattern with conditional fringes is a sufficient signature of entanglement. If the correlations between the propagated ququarts were classical, the coincidence count rate, in this new configuration of the setup, would exhibit only a single-slit diffraction pattern.
\begin{figure}\label{fig:ququart}
\end{figure}
The experimental data recorded at the plane of image formation of the four-slits are shown in Fig.~\ref{fig:ququart}. We can see that the results are in agreement with Eq.~(\ref{Imqudits}), because coincidence peaks were only observed when $D_{2}$ was scanning the image of the slit symmetrically opposite to the one at which detector $D_{1}$ was fixed. Figure~\ref{fig:ququart}(e) is a histogram constructed using all the coincidences recorded in the four measurements performed. Each probability is the chance that the propagated ququart state, selected in coincidence, is found in one of the basis states. The fact that the probabilities for the states $\ket{l}\lsub{1}\ket{-l}\lsub{2}$ are almost equally weighted and all the other probabilities are null is strong evidence that the image state is a \emph{maximally} entangled state of ququarts. This means that the states $\ket{l}\lsub{1}\ket{-l}\lsub{2}$ have almost the same amplitudes in the coherent superposition of the obtained image state (see Eq.~(\ref{expquarts})).
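(Each probability here can be read as the normalized coincidence rate $P(l,l')\simeq C_{ll'}/\sum_{m,m'}C_{mm'}$, where $C_{ll'}$ denotes the number of coincidences recorded with $D_{1}$ at the image of slit $l$ and $D_{2}$ at the image of slit $l'$; this is the natural reading of the construction described above.)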
The fourth-order interference patterns measured when the detectors were moved to a distance of $200$~mm from the image formation plane and the propagated ququarts detected in coincidence are shown in Fig.~\ref{interf}. Coincidence measurements were made as a function of the $x$ position of the detector $D_{1}$ while $D_{2}$ was kept fixed. The results are shown in Fig.~\ref{interf}: (a) $D_{2}$ fixed at $x_{2}=0$~mm; (b) $D_{2}$ fixed at $x_{2}=0.6$~mm. The visibilities of the interference patterns are $v_{a} = 0.86 \pm 0.05$ and $v_{b} = 0.83 \pm 0.04$, respectively. One can easily observe the conditionality of the fringes of these patterns. As mentioned before, the presence of conditional interference patterns, in this new configuration of the setup, demonstrates that the image state is pure and entangled.
\begin{figure}
\caption{ Fourth-order interference patterns as a function of $D_{1}$ position. They were recorded when the propagated photons were detected in coincidence at a distance of $200$~mm of the image formation plane. (a) $D_{2}$ is kept fixed at $x_{2}=0$~mm. (b) $D_{2}$ is kept fixed at $x_{2}=0.6$~mm. The solid curve is a guide to the reader's eyes.}
\label{interf}
\end{figure}
The propagated ququarts state obtained from the measurements can then be written as:
\begin{eqnarray} \ket{\Psi}_{Im} & = & 0.49\;\ket{\scriptstyle -\frac{1}{2}, +\frac{1}{2}}
\; + \; 0.50\;\ket{\scriptstyle +\frac{1}{2}, -\frac{1}{2}} \nonumber \\%[2mm]
& & \!\!\!\!\!\!\!\! \text{} +
e^{i\frac{kd^{2}}{z_{A}}} (0.47\;\ket{\scriptstyle -\frac{3}{2}, +\frac{3}{2}}
\; \!\!\! + \!\! \; 0.49\;\ket{\scriptstyle +\frac{3}{2}, -\frac{3}{2}}),
\label{expquarts} \end{eqnarray} which has a fidelity of $F=0.98\pm 0.06$ to the original state of the ququarts given by Eq.~(\ref{qudits}). This proves that we were able to propagate the entangled ququart state while keeping a high fidelity to its original form. The phase in Eq.~(\ref{expquarts}) was not measured because it can be cancelled out by choosing appropriate values for $d$ and $z_{A}$ or by adding an appropriate external phase to a given slit.
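As a rough consistency check: if the fidelity is taken to be the modulus of the overlap between Eq.~(\ref{qudits}) and Eq.~(\ref{expquarts}), with the coefficients as quoted, one obtains $F\approx\frac{1}{2}(0.49+0.50+0.47+0.49)\approx 0.98$, in agreement with the value above.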
\section{Discussion and conclusion}
We believe that the process of entangled-qudit propagation described above can be implemented over larger distances. As demonstrated, at the plane of image formation of the four-slits the entangled ququart state (Eq.~(\ref{qudits})) is reconstructed with high fidelity. Besides this, it is well known that different configurations of lenses can be used after objects to make their images appear at long distances. Therefore, the use of such configurations after the apertures would allow the transmission of the entangled photons over more significant distances. In Ref. \cite{Zeilinger1}, two telescopes were used to propagate entangled qubits over more than $500$~m.
In conclusion, we have presented a principle for propagating entangled states of qudits, generated using the transverse correlation of the twin photons produced by SPDC, through free space. To our knowledge this is the first report of the propagation of entangled states of high-dimensional quantum systems. The experimental test performed yielded a propagated state with a high fidelity to its original form. The benefits of a free-space distribution of quantum entanglement were already discussed in Ref. \cite{Zeilinger1}. The advantages of using entangled states of high-dimensional quantum systems to transmit information come both from the increase in the quantity of information that can be encoded in the entangled quantum systems and from the possibility of performing more secure quantum cryptography protocols. For these reasons we believe that the work presented in this paper is an important step in the effort of building quantum communication networks.
\section{Acknowledgments}
This work was supported by the Brazilian agencies CAPES, CNPq, Fapemig and Mil\^enio-Informa\c{c}\~ao Qu\^antica. C. Saavedra was supported by Grants Nos. FONDECYT 1040591 and Milenio ICM P02-49F.
\end{document} |
\begin{document}
\title{Quantum Correlations in the Kerr Ising Model}
\author{M. J. Kewming} \email{[email protected]} \affiliation{Centre for Engineered Quantum Systems, School of Mathematics and Physics, University of Queensland, QLD 4072 Australia} \author{S. Shrapnel} \affiliation{Centre for Engineered Quantum Systems, School of Mathematics and Physics, University of Queensland, QLD 4072 Australia} \author{G. J. Milburn} \affiliation{Centre for Engineered Quantum Systems, School of Mathematics and Physics, University of Queensland, QLD 4072 Australia}
\date{\today}
\begin{abstract} In this article we present a full description of the quantum Kerr Ising model---a linear optical network of parametrically pumped Kerr non-linearities. We consider the non-dissipative Kerr Ising model and, using variational techniques, show that the energy spectrum is primarily determined by the adjacency matrix in the Ising model and exhibits highly non-classical cat-like eigenstates. We then introduce dissipation to give a quantum mechanical treatment of the measurement process based on homodyne detection via the conditional stochastic Schr\"odinger equation. Finally, we identify a quantum advantage in comparison to the classical analogue for the example of two anti-ferromagnetic cavities. \end{abstract}
\maketitle \section*{Introduction} In an increasingly connected and dynamic society, the demand to solve complex problems requiring optimal configurations of large systems is growing. Many of these optimisation problems fall into the NP-hard or NP-complete complexity classes, which are practically impossible to solve on a classical digital computer. These types of problems can be mapped onto the Ising model, where the ground state yields the optimal solution. Attempts to find the ground state have led to the development of many classical and quantum approaches, including adiabatic quantum computing \cite{farhi_quantum_2001} and quantum annealing \cite{kadowaki_quantum_1998,brooke_quantum_1999}. However, a significant hurdle facing these architectures is the connectivity of individual physical qubits, an essential requirement for solving large optimisation problems.
A new approach dubbed the Coherent Ising Machine (CIM) overcomes this issue by implementing an optical analog of the Ising model \cite{wang_coherent_2013,takata_quantum_2015, mcmahon_fully_2016, yamamoto_coherent_2017}. Recent results have compared the performance of the CIM against semi-definite programs and simulated annealing \cite{haribara_coherent_2016} as well as neural network architectures \cite{haribara_performance_2017} and quantum annealers \cite{hamerly_experimental_2019}. Further theoretical results have shown comparable results can be obtained when modelled using Gaussian optics \cite{clements_gaussian_2017}. In the CIM, a series of degenerate parametric oscillators (DOPOs) forms a coupled network of potential spins. At sufficiently high pumping, the DOPO experiences a pitchfork bifurcation into two steady-state solutions. These two solutions are coherent states which are $\pi$ out of phase with one another and play the role of a `spin' in the Ising model. When coupled, each DOPO in the network bifurcates in accordance with an Ising Hamiltonian. The lowest energy configuration of coherent states in the cavities therefore corresponds to the ground state. The network of DOPOs becomes non-separable during bifurcation \cite{takata_quantum_2015, maruo_truncated_2016}, which may lead to a quantum advantage \cite{inagaki_coherent_2016}, although this advantage has not yet been clearly identified. Recently, several impressive classical experimental realisations of the CIM have been published demonstrating up to $2000$ connected spins \cite{mcmahon_fully_2016,inagaki_coherent_2016, hamerly_experimental_2019}. Furthermore, off-the-shelf electronics have been used to build robust opto-electronic CIMs \cite{bohm_understanding_2018}.
Here we will consider a Kerr Ising Model (KIM) based on a network of parametrically pumped cavities containing a Kerr non-linearity. This interaction has been studied extensively, with seminal results showing the onset of chaos \cite{milburn_quantum_1991}, single and multi-photon blockade \cite{leonski_possibility_1994, imamoglu_strongly_1997, miranowicz_two-photon_2013, miranowicz_state-dependent_2014}, as well as qubit construction \cite{mabuchi_qubit_2012}, quantum gates \cite{pachos_optical_2000, knill_scheme_2001,nysteen_limitations_2017} and computation \cite{goto_universal_2016,combes_two-photon_2018}. Furthermore, experimental realisations of parametrically driven Kerr non-linearities and their subsequent quantum behaviour have been discussed \cite{puri_engineering_2017} and realised in superconducting circuits \cite{wang_quantum_2019}. There is now a small but growing literature on driven networks of Kerr non-linearities in cascaded systems \cite{stannigel_driven-dissipative_2012}, under adiabatic evolution \cite{goto_bifurcation-based_2016, goto_universal_2016, goto_quantum_2019} and dissipative single photon loss \cite{puri_quantum_2017, nigg_robust_2017}. Notably, the results presented in \cite{goto_bifurcation-based_2016, goto_quantum_2019} describe the implementation of an Ising machine constructed from a network of Kerr non-linearities but do not consider the nature or effects of any quantum correlations, an analysis we present here.
We start by presenting a comprehensive theoretical description of the KIM and show that the network exhibits non-classical correlations at the corresponding classical bifurcation --- indicating a quantum phase transition \cite{hines_quantum_2005}. Beyond the bifurcation the ground state corresponds to an entangled state of minimum energy spin configurations of the Ising model. We then introduce low temperature dissipation using the quantum optics master equation and show that the steady state corresponds to a probabilistic mixture of these highly entangled ground states. This enables us to describe the conditional dynamics corresponding to continuous homodyne detection of the output fields showing the stochastic approach to the desired ground state. Lastly, we compare the fully quantum model against a classical analogue at finite temperatures for two coupled cavities and clearly identify the advantage arising from quantum correlations.
\begin{figure}
\caption{a) Energy of the $n$th eigenstate $E_{n}$ of 2 spins---2 coupled cavities with no detuning $\Delta=0$. Red lines correspond to the uncoupled network $\eta=0$ and blue correspond to $\eta = 0.05$ with driving strength $\vert\epsilon\vert = 2$. When the network is uncoupled, there are $4$ degenerate ground states corresponding to each unique configuration of spins. However, when a small coupling is introduced, the degeneracy is broken leading to $2$ new ground states and $2$ excited states in accordance with the Ising model. b) For weak coupling, the energy levels change linearly according to the perturbed energy levels Eq.\,(\ref{eq:perturbation}).
}
\label{fig:energy_levels}
\end{figure}
\section{The Kerr Ising Model} In our analysis, we will start by assuming that the pump mode has been adiabatically eliminated. In the interaction picture, each cavity in the network is described by the Hamiltonian \begin{equation} \label{eq:ppk}
H_{0} = \hbar\chi \left( a^{\dagger 2} - \frac{\epsilon^{*}}{\chi}\right)\left(a^{2} - \frac{\epsilon}{\chi}\right) + \hbar\Delta a^{\dagger}a \end{equation} where $\chi$ is proportional to the third-order nonlinear susceptibility, $\Delta=\omega_a-\omega_p/2$ is the detuning of the pump frequency, $\omega_p$, from the cavity resonance frequency, $\omega_a$, and $\epsilon$ is the pump field.
Due to the parity symmetry of Eq.(\ref{eq:ppk}), on resonance the ground state is a doubly degenerate subspace with energy ${E_{0} = 0}$ spanned by two coherent states $\ket{\alpha_{i}} {= \ket{\pm \alpha_{0}}}$ where ${\alpha_{0} = \sqrt{\epsilon/\chi}}$. The introduction of a \emph{small} detuning $\Delta$ creates a linear shift in the energy levels, $E_{0} {= \hbar \Delta \vert \alpha_{0} \vert^{2}}$. Likewise, for a network of $N$ \emph{uncoupled} Kerr cavities the ground state is $2^{N}$-fold degenerate, spanned by the product states $\ket{\vec{\alpha}_{m}} = \ket{\pm \alpha_{0}, \pm \alpha_{0}, ...}$ with a perturbed energy of $E_{0}^{'} = \hbar N \Delta \vert \alpha_{0} \vert^{2}$.
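The single-cavity ground states can be checked directly from Eq.~(\ref{eq:ppk}): since $a^{2}\ket{\pm\alpha_{0}}=\alpha_{0}^{2}\ket{\pm\alpha_{0}}=(\epsilon/\chi)\ket{\pm\alpha_{0}}$, the first term annihilates $\ket{\pm\alpha_{0}}$, so on resonance $H_{0}\ket{\pm\alpha_{0}}=0$. The small-detuning shift quoted above then follows from first-order perturbation theory, $\bra{\pm\alpha_{0}}\hbar\Delta a^{\dagger}a\ket{\pm\alpha_{0}}=\hbar\Delta\vert\alpha_{0}\vert^{2}$.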
If we now introduce a linear coherent coupling scheme in the form of a network of connected beam-splitters and phase shifters, we obtain the Hamiltonian \cite{Milburn:07} \begin{equation}
H_{\mathrm{ising}} = \sum_{i}^{N} H_{0}^{(i)} + \hbar \eta \,\vec{a}^{\dagger}.\hat{S}.\vec{a} \end{equation} where $\eta$ is the fixed coupling strength for each cavity, $\hat{S}$ is the adjacency matrix between cavities, and $\vec{a} = (a_{1}, a_{2},...,a_{N})$ is a vector of annihilation operators for each cavity. On closer inspection, $H_{\mathrm{ising}}$ bears considerable resemblance to the classical Ising model of $N$ coupled spins $\sigma_{i} = \pm 1$ in a uniform magnetic field $B$ \begin{align}
H_{I} &= H_{B} -J \sum_{\langle i,j \rangle} \sigma_{i}\sigma_{j}\, \nonumber\\
\label{eq:Ising}
& = H_{B} - J \vec{\sigma}^{T}.\hat{S}.\vec{\sigma}\,, \end{align} where $H_{B} = - \mu B \sum_{i}\sigma_{i}$ is the magnetic dipole Hamiltonian, $J$ is the uniform coupling between spins, and $\hat{S}$ is, again, the adjacency matrix between spins. In the KIM, the parametric driving of each Kerr non-linearity creates a subspace of pseudo-spins coupled through a linear optical network. This model mimics the magnetic field driving and the magnetic coupling between spins in the traditional Ising model. As a result, the energy spectrum in the KIM will reflect characteristics similar to those of the Ising model in Eq.(\ref{eq:Ising}).
Using degenerate perturbation theory for a weak coupling $\eta$ and large driving $\epsilon> 0$, the $n$th shifted energy level of the network is determined by \begin{equation} \label{eq:perturbation}
E_{n}^{'} = \hbar N \Delta \frac{\vert \epsilon \vert}{\chi} + \hbar \eta \vec{\alpha}_{n}^{T}.\hat{S}.\vec{\alpha}_{n} \end{equation} where $\vec{\alpha}_{n} = \sqrt{\vert \epsilon \vert /\chi}\, \vec{\sigma}_{n}$ corresponds to the configuration vector of cavity coherent states for the $n$th eigenstate $\ket{\vec{\alpha}_{n}}$ \cite{goto_bifurcation-based_2016}. The eigenstates of the new spectrum can be found by solving the perturbed eigenvector equation \begin{equation}
\eta \,\vec{a}^\dagger.\hat{S}.\vec{a} \ket{\vec{\alpha}_{n}^{'}} = \eta\, \vec{\alpha}_{n}^{\dagger}.\hat{S}.\vec{\alpha}_{n} \ket{\vec{\alpha}_{n}^{'}}\,, \end{equation} where $\ket{\vec{\alpha}_{n}^{'}} = \sum_{i}^{2^{N}}c_{ni}\ket{\vec{\alpha}_{i}}$ is written as a linear combination of the uncoupled coherent eigenstates $\ket{\vec{\alpha}_{i}}$. Multiplying through by $\bra{\vec{\alpha}_{k}}$ and using the annihilation operator eigenvalue equation $\vec{a} \ket{\vec{\alpha}} = \vec{\alpha} \ket{\vec{\alpha}}$, we obtain the matrix equation \begin{equation}
c_{nk} \frac{\vert \epsilon \vert}{\chi}\eta \left( \vec{\sigma}_{k}^{\dagger}.\hat{S}.\vec{\sigma}_{k}\, - \vec{\sigma}_{n}^{\dagger}.\hat{S}.\vec{\sigma}_{n} \right) = 0\,. \end{equation} For $c_{nk} \neq 0$, this relationship is satisfied only if $ \vec{\alpha}_{k}^{\dagger}.\hat{S}.\vec{\alpha}_{k} = \vec{\alpha}_{n}^{\dagger}.\hat{S}.\vec{\alpha}_{n} $. This ensures that any linear superposition of spin configurations $\vec{\alpha}_{n}$ which have the same energy forms a degenerate subspace. Given the parity symmetry in the Hamiltonian, the subspace will be spanned by the orthogonal cat-like superpositions of the Ising solutions.
To illustrate this, we consider the simplest 2-spin Ising model for zero detuning $\Delta=0$ shown in Fig.(\ref{fig:energy_levels}). In the absence of coupling $\eta=0$, the ground state is $4$-fold degenerate spanned by every unique configuration of spins $\ket{\alpha_{i}}$.
Now introducing a weak coupling, the degeneracy is broken into a $2$-fold degenerate ground state spanned by the anti-ferromagnetic states ${\ket{\lambda_{-}} \propto \ket{\alpha_{0},-\alpha_{0}} \pm \ket{-\alpha_{0},\alpha_{0}}}$ and a $2$-fold degenerate excited state spanned by the ferromagnetic states ${\ket{\lambda_{+}} \propto \ket{\alpha_{0},\alpha_{0}} \pm \ket{-\alpha_{0},-\alpha_{0}}}$. The new ground state in the high driving limit has an energy $E_{g}^{'} = -2\hbar \eta \vert \epsilon \vert/\chi$ whereas the excited state has energy $E_{e}^{'} = 2 \hbar \eta \vert \epsilon \vert/\chi$.
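As a check against Eq.~(\ref{eq:perturbation}): for two cavities the adjacency matrix has a single pair of off-diagonal unit entries, so the anti-ferromagnetic configurations give $\vec{\alpha}^{\dagger}.\hat{S}.\vec{\alpha}=-2\vert\alpha_{0}\vert^{2}=-2\vert\epsilon\vert/\chi$, and hence $E_{g}^{'}=-2\hbar\eta\vert\epsilon\vert/\chi$ at $\Delta=0$, while the ferromagnetic configurations give $+2\vert\epsilon\vert/\chi$ and $E_{e}^{'}=+2\hbar\eta\vert\epsilon\vert/\chi$, consistent with the values quoted above.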
A notable feature is the low-driving limit, plotted in Fig.(\ref{fig:energy_levels}b), where the spectrum is non-degenerate. In this limit the coupling $\eta$ is strong compared to the driving $\epsilon$ and lifts the degeneracy.
The evolution of the KIM can be described by the von Neumann equation \begin{equation}
\frac{d\rho(t)}{dt} = -\frac{i}{\hbar} \left[H_{\mathrm{ising}},\rho\right]\,. \end{equation}
This may be converted to a Fokker-Planck equation for the positive P-representation of $\rho$ using an expansion of the density operator over the off-diagonal projectors $|\vec{\mu}\rangle\langle \vec{\nu}|$, \begin{align} \label{eq:fokker-planck}
\frac{\partial P(\vec{\mu}, \vec{\nu})}{\partial t} &= -\partial_{\vec{\mu}}\left[\Omega(\vec{\mu},\vec{\nu}) P(\vec{\mu}, \vec{\nu})\right] + \partial_{\vec{\nu}}\left[\Omega(\vec{\nu},\vec{\mu}) P(\vec{\mu}, \vec{\nu})\right] \nonumber\\
& + \partial_{\vec{\mu}}^{2} \left[\Phi(\vec{\nu},\vec{\mu}) P(\vec{\mu}, \vec{\nu})\right]- \partial_{\vec{\nu}}^{2} \left[\Phi(\vec{\nu},\vec{\mu})P(\vec{\mu}, \vec{\nu})\right]\,, \end{align} where the diffusion term is $\Phi(\vec{\mu},\vec{\nu}) = -i(\chi \vec{\mu}^{2} + \epsilon)$ and the drift is $\Omega(\vec{\mu},\vec{\nu}) = -i(2\chi \vec{\mu}^{2} \vec{\nu} -2\epsilon \vec{\nu} + \Delta\vec{\mu} + \eta \hat{S}.\vec{\mu})$.
Replacing $\vec{\mu} = \vec{\alpha}$ and $\vec{\nu} = \vec{\alpha}^{*}$ and ignoring the quantum diffusion terms yields the semi-classical equations of motion \begin{equation} \label{eq:classical}
\frac{\partial}{\partial t} \left(\begin{array}{c}
\vec{\alpha} \\
\vec{\alpha}^{*}
\end{array} \right) = i \hat{A} \left(\begin{array}{c}
\vec{\alpha} \\
\vec{\alpha}^{*}
\end{array} \right) \end{equation} where \begin{equation}
\hat{A} = 2 \epsilon \mathbf{I}_{N} \otimes \sigma_{x} - \left(\Delta\, \mathbf{I}_{N} + 2 \chi \vert \alpha \vert^{2} \mathbf{I}_{N} + \eta \hat{S}\right) \otimes \sigma_{z} \end{equation} with $\hat{\sigma}_{x,z}$ the Pauli spin matrices. The structure of $\hat{A}$ is analogous to a spin model coupled along $\hat{\sigma}_{z}$ and driven along $\hat{\sigma}_{x}$, where the Hilbert space of the cavities provides the pseudo-spin structure and interactions. The bistability of the system can then be located by tracking sign changes of the trace $\mathrm{tr}(\hat{A})$ or the determinant $\det(\hat{A})$ as the driving $\epsilon$ is varied \cite{drummond_quantum_1980}.
\section{Entanglement in the KIM}
\begin{figure*}
\caption{a) Joint photon number distribution for two entangled cavities with parameters $\chi=0.5$, $\eta = 0.1, \epsilon = 1.5, \Delta=1$: the detuning pushes the bifurcation away from $\epsilon=0$. b) The LN of an individual cavity's ground state in the KIM. As the driving increases the ground state becomes a highly entangled cat-like state satisfying Eq.(\ref{eq:eigen}). c) The LN of the steady state if the cavities are damped into a zero-temperature heat bath ($\bar{n}=0$). The correlations between the cavities become non-classical as the system undergoes bifurcation but rapidly decay to classical correlations (zero entanglement) in the large-driving limit.}
\label{fig:entanglement}
\end{figure*}
\subsection{Ground state entanglement}
The KIM has an energy spectrum analogous to the Ising model of a spin network immersed in a constant magnetic field. Earlier work on the quantum Ising model has shown the ground state undergoing a quantum phase transition, through which the degenerate ground states become highly entangled \cite{hines_quantum_2005}. Likewise, previous dissipative CIM models have also shown that the entanglement and quantum discord occur in the pseudo-spin network \cite{takata_quantum_2015, maruo_truncated_2016}.
There are many different tests and criteria for quantum correlations and entanglement. One such measure of non-separability of Gaussian states was introduced by Duan \emph{et al} \cite{duan_inseparability_2000} and used in the dissipative CIM models \cite{takata_quantum_2015,maruo_truncated_2016}. This criterion is a suitable choice in dissipative models if the steady state solution is Gaussian. The eigenstates of the KIM, however, are highly non-Gaussian cat-like states, as seen in the joint photon number distribution of $\braket{\hat{n}_{1},\hat{n}_{2}}{\lambda_{0}}$ plotted in Fig.(\ref{fig:entanglement})a, which exhibits significant interference suggesting strong non-classicality. Given the non-Gaussian nature of the eigenstates, the Duan inequality is no longer a necessary condition for inseparability \cite{duan_inseparability_2000}. A more robust metric for comparing Hamiltonian and dissipative models is the \emph{logarithmic negativity} (LN) defined as \begin{equation}
E_{\mathcal{N}}(\rho) = \log_{2} \abs{\rho^{T_{A}}}_{1} \end{equation} where $\abs{\rho^{T_{A}}}_{1} = 2\mathcal{N} +1$ is the trace norm, expressed in terms of the \emph{negativity} $\mathcal{N}$, the absolute value of the sum of the negative eigenvalues of $\rho^{T_{A}}$ \cite{vidal_computable_2002}. For maximally entangled pure states the LN coincides with the von Neumann entropy of entanglement.
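In numerical work the LN is most easily evaluated from the eigenvalues of the partial transpose of the (numerically truncated) joint density matrix. The following sketch is illustrative only: it uses plain NumPy, and the function name and the two-level test state are placeholders standing in for the truncated cavity state.
\begin{verbatim}
import numpy as np

def log_negativity(rho, dA, dB):
    # E_N = log2 || rho^{T_A} ||_1 for a bipartite state on C^dA (x) C^dB
    r = rho.reshape(dA, dB, dA, dB)
    rho_TA = r.transpose(2, 1, 0, 3).reshape(dA * dB, dA * dB)
    eigs = np.linalg.eigvalsh(rho_TA)
    return np.log2(np.sum(np.abs(eigs)))   # trace norm = 2*negativity + 1

# sanity check on a maximally entangled two-mode superposition
psi = (np.kron([1.0, 0.0], [1.0, 0.0]) + np.kron([0.0, 1.0], [0.0, 1.0])) / np.sqrt(2)
rho = np.outer(psi, psi.conj())
print(log_negativity(rho, 2, 2))           # -> 1.0
\end{verbatim}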
Plotting the LN in Fig.(\ref{fig:entanglement})b for a large non-zero detuning $\Delta=1$, we see that the non-resonant pumping of the cavity pushes the bifurcation away from $\epsilon=0$ and plays a role analogous to the damping considered in the dissipative models---which we discuss in the next section---while retaining the unitarity of the system. The ground state smoothly transitions to a highly entangled state and remains so for increased driving $\epsilon$ and coupling $\eta$.
\subsection{Steady state entanglement}
In an experimental setting, one needs to measure the value of each pseudo-spin in the model and thus the closed Hamiltonian system must be made open in some way. This could be an impulsive coupling to the external apparatus after some period of unitary evolution. In Ref.\cite{goto_bifurcation-based_2016} the solution to the model was obtained under adiabatic evolution followed by a final read-out. In quantum optics, however, a more conventional approach is to open the system by letting a small amount of light leak out of the cavity and subjecting it to continuous measurement. Once the cavity is opened in this way the system becomes a damped non-linear system and the elliptic fixed points of the Hamiltonian become stable fixed points of a dissipative dynamical system. As for what to measure, we note that the key feature of the KIM solutions we seek lies in the phase of the intracavity field, so we will need a phase-dependent measurement scheme such as homodyne detection. We can then ask for the conditional dynamics of the intracavity field given a particular stochastic homodyne current record.
At a non-zero temperature---where the heat bath is treated as white noise on the input field---and for unit-quantum-efficiency homodyne detection, the conditional state obeys the conditional stochastic Schr\"odinger equation (SSE) \cite{WisemanH.M.HowardM.2010Qmac} \begin{eqnarray} \label{eq:stochastic}
d\rho(t) & = & -\frac{i}{\hbar} \left[H_{\mathrm{ising}}, \rho(t)\right]dt + \sum_{i}^{N}\gamma(\bar{n} + 1) \mathcal{D}[a_{i}]\rho dt \\
& & + \gamma \bar{n} \mathcal{D}[a_{i}^{\dagger}]\rho dt + \sqrt{\gamma} \mathcal{H}[\gamma(\bar{n}+1)a_{i} - \gamma \bar{n} a_{i}^{\dagger}]dW_{i}(t)\nonumber \,, \end{eqnarray} where all photons emitted from the cavity at rate $\gamma$ are detected and $dW_{i}(t)$ is a classical Wiener process. The mean photon number due to thermal excitation is determined by the Bose-Einstein statistics $\bar{n} = (\exp\left[{\hbar \omega/k_{b}T}\right]-1)^{-1}$, where $T$ is the temperature and $\omega$ is the cavity resonance frequency (assumed identical for all cavities). Here, $\mathcal{D}[a_{i}]\rho$ is the single-photon loss channel and $\mathcal{H}[a]\rho = a \rho + \rho a^{\dagger} - \mathrm{tr}\left(a \rho + \rho a^{\dagger}\right)\rho$ is the conditional stochastic term of the $i$th cavity. If we only seek the unconditional dissipative evolution, we can average over the stochastic term in Eq.(\ref{eq:stochastic}), which then vanishes, giving a master equation for the state of the damped cavity field. This master equation may now be converted to a Fokker-Planck equation of the same form as Eq.(\ref{eq:fokker-planck}) with a decay term introduced into the drift, $\Omega(\vec{\mu},\vec{\nu}) \rightarrow \Omega(\vec{\mu},\vec{\nu}) - \gamma \vec{\mu}/2$, and an additional noise term $\gamma \bar{n}\,\partial_{\vec{\nu} \vec{\mu}}^{2}P(\vec{\mu},\vec{\nu})$. Instead of asking about the structure of the ground state, our attention now turns to the steady-state solution of the master equation or, equivalently, of the Fokker-Planck equation.
The LN of the steady state solution of two cavities as a function of pumping $\epsilon$ and coupling $\eta$ is depicted in Fig.\, (\ref{fig:entanglement})c at zero temperature $\bar{n}=0$. The entanglement is reduced due to the introduction of cavity dissipation but is non zero at the bifurcation. The two cavities become non-separable around parameter values corresponding to the classical bifurcation in the semi-classical dissipative model indicating a dissipative quantum phase transition \cite{hines_quantum_2005}. The entanglement then decreases to zero in the limit of large driving. A similar peak in non-separability was also observed in the CIM \cite{takata_quantum_2015, maruo_truncated_2016}.
In the case of the conditional dynamics based on homodyne detection, the cavities stochastically evolve into one particular spin configuration $\ket{\pm\alpha_{0},\mp\alpha_{0}}$. One can show that the steady state solutions correspond to the different possible spin configurations in the Ising model determined by the adjacency matrix $\hat{S}$; this is achieved by finding the roots of Eq.(\ref{eq:classical}) when $\partial \vec{\alpha}/\partial t = 0$ \cite{yamamoto_coherent_2017, nigg_robust_2017}. Since the unconditional steady state arises from ensemble averaging over the measurement records, and thus over the conditional states, we conclude that in the large-driving limit the steady state can be regarded as a mixture of the possible spin-state configurations that satisfy Eq.(\ref{eq:eigen}), slightly shifted due to the damping.
\section{Comparison between Quantum and Classical models}
In the previous section we outlined the quantum dynamics of the two-spin anti-ferromagnetic KIM subject to weak damping so as to allow continuous homodyne detection through the conditional SSE Eq.\,(\ref{eq:stochastic}). By modelling the conditional SSE we provide a numerical experiment approximating the results that would be observed in a physical realisation of the KIM. We seek to understand more carefully what advantage may arise at the quantum limit. This can be achieved by comparing the semi-classical model with the full quantum model at finite temperature as the temperature is lowered. Here we compare the conditional field-amplitude averages computed by solving the conditional SSE with stochastic solutions of the SDE obtained from Eq.\,(\ref{eq:classical}) by the addition of thermal noise, \begin{equation} \label{eq:classical_sde}
\frac{\partial}{\partial t} \left(\begin{array}{c}
\vec{\alpha} \\
\vec{\alpha}^{*}
\end{array} \right) = i \hat{A} \left(\begin{array}{c}
\vec{\alpha} \\
\vec{\alpha}^{*}
\end{array} \right) + \sqrt{\frac{\gamma \bar{n}}{2}} \left(\begin{array}{cc}
i & 1 \\
-i & 1
\end{array} \right) \left(\begin{array}{c}
\xi_{1}(t) \\
\xi_{2}(t)
\end{array} \right)\,, \end{equation} where $\xi_{1}(t)$ and $\xi_{2}(t)$ are delta-correlated stochastic forces. In the quantum case, the measured homodyne current from cavity $i$ is ${J_{i}(t) = \langle \hat{X}_{i}(t) \rangle_c + \sqrt{\gamma(2\bar{n}+1)}\xi_{i}(t)}$, where the first term is a quantum average computed from the conditional state. In the classical case the analogous quantity is simply $j_i(t) = \alpha_i(t)+\alpha^*_i(t)$.
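For concreteness, the classical trajectories can be generated with a simple Euler--Maruyama integration. The sketch below is illustrative only and makes its assumptions explicit: the two-cavity anti-ferromagnetic adjacency matrix, the damped drift of the previous section (i.e.\ $\Omega(\vec{\alpha},\vec{\alpha}^{*})-\gamma\vec{\alpha}/2$ with $\vec{\nu}=\vec{\alpha}^{*}$), an independent complex thermal noise of strength $\sqrt{\gamma\bar{n}/2}$ on each cavity, and illustrative parameter values.
\begin{verbatim}
import numpy as np

def classical_trajectory(T=5.0, dt=1e-3, eps=1.0, Delta=0.0, chi=0.5,
                         gamma=1.0, eta=1.0, nbar=0.25, seed=0):
    rng = np.random.default_rng(seed)
    S = np.array([[0.0, 1.0], [1.0, 0.0]])     # anti-ferromagnetic pair
    alpha = np.zeros(2, dtype=complex)          # start from the vacuum
    out = []
    for _ in range(int(T / dt)):
        drift = -1j * (2 * chi * np.abs(alpha) ** 2 * alpha
                       - 2 * eps * np.conj(alpha)
                       + Delta * alpha + eta * S @ alpha) - 0.5 * gamma * alpha
        xi = rng.standard_normal(2) + 1j * rng.standard_normal(2)
        alpha = alpha + drift * dt + np.sqrt(gamma * nbar / 2) * xi * np.sqrt(dt)
        out.append(2 * np.real(alpha))          # classical currents j_i = alpha_i + alpha_i*
    return np.array(out)

j = classical_trajectory()
print(np.sign(j[-1]))                           # final pseudo-spin configuration
\end{verbatim}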
\begin{figure*}\label{fig:quantum}
\end{figure*}
There are several quantities determined directly from the homodyne current that are capable of informing us about the performance of the KIM. Firstly, consider the squared difference of the measured currents of the two cavities; ignoring the white-noise terms this gives ${( J_{1}(t) - J_{2}(t) )^{2}=\langle \hat{X}_{1} - \hat{X}_{2} \rangle_c^{2}}$ (note this is the square of a conditional average). In the anti-ferromagnetic example, if the two cavities converge on the same-signed steady state then the mean difference will be zero. Conversely, if the two cavities settle into the anti-ferromagnetic solution then this quantity will reach $2\vert \alpha \vert^{2}$ since the cavities bifurcate along $\hat{X}$. Notably, this quantity also gives us a measure of the intracavity dynamics.
Secondly, the correlations between the individual homodyne currents ${J}_{1}(t)$ and $J_{2}(t)$ are fundamental to understanding the consequences of the quantum correlations identified in Fig.(\ref{fig:entanglement})c. Here we measure the normalised cross-correlation function between the two cavities, defined as \begin{equation}
R_{\hat{X}_{1},\hat{X}_{2}} = \frac{1}{M} \sum_{m=1}^{M} \frac{\left( J_{1}^{(m)}(t) - \overline{J_{1}(t) }\right)\left( J_{2}^{(m)}(t) - \overline {J_{2}(t) } \right)}{\sqrt{\sigma_{J_{1}}^{2}\,\sigma_{J_{2}}^{2}}} \end{equation} where $M$ is the number of sampled currents and $\sigma_{J_{i}}^{2}$ is the variance of the current of the $i$th cavity. The correlation function is bounded, $-1\leq R_{X_{1},X_{2}} \leq 1$, where equality signifies perfect (anti-)correlation.
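This estimator maps directly onto sampled data. A minimal sketch, assuming the $M$ sampled currents at each time step are stored row-wise in arrays of shape $(M,T)$ (the function name and storage convention are our own):
\begin{verbatim}
import numpy as np

def cross_correlation(J1, J2):
    # normalised equal-time cross-correlation R_{X1,X2}(t);
    # J1, J2 have shape (M, T): M trials, T time points
    d1 = J1 - J1.mean(axis=0)
    d2 = J2 - J2.mean(axis=0)
    return (d1 * d2).mean(axis=0) / np.sqrt(J1.var(axis=0) * J2.var(axis=0))
\end{verbatim}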
Finally, the `pseudo-spin' of the KIM is determined by the sign of the current $J_{i}(t)$. An error arises when both cavities have the same sign $\mathrm{sign}[J_{1}(t)] = \mathrm{sign}[J_{2}(t)]$ in the anti-ferromagnetic example. Thus we can calculate the probability of obtaining an error as a function of time and measure the success probability of the KIM.
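The error probability can be estimated from the same sampled currents (same array convention as the sketch above; this is only an illustrative estimator):
\begin{verbatim}
import numpy as np

def error_probability(J1, J2):
    # fraction of trials in which both pseudo-spins carry the same sign,
    # i.e. an error for the anti-ferromagnetic pair
    return np.mean(np.sign(J1) == np.sign(J2), axis=0)
\end{verbatim}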
Here we sample both the quantum Eq.\,(\ref{eq:stochastic}) and the classical Eq.\,(\ref{eq:classical_sde}) trajectories 2000 times---each trajectory equivalent to an experimental trial. From this sample we compute the averages of the quantities depicted in Fig.\,(\ref{fig:quantum}). Both simulations use identical cavity parameters $\epsilon=1, \Delta=0, \chi = 0.5, \gamma = 1, \eta = 1$, and the temperature is varied over the range $0\leq\bar{n}\leq1$.
The results exhibit several interesting features. Firstly, increasing the temperature leads to the expected agreement between the classical and quantum models as they approach the steady state. The system bifurcates approximately where the cross-correlation $R_{X_{1}, X_{2}}$ peaks. Before bifurcation, the two cavities exhibit \emph{positive} correlations in both the classical and quantum models. This leads to an increased probability of detecting an error, which rapidly decays once the system has bifurcated. In the limit of zero temperature (blue) the classical model stalls in the absence of thermal fluctuations: the KIM remains stuck at the unstable fixed point centred at the origin. The quantum mechanical model, on the other hand, continues to function due to the spontaneous emission of the cavities. In fact, at zero temperature it is less likely to lead to an error in the steady state $Pr(\mathrm{error})$. Also, at zero temperature the quantum model exhibits extremely large positive correlations prior to bifurcation as a consequence of the squeezing. This quantum effect is rapidly suppressed in the presence of small amounts of thermal noise.
Another noticeable feature is the time taken to bifurcate. At all temperatures, the quantum mechanical model appears to peak in correlations at $t\sim 0.5$. The mean difference $\langle X_{1} - X_{2} \rangle^{2}$ appears to plateau for $1<t<2$ before increasing again at larger times. The classical model at low temperature---$\bar{n} = 0.25$---is noticeably slower to begin converging on the Ising solution but begins to resemble the quantum model for $t>2$. In the classical model of Fig.\,(\ref{fig:quantum}), this suggests a critical slowing of the convergence rate to a stable fixed point at lower temperatures. This tension introduces a trade-off between the speed to undergo bifurcation and accuracy: lower thermal fluctuations lead to higher accuracy but also result in critical slowing of the dynamics. At low temperatures, this critical slowing could be overcome by thermal annealing. In the quantum mechanical model, at times $t<2$, the phase transition occurs sooner. This is likely due to the quantum entanglement between the cavities, also known as quantum activation \cite{Dykman2006}. For larger times---after bifurcation---the cavities transition into the classical regime, as indicated by the agreement between the results at finite temperature \cite{maruo_truncated_2016}.
This indicates one significant advantage over traditional quantum annealing architectures. At zero temperature---a reasonable assumption in optical systems and superconducting circuits---with an initial state in the vacuum, the system is situated at an unstable stationary fixed point. Classically, a mechanism to induce this symmetry breaking is thermal annealing. Quantum mechanically, the cavities can quantum tunnel and begin converging on the stationary solutions determined by the Ising model \cite{yamamoto_coherent_2017}. Therefore \emph{no} annealing is required in the KIM when finding the lowest energy solution.
\section{Discussion} In this article we have focused on the two-cavity KIM, both classically and in the full quantum description. We have shown that the quantum model exhibits a highly entangled ground state, but continuously measuring the cavities under homodyne detection forces the system to converge on one particular spin-configuration solution. Consequently, this leads to a classically correlated steady-state solution whereby quantum correlations exist only during the process of bifurcation. Regardless, the nature of this quantum phase transition elicits a quantum advantage in the trade-off between speed and accuracy of the model, as we have shown by comparing the classical model to the full quantum description.
We have shown conclusively that there exists an advantage to using a quantum mechanical KIM at the zero temperature limit as shown in Fig.\,(\ref{fig:quantum}). The quantum fluctuations due to spontaneous emission allow the KIM to operate in the limit where there are no thermal fluctuations. Furthermore, the non-classical correlations arising due to the quantum mechanical squeezing of the joint cavity state lead to an apparent decrease in the time taken for the system to reach bifurcation, likely resulting from quantum activation \cite{Dykman2006}. However it must be noted that this advantage rapidly decays with increasing temperature. Furthermore, we have considered the simplest two cavity model; further investigation is required to determine whether the advantage still exists at a larger number of cavities. It would also be interesting to consider how this advantage manifests itself with more complex adjacency matrices $\hat{S}$ and determine whether or not it is capable of outperforming other classical heuristic Ising solvers.
\end{document} |
\begin{document}
\begin{center}
\large{\bf Generalizations of $R_0$ and SSM properties; Extended Horizontal Linear Complementarity Problem}
\end{center}
\begin{center}
\textsc{Punit Kumar Yadav}\\
Department of Mathematics\\ Malaviya National Institute of Technology, Jaipur, 302017, India\\
E-mail address: [email protected]\\
\textsc{K. Palpandi}\\
Department of Mathematics\\ Malaviya National Institute of Technology, Jaipur, 302017, India\\
E-mail address: [email protected]
\end{center} \begin{abstract}
In this paper, we first introduce the $R_0$-$W$ and ${\bf SSM}$-$W$ properties for a set of matrices, which generalize the $R_0$ and strictly semimonotone matrices. We then prove some existence results for the extended horizontal linear complementarity problem when the involved matrices have these properties. With an additional condition on the set of matrices, we prove that the ${\bf SSM}$-$W$ property is equivalent to the uniqueness of the solution of the corresponding extended horizontal linear complementarity problems. Finally, we give a necessary and sufficient condition for the connectedness of the solution set of the extended horizontal linear complementarity problems. \end{abstract}
\section{Introduction} The standard linear complementarity problem (for short LCP), LCP($C,q$), is to find vectors $x,y$ such that \begin{equation} x\in \mathbb{R}^n, ~y=Cx + q\in\mathbb{R}^n ~\text{and}~ x\wedge y = 0,\end{equation} where $C\in \mathbb{R}^{n\times n},~q\in \mathbb{R}^n$ and $'\wedge'$ denotes the componentwise minimum. The LCP has numerous applications in domains such as optimization, economics, and game theory. Cottle and Pang's monograph \cite{LCP} is the primary reference for the standard LCP. Various generalisations of the linear complementarity problem have been developed and discussed in the literature during the past three decades (see \cite{n2n,elcp,telcp,hlcp,hlcpm,rhlcp}). The extended horizontal linear complementarity problem is one of the most important extensions of the LCP and has been studied by various authors; see \cite{PP0,exis,n2n} and references therein. For a given ordered set of matrices ${\bf C}:=\{C_0,C_1,...,C_k\} \subseteq \mathbb{R}^{n \times n}$, a vector $q\in \mathbb{R}^n$ and an ordered set of positive vectors ${\bf d}:=\{d_{1},d_{2},...,d_{k-1}\} \subseteq \mathbb{R}^n $, the extended horizontal linear complementarity problem (for short EHLCP), denoted by EHLCP(${\bf C},{\bf d},q$), is to find vectors $x_{0},x_{1},...,x_{k} \in \mathbb{R}^n$ such that \begin{equation}\label{e1} \begin{aligned}
C_0 x_{0}=&q+\sum_{i=1}^{k} C_ix_{i},\\
x_{0}\wedge x_{1}=0 ~~\text{and} ~~ (d_{j}-&x_{j})\wedge x_{j+1}=0, ~1\leq j\leq k-1.\\ \end{aligned} \end{equation} If $k=1$, then EHLCP becomes the horizontal linear complementarity problem (for short HLCP), that is, \begin{equation*} \begin{aligned}
C_0 x_{0}-C_1x_{1}=q~~\text{and}~~x_{0}\wedge x_{1}=0. \end{aligned} \end{equation*} Further, the HLCP reduces to the standard LCP by taking $C_0 =I$. Owing to its widespread applications, the horizontal linear complementarity problem has received substantial research attention; see \cite{hlcp,hlcpm,rhlcp, homo} and references therein.
In recent years, various authors have introduced new classes of matrices for analysing the structure of LCP solution sets; see, for example, \cite{LCP, fvi,PP0}. The classes of $R_0$, $P_0$, $P$, and strictly semimonotone (SSM) matrices play a crucial role in the existence and uniqueness of solutions to the LCP. For instance, the $P$ matrix property ($x\in\mathbb{R}^n,\ x*Ax\leq 0\implies x=0$) is a necessary and sufficient condition for the uniqueness of the solution of the LCP for every $q$ (see Theorem 3.3.7 in \cite{LCP}). To obtain similar existence and uniqueness results for generalized LCPs, the notion of a $P$ matrix was extended to a set of matrices as the column $W$-property by Gowda et al. \cite{PP0}. They proved that the column $W$-property gives solvability and uniqueness for the extended horizontal linear complementarity problem (EHLCP). They also generalized the concept of the $P_0$ matrix as the column $W_0$-property.
Another class of matrices, the so-called SSM matrices, is important in LCP theory. An SSM matrix guarantees a unique solution of the LCP for every $q\in\mathbb{R}^n_+$ and also gives the existence of a solution of the LCP (see \cite{LCP}). For a $Z$ matrix (a matrix whose off-diagonal entries are all non-positive), being a $P$ matrix is equivalent to being an SSM matrix (see Theorem 3.11.10 in \cite{LCP}). A natural question is whether the SSM matrix can be generalized to a set of matrices in view of the EHLCP, and whether a similar equivalence holds for sets of $Z$ matrices. In this paper, we answer these questions.
The connectedness of the solution set of the LCP has a prominent role in the study of the LCP. We say a matrix is connected if the solution set of the corresponding LCP is connected. In \cite{ctd}, Jones and Gowda addressed the connectedness of the solution set of the LCP. They proved that a matrix is connected whenever it is a $P_0$ matrix and the solution set has a bounded connected component. They also showed that if the solution set of the LCP is connected, then there is at most one solution of the LCP for all $q>0.$ Owing to the specially structured matrices involved in the study of the connectedness of the solution to the LCP, various authors have studied the connectedness of the LCP; see for example \cite{ctd,ctdl,Cntd}. The main objectives of this paper are to answer the following questions: \begin{itemize} \item[(Q1)] In LCP theory, it is a well-known result that the $R_0$ matrix gives boundedness of the LCP solution set. The same holds true for the HLCP \cite{szn}. This motivates the question of whether the notion of an $R_0$ matrix can be generalized to a set of matrices. If so, can we expect the same kind of outcome for the EHLCP? \item [(Q2)] Given that a strictly semimonotone matrix guarantees the existence of the LCP solution and its uniqueness for $q\geq 0$, it is natural to wonder whether the concept of an SSM matrix can be extended to a set of matrices. If so, does the same result hold true for the EHLCP? \item [(Q3)] Motivated by the results of Gowda and Jones \cite{ctd} regarding the connectedness of the solution set of the LCP, one can ask whether the solution set of the EHLCP is connected if the set of matrices has the column $W_0$ property and the solution set of the corresponding EHLCP has a bounded connected component. \end{itemize} The paper's outline is as follows: We present some basic definitions and results in section 2. We generalize the concept of the $R_0$ matrix and prove an existence result for the EHLCP in section 3. In section 4, we introduce the {\bf SSM}-$W$ property, and we then study an existence and uniqueness result for the EHLCP when the underlying set of matrices has this property. In the last section, we give a necessary and sufficient condition for the connectedness of the solution set of the EHLCP.
\section{Notations and Preliminaries} \subsection{Notations}Throughout this paper, we use the following notations: \begin{itemize} \item[(i)] The $n$-dimensional Euclidean space with the usual inner product will be denoted by $\mathbb{R}^n$. The set of all non-negative vectors (respectively, positive vectors) in $\mathbb{R}^n$ will be denoted by $\mathbb{R}^n_+$ (respectively, $\mathbb{R}^n_{++}$). We say $x \geq 0$ (respectively, $ >0$) if and only if $x\in\mathbb{R}^n_+$ (respectively, $\mathbb{R}^n_{++})$. \item [(ii)] The $k$-ary Cartesian power of $\mathbb{R}^n$ will be denoted by $\Lambda^{(k)}_n$ and the $k$-ary Cartesian power of $\mathbb{R}^n_{++}$ will be denoted by $\Lambda^{(k)}_{n,++}$. The bold zero '${\bf 0}$' will be used to denote the zero vector $(0,0,...,0)\in \Lambda^{(k)}_n.$ \item [(iii)] The set of all $n\times n$ real matrices will be denoted by $\mathbb{R}^{n\times n}$. We use the symbol $\Lambda^{(k)}_{n\times n}$ to denote the $k$-ary Cartesian product of $\mathbb{R}^{n\times n}$. \item [(iv)] We use $[n]$ to denote the set $\{1,2,...,n\}$. \item [(v)] Let $M\in\mathbb{R}^{n\times n}$. We use $\text{diag}(M)$ to denote the vector $(M_{11},M_{22},...,M_{nn})\in \mathbb{R}^n$, where $M_{ii}$ is the $ii^{\rm th}$ diagonal entry of matrix $M$, and $\text{det}(M)$ denotes the determinant of matrix $M$. \item[(vi)] SOL(${\bf C}, {\bf d}, q$) will be used to denote the set of all solutions to EHLCP(${\bf C},{\bf d},q$). \end{itemize} We now recall some definitions and results from LCP theory, which will be used frequently in this paper.
\begin{proposition}[\cite{wcp}]\label{star} Let $V=\mathbb{R}^n.$ Then, the following statements are equivalent. \begin{itemize}
\item [\rm(i)] $x\wedge y=0.$
\item [\rm(ii)] $x,y\geq 0$ and $~x*y=0,$ where $*$ is the Hadamard product.
\item[\rm(iii)] $x,y\geq 0~\text{and}~\langle x,y\rangle=0.$ \end{itemize} \end{proposition} \begin{definition}[\cite{PP0}]\rm Let ${\bf C}=(C_0,C_1,...,C_k)\in\Lambda^{(k+1)}_{n\times n}$. Then a matrix $R\in\mathbb{R}^{n\times n}$ is column representative of ${\bf C}$ if $$R._j\in\big\{(C_0)._j,(C_1)._j,...,(C_k)._j\big\},~\forall j\in[n],$$ where $R._j$ is the $j^{{\rm th}}$ column of matrix $R.$ \end{definition} Next, we define the column W-property. \begin{definition}[\cite{PP0}] \rm Let ${\bf C}:=(C_0,C_1,...,C_k)\in\Lambda^{(k+1)}_{n\times n}$. Then we say that ${\bf C}$ has the \begin{itemize}
\item[\rm (i)] {\it column $W$-property} if the determinants of all the column representative matrices of ${\bf C}$ are all positive or all negative.
\item[\rm (ii)] {\it column $W_0$-property} if there exists ${\bf N}:=(N_0,N_1,...,N_k)\in \Lambda^{(k+1)}_{n\times n}$ such that ${\bf C+\epsilon N}:=(C_0+ \epsilon N_0,C_1+\epsilon N_1,...,C_k+\epsilon N_k)$ has the column $W$-property for all $\epsilon>0$. \end{itemize} \end{definition} Due to Gowda and Sznajder \cite{PP0}, we have the following result. \begin{theorem}[\cite{PP0}] \label{P1} For ${\bf C}=(C_0,C_1,...,C_k)\in\Lambda^{(k+1)}_{n\times n}$, the following are equivalent:\begin{itemize}
\item[\rm(i)]${\bf C}$ has the column $W$-property.
\item[\rm(ii)] For arbitrary non-negative diagonal matrices $D_{0},D_{1},...,D_{k}\in\mathbb{R}^{n\times n}$
with $\text{\rm diag}(D_{0}+D_{1}+D_{2}+...+D_{k})>0$,
$$\text{\rm det}\big(C_0D_{0}+C_1D_{1}+...+C_kD_{k}\big)\neq 0.$$
\item [\rm(iii)]$C_0$ is invertible and $(I,C_0^{-1}C_1,...,C_0^{-1}C_k)$ has the column $W$-property.
\item [\rm(iv)] For all $q\in\mathbb{R}^n$ and ${\bf d}\in\Lambda^{(k-1)}_{n,++}$, {\rm EHLCP}$({\bf C},{\bf d},q)$ has a unique solution. \end{itemize} \end{theorem} If $k=1$ and $C_0^{-1}$ exists, then HLCP($C_0,C_1,q$) is equivalent to LCP($C_0^{-1}C_1,C_0^{-1}(q)$). In this case, $C_0^{-1}C_1$ is a $P$ matrix if and only if for all $q\in\mathbb{R}^n$, LCP($C_0^{-1}C_1,C_0^{-1}(q)$) has a unique solution (see, Theorem 3.3.7 in \cite{LCP}). Hence we have the following theorem given the previous theorem. \begin{theorem}[\cite{PP0}]\label{C1} Let $(C_0,C_1)\in\Lambda^{(2)}_{n\times n}$. Then the following are equivalent. \begin{itemize}
\item [\rm(i)] $(C_0,C_1)$ has the column $W$-property.
\item [\rm(ii)] $C_0$ is invertible and $C_0^{-1}C_1$ is a $P$ matrix.
\item [\rm(iii)] For all $q\in\mathbb{R}^n$, {\rm HLCP}$(C_0,C_1,q)$ has a unique solution. \end{itemize} \end{theorem} \subsection{Degree theory} We now recall the definition and some properties of a degree from \cite{fvi,deg} for our discussion.
Let $\Omega$ be an open bounded set in $\mathbb{R}^n$. Suppose $h:\bar{\Omega}\rightarrow \mathbb{R}^n$ is a continuous function and $p\notin h(\partial\Omega)$, where $\partial\Omega$ and $\bar{\Omega}$ denote the boundary and closure of $\Omega$, respectively. Then the degree of $h$ with respect to $p$ over $\Omega$ is defined; it is denoted by $\text{deg}(h,\Omega,p).$ The equation $h(x)=p$ has a solution whenever $\text{deg}(h,\Omega,p)$ is non-zero. If $h(x)=p$ has only one solution, say $y$ in $\mathbb{R}^n$, then the degree is the same over all bounded open sets containing $y$. This common degree is denoted by $\text{deg}(h,p)$. \subsubsection{Properties of the degree} The following properties are used frequently here. \begin{itemize} \item[(D1)] deg($I,\Omega,\cdot)=1$, where $I$ is the identity function. \item [(D2)] {\bf Homotopy invariance}: Let a homotopy $\Phi(x,s):\mathbb{R}^n\times[0,1]\rightarrow \mathbb{R}^n $ be continuous. If the zero set of $\Phi(x,s),~X=\{x:\Phi(x,s)={0}~\text{for some}~s\in[0,1]\}$, is bounded, then for any bounded open set $\Omega$ in $\mathbb{R}^n$ containing the zero set $X$, we have $$\text{deg}(\Phi(x,1),\Omega,{ 0})=\text{deg}(\Phi(x,0),\Omega,{0}).$$ \item[(D3)] {\bf Nearness property}: Assume $\text{deg}(h_1(x),\Omega,p)$ is defined and $h_2:{\bar\Omega}\rightarrow \mathbb{R}^n$ is a continuous function. If
$\sup_{x\in\Omega}\| h_2(x)-h_1(x)\|<\text{dist}(p,\partial\Omega)$, then $\text{deg}(h_2(x),\Omega,p)$ is defined and equals $\text{deg}(h_1(x),\Omega,p)$. \end{itemize} The following result from Facchinei and Pang \cite{fvi} will be used later. \begin{proposition}[\cite{fvi}]\label{ND} Let $\Omega$ be a non-empty, bounded open subset of $\mathbb{R}^n$ and let $\Phi:\bar{\Omega}\rightarrow \mathbb{R}^n$ be a continuous injective mapping. Then $\text{\rm deg}(\Phi,\Omega,p)\neq0$ for all $p\in\Phi(\Omega)$. \end{proposition} \noindent{\bf Note}: All the degree-theoretic results and concepts are also applicable over any finite-dimensional Hilbert space (like $\mathbb{R}^n$ or $\mathbb{R}^n\times\mathbb{R}^n\times\mathbb{R}^n$, etc.). \section{$R_0$-$W$ property} In this section, we first define the $R_0$-$W$ property for a set of matrices, which is a natural generalization of the $R_0$ matrix in LCP theory. We then show that the $R_0$-$W$ property gives the boundedness of the solution set of the corresponding EHLCP. \begin{definition}\rm Let ${\bf C}=(C_0,C_1,...,C_k) \in \Lambda^{(k+1)}_{n\times n}$. We say that ${\bf C}$ has the $R_0$-$W$ {\it property} if the system $$C_0x_{0}=\sum_{i=1}^{k} C_ix_{i}~\text{and}~x_{0}\wedge x_{j}=0 ~~\forall~j\in [k]$$ has only the zero solution.
\end{definition} It can easily be seen that the $R_0$-$W$ property coincides with the $R_0$ matrix property when $k=1$ and $C_0 =I$. It is also noted (see \cite{wcp}) that if $k=1$, then the $R_0$-$W$ property is referred to as the $R_0$ pair. To proceed further, we prove the following result. \begin{lemma}\label{l1} Let ${\bf C}=(C_0,C_1,...,C_k) \in \Lambda^{(k+1)}_{n\times n}$ and ${\bf{x}} =(x_{0},x_{1},...,x_{k})\in\text{\rm SOL}({\bf C},{\bf d},q)$. Then ${\bf{x}}$ satisfies the following system: $$C_0x_{0}=q+\sum_{i=1}^{k} C_ix_{i}~\text{and}~x_{0}\wedge x_{j}=0~\forall~j\in [k].$$ \end{lemma} \begin{proof} As $x_{0}\geq0$, there exists an index set $\alpha \subseteq [n]$ such that $(x_{0})_i=\begin{cases}
>0 & i\in \alpha\\
0 & i\in [n]\setminus \alpha \end{cases}.$ Since $x_{0} \wedge x_{1}=0$, we have $(x_{1})_i=0$ for all $i\in \alpha$. From $(d_{1}-x_{1})\wedge x_{2}=0$, we get $(d_{1})_i (x_{2})_i =0~\forall i\in \alpha$. Since $d_{1}>0$, this gives that $(x_{2})_i =0~\forall i\in \alpha$. By substituting $(x_{2})_i =0~\forall i\in \alpha$ in $(d_{2}-x_{2})\wedge x_{3}=0$, we obtain $(x_{3})_i =0~\forall i\in \alpha $. Continuing the process in a similar way, one gets $(x_{4})_i =(x_{5})_i=...=(x_{k})_i=0~\forall i\in \alpha$. So, $x_{0}\wedge x_{j}=0~ \forall ~j \in[k]$. This completes the proof. \end{proof} We now prove the boundedness of the solution set of the EHLCP when the involved set of matrices has the $R_0$-$W$ property. \begin{theorem}\label{R_0} Let ${\bf C}=(C_0,C_1,...,C_k) \in \Lambda^{(k+1)}_{n\times n}$. If ${\bf C}$ has the $R_0$-$W$ property, then $\text{\rm SOL}({\bf C},{\bf d},q)$ is bounded for every $q\in\mathbb{R}^n$ and ${\bf d} \in \Lambda^{(k-1)}_{n,++}$. \end{theorem}
\begin{proof} Suppose there exist $q\in\mathbb{R}^n$ and ${\bf d}=(d_{1}, d_{2},...,d_{k-1})\in \Lambda^{(k-1)}_{n,++}$ such that $\text{SOL}({\bf C},{\bf d},q)$ is unbounded. Then there exists a sequence ${\bf x}^{(m)}=( {x^{(m)}_{0}},{x^{(m)}_{1}},...,{x^{(m)}_{k}})$ in $\Lambda^{(k+1)}_n$ such that $||{\bf x}^{(m)}|| \to \infty $ as $m\to \infty$ and it satisfies \begin{equation}\label{bound}
\begin{aligned}
~~~~& C_0 {x^{(m)}_{0}} =q+\sum_{i=1}^{k} C_i {x^{(m)}_{i}} \\
~~~~& {x^{(m)}_{0}} \wedge {x^{(m)}_{1}}=0 ~~\text{and}~~ (d_{j}-{x^{(m)}_{j}})\wedge {x^{(m)}_{j+1}}=0 ~\forall j\in[k-1].
\end{aligned} \end{equation} From the Lemma \ref{l1}, equation \ref{bound} gives that \begin{equation}\label{bd1}
\begin{aligned}
C_0 {x^{(m)}_{0}} =&q+\sum_{i=1}^{k} C_i {x^{(m)}_{i}} ~~\text{and}~~
{x^{(m)}_{0}} \wedge {x^{(m)}_{j}} =&0 ~\forall j\in[k].\\
\end{aligned} \end{equation}
As $\dfrac{{\bf x}^{(m)}}{\|{\bf x}^{(m)}\|}$ is a unit vector for all $m$, by compactness of the unit sphere it has a convergent subsequence; without loss of generality, we may assume that $\dfrac{{\bf x}^{(m)}}{\|{\bf x}^{(m)}\|}$ itself converges to some vector ${\bf{y}}=( y_{0},y_{1},...,y_{k}) \in \Lambda^{(k+1)}_{n}$ with $||{\bf{y}}||=1$.
Dividing equation (\ref{bd1}) by $\|{\bf x}^{(m)}\|$ and letting $m\rightarrow \infty$, we get $$ C_0 y_{0}=\sum_{i=1}^{k} C_i y_{i} ~~\text{and}~~ y_{0}\wedge y_{j}=0 ~\forall j\in[k].$$ This implies that ${\bf{y}}$ must be the zero vector as ${\bf C}$ has the $R_0$-$W$ property, which contradicts the fact that $||{\bf{y}}||=1$. Therefore $\text{SOL}({\bf C},{\bf d},q)$ is bounded. \end{proof} \subsection{Degree of EHLCP} Let ${\bf C}=(C_0,C_1,...,C_k)\in \Lambda^{(k+1)}_{n \times n}$ and ${\bf d}=(d_{1}, d_{2}, ...., d_{k-1})\in \Lambda^{(k-1)}_{n,++}$. We define a function $F:\Lambda^{(k+1)}_n \to \Lambda^{(k+1)}_n $ as \begin{equation}\label{e1} \begin{aligned}
F({\bf{x}})=\begin{bmatrix}
C_0 x_{0} -\sum_{i=1}^{k} C_ix_{i}\\ x_{0}\wedge x_{1}\\ (d_{1}-x_{1})\wedge x_{2}\\ (d_{2}-x_{2})\wedge x_{3}\\ .\\ .\\ .\\ (d_{k-1}-x_{k-1})\wedge x_{k}\\
\end{bmatrix}.\end{aligned}\end{equation} We denote the degree of $F$ with respect to ${\bf 0}$ over a bounded open set $\Omega \subseteq \Lambda^{(k+1)}_n$ by $\rm{deg}({\bf C},\Omega,{\bf 0})$. It is noted that if ${\bf C}$ has the $R_0$-$W$ property, then, in view of Lemma \ref{l1}, $F({\bf{x}})={\bf 0} \Leftrightarrow {{\bf{x}}}={\bf 0}$, which implies that $\text{deg}({\bf C},\Omega,{\bf 0})=\text{deg} ({\bf C},{\bf 0})$ for any bounded open set $\Omega$ containing the origin in $\Lambda^{(k+1)}_n$. We call this degree the EHLCP-degree of ${\bf C}.$
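The map $F$ admits a direct componentwise transcription, which is convenient for numerical experiments. The sketch below (in Python, with the componentwise minimum playing the role of $\wedge$; the function name and argument layout are our own) is illustrative only; note that ${\bf{x}}$ solves EHLCP(${\bf C},{\bf d},q$) exactly when $F({\bf{x}})=\hat{q}$ with $\hat{q}=(q,0,\dots,0)$.
\begin{verbatim}
import numpy as np

def F(C, d, x):
    # C = [C_0, ..., C_k] (n x n arrays), d = [d_1, ..., d_{k-1}] (positive
    # vectors), x = [x_0, ..., x_k]; returns the stacked value of F(x)
    k = len(C) - 1
    rows = [C[0] @ x[0] - sum(C[i] @ x[i] for i in range(1, k + 1)),
            np.minimum(x[0], x[1])]
    rows += [np.minimum(d[j - 1] - x[j], x[j + 1]) for j in range(1, k)]
    return np.concatenate(rows)
\end{verbatim}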
We now prove an existence result for EHLCP. \begin{theorem}\label{P2} Let ${\bf C}=(C_0,C_1,...,C_k)\in \Lambda^{(k+1)}_{n\times n}$. Suppose the following hold: \begin{itemize}
\item[\rm (i)] ${\bf C}$ has the $R_0$-$W$ property.
\item[\rm (ii)] ${\rm{deg}}({\bf C},{\bf 0})\neq 0$. \end{itemize} Then {\rm EHLCP(${\bf C},{\bf d},q$)} has a non-empty compact solution set for all $q\in\mathbb{R}^n$ and ${\bf d} \in \Lambda^{(k-1)}_{n, ++}$. \end{theorem} \begin{proof} As the solution set of the EHLCP is closed, it is enough to prove that it is non-empty and bounded. We first define a homotopy $\Phi: \Lambda_n^{(k+1)} \times [0,1] \to \Lambda_n^{(k+1)}$ as $$\Phi({\bf{x}},s)=\begin{bmatrix}
C_0 x_{0} -\sum_{i=1}^{k} C_ix_{i}-sq\\ x_{0}\wedge x_{1}\\ (d_{1}-x_{1})\wedge x_{2}\\ (d_{2}-x_{2})\wedge x_{3}\\ .\\ .\\ .\\ (d_{k-1}-x_{k-1})\wedge x_{k}\\ \end{bmatrix}.$$ Then, $$\Phi({\bf{x}},0)=F({\bf{x}})~~\text{and}~\Phi({\bf{x}},1)=F({\bf{x}})- \hat{q}, \text{where}~~\hat{q}=(q,0,0,...0)\in \Lambda^{(k+1)}_n.$$ By using a similar argument as in Theorem \ref{R_0} above, we can easily show that the zero set of the homotopy, $X=\{{\bf{x}}:\Phi({\bf{x}},s)={\bf 0}~\text{for some}~s\in[0,1]\}$, is bounded. From the property of degree (D2), we get $\text{deg}(F,\Omega,{\bf 0})=\text{deg}(F-\hat{q},\Omega,{\bf 0})$ for any open bounded set $\Omega$ containing $X$. As $\text{deg}(F, \Omega,{\bf 0})=\text{deg}({\bf C},{\bf 0})\neq 0$, we obtain $\text{deg}(F-\hat{q},\Omega,{\bf 0})\neq 0$, which implies $\text{SOL}({\bf C},{\bf d},q) $ is non-empty. As ${\bf C}$ has the $R_0$-$W$ property, by Theorem \ref{R_0}, $\text{SOL}({\bf C},{\bf d},q) $ is bounded. This completes the proof. \end{proof} \section{ ${\bf SSM}$-$W$ property} In this section, we first define the ${\bf SSM}$-$W$ {\it property} for a set of matrices, which is a generalization of the SSM matrix in LCP theory, and we then prove an existence and uniqueness result for the EHLCP when the involved set of matrices has the ${\bf SSM}$-$W$ property.
We now recall that an $n\times n$ real matrix $M$ is called strictly semimonotone (SSM) matrix if [$x\in \mathbb{R}^n_+,~x*Mx\leq 0\Rightarrow x=0$]. We generalize this concept to the set of matrices. \begin{definition}\rm We say that ${\bf C}=(C_0,C_1,...,C_k)\in \Lambda^{(k+1)}_{n\times n}$ has the ${\bf SSM}$-$W$ property if \begin{equation*}
\{C_0x_{0}=\sum_{i=1}^{k} C_ix_{i},~x_{i}\geq 0~\text{and}~~ x_{0}* x_{i}\leq 0~~\forall i\in[k]\}\Rightarrow {{\bf{x}}}=(x_0,x_1,..,x_k)={\bf 0}. \end{equation*} \end{definition} We prove the following result. \begin{proposition}\label{P2} Let ${\bf C}=(C_0,C_1,...,C_k)\in \Lambda^{(k+1)}_{n\times n}$. If ${\bf C}$ has the ${\bf SSM}$-$ W$ property, then the following hold: \begin{itemize}
\item[\rm (i)] $C_0^{-1}$ exists and $C_0^{-1}C_i$ is a strictly semimonotone matrix for all $i\in[k].$
\item[\rm(ii)] $(I,C_0^{-1} C_1,...,C_0^{-1}C_k)$ has the ${\bf SSM}$-$W$ property.
\item [\rm(iii)] $(P^TC_0P,P^TC_1P,...,P^TC_kP)$ has the ${\bf SSM}$-$ W$ property for any permutation matrix $P$ of order $n$. \end{itemize} \end{proposition} \begin{proof} (i): Suppose there exists a vector $x_{0}\in \mathbb{R}^n$ such that $C_0x_{0}=0$. Then we have $$C_0x_{0}=C_1 0+C_2 0+ ...+C_k 0.$$ This gives that $x_{0}=0$ as ${\bf C}$ has the ${\bf SSM}$-$ W$ property. Thus $C_0$ is invertible.
Now we prove the second part of (i). Without loss of generality, it is enough to prove that $C^{-1}_0 C_1$ is a strictly semimonotone matrix. Suppose there exists a vector $y \in\mathbb{R}^n$ such that $y \geq 0$ and $y * (C_0^{-1}C_1) y \leq 0.$ Let $y_0:=(C_0^{-1}C_1)y$, $y_1:=y$ and $y_i:=0$ for all $2\leq i\leq k$. Then we get $$C_0y_{0}=C_1 y_{1}+C_2 y_{2}+...+C_iy_{i}+..+C_k y_{k},~~y_{j} \geq 0~\text{and}~y_{0} * y_{j}\leq 0~\forall j\in [k].$$ Since ${\bf C}$ has the ${\bf SSM}$-$ W$ property, $y_{j}=0~~\forall j\in [k]$. Thus $C_0^{-1}C_1$ is a strict semimonotone matrix. This completes the proof.
(ii): It follows from the definition of the ${\bf SSM}$-$W$ property.
(iii): Let ${\bf x}=(x_{0},x_{1},...,x_{k})\in \Lambda^{(k+1)}_n$ be such that $$ (P^TC_0P )x_{0}=\sum_{i=1}^{k} (P^TC_iP) x_{i},~x_{j}\geq 0~\text{and}~x_{0}* x_{j}\leq 0 ~\forall j\in[k].$$ As $P$ is a non-negative matrix with $PP^T=P^TP=I$, we can rewrite the above system as $$ C_0Px_{0}=\sum_{i=1}^{k} C_iP x_{i},~ P x_{j} \geq 0 ~\text{and}~Px_{0}* Px_{j}\leq 0 ~\forall j\in[k].$$ By the ${\bf SSM}$-$ W$ property of ${\bf C}$, $P x_{j}=0$ for all $0\leq j \leq k$, which implies ${\bf{x}} ={\bf 0}$. This completes the proof. \end{proof} In the above Proposition \ref{P2}, it can easily be seen that the converses of items (ii) and (iii) hold. But the converse of item (i) need not be true. The following example illustrates this. \begin{example}\rm Let ${\bf C}=(C_0,C_1,C_2)\in \Lambda^{(3)}_{2\times 2}$, where $$C_0=\begin{bmatrix}
1&0\\0&1\\ \end{bmatrix},~C_1=\begin{bmatrix}
1&-2\\0&1\\ \end{bmatrix},C_2=\begin{bmatrix}
1&0\\-2&1\\ \end{bmatrix}.$$ It is easy to check that $C_0^{-1}C_1=C_1$ and $C_0^{-1}C_2=C_2$ are $P$ matrices. So, $C_0^{-1}C_1$ and $ C_0^{-1}C_2$ are SSM matrices. Let ${\bf{x}}=(x_{0},x_{1},x_{2})=((0,0)^T,(1,1)^T,(1,1)^T)\in \Lambda^{(3)}_2$. Then we can see that the non-zero vector $\bf{x}$ satisfies $$C_0x_{0}=C_1x_{1}+C_2x_{2},~x_{1}\geq 0,~x_{2} \geq 0~\text{and}~x_{0}*x_{1}=0=x_{0} * x_{2}.$$ So ${\bf C}$ cannot have the ${\bf SSM}$-$ W$ property. \end{example} The following result is a generalization of a well-known result in matrix theory that every $P$ matrix is an SSM matrix. \begin{theorem}\label{T4} Let ${\bf C}=(C_0,C_1,...,C_k)\in \Lambda^{(k+1)}_{n\times n}$. If ${\bf C}$ has the column $W$-property, then ${\bf C}$ has the ${\bf SSM}$-$ W$ property. \end{theorem} \begin{proof} Suppose there exists a non-zero vector ${\bf x}=(x_{0},...,x_{k})\in \Lambda^{(k+1)}_n$ such that $$C_0x_{0}=\sum_{i=1}^{k} C_ix_{i},~x_{j}\geq 0,~ x_{0}* x_{j}\leq 0~\forall j\in[k].$$ Consider a vector $y\in\mathbb{R}^n$ whose $j^{\rm{th}}$ component is given by $$y_j=\begin{cases}
-1 & \text{if} ~(x_{0})_j>0\\ 1 & \text{if} ~(x_{0})_j<0\\1 & \text{if} ~(x_{0})_j=0 ~\text{and}~(x_{i})_j\neq 0~\text{for some}~ i\in[k]\\0 & \text{if}~ (x_{0})_j=0 ~\text{and} ~(x_{i})_j= 0 ~\text{for all}~i\in[k] \end{cases}.$$ As ${\bf{x}}$ is a non-zero vector, ${\bf{y}}$ must be a non-zero vector. Consider the diagonal matrices $D_{0}, D_{1},...,D_{k}$ which are defined by $$(D_{0})_{jj}=\begin{cases}(x_{0})_j & \text{if} ~(x_{0})_j>0\\ -(x_{0})_j & \text{if} ~(x_{0})_j<0\\0 & \text{if} (x_{0})_j=0~\text{and}~(x_{i})_j\neq 0~\text{for some}~ i\in[k] \\1 & \text{if}~ (x_{0})_j=0 ~\text{and} ~(x_{i})_j= 0 ~\text{for all}~i\in[k] \end{cases}$$ and for all $i\in [k]$, $$(D_{i})_{jj}=\begin{cases}
0&\text{if } (x_{0})_j>0\\ (x_{i})_j&\text{ else } \end{cases}.$$ It is easy to verify that $D_{0}, D_{1},...,D_{k}$ are non-negative diagonal matrices and $\text{diag}(D_{0}+ D_{1}+...+D_{k})>0$. And also note that \begin{equation}\label{22}
x_{0}=-D_{0}y~\text{and}~x_{i}=D_{i}y~\forall i\in [k]. \end{equation} By substituting the Equation \ref{22} in $C_0x_{0}=\sum_{i=1}^{k} C_ix_{i}$, we get \begin{equation*}
C_0(-D_{0}y)=\sum_{i=1}^{k} C_iD_{i}(y) \Rightarrow
\big(C_0D_{0}+C_1D_{1}+...+C_kD_{k}\big)y=0. \end{equation*} Since $y\neq 0$, this implies that $\text{det}\big(C_0D_{0}+C_1D_{1}+...+C_kD_{k}\big)=0$. So, by Theorem \ref{P1}, ${\bf C}$ does not have the column $W$-property, which is a contradiction. Therefore, ${\bf C}$ has the ${\bf SSM}$-$W$ property. \end{proof} The following example illustrates that the converse of the above theorem does not hold. \begin{example}\rm Let ${\bf C}=(C_0,C_1,C_2) \in \Lambda^{(3)}_{2\times 2}$ such that $$C_0=\begin{bmatrix}
1&0\\0&1\\ \end{bmatrix},~C_1=\begin{bmatrix}
1&1\\1&1\\ \end{bmatrix},C_2=\begin{bmatrix}
1&1\\1&1\\ \end{bmatrix}.$$ Suppose $ {\bf{w}} =(x,y,z)\in \Lambda^{3}_2$ such that $$ C_{0}x=C_{1}y+C_{2}z~\text{and}~ y,z\geq 0, x*y\leq 0, x*z\leq 0.$$ From $C_{0}x=C_{1}y+C_{2}z$, we get $$\begin{bmatrix}
x_{1}\\ x_{2} \end{bmatrix}=\begin{bmatrix}
y_1+y_2+z_1+z_2\\ y_1+y_2+z_1+z_2\\ \end{bmatrix}.$$ As $x*y\leq 0,~x*z\leq 0$ and from the above equation, we have \begin{equation}\label{33}
\begin{aligned}
y_1&(y_1+y_2+z_1+z_2)\leq 0~\text{and}~ y_2(y_1+y_2+z_1+z_2)\leq 0,\\
z_1&(y_1+y_2+z_1+z_2)\leq 0~\text{and}~
z_2(y_1+y_2+z_1+z_2)\leq 0.\\
\end{aligned} \end{equation} Since $y,z\geq 0$, from equation (\ref{33}) we get $x=y=z=0.$ Hence ${\bf C}$ has the ${\bf SSM}$-$ W$ property. As $\text{det}(C_1)=0$, by the definition of the column $W$-property, ${\bf C}$ does not have the column $W$-property. \end{example} We now give a characterization of the ${\bf SSM}$-$W$ property. \begin{theorem}\label{CW} ${\bf C}=(C_0,C_1,...,C_k)\in \Lambda^{(k+1)}_{n\times n}$ has the ${\bf SSM}$-$ W$ property if and only if $(C_0,C_1D_{1}+C_2D_{2}+...+C_kD_{k})\in \Lambda_{n\times n}^{(2)}$ has the ${\bf SSM}$-$ W$ property for any set of non-negative diagonal matrices $(D_{1},D_{2},...,D_{k})\in \Lambda^{(k)}_{n\times n}$ with $\text{\rm diag}(D_{1}+D_{2}+...+D_{k})>0$. \end{theorem} \begin{proof} {\it Necessary part}: Let $(D_{1},D_{2},...,D_{k})\in \Lambda^{(k)}_{n\times n}$ be a set of non-negative diagonal matrices with $\text{\rm diag}(D_{1}+D_{2}+...+D_{k})>0$. Suppose there exist vectors $x_{0}\in\mathbb{R}^n$ and $y\in \mathbb{R}^n_+ $ such that $$C_0x_{0}=\big(C_1D_{1}+C_2D_{2}+...+C_kD_{k}\big)y~~\text{and}~~x_{0}*y\leq 0.$$ For each $i\in[k]$, we set $x_{i}:=D_{i}y$. As each $D_{i}$ is a non-negative diagonal matrix, from $x_{0}*y\leq 0$ we get $x_{0}*x_{i}\leq 0~ \forall i\in[k]$. Then we have $$C_0x_{0}=C_1x_{1}+C_2x_{2}+...+C_kx_{k},$$ $$x_{i}\geq 0,~x_{0}*x_{i}\leq 0~\forall i\in[k].$$ As ${\bf C}$ has the ${\bf SSM}$-$ W$ property, we must have $x_{0}=x_{1}=...=x_{k}=0.$ This implies $x_{1}+x_{2}+...+x_{k}=(D_{1}+D_{2}+...+D_{k})y=0$. As $\text{\rm diag}(D_{1}+D_{2}+....+D_{k})>0$, we have $y=0$. This completes the necessary part.
\noindent{\it Sufficiency part}: Let ${\bf x}=(x_{0},x_{1},...,x_{k})\in \Lambda^{(k+1)}_n$ such that \begin{equation} \label{SS}
C_{0}x_{0}=C_1x_{1}+C_2x_{2}+...+C_kx_{k}~~\text{and}~~x_{j}\geq 0,~x_{0}*x_{j}\leq 0~\forall j\in[k]. \end{equation}
We now consider the $n\times k$ matrix $X$ whose $j^{\rm th}$ column is $x_{j}$ for $j\in[k]$. So, $X=[x_{1}~x_{2}~...~x_{k}]$. Let $S:=\{i\in [n]: i^{\rm{th}} ~\text{row sum of $X$ is zero}\}$. From this, we define a vector $y\in\mathbb{R}^n$ and diagonal matrices $D_{1},D_{2},..,D_{k}$ such that $$y_i=\begin{cases}
1 & i\notin S\\ 0 & i\in S\\ \end{cases}~~\text{and}~~ (D_{j})_{ii}=\begin{cases}
(x_{j})_i & i\notin S\\~ 1 &i\in S\\ \end{cases},$$ where $(D_{j})_{ii}$ is the diagonal entry of $D_{j}$ for all $j\in[k]$.
It can easily be seen that $D_{j}y=x_{j}$ for all $j\in[k]$ and that each $D_{j}$ is a non-negative diagonal matrix with $\text{\rm diag}(D_{1}+D_{2}+...+D_{k})>0$. Therefore, from equation (\ref{SS}), we get $$C_0x_{0}=\big(C_1D_{1}+C_2D_{2}+...+C_kD_{k}\big)y,$$ $$x_{0}*y\leq 0.$$ From the hypothesis, we get $x_{0}=0=y$, which implies ${\bf{x}}={\bf 0}$. This completes the sufficiency part. \end{proof} We now give a characterization of the column $W$-property. \begin{theorem}\label{CD} ${\bf C}=(C_0,C_1,...,C_k)\in \Lambda^{(k+1)}_{n\times n}$ has the column $W$-property if and only if $(C_0,C_1D_{1}+C_2D_{2}+...+C_kD_{k})\in \Lambda_{n\times n}^{(2)}$ has the column $W$-property for any set of non-negative diagonal matrices $D_{1},D_{2},...,D_{k}$ of order $n$ with ${\rm{diag}}(D_{1}+D_{2}+...+D_{k})>0$. \end{theorem} \begin{proof} {\it Necessary part}: It is obvious.
\noindent{\it Sufficiency part}: Let $\{E^{0},E^{1},...,E^{k}\}$ be a set of non-negative diagonal matrices of order $n$ such that ${\rm{diag}}(E^{0}+E^{1}+...+E^{k})>0$. We claim that $\det(C_0E^{0}+C_1E^{1}+...+C_kE^{k}) \neq 0$.
To prove this, we first construct a set of non-negative diagonal matrices $D_{1},D_{2},...,D_{k}$ and $E$ as follows: $$(D_{j})_{ii}=\begin{cases}
E^{j}_{ii}&\text{~if~} \sum_{m=1}^{k}E^{m}_{ii}\neq0\\
1&\text{~if~} \sum_{m=1}^{k}E^{m}_{ii}=0\\ \end{cases}\text{ and } E_{ii}=\begin{cases}
1 &\text{~if~} \sum_{m=1}^{k}E^{m}_{ii}\neq0\\
0&\text{~if~} \sum_{m=1}^{k}E^{m}_{ii}=0\\ \end{cases},$$ where $(D_{j})_{ii}$ is $ii^{\rm th}$ diagonal entry of $D_{j}$ for $j\in[k]$ and $ E_{ii}$ is $ii^{\rm th}$ diagonal entry of matrix $E$. By an easy computation, we have $D_{j}E=E^{j}~\forall j\in[k]$ and ${\rm diag}(D_{1}+D_{2}+...+D_{k})>0$. From ${\rm{diag}}(E^{0}+E^{1}+...+E^{k})>0$, we get ${\rm diag}(E^{0}+E)>0$. As $D_{j}E=E^{j}~\forall j\in[k]$ and $(C_0,C_1D_{1}+C_2D_{2}+...+C_kD_{k})$ has column $W$-property, by Theorem \ref{P1}, we have \begin{equation*}
\begin{aligned}
\det(C_0E^{0}+C_1E^{1}+...+C_kE^{k})&=\det(C_0E^{0}+C_1D_{1}E+...+C_kD_{k}E)\\
&=\det(C_0E^{0}+(C_1D_{1}+...+C_kD_{k})E) \neq 0.
\end{aligned} \end{equation*} Hence ${\bf C}$ has the column $W$-property. This completes the proof. \end{proof} A well-known result in the standard LCP is that the strictly semimonotone matrices and the $P$ matrices coincide within the class of $Z$ matrices (see Theorem 3.11.10 in \cite{LCP}). Analogous to this result, we prove the following theorem. \begin{theorem}\label{cssm} Let ${\bf C}=(C_0,C_1,...,C_k)\in \Lambda_{n\times n}^{(k+1)}$ be such that $C_0^{-1}C_i$ is a $Z$ matrix for all $i\in[k]$. Then the following statements are equivalent.\begin{itemize}
\item [\rm (i)] ${\bf C}$ has the column $W$-property.
\item[\rm(ii)] ${\bf C}$ has the ${\bf SSM}$-$ W$ property. \end{itemize} \end{theorem} \begin{proof} (i)$\implies$(ii): It follows from Theorem \ref{T4}.
(ii)$\implies$(i): Let $\{D_{1}, D_{2},...,D_{k}\}$ be the set of non-negative diagonal matrices of order $n$ such that $\text{\rm diag}(D_{1}+D_{2}+...+D_{k})>0$. In view of Theorem \ref{CD}, it is enough to prove that $(C_0,C_1D_{1}+C_2D_{2}+...+C_kD_{k})$ has the column $W$-property.
As ${\bf C}$ has the ${\bf SSM}$-$ W$ property, by Theorem \ref{CW}, we have $(C_0,C_1D_{1}+...+C_kD_{k})$ has the ${\bf SSM}$-$ W$ property. So, by Proposition \ref{P2}, $\big(I,C_0^{-1}\big(C_1D_{1}+...+C_kD_{k}\big)\big)$ has the ${\bf SSM}$-$ W$ property and $C_0^{-1}\big(C_1D_{1}+C_2D_{2}+...+C_kD_{k}\big)$ is a strict semimonotone matrix. As $C_0^{-1}C_i$ is a $Z$ matrix, we get $C_0^{-1}\big(C_1D_{1}+C_2D_{2}+...+C_kD_{k}\big)$ is also a $Z$ matrix. Hence $C_0^{-1}\big(C_1D_{1}+C_2D_{2}+...+C_kD_{k}\big)$ is a $P$ matrix. So, by Theorem \ref{C1}, $(C_0,C_1D_{1}+C_2D_{2}+...+C_kD_{k})$ has the column $W$-property. Hence we have our claim. \end{proof} \begin{corollary} Let ${\bf C}=(C_0,C_1,...,C_k)\in \Lambda_{n\times n}^{k+1}$ such that $C_0^{-1}C_i$ be a $Z$ matrix for all $i\in[k]$. Then the following statements are equivalent. \begin{itemize}
\item [\rm (i)] ${\bf C}$ has the ${\bf SSM}$-$ W$ property.
\item [\rm(ii)] For all $q\in\mathbb{R}^n$ and ${\bf d}\in\Lambda^{(k-1)}_{n,++}$, {\rm EHLCP}$({\bf C},{\bf d},q)$ has a unique solution. \end{itemize} \end{corollary} \begin{proof} (i) $\implies$(ii): It follows from Theorem \ref{cssm} and Theorem \ref{P1}. (ii)$\implies$(i): It follows from Theorem \ref{P1} and Theorem \ref{T4}. \end{proof} In the standard LCP \cite{deg}, a strictly semimonotone matrix guarantees the existence of a solution of the LCP. We now prove that the same result holds for the EHLCP. \begin{theorem}\label{degg} If ${\bf C}=(C_0,C_1,...,C_k)\in \Lambda^{(k+1)}_{n\times n}$ has the ${\bf SSM}$-$W$ property, then $\rm{{SOL}}({\bf C},{\bf d},q)\neq \emptyset$ for all $q\in\mathbb{R}^n$ and ${\bf d}\in \Lambda^{(k-1)}_{n,++}$. \end{theorem} \begin{proof} As ${\bf C}$ has the ${\bf SSM}$-$W$ property, ${\bf C}$ has the $R_0$-$W$ property.
C_0x_{0} \\ x_{1}\\ x_{2}\\ x_{3}\\ .\\ .\\ .\\ x_{k} \\ \end{bmatrix}+(1-t)\begin{bmatrix}
C_0 x_{0} -\sum_{i=1}^{k} C_ix_{i}\\ x_{0}\wedge x_{1}\\ (d_{1}-x_{1})\wedge x_{2}\\ (d_{2}-x_{2})\wedge x_{3}\\ .\\ .\\ .\\ (d_{k-1}-x_{k-1})\wedge x_{k}\\ \end{bmatrix}.$$ Let $F({\bf x}):=\Phi({\bf x},0)$ and $G({\bf x}):=\Phi({\bf x},1)$. We first prove that the zero set $X=\{{\bf{x}}:\Phi({\bf{x}},t)={\bf 0}~\text{for some}~t\in[0,1]\}$ of homotopy $\Phi$ contains only zero. We consider the following cases.
{\it Case 1}: Suppose $t=0$ or $t=1$. If $t=0$, then $\Phi({\bf x},0)={\bf 0}\implies F({\bf{x}})={\bf 0}$. As ${\bf C}$ has the ${\bf SSM}$-$W$ property, by Lemma \ref{l1} we have $F({\bf x})={\bf 0}\Rightarrow {\bf x}={\bf 0}$. If $t=1$, then $\Phi({\bf x},1)={\bf 0}\implies G({\bf{x}})={\bf 0}$. Again, since ${\bf C}$ has the ${\bf SSM}$-$W$ property, $C^{-1}_0$ exists, which implies that $G$ is a one-one map. So, $G({\bf x})={\bf 0}\Rightarrow {\bf x}={\bf 0}.$
{\it Case 2}: Suppose $t\in(0,1)$. Then $\Phi({\bf{x}},t)={\bf 0}$ which gives that \begin{equation}\label{pp}
\begin{bmatrix}
C_0 x_{0} -\sum_{i=1}^{k} C_ix_{i}\\ x_{0}\wedge x_{1}\\ (d_{1}-x_{1})\wedge x_{2}\\ (d_{2}-x_{2})\wedge x_{3}\\ .\\ .\\ .\\ (d_{k-1}-x_{k-1})\wedge x_{k}\\
\end{bmatrix}=-\alpha\begin{bmatrix}
C_0x_{0} \\ x_{1}\\ x_{2}\\ x_{3}\\ .\\ .\\ .\\ x_{k} \\
\end{bmatrix},~~\text{where}~~\alpha=\dfrac{t}{1-t}>0. \end{equation} From the second row of above equation, we have $$x_{0}\wedge x_{1}=-\alpha {x_{1}}\implies \text{min}\{{x_{0}}+\alpha {x_{1}} ,(1+\alpha){x_{1}}\}=0.$$ By Proposition \ref{star}, we get $x_{1} \geq 0$ and $({x_{0}}+\alpha {x_{1}}) * (1+\alpha){x_{1}} =0 $ which implies that ${x_{0}} * {x_{1}} \leq 0.$ Set $\Delta:=\{i\in [n]: ({x_{1}})_{i} >0\}$. So, we have \begin{equation}\label{EX}
(x_{0})_{i}=\begin{cases}
~\leq 0~&{\text{if}}~ i\in \Delta\\
~\geq 0~& {\text{if}} ~i \notin \Delta
\end{cases}~~~\text{and}~~(x_{1})_{i}=\begin{cases}
>0~~\text{if}~i\in \Delta\\
=0~~\text{if}~i\notin \Delta
\end{cases}. \end{equation} From third row of the equation \ref{pp}, we have $ (d_{1}-x_{1})\wedge x_{2}=-\alpha {x_{2}}$ which is equivalent $$\mathop{\rm min}\{d_{1}-x_{1}+\alpha {x_{2}}, (1+\alpha){x_{2}}\}=0.$$ This gives that $x_{2}\geq 0$ and $(d_{1}-x_{1}+\alpha {x_{2}})*(1+\alpha){x_{2}}=0$. As $d_{1}>0$ and from the last term in equation \ref{EX}, we have $$(x_{2})_{i}=\begin{cases}
\geq 0~\text{if}~i\in \Delta\\
=0 ~\text{if}~i\notin \Delta \end{cases}. $$ This leads that $x_{0} * x_{2}\leq 0$. By continuing the similar argument for the remaining rows, we get $$x_{j}\geq 0~~\text{and}~x_{0}*x_{j}\leq 0~\forall j\in [k].$$ From the first row of the equation \ref{pp}, the vectors ${\bf{x}}=(x_{0},x_{1},...,x_{k})$ satisfies $$ C_0(1+\alpha) x_{0} =\sum_{i=1}^{k} C_i x_{i}~~\text{and}~~x_{j}\geq0,~{x_{0}}*{x_{j}}\leq 0, ~j\in[k].$$ So, ${{\bf{x}}}={\bf 0}$ as ${\bf C}$ has the ${\bf SSM}$-$ W$ property.
From both cases, we conclude that $X$ contains only the zero element. By the homotopy invariance property of degree (D2), we have $\text{deg}(\Phi({\bf{x}},0),\Omega,{\bf 0})=\text{deg}\big(\Phi({\bf{x}},1),\Omega,{\bf 0}\big)$ for any bounded open set $\Omega$ containing ${\bf 0}$. As $G$ is a continuous one-to-one function, by Proposition \ref{ND}, we have $$\text{deg}\big({\bf C},{\bf 0}\big)=\text{deg}\big(\Phi({\bf{x}},0),\Omega,{\bf 0}\big)= \text{deg}\big(F,\Omega,{\bf 0}\big)=\text{deg}\big(G,\Omega,{\bf 0}\big)\neq 0.$$ This completes the proof. \end{proof} We now recall that a matrix $A \in\mathbb{R}^{n\times n}$ is said to be an $M$ matrix if it is a $Z$ matrix and $A^{-1}(\mathbb{R}^n_+)\subseteq \mathbb{R}^n_+.$ We prove a uniqueness result for the EHLCP when $q\geq0$ and ${\bf d}\in\Lambda^{(k-1)}_{n,++}.$ \begin{theorem}\label{smq} Let ${\bf C}=(C_0,C_1,...,C_k)\in \Lambda^{(k+1)}_{n\times n}$ have the ${\bf SSM}$-$ W$ property and let $C_0$ be an $M$ matrix. Then for every $q \in \mathbb{R}^n_+$ and for every ${\bf d} \in \Lambda^{(k-1)}_{n,++}$, $\text{\rm EHLCP}({\bf C},{\bf d},q)$ has a unique solution. \end{theorem} \begin{proof} Let $q \in \mathbb{R}^n_+$ and ${\bf d}=(d_{1}, d_{2},...,d_{k-1}) \in \Lambda^{(k-1)}_{n,++}$. We first show $(C_0^{-1}q,0,...,0)\in\text{SOL}({\bf C},{\bf d},q).$ As $C_0$ is an $M$ matrix and $q \in \mathbb{R}^n_+$, we have $C_0^{-1}q\geq 0$. If we set ${\bf{y}}=(y_{0},y_{1},...,y_{k}):=(C_0^{-1}q,0,...,0)\in \Lambda^{(k+1)}_{n}$, then one can easily see that $(y_{0},y_{1},...,y_{k})$ satisfies $$C_0 y_{0}=q+\sum_{i=1}^k C_i y_{i},~ y_{0}\wedge y_{1}=0~\text{and}~(d_{j}-y_{j})\wedge y_{j+1}=0~~\forall j\in[k-1].$$ Hence $(C_0^{-1}q,0,...,0)\in \text{SOL} ({\bf C},{\bf d},q).$
Suppose ${\bf x}=(x_{0},x_{1},...,x_{k})\in \Lambda^{(k+1)}_n$ is another solution to EHLCP$({\bf C},{\bf d},q)$. Then \begin{equation}\label{ssm}
\begin{aligned}
C_0 x_{0}=q+\sum_{i=1}^{k} C_ix_{i},~
x_{0}\wedge x_{1}=0, ~ (d_{j}-x_{j})\wedge x_{j+1}=0~ \forall j\in[k-1].
\end{aligned} \end{equation} From Lemma \ref{l1}, we have \begin{equation}\label{unique}
C_0x_{0}=q+\sum_{i=1}^{k} C_ix_{i}~\text{and}~x_{0}\wedge x_{j}=0~\forall~j\in [k]. \end{equation} Let ${\bf{z}}:={\bf{x}}-{\bf{y}}$; then ${\bf{z}}=(x_{0}-C_0^{-1}q, x_{1},x_{2},...,x_{k})$. By an easy computation, from equation \ref{unique}, we get $$C_0 (x_{0}-C_0^{-1}q)=\sum_{i=1}^k C_i x_{i}$$ and $$ x_{j}\geq 0,~~(x_{0}-C_0^{-1}q)*x_{j}=x_{0}*x_{j}-C_0^{-1}q*x_{j}=-C_0^{-1}q*x_{j}\leq 0~\forall j\in [k].$$ Since ${\bf C}$ has the ${\bf SSM}$-$ W$ property, ${\bf{z}}={\bf 0}$, which implies that $(x_{0},x_{1},...,x_{k})=(C_0^{-1}q,0,...,0).$ This completes the proof. \end{proof}
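As a concrete illustration of the hypothesis on $C_0$ (the matrix below serves only as an example), take $$C_0=\begin{bmatrix} 2 & -1\\ -1 & 2 \end{bmatrix}, \qquad C_0^{-1}=\frac{1}{3}\begin{bmatrix} 2 & 1\\ 1 & 2 \end{bmatrix}.$$ The off-diagonal entries of $C_0$ are nonpositive, so $C_0$ is a $Z$ matrix, and the entries of $C_0^{-1}$ are nonnegative, so $C_0^{-1}(\mathbb{R}^2_+)\subseteq \mathbb{R}^2_+$; hence $C_0$ is an $M$ matrix. For any $q\in\mathbb{R}^2_+$, the vector $(C_0^{-1}q,0,...,0)$ constructed in the above proof is then the unique solution of $\text{\rm EHLCP}({\bf C},{\bf d},q)$, provided ${\bf C}$ has the ${\bf SSM}$-$W$ property.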
\section{Connected solution set and Column ${W_0}$ property } In this section, we give a necessary condition and a sufficient condition for the connectedness of the solution set of the EHLCP. \begin{definition}\rm Let ${\bf C}=(C_0,C_1,...,C_k)\in\Lambda^{(k+1)}_{n\times n}$. We say that ${\bf C}$ is connected if $\text{SOL}({\bf C},{\bf d},q)$ is connected for all $q\in\mathbb{R}^n$ and for all ${\bf d}\in\Lambda^{(k-1)}_{n,++}$. \end{definition} We now recall some definitions and results to proceed further. \begin{definition}\rm\cite{semi} A subset of $\mathbb{R}^n$ is said to be a semi-algebraic set if it can be represented as $$S=\displaystyle\bigcup_{u=1}^{s}\bigcap_{v=1}^{r_u}\{{\bf{x}}\in\mathbb{R}^n;f_{u,v}({\bf{x}})*_{uv} 0\}, $$ where for all $u\in[s]$ and for all $v\in[r_u]$, $*_{uv}\in\{>, =\}$ and $f_{u,v}$ is a real polynomial. \end{definition} \begin{theorem}[\cite{semi}]\label{sctd} Let $S$ be a semi-algebraic set. Then $S$ is connected iff $S$ is path-connected. \end{theorem} \begin{lemma}\label{CC} The set $\text{\rm SOL}({\bf C},{\bf d},q)$ is a semi-algebraic set. \end{lemma} \begin{proof} It is clear from the definition of $\text{SOL}({\bf C},{\bf d},q)$. \end{proof} The following result gives a necessary condition for connectedness of the solution set whenever $C_0$ is an $M$ matrix. \begin{theorem} Let $C_0\in\mathbb{R}^{n\times n} $ be an $M$ matrix. If ${\bf C}=(C_0,C_1,...,C_k)\in\Lambda^{(k+1)}_{n\times n}$ is connected, then $\text{\rm SOL}({\bf C},{\bf d},q)=\{(C_0^{-1}q,0,...,0)\}$ for all $q\in\mathbb{R}^n_{++}$ and for all ${\bf d}\in\Lambda^{(k-1)}_{n,++}$. \end{theorem} \begin{proof} Let $q\in\mathbb{R}^n_{++}$ and ${\bf d}=(d_{1}, d_{2},...,d_{k-1})\in\Lambda^{(k-1)}_{n,++}$. It can be seen from the proof of Theorem \ref{smq} that ${\bf{x}}=(C_0^{-1}q,0,...,0)\in\text{SOL}({\bf C},{\bf d},q)$. We now show that ${\bf{x}}$ is the only solution to EHLCP(${\bf C},{\bf d},q$).
Assume the contrary: suppose ${\bf{y}}$ is another solution to EHLCP(${\bf C},{\bf d},q$). As $\text{SOL}({\bf C},{\bf d},q)$ is connected, by Lemma \ref{CC} and Theorem \ref{sctd}, it is path-connected. So, there exists a path $\gamma=(\gamma^{0},\gamma^{1},...,\gamma^{k}):[0,1]\rightarrow \text{SOL}({\bf C},{\bf d},q)$ such that $$\gamma(0)={\bf{x}}, ~\gamma(1)={\bf{y}}~ \text{and}~ \gamma(t)\neq {\bf{x}} ~\forall t>0$$ (such a path can be obtained from any path joining ${\bf{x}}$ to ${\bf{y}}$ by restricting it to $[\tau,1]$, where $\tau$ is the last parameter value at which the path equals ${\bf{x}}$, and rescaling the parameter to $[0,1]$). Let $\{t_m \} \subseteq(0,1)$ be a sequence such that ${t_m}\to 0$ as $m\rightarrow \infty$. Then, by the continuity of $\gamma$, $\gamma(t_m)\rightarrow \gamma(0)={\bf{x}}$ as $m\to \infty$. Since $\big(\gamma^{0}(t_m),\gamma^{1}(t_m),...,\gamma^{k}(t_m)\big)\in\text{SOL}({\bf C},{\bf d},q),$ \begin{equation*}\begin{aligned}
C_0\gamma^{0}(t_m) &=q+\sum_{i=1}^{k} C_i\gamma^{i}(t_m),\\
\gamma^{0}(t_m)\wedge\gamma^{1}(t_m)=0& ~\text{and}~\big(d_{j}-\gamma^{j}(t_m)\big)\wedge
\gamma^{({j+1})}(t_m) =0~\forall j\in[k-1].\\ \end{aligned} \end{equation*} Now we claim that there exists a subsequence $\{t_{m_l}\}$ of $\{t_{m}\}$ such that $$\big(\gamma^{j}(t_{m_l})\big)_i\neq 0~\text{ for some }j\in[k] \text{ and for some } i\in[n].$$ Suppose the claim is not true. Then there exists $m_0\in \mathbb{N}$ such that $$\big(\gamma^{j}(t_{m})\big)_i =0~~\forall i\in [n],~\forall j\in [k],~\forall m\geq m_0,$$ that is, $$\gamma^{1}(t_m)=\gamma^{2}(t_m)=\cdots=\gamma^{k}(t_m)=0~~\forall m\geq m_0.$$ As $\big(\gamma^{0}(t_m),\gamma^{1}(t_m),...,\gamma^{k}(t_m)\big)\in\text{SOL}({\bf C},{\bf d},q)$, we get $\gamma^{0}(t_m)=C^{-1}_0 q~~\forall m \geq m_0$. This gives us that $\gamma(t_m)={\bf{x}}$ for all $m\geq m_0$, which contradicts the fact that $\gamma(t_m)\neq{\bf{x}}$ for all $m$. Therefore, our claim is true. Without loss of generality, we may assume that the sequence $\{t_m\}$ itself satisfies the condition $$\big(\gamma^{j}(t_{m})\big)_i\neq 0~\text{ for some }j\in[k] \text{ and for some } i\in[n];$$ moreover, since there are only finitely many pairs $(j,i)$, we may pass to a further subsequence and assume that the same pair $(j,i)$ works for every $m$. We now consider the following cases for the possibilities of $j$.
{\it Case 1}: If $j=1$, then $(\gamma^{0}(t_{m}))_i(\gamma^{1}(t_{m}))_i=0$, which, since $(\gamma^{1}(t_{m}))_i\neq 0$, leads to $(\gamma^{0}(t_{m}))_i=0.$ This implies that $$0=\lim_{m \to \infty} \big(\gamma^{0}(t_{m})\big)_{i}=(C_0^{-1}q)_i.$$ But $C_0^{-1}q>0$ as $C_0$ is an $M$ matrix and $q\in\mathbb{R}^n_{++}$. This is not possible. So, $j\neq 1$.
{\it Case 2}: If $2\leq j\leq k $, then we have $(d_{j-1}-\gamma^{j-1}(t_{m}))_i (\gamma^{j}(t_{m}))_i=0$, which, since $(\gamma^{j}(t_{m}))_i\neq 0$, gives $(d_{j-1}-\gamma^{j-1}(t_{m}))_i=0.$ By taking the limit $m\rightarrow\infty$ and using $\gamma^{j-1}(0)=0$ for $j\geq 2$, $$0=\lim_{m\rightarrow\infty}(d_{j-1}-\gamma^{j-1}(t_{m}))_i=(d_{j-1})_i-(\gamma^{j-1}(0))_i= (d_{j-1})_i>0.$$ This is not possible.
In both cases we arrive at a contradiction, so no such $j$ exists. This contradicts the claim established above, and hence there is no second solution ${\bf{y}}$. Therefore ${\bf{x}}=(C_0^{-1}q,0,...,0)$ is the only solution to $\text{EHLCP}({\bf C},{\bf d},q)$. \end{proof} The following result gives a sufficient condition for connectedness of the solution set of the EHLCP. \begin{theorem} Let ${\bf C}:=(C_0,C_1,...,C_k)\in \Lambda^{(k+1)}_{n \times n}$ have the column $W_0$-property. If $\text{\rm SOL}({\bf C},{\bf d},q)$ has a bounded connected component, then $\text{\rm SOL}({\bf C},{\bf d},q)$ is connected. \end{theorem} \begin{proof} If SOL$({\bf C},{\bf d},q)= \emptyset$, then we have nothing to prove. Let SOL$({\bf C},{\bf d},q)\neq \emptyset$ and let $A$ be a bounded connected component of SOL$({\bf C},{\bf d},q)$. If SOL$({\bf C},{\bf d},q)=A$, then we are done. Suppose SOL$({\bf C},{\bf d},q)\neq A.$ Then there exists ${\bf{y}}=(y_{0},y_{1},..,y_{k})\in \text{SOL}({\bf C},{\bf d},q)\setminus A$. As $A$ is a bounded connected component of SOL$({\bf C},{\bf d},q)$, we can find an open bounded set $\Omega \subseteq \Lambda^{(k+1)}_{n}$ which contains $A$ and does not intersect any other connected component of $\text{SOL}({\bf C},{\bf d},q)$. Therefore ${\bf{y}} \notin\Omega$ and $\partial{(\Omega)}\cap\text{SOL}({\bf C},{\bf d},q)=\emptyset.$ Since ${\bf C}$ has the column $W_0$-property, there exists ${\bf N}:=(N_0,N_1,...,N_k)\in \Lambda^{(k+1)}_{n\times n}$ such that ${\bf C+\epsilon N}:=(C_0+\epsilon N_0,C_1+\epsilon N_1,...,C_k+\epsilon N_k)$ has the column $W$-property for every $\epsilon>0$.
Let ${\bf z}=(z_{0},z_{1},...,z_{k})\in A$ and $\epsilon >0$. We define functions $H_1$, $H_2$, and $H_3$ as follows: $$H_1 ({\bf{x}})=\begin{bmatrix}
C_0 x_{0} -\sum_{i=1}^{k} C_ix_{i}-q\\ x_{0}\wedge x_{1}\\ (d_{1}-x_{1})\wedge x_{2}\\ \vdots \\ (d_{k-1}-x_{k-1})\wedge x_{k}\\ \end{bmatrix},$$ $$H_2 ({\bf{x}})=\begin{bmatrix}
(C_0+\epsilon N_0) x_{0} -\sum_{i=1}^{k} (C_i+\epsilon N_i)x_{i}+(\sum_{i=1}^{k}\epsilon N_iy_{i}-\epsilon N_0y_{0}-q)\\ x_{0}\wedge x_{1}\\ (d_{1}-x_{1})\wedge x_{2}\\ \vdots \\ (d_{k-1}-x_{k-1})\wedge x_{k}\\ \end{bmatrix},$$ $$H_3 ({\bf{x}})=\begin{bmatrix}
(C_0+\epsilon N_0) x_{0} -\sum_{i=1}^{k} (C_i+\epsilon N_i)x_{i}+(\sum_{i=1}^{k}\epsilon N_iz_{i}-\epsilon N_0z_{0}-q)\\ x_{0}\wedge x_{1}\\ (d_{1}-x_{1})\wedge x_{2}\\ \vdots \\ (d_{k-1}-x_{k-1})\wedge x_{k}\\ \end{bmatrix}.$$ By putting ${\bf{x}}={\bf{y}}$ in $H_2({\bf{x}})$, and ${\bf{x}}={\bf z}$ in $H_1({\bf{x}})$ and $H_3({\bf{x}})$, we get $$H_1({\bf z})=H_2({\bf{y}})=H_3({\bf z})=0.$$ For $\epsilon$ sufficiently close to zero, deg$(H_1,\Omega,{\bf 0})$= deg$(H_2,\Omega,{\bf 0})$= deg$(H_3,\Omega,{\bf 0})$ due to the nearness property of degree (D3). As ${\bf z} \in \Omega$ is a solution to $H_3 ({\bf{x}})={\bf 0}$ and ${\bf C+\epsilon N}$ has the column $W$-property, we get deg$(H_3, \Omega, {\bf 0}) \neq 0$ by Theorems \ref{T4} and \ref{degg}. Since deg$(H_2,\Omega,{\bf 0})$= deg$(H_3,\Omega,{\bf 0})$, we have deg$(H_2,\Omega,{\bf 0})\neq 0$. This implies that if we set $ {q_2}:=q+\epsilon N_0 y_{0}-\sum_{i=1}^{k}\epsilon N_i y_{i}$, then EHLCP$({\bf C+\epsilon N},{\bf d},q_2)$ must have a solution in $\Omega$. As ${\bf C+\epsilon N}$ has the column $W$-property, by Theorem \ref{P1}, EHLCP$({\bf C+\epsilon N},{\bf d},q_2)$ has a unique solution, which must be equal to ${\bf{y}}$. So ${\bf{y}}\in \Omega$, which is a contradiction. Hence SOL$({\bf C},{\bf d},q)=A$. Thus SOL$({\bf C},{\bf d},q)$ is connected. \end{proof} \section{Conclusion} In this paper, we introduced the $R_0$-$W$ and ${\bf SSM}$-$W$ properties and then studied existence and uniqueness results for the EHLCP when the underlying set of matrices has these properties. Last, we gave a necessary condition and a sufficient condition for the connectedness of the solution set of the EHLCP. \section*{Declaration of Competing Interest} The authors have no competing interests.
\end{document}
\begin{document}
\title{Characterizing the complete hierarchy of correlations in an $n$-party system}
\author{D.L. Zhou} \affiliation{Institute of Physics, Chinese Academy of Sciences, Beijing 100080, China}
\author{L. You} \affiliation{School of Physics, Georgia Institute of Technology, Atlanta, Georgia 30332, USA}
\begin{abstract} A characterization of the complete correlation structure in an $n$-party system is proposed in terms of a series of $(k,n)$ threshold classical secret sharing protocols ($2\le k\le n$). The total correlation is shown to be the sum of independent correlations of $2$-, $3$-,$\cdots$, $n$-parties. Our result unifies several earlier scattered works, and shines new light at the important topic of multi-party quantum entanglement. As an application, we explicitly construct the hierarchy of correlations in an $n$-qubit graph state.
\end{abstract}
\pacs{03.65.Ud, 03.67.Mn, 89.70.+c} \maketitle
Despite their wide usage, correlations, especially those involving more than two parties in a multiparty system, remain to be fully understood. Two-party correlation is relatively well understood. It is typically measured by the two-party mutual entropy \cite{henderson}, which gives the remarkable result that the two-party correlation of a Bell state is twice that of the maximally correlated classical two-qubit state. Groisman \textit{et al.} provided the first operational interpretation for two-party correlation \cite{Groisman} based on the idea of Landauer---the amount of information equals the amount of work required for its erasure \cite{Landauer}. More recently, Schumacher and Westmoreland published a direct proof equating the two-party mutual entropy to the maximal amount of information that one party can send to the other in a one-time-pad cryptographic scheme \cite{Schumcher}.
For a Bell state, the cryptographic scheme of Schumacher and Westmoreland is simply the superdense coding protocol \cite{Bennett} where two bits of classical information are communicated by transmitting one qubit. Different roles, the sender and the receiver, respectively, are assumed by the two parties in this protocol. The correlation between the two parties, however, is symmetric, \textit{i.e.}, it makes no sense to phrase that the correlation is from one party (sender) to the other (receiver). The same has to hold for multi-parties, {\it i.e.}, any operational definition for the degree of multi-party correlation has to be symmetric with respect to all parties.
The above discussion of correlation echoes multiparty secret sharing (SS) schemes: both are symmetric with respect to all parties. In 1979, Blakely \cite{Blakely} and Shamir \cite{Shamir} addressed the issue of a $(k,n)$ threshold protocol for sharing a secret that can be recovered by $k$ or more parties, but not by fewer than $k$ parties. Quantum SS was first discussed by M. Hillery \textit{et al.} and was associated with establishing classical or quantum secret keys among the multi-parties \cite{Hillery}. Cleve {\it et al.} proposed an improved $(k,n)$ threshold quantum SS protocol \cite{Cleve}, which allowed an unknown quantum state to be shared in a multi-party system and made connections to quantum error correction codes. Terhal \textit{et al.} presented a scheme for hiding a classical bit into a collection of Bell states (between two parties), allowing for all types of classical communication, but not two-party quantum communication \cite{Terhal}. Eggeling and Werner generalized the protocol of Terhal \textit{et al.} to multiparties \cite{Eggeling} and pointed out that quantum entanglement is not required.
The intimate connection between correlations and SS protocols has led to the present work, where we propose a complete characterization of the hierarchy of correlations in an $n$-party system with a series of $(k,n)$ ($2\le k\le n$) threshold classical SS protocols. This Letter is organized as follows. We start by revisiting the simplest possible case of a two-party state, fully analyzing its correlation in terms of its SS capacity. The definition for the total correlation in an $n$-party state then naturally arises. Using examples of three-party states, the total correlation is shown to be composed of independent two- and three-party correlations, which paves the way for a proper generalization to the complete correlation hierarchy of $n$-party states. Before summarizing our result, we provide an explicit construction of the complete correlation structures for all graph states.
The maximally correlated classical two-qubit state \begin{eqnarray}
\rho^{(12)}_{c}=\frac{1}{2}\left(|00\rangle_{12}\
_{12}\!\langle 00|+|11\rangle_{12}\
_{12}\!\langle 11| \right), \label{2ce} \end{eqnarray} gives rise to completely mixed reduced density matrices for both parties $j=1,2$, \textit{i.e.}, $\rho^{(j)}=I_j/2$. This state gives a random outcome ``$+1$" or ``$-1$" with equal probabilities when each qubit is measured independently with the Pauli matrix $Z_j$. The results from the two qubits, however, reveal the inherent correlation because $Z_1 Z_2\equiv1$ for the state (\ref{2ce}).
The above example shows that in an approximate sense, correlation specifies the definiteness of a composite state with uncertainties for its parts. To provide a more precise characterization, we introduce an alternative picture for the state (\ref{2ce}) by defining two logical qubits ${\tilde{1}}$ and ${\tilde{2}}$ with $Z_{\tilde{1}}=Z_1 Z_2$, $X_{\tilde{1}}=X_1$, $Z_{\tilde{2}}= X_1 X_2$, and $X_{\tilde{2}}=Z_2$. This gives rise to a transparent form
$\rho^{(12)}_{c}=|0\rangle_{\tilde{1}}\
_{\tilde{1}}\langle 0|\otimes I_{\tilde{2}}/2$, clearly revealing the presence of one bit of correlation encoded in the first logical qubit ${\tilde{1}}$.
We now relate this correlation measure to a classical SS protocol, where a secret is encoded by a unitary transformation that leaves the reduced state of every single party invariant. The capacity for SS by a state is then defined as the maximal number of secrets encodable or, equivalently, the maximal number of unitary transformations that are distinguishable by a single measurement on this state. The degree of correlation is then measured by the capacity for SS. For the state (\ref{2ce}) $\rho_c^{(12)}$, $1$ bit of classical information $c\in \{0,1\}$ can be encoded into a unitary transformation $X_{\tilde{1}}^c$, and the secret $c$ can be decoded by a measurement with $Z_{\tilde{1}}$.
We next consider the Bell state \begin{eqnarray} \left\vert B\right\rangle_{12} =\frac{1}{\sqrt{2}}\left( \left\vert 00\right\rangle _{12}+\left\vert 11\right\rangle _{12}\right), \label{2eb} \end{eqnarray} with the same reduced matrix $\rho^{(j)}=I_j/2$ as for the state (\ref{2ce}). In terms of the aforementioned two logic qubits, we find
$\left\vert B\right\rangle_{12}=|0\rangle_{\tilde{1}}\otimes
|0\rangle_{\tilde{2}}$, {\it i.e.}, a state capable of encoding two secret bits $c_1$ and $c_2$ with unitary transformations $X_{\tilde{1}}^{c_1} $ and $X_{\tilde{2}}^{c_2}$ that are recoverable from $Z_{\tilde{1}}$ and $Z_{\tilde{2}}$ measurements. Thus the Bell state (\ref{2eb}) can share $2$ bits of secret, {\it i.e.}, it contains twice as much correlation as the classical state (\ref{2ce}), in agreement with the result of Ref. \cite{Groisman}. Alternatively, the $2$ bits of secret can be encoded by $X_{\tilde{1}}^{c_1}=X_1^{c_1}$ and $(Z_{\tilde{1}}X_{\tilde{2}})^{c_2}=Z_1^{c_2}$. This latter encoding involves operations only on a single party, which gives nothing but the familiar superdense coding protocol \cite{Bennett}.
We aim for a proper two-party correlation measure $C_2(.)$ satisfying the additivity relationship $C_2(\rho^{(12)}\otimes\sigma^{(12)})=C_2(\rho^{(12)})+C_2(\sigma^{(12)})$ for general states $\rho^{(12)}$ and $\sigma^{(12)}$ shared between the two parties. This calls for a discussion of the average degree of correlation for an ensemble of identical copies of $\rho^{(12)}$ described by $\rho^{(12)}_{\rm ens}=\prod_{i=1}^{N} \otimes \rho^{(12)}_i$, whose one-party reduced density matrix is $\rho^{(j=1,2)}_{\rm ens}=\prod_{i=1}^{N} \otimes \rho^{(j)}_i$. According to Schumacher's theorem on noiseless quantum data compression, the reduced state for party $j$ can be encoded into $NS(\rho^{(j)})$ qubits with completely random reduced state in the limit of $N\to\infty$. The whole state of the ensemble $\rho^{(12)}_{\rm ens}$, on the other hand, can be encoded into $NS(\rho^{(12)})$ completely random qubits. The correlation, shared between the two parties, is exactly the reason for the reduction of the total number of compressed qubits from $N(S(\rho^{(1)})+S(\rho^{(2)}))$ to $NS(\rho^{(12)})$. Therefore, the average correlation for a general two-party state becomes \begin{eqnarray} C_2(\rho^{(12)})=I(\rho^{(12)})\equiv S(\rho^{(1)})+S(\rho^{(2)})-S(\rho^{(12)}), \end{eqnarray} which can be considered as a direct deduction of the main theorem of \cite{Schumcher}.
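As a simple consistency check of this entropic expression against the secret-sharing counts obtained above, note that $S(\rho^{(1)})=S(\rho^{(2)})=1$ for both of the two-qubit states considered so far, while $S(\rho^{(12)}_{c})=1$ and the Bell state is pure, so that $C_2(\rho^{(12)}_{c})=1+1-1=1$ and $C_2(|B\rangle_{12})=1+1-0=2$, in agreement with the $1$ and $2$ bits of secret found above.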
A similar argument based on data compression enables a proper generalization to multiparty states. We define \begin{eqnarray} C_T(\rho^{(12\cdots n)})=\sum_{i=1}^{n} S(\rho^{(i)})-S(\rho^{(12\cdots n)}), \label{tc} \end{eqnarray} as the total correlation in an $n$-party state $\rho^{(12\cdots n)}$. In the language of SS, the total correlation (\ref{tc}) is simply the capacity for the (2,\,n) threshold classical SS in an $n$-party state $\rho^{(12\cdots n)}$, a direct generalization of the two-party result. However, the total correlation (\ref{tc}) alone does not provide sufficient information on the correlation structure in an $n$-party $(n\ge 3)$ state. Therefore our further analysis below will concentrate on characterizing how the total correlation is distributed among the $n$ parties.
We now examine the three-qubit maximally correlated classical state \begin{eqnarray}
\rho^{(123)} _{c}=\frac{1}{2}\left(|000\rangle_{123}
\ _{123}\langle 000|+|111\rangle_{123}\ _{123}\langle 111|\right), \label{3ce} \end{eqnarray} with $C_T(\rho_c^{(123)})=2$. The two-party correlations are calculated easily, given by $C_2(\rho_c^{(12)})=C_2(\rho_c^{(23)})=C_2(\rho_c^{(13)})=1$. This result leads to an interesting paradox: the total correlation is less than the apparent total two-party correlation, \textit{i.e.}, $C_T(\rho_c^{(123)})<C_2(\rho_c^{(12)})+C_2(\rho_c^{(23)}) +C_2(\rho_c^{(13)})$, a puzzle previously encountered when three-party mutual entropy was found to be negative for certain quantum states \cite{Vedral}. From the view point of our proposed characterization scheme, the reason for the above paradox is simple: the three two-party correlations $C_2(\rho_c^{(12)})$, $C_2(\rho_c^{(23)})$, and $C_2(\rho_c^{(13)})$ are not independent of each other, thus they cannot be simply added together to give the total two-party correlation. In fact, the correlation between the first qubit and the other two qubits is $C_2(\rho_c^{(1(23))})=1$, causing $C_2(\rho_c^{(1(23))})<C_2(\rho_c^{(12)})+C_2(\rho_c^{(13)})$, thus, at most one of the two correlations $C_2(\rho_c^{(12)})$ and $C_2(\rho_c^{(13)})$ is independent when the second and the third qubits are considered as independent parties. More generally for the state (\ref{3ce}), only two of the three two-party correlations are independent when all three qubits are viewed as independent parties. Any two can be used because the state (\ref{3ce}) is completely symmetric. The equality $C_T(\rho_c^{(123)})=C_2(\rho_c^{(12)})+C_2(\rho_c^{(13)})$ then excludes the existence of any genuine three-party correlation in $\rho_c^{(123)}$ and gives rise to the following simple correlation structure: the total correlation is $2$ bits, which is distributed exclusively into any two of the three two-party correlations of $1$ bit each.
The above correlation structure can be easily understood in the language of classical SS with the introduction of three logic qubits $Z_{\tilde{1}}=Z_1 Z_2$, $X_{\tilde{1}}=X_1$; $Z_{\tilde{2}}= Z_2 Z_3$, $X_{\tilde{2}}=X_2$; and $Z_{\tilde{3}}= X_1 X_2 X_3$, $X_{\tilde{3}}= Z_3$. The state (\ref{3ce}) then takes the form
$\rho^{(123)}_{c}= |0\rangle_{\tilde{1}}\; _{\tilde{1}}\langle 0|\otimes|0\rangle_{\tilde{2}}\; _{\tilde{2}}\langle 0|\otimes I_{\tilde{3}}/2$, capable of encoding two bits of secret $c_1$ and $c_2$ with $X_1^{c_1} X_2^{c_2}$. A single measurement with $Z_{\tilde{1}}=Z_1 Z_2$ and $Z_{\tilde{2}}=Z_2 Z_3$ then accomplishes the decoding. Additionally, we note that the identity of $Z_{\tilde{1}}Z_{\tilde{2}}=Z_1 Z_3$ allows for the interchange of the roles for $Z_{\tilde{1}}$ or $Z_{\tilde{2}}$, because only two of the three two-party correlations are independent.
The second three-qubit state we consider is the three-qubit GHZ state \begin{eqnarray} \left\vert G\right\rangle_{123} =\frac{1}{\sqrt{2}}\left( \left\vert 000\right\rangle _{123}+\left\vert 111\right\rangle _{123}\right), \label{ghz} \end{eqnarray}
whose total correlation is $C_T(|G\rangle_{123})=3$. Its two-party correlation structure is the same as that of the state (\ref{3ce}) because both share identically the same two-party reduced density matrices. This then leads to the simple result for the degree of three-party correlation of the state (\ref{ghz}) being
$C_T(|G\rangle_{123})-C_T(\rho_c^{(123)})=1$, a result easily understood again in terms of SS. Using the same set of three logic qubits introduced above, we find $\left\vert G\right\rangle_{123}=|0\rangle_{\tilde{1}}\otimes
|0\rangle_{\tilde{2}}\otimes |0\rangle_{\tilde{3}}$, capable of coding three bits of classical secret $c_1$, $c_2$, and $c_3$ with $X_1^{c_1}X_2^{c_2}Z_3^{c_3}$. The decoding is achieved analogously by measurements with $Z_{j=\tilde{1},\tilde{2},\tilde{3}}$. Clearly, $Z_{\tilde{3}}$ probes three-party correlation that cannot be detected by any two-party measurement.
The total correlation in a two-party state is simply the two-party correlation. For a three-party state, however, the total correlation includes both three-party correlation and independent two-party correlation. The calculation of this independent two-party correlation is generally a mathematically challenging task, as evidenced by the fact that the three-party mutual entropy, defined by \begin{eqnarray} I(\rho^{(123)})=C_T(\rho^{(123)})&-& C_2(\rho^{(12)})\nonumber\\ &-&C_2(\rho^{(13)})-C_2(\rho^{(23)}), \end{eqnarray} is not an appropriate measure for three-party correlation. Expressing it as $I(\rho^{(123)})=C_2(\rho^{(1(23))})-C_2(\rho^{(12)})-C_2(\rho^{(13)})$, formally analogous to the two-party mutual entropy, one might be tempted to consider $I(\rho^{(123)})$ as a reasonable three-party correlation measure. Yet, this is generally unacceptable because the two-party correlations $C_2(\rho^{(12)})$ and $C_2(\rho^{(13)})$ are not always independent when the two-party correlation $C_2(\rho^{(1(23))})$ is considered.
The mathematical difficulty of classifying independent multiparty correlations can be resolved with our proposed classification scheme based on classical SS, although the actual computation for general multiparty states may still be out of reach. A three-party state $\rho^{(123)}$ can admit the $(2,3)$ and $(3,3)$ threshold classical SS protocols in general. Our definition (\ref{tc}) for the total correlation in a three-party state reduces simply to the capacity of the $(2,3)$ classical secret sharing. The capacity for the $(3,3)$ threshold classical secret sharing measures nothing but the three-party correlation. The total (independent) two-party correlation can then be obtained as the difference between the total correlation and the three-party correlation.
Formally, the definition for the capacity of the $(k,n)$ threshold classical SS is based on an ensemble of identical copies of the $n$-party state, $\rho^{(12\cdots n)\otimes N}$. $M_k$ secret bits $\{c_m\} \; (c_m\in \{0,1\}\; \mathrm{and}\; m=1,2,\cdots,M_k)$ are encoded with a series of unitary transformations $U(\{c_m\})$ that leave all $(k-1)$-party reduced density matrices invariant, \textit{i.e.}, $\forall S^k_j$, \begin{eqnarray} \mathrm{Tr}_{S^k_j} \left[U(\{c_m\})\rho^{(12\cdots n)\otimes N}U^{\dagger}(\{c_m\})\right]=\mathrm{Tr}_{S^k_j} \rho^{(12\cdots n)\otimes N}, \end{eqnarray}
where $S^k_j=\{j_\alpha|\; \alpha\in\{1,2,\cdots, n-k+1\}, \; j_\alpha\in\{1,2,\cdots,n\}\}$. If the secret bits are decoded by a single measurement, all coded states $U(\{c_m\})\rho^{(12\cdots n)\otimes N}U^{\dagger}(\{c_m\})$ are required to be orthogonal to each other. The capacity of this $(k,n)$ threshold classical SS for the state $\rho^{(12\cdots n)}$ is then given by \begin{eqnarray} C^{(k,n)}(\rho^{(12\cdots n)})=\lim_{N\to\infty} \frac {\max M_k} {N}. \end{eqnarray} The total correlation of the state $\rho^{(12\cdots n)}$ is defined as \begin{eqnarray} C_T(\rho^{(12\cdots n)})=C^{(2,n)}(\rho^{(12\cdots n)}), \end{eqnarray} consistent with our earlier definition (\ref{tc}). The $k$-party $(2\le k\le n-1)$ correlation is given by \begin{eqnarray} C_k(\rho^{(12\cdots n)})=C^{(k,n)}(\rho^{(12\cdots n)}) -C^{(k+1,n)}(\rho^{(12\cdots n)}), \end{eqnarray} and the $n$-party correlation is \begin{eqnarray} C_n(\rho^{(12\cdots n)})=C^{(n,n)}(\rho^{(12\cdots n)}). \end{eqnarray} Our classifying scheme then leads to \begin{eqnarray} C_T(\rho^{(12\cdots n)})=\sum_{k=2}^{n}C_k(\rho^{(12\cdots n)}). \end{eqnarray}
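To make the hierarchy explicit for the three-qubit examples discussed above: for the classical state (\ref{3ce}) one has $C^{(2,3)}=2$ and $C^{(3,3)}=0$, hence $C_2=2$ and $C_3=0$, while for the GHZ state (\ref{ghz}) one has $C^{(2,3)}=3$ and $C^{(3,3)}=1$, hence $C_2=2$ and $C_3=1$. In both cases $C_T=C_2+C_3$, as required.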
Although an efficient algorithm remains to be found to compute the hierarchy of correlations for a general $n$-party state, surprisingly we find these calculations can be performed analytically for all graph states.
An $n$-qubit graph state can be represented by a connected $n$-vertex graph. It is defined by an abelian subgroup $S_n$ of the $n$-qubit Pauli group $G_n$ with $n$ generators. $S_n$ is called the stabilizer group because the graph state it defines is invariant when acted upon by its elements. A complete set of independent elements of
$S_n$ forms the generator of the group, denoted by $\langle S_n \rangle$. Despite the rich variety of choices for the generator $\langle S_n \rangle$, the number of elements in $\langle S_n \rangle$, denoted by $|\langle S_n \rangle|$, is definite and equals $n$ for an $n$-qubit graph state.
The total correlation $(\ref{tc})$
for an $n$-qubit graph state is then simply equal to $|\langle S_n\rangle|=n$ since the one-qubit reduced density matrix is uniformly a completely mixed state for all qubits. Each element in the stabilizer then represents $1$ bit of correlation. To compute the capacity of the $(k,n) \;(2\le k\le n)$ threshold classical SS in an $n$-qubit graph state, we classify the elements in $S_n$ into the sets $S_{k}\; (2\le k\le n)$, with $S_{k}$ composed of
all elements in $S_n$ containing not more than $k$ single qubit Pauli matrices distinct from identity. Clearly we have $S_{2}\subseteq S_{3}\subseteq S_{4} \subseteq \cdots \subseteq S_{n}$. The elements in $S_k$ act nontrivially on not more than $k$ qubits and are therefore determined by reduced density matrices of not more than $k$ parties; thus they can be used to share secrets among not more than $k$ parties. In general the set $S_k$ is not a group, but its independent elements can be used to generate a stabilizer group whose generator is denoted by $\langle S_k\rangle$. The $k$-party correlation $C_k$ is then equal to \begin{eqnarray}
C_T(\langle S_{k} \rangle)-C_T(\langle S_{k-1} \rangle)=|\langle S_{k} \rangle|-|\langle S_{k-1} \rangle|. \label{eq10} \end{eqnarray}
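The classification just described is mechanical and can be automated. A minimal sketch in Python (one possible implementation among many; it assumes the standard binary symplectic representation of Pauli operators, and the five-qubit ring graph at the end is chosen merely as a test case) enumerates the stabilizer, keeps the elements supported on at most $k$ qubits, and obtains $|\langle S_k\rangle|$ as a rank over GF(2), from which the $C_k$ in Eq.~(\ref{eq10}) follow:
\begin{verbatim}
import itertools
import numpy as np

def graph_state_generators(adj):
    """Generators g_i = X_i prod_{j in N(i)} Z_j, as binary (x|z) row vectors."""
    n = len(adj)
    gens = np.zeros((n, 2 * n), dtype=int)
    for i in range(n):
        gens[i, i] = 1                       # X on qubit i
        for j in range(n):
            if adj[i][j]:
                gens[i, n + j] = 1           # Z on every neighbour j
    return gens

def gf2_rank(rows):
    """Rank over GF(2) of a collection of equal-length bit vectors."""
    ints = [int("".join(str(int(b)) for b in r), 2) for r in rows]
    rank = 0
    for col in reversed(range(max((x.bit_length() for x in ints), default=0))):
        pivot = next((i for i, x in enumerate(ints) if (x >> col) & 1), None)
        if pivot is None:
            continue
        rank += 1
        p = ints.pop(pivot)
        ints = [x ^ p if (x >> col) & 1 else x for x in ints]
    return rank

def correlation_hierarchy(adj):
    """C_k (k = 2,...,n) of the graph state with adjacency matrix adj."""
    n = len(adj)
    gens = graph_state_generators(adj)
    elements = []                            # all 2^n stabilizer elements
    for coeffs in itertools.product([0, 1], repeat=n):
        v = np.zeros(2 * n, dtype=int)
        for c, g in zip(coeffs, gens):
            if c:
                v = (v + g) % 2
        elements.append(v)
    def rank_S(k):                           # |<S_k>|: elements of weight <= k
        S_k = [v for v in elements
               if sum(int(v[i] or v[n + i]) for i in range(n)) <= k]
        return gf2_rank(S_k)
    sizes = {k: rank_S(k) for k in range(1, n + 1)}
    return {k: sizes[k] - sizes[k - 1] for k in range(2, n + 1)}

# five-qubit ring graph, used here only as an illustration
ring = [[1 if abs(i - j) in (1, 4) else 0 for j in range(5)] for i in range(5)]
print(correlation_hierarchy(ring))           # {2: 0, 3: 5, 4: 0, 5: 0}
\end{verbatim}
For the ring graph this yields $C_3=5$ and all other $C_k=0$, consistent with the five-qubit results discussed below.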
As an example, we list our results on the correlation structure for all five-qubit graph states in Table \ref{table1}. The total correlation of all five-qubit graph states is $5$ bits, which is distributed differently among the $2$-, $3$-, $4$-, and $5$-party correlations for the different states listed. For instance, we note that only the first graph state contains $1$ bit of $5$-party correlation, while only the second graph state has $1$ bit of $4$-party correlation, and the last graph state has $5$ bits of $3$-party correlation. We emphasize that this classification of correlation structure is locally unitary invariant. Furthermore, the end result on the correlation structure is independent of the specific labels for the qubits. Thus our classification scheme allows for a transparent categorization of graph states into local unitary equivalence classes. The example of the five-qubit graph states above shows that the $2$-party (or $3$-party) correlation alone is enough to distinguish all four distinct classes of five-qubit graph states. We thus state as a conjecture here that the correlation structure for any $n$-qubit graph state is sufficient to distinguish different local unitary equivalence classes. More detailed discussion on this will be given elsewhere. \begin{table}
\begin{tabular}{|c|c|c|c|c|} \hline & \includegraphics[width=1.5cm,height=1.5cm]{gs51.eps}& \includegraphics[width=1.5cm,height=1.5cm]{gs52.eps} & \includegraphics[width=1.5cm,height=1.5cm]{gs53.eps} & \includegraphics[width=1.5cm,height=1.5cm]{gs54.eps}\\ \hline
$C_2$&4&3&2&0\\
\hline
$C_3$&0&1&3&5\\
\hline
$C_4$&0&1&0&0\\
\hline
$C_5$&1&0&0&0\\ \hline \end{tabular} \caption{The correlation structure for all five-qubit graph states.} \label{table1} \end{table}
Before concluding, we provide a further perspective on our result by comparison with a related recent study of multiparty quantum entanglement in $n$-qubit graph states \cite{Fattal}. We find that their main result 2 \cite{Fattal} can be obtained directly from our classification scheme based on Eq. (\ref{eq10}), provided the appropriate association with the $k$-party correlation in the $k$-party $n$-qubit graph state is made. Our classification scheme, on the other hand, is more powerful and complete. In addition, it provides a transparent picture in terms of capacities of threshold SS protocols.
In summary, we have proposed a scheme to characterize the complete correlation structure in an $n$-party quantum state based on the state's capacities for $(k,n)$ threshold classical SS protocols ($2\le k\le n$). The total correlation in an $n$-party state is then found to be equal to the capacity of classical SS in a $(2,n)$ threshold protocol, which is the same as the sum of every single-party entropy minus the entropy for the whole state. This total correlation is further classified into constituents of $k$-party ($2\le k\le n$) correlations, with the $k$-party ($2\le k\le n-1$) correlation being the capacity difference between the $(k,n)$ and $(k+1,n)$ threshold protocols, and the $n$-party correlation in an $n$-party state is defined as the capacity of the $(n,n)$ threshold SS protocol. Our result allows for an easy explanation of why the three-party mutual entropy for a three-party state can take negative values, and thus mutual entropy cannot represent a legitimate three-party correlation measure. We have provided general results on the complete correlation structure for an $n$-qubit graph state, and give an explicit construction for the case of five-qubit graph states. Finally we note that the $k$-party entanglement measure proposed by Fattal \textit{et al.} is simply the $k$-party correlation in a $k$-party $n$-qubit graph state.
The author (D.L.Z.) thanks B. Zeng for many useful discussions. This work is supported by NSFC and NSF.
\begin{references}
\bibitem{henderson} For example, L. Henderson and V. Vedral, J. Phys. A: Math. Gen. \textbf{34}, 6899 (2001).
\bibitem{Groisman} B. Groisman, S. Popescu, and A. Winter, Phys. Rev. A \textbf{72}, 032317 (2005).
\bibitem{Landauer} R. Landauer, IBM J. Research and Development, \textbf{5}, 183 (1961).
\bibitem{Schumcher} B. Schumacher and M.D. Westmoreland, Phys. Rev. A \textbf{74}, 042305 (2006).
\bibitem{Bennett} C.H. Bennett and S. J. Wiesner, Phys. Rev. Lett. \textbf{69}, 2881 (1992).
\bibitem{Blakely} G. Blakely, Proc. AFIPS \textbf{48}, 313 (1979).
\bibitem{Shamir} A. Shamir, Communications of the ACM \textbf{22}, 612 (1979).
\bibitem{Hillery} M. Hillery, V. Bu\u{z}ek, and A. Berthiaume, Phys. Rev. A \textbf{59}, 1829 (1999).
\bibitem{Cleve} R. Cleve, D. Gottesman, and H.-K. Lo, Phys. Rev. Lett. \textbf{83}, 648 (1999).
\bibitem{Terhal} B.M. Terhal, D.P. DiVincenzo, and D.W. Leung, Phys. Rev. Lett. \textbf{86}, 5807 (2001).
\bibitem{Eggeling} T. Eggeling and R.F. Werner, Phys. Rev. Lett. \textbf{89}, 097905 (2002).
\bibitem{Vedral} V. Vedral, Rev. Mod. Phys. \textbf{74}, 197 (2002).
\bibitem{Briegel1} H.J. Briegel and R. Raussendorf, Phys. Rev. Lett. \textbf{86}, 910 (2001).
\bibitem{Briegel2} M. Hein, J. Eisert, and H.J. Briegel, Phys. Rev. A \textbf{69}, 062311 (2004).
\bibitem{Fattal} D. Fattal, T.S. Cubitt, Y. Yamamoto, S. Bravyi, and I.L. Chuang, quant-ph/0406168.
\end{references}
\end{document}
\begin{document}
\begin{center} {\Large \bf Borodin--Okounkov and Szeg\H{o} for Toeplitz \\[0.5ex] operators on model spaces}
{\Large Albrecht B\"ottcher} \end{center}
\begin{quote} \footnotesize{ We consider the determinants of compressions of Toeplitz operators to finite-dimensional model spaces and establish analogues of the Borodin--Okounkov formula and the strong Szeg\H{o} limit theorem in this setting.} \let\thefootnote\relax\footnote{\hspace*{-7.5mm} MSC 2010: 47B35, 30J10} \let\thefootnote\relax\footnote{\hspace*{-7.5mm} Keywords: Toeplitz determinant, model space, Blaschke product, truncated Toeplitz operator} \end{quote}
\section{Introduction and main results}
Although compressions of Toeplitz operators to model spaces have been studied for a long time, see, for example, \cite{Nik}, \cite{Treil}, it was Sarason's paper \cite{Sar} which initiated the recent increasing activity in research into such operators\footnote[1]{These operators are now called ``truncated Toeplitz operators'', although that name is already occupied by the classical finite Toeplitz matrices. Moreover, I see a difference between truncation and compression. However, since Donald Sarason is one of my mathematical top heroes, I will not vote against that name. I will nevertheless not follow the custom and will instead refer to these operators simply as Toeplitz operators on model spaces.}, see, for instance, the survey \cite{GaRoss} and the ample list of references therein. The number one theorem in classical Toeplitz matrices is Szeg\H{o}'s strong limit theorem, and curiously, I have not seen the model space version of this theorem among the many results which have so far been carried over from the classical setting to the model space level. In fact the strong Szeg\H{o} limit theorem is a straightforward consequence of another great theorem, namely, the Borodin--Okounkov formula. My favorite proof of the Borodin--Okounkov formula is the one in \cite{Botok}, and the purpose of this note is to show that this proof works equally well for Toeplitz operators on model spaces.
Our context is the usual Hardy spaces of the unit disk $\mathbb{D}$ or, when interpreted as nontangential limits, of the unit circle $\mathbb{T}$. We let $P$ stand for the orthogonal projection of $L^2$ onto $H^2$. The Toeplitz operator $T(a)$ induced by a function $a \in L^\infty$ is the operator on $H^2$
which acts by the rule $T(a)f=P(af)$. Let $u \in H^\infty$ be an inner function. The space $K_u:=H^2 \ominus uH^2$ is referred to as the model space generated by $u$. We denote by $P_u$ and $Q_u=I-P_u$ the orthogonal projections of $H^2$ onto $K_u$ and $uH^2$, respectively. It is well known that $P_u=I-T(u)T(\overline{u})$, the bar denoting complex conjugation. We are interested in the compression of $T(a)$ to $K_u$, that is, in the operator $T_u(a)=P_u T(a)|K_u$.
We will actually consider the matrix case. Thus, $a$ is supposed to be a matrix function in the $\mathbb{C}^{m \times m}$-valued $L^\infty$, and $T(a)$ and $T_u(a)$ act on the $\mathbb{C}^m$-valued $H^2$ and $K_u$, respectively. The inner function $u$ remains scalar-valued.
We make the following assumptions on $a$. It is required that $a$ is in the intersection of the Wiener algebra $W$ and the Krein algebra $K_{2,2}^{1/2,1/2}$, that is, the Fourier coefficients $a_n$ satisfy $\sum_{n=-\infty}^\infty \| a_n\|+\sum_{n=-\infty}^\infty |n| \, \| a_n\|^2 < \infty$, where $\| \cdot\|$ is any matrix norm on $\mathbb{C}^{m \times m}$. We furthermore assume that $a$ has right and left canonical Wiener--Hopf factorizations $a=w_-w_+=v_+v_-$ in $W \cap K_{2,2}^{1/2,1/2}$. This means that $w_+, v_+, \overline{w_-}, \overline{v_-}$ and their inverses belong to $W \cap K_{2,2}^{1/2,1/2}\cap H^\infty$. In the scalar case ($m=1$), the existence of such factorizations is guaranteed if $a$ has no zeros on $\mathbb{T}$ and vanishing winding number about the origin. Our assumptions imply in particular that $T(a)$, $T(a^{-1})$, and $T(\widetilde{a})$ are invertible on $H^2$. Here and in what follows, $\widetilde{a}$ results from $a$ by reversal of the Fourier coefficients, $\widetilde{a}(t):=a(1/t)$ for $t \in \mathbb{T}$.
The Hankel operator $H(a)$ generated by $a \in L^\infty$ is defined on the space $H^2$ by $H(a)f=P(a\cdot (I-P)Jf)$, where $J$ is the flip operator, $(Jf)(t)=(1/t)f(1/t)$ for $t \in \mathbb{T}$. Put $b=v_-w_+^{-1}$ and $c=w_-^{-1}v_+$. Then $b$ and $c$ are in the Krein algebra and hence the Hankel operators $H(b)$ and $H(\widetilde{c})$ are Hilbert--Schmidt operators. This implies that $H(b)H(\widetilde{c})$ is in the trace class. As $T(b)=T(v_-)T(w_+^{-1})$ and $T(c)=T(w_-^{-1})T(v_+)$ are invertible, so also is $I-H(b)H(\widetilde{c})=T(b)T(c)$.
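Let us record why the last identity holds; the argument is standard and not specific to the present setting. Since $a=w_-w_+=v_+v_-$, we have $bc=v_-w_+^{-1}w_-^{-1}v_+=v_-a^{-1}v_+=v_-v_-^{-1}v_+^{-1}v_+=1$, and the well-known formula $T(xy)=T(x)T(y)+H(x)H(\widetilde{y})$ for $x,y \in L^\infty$ therefore gives $I=T(bc)=T(b)T(c)+H(b)H(\widetilde{c})$, that is, $I-H(b)H(\widetilde{c})=T(b)T(c)$.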
For $\alpha \in \mathbb{D}$, we define the inner functions $\mu_\alpha$ and $B_\alpha$ by
\[\mu_\alpha(z)=\frac{z-\alpha}{1-\overline{\alpha}z}, \quad B_\alpha(z)=\frac{-\overline{\alpha}}{|\alpha|}\frac{z-\alpha}{1-\overline{\alpha}z} \quad (z \in \mathbb{D}),\] with the convention to put $B_0(z)=z$. The space $K_u$ is known to be finite-dimensional if and only if $u$ is a finite Blaschke product, that is, if and only if there are $\alpha_1, \ldots, \alpha_N$ in $\mathbb{D}$ such that $u=B_{\alpha_1}\cdots B_{\alpha_N}$. We let $\sigma(u)$ denote the numbers $\alpha_1, \ldots, \alpha_N$, repeated according to the number of times they appear in $u=B_{\alpha_1}\cdots B_{\alpha_N}$. Finally, as usual, the geometric mean of a (matrix) function $\varphi$ on $\mathbb{T}$ is defined by \[G(\varphi)=\exp(\log \det \varphi)_0:=\exp\left(\frac{1}{2\pi}\int_0^{2\pi}\log\det\varphi(e^{i\theta})d\theta\right).\] Here is the model space version of the Borodin--Okounkov formula.
\begin{thm} \label{Theo 1.1} If $u=B_{\alpha_1}\cdots B_{\alpha_N}$ is a finite Blaschke product, then \begin{equation} \det T_u(a)=\left(\prod_{\alpha \in \sigma(u)} G(a \circ \mu_{-\alpha})\right)\frac{\det (I-Q_u H(b)H(\widetilde{c})Q_u)}{\det (I-H(b)H(\widetilde{c}))}. \label{1.1} \end{equation} \end{thm}
An alternative expression for the product of the numbers $G(a\circ\mu_{-\alpha})$ is \begin{equation} \prod_{\alpha \in \sigma(u)} G(a \circ \mu_{-\alpha})=\prod_{\alpha \in \sigma(u)} \det v_+(\alpha)\det v_-(1/\overline{\alpha}). \label{1.2} \end{equation} For $u(z)=z^N$, the products (\ref{1.2}) become $G(a)^N$ and (\ref{1.1}) turns into the classical Borodin--Okounkov formula, which was originally established in \cite{Borodok}, reformulated, extended to the block case, and equipped with two new proofs in \cite{BasWid}, and with still another proof in \cite{Botok}. For positive functions $a$, the formula was even already in \cite{GerCase}, which, however, was not known to the authors of \cite{BasWid}, \cite{Borodok}, \cite{Botok} at the time they wrote their papers. Taking into account that $Q_u=T(u)T(\overline{u})$ for an arbitrary inner function, it is easy to see that \[\det(I-Q_u H(b)H(\widetilde{c})Q_u)=\det(I-H(\overline{u}b)H(\widetilde{c}\widetilde{u}))\] for every inner function $u$.
Now suppose $\{\alpha_j\}_{j=1}^\infty$ is a sequence of points in $\mathbb{D}$. Put \[u_N(z)=\prod_{j=1}^N B_{\alpha_j}(z).\] The following is a model space version of the strong Szeg\H{o} limit theorem.
\begin{thm} \label{Theo 1.2}
If $\sum_{j=1}^\infty (1-|\alpha_j|) = \infty$, then $u_N(z) \to 0$ for $z \in \mathbb{D}$, $Q_{u_N} \to 0$ strongly and \begin{equation} \lim_{N \to \infty} \det T_{u_N}(a)\prod_{\alpha \in \sigma(u_N)} G(a \circ \mu_{-\alpha})^{-1}= \frac{1}{\det (I-H(b)H(\widetilde{c}))}. \label{1.3} \end{equation}
If $\sum_{j=1}^\infty (1-|\alpha_j|) < \infty$, then $u_N(z)$ converges to the infinite Blaschke product \[B(z)=\prod_{j=1}^\infty B_{\alpha_j}(z)\] for $z \in \mathbb{D}$, $Q_{u_N} \to Q_B$ strongly, and \begin{equation} \lim_{N \to \infty} \det T_{u_N}(a)\prod_{\alpha \in \sigma(u_N)} G(a \circ \mu_{-\alpha})^{-1}= \frac{\det(I-Q_B H(b)H(\widetilde{c})Q_B)}{\det (I-H(b)H(\widetilde{c}))}. \label{1.4} \end{equation} \end{thm} Again, in the case where $u_N(z)=z^N$, this theorem implies that \[\lim_{N \to \infty} \det T_{z^N}(a)\, G(a)^{-N} = \frac{1}{\det (I-H(b)H(\widetilde{c}))},\] which is the classical Szeg\H{o}--Widom limit theorem, established by Szeg\H{o} \cite{Sz} in the scalar case ($m=1$) and by Widom \cite{Wid} in the block case ($m \ge 1$). Note that for $m=1$ we have \[\frac{1}{\det (I-H(b)H(\widetilde{c}))}=\exp \sum_{k=1}^\infty k (\log a)_k (\log a)_{-k},\] and that for $m \ge 1$ we may also write \[\frac{1}{\det (I-H(b)H(\widetilde{c}))}=\det T(a) T(a^{-1}).\] We refer to the books \cite{BoSi} and \cite{Simon} for more on this topic, including the history. Incidentally, sequences of Toeplitz operators $T_{u_N}(a)$ with $u_{N+1}$ divisible by $u_N$ and with $P_{u_N}$ converging strongly to $I$ appeared already in Treil's paper \cite{Treil} (and his results are also quoted on p. 394 of \cite{BoSi}).
\section{Proofs}
We first prove Theorem \ref{Theo 1.1} and formula (\ref{1.2}). Let $u$ be a finite Blaschke product. As shown in \cite{Botok} (or see \cite[p. 552]{BoSi} or \cite{BoWi}), Jacobi's formula for the minors of the inverse matrix can be extended to identity minus trace class operators: \[\det P_u (I-L)^{-1} P_u = \frac{\det (I-Q_u L Q_u)}{\det (I-L)}\] whenever $L$ is of trace class and $I-L$ is invertible. This formula with $L=H(b)H(\widetilde{c})$ will give Theorem~\ref{Theo 1.1} provided we can prove that \begin{equation} \det P_u(I-H(b)H(\widetilde{c}))^{-1}P_u=\det T_u(a) \prod_{\alpha \in \sigma(u)} G(a \circ \mu_{-\alpha})^{-1}. \label{2.1} \end{equation} It is readily seen that if $\varphi \in H^\infty$, then \begin{equation} P_u T(\varphi)=P_u T(\varphi) P_u, \quad T(\overline{\varphi}) P_u = P_u T(\overline{\varphi}) P_u.\label{2.1a} \end{equation} Consequently, \begin{eqnarray*} & & P_u (I-H(b)H(\widetilde{c}))^{-1}P_u=P_u T(c)^{-1}T(b)^{-1}P_u\\ & & = P_u T(v_+^{-1})T(w_-)T(w_+)T(v_-^{-1})P_u =T_u(v_+^{-1}) T_u(a) T_u(v_-^{-1}). \end{eqnarray*} Taking determinants, we see that the left-hand side of (\ref{2.1}) equals \[\det T_u(a) /( \det T_u(v_+) \det T_u(v_-)).\] We are so left with proving that \begin{eqnarray} & & \det T_u(v_+)=\prod_{\alpha \in \sigma(u)}\det v_+(\alpha), \quad \det T_u(v_-)=\prod_{\alpha \in \sigma(u)} \det v_-(1/\overline{\alpha}), \label{2.2}\\ & & \prod_{\alpha \in \sigma(u)}\det v_+(\alpha)\det v_-(1/\overline{\alpha})= \prod_{\alpha \in \sigma(u)} G(a \circ \mu_{-\alpha}). \label{2.3} \end{eqnarray} The determinant is the product of the eigenvalues. A complex number $\lambda$ is an eigenvalue of $T_u(v_+)$ if and only if $T_u(v_+)-\lambda I=T_u(v_+-\lambda I)$ is not invertible. We may think of $T_u(v_+-\lambda I)$ as an $m \times m$ block matrix whose blocks $T_u(v_+^{jk}-\lambda \delta_{jk})$ are generated by scalar-valued functions. By virtue of~(\ref{2.1a}), the blocks commute pairwise, and hence $T_u(v_+-\lambda I)$ is not invertible if and only if the block determinant $\det T_u(v_+-\lambda I)$ is not invertible. Again by~(\ref{2.1a}), $\det T_u(v_+-\lambda I)=T_u(\det(v_+-\lambda I))$. But the operator $T_u(\det(v_+-\lambda I))$ is known to be not invertible if and only if $\det(v_+(\alpha)-\lambda I)=0$ for some $\alpha \in \sigma(u)$; see~\cite[p. 66]{Nik} or~\cite[Theorem 15(ii)]{GaRoss}. Equivalently, $T_u(\det(v_+-\lambda I))$ is not invertible if and only if $\lambda$ is an eigenvalue of $v_+(\alpha)$ for some $\alpha \in \sigma(u)$. Thus, the set of the eigenvalues of $T_u(v_+)$ is the union of the sets of the eigenvalues of $v_+(\alpha)$ for $\alpha \in \sigma(u)$, multiplicities taken into account. This proves the first formula in~(\ref{2.2}). 
The second now follows from the equalities \[\det T_u(v_-)=\overline{\det T_u(v_-^*)}=\prod_{\alpha \in \sigma(u)}\overline{\det v_-^*(\alpha)}=\prod_{\alpha \in \sigma(u)}\det v_-(1/\overline{\alpha}).\] Finally, we have \begin{eqnarray*} & & \prod_{\alpha \in \sigma(u)}\det v_+(\alpha) \det v_-(1/\overline{\alpha})= \prod_{\alpha \in \sigma(u)}\det(v_+\circ \mu_{-\alpha})(0) \det(v_- \circ \mu_{-\alpha})(\infty)\\ & & = \exp \sum_{\alpha \in \sigma(u)}\Big(\log\det(v_+\circ \mu_{-\alpha})(0)+ \log\det(v_- \circ \mu_{-\alpha})(\infty)\Big)\\ & & = \exp \sum_{\alpha \in \sigma(u)}\Big([\log\det(v_+\circ \mu_{-\alpha})]_0+ [\log\det(v_- \circ \mu_{-\alpha})]_0\Big)\\ & & = \exp \sum_{\alpha \in \sigma(u)}[\log \det (a\circ \mu_{-\alpha})]_0=\prod_{\alpha \in \sigma(u)} G(a \circ \mu_{-\alpha}), \end{eqnarray*} which gives (\ref{2.3}) and completes the proof of Theorem \ref{Theo 1.1} and formula (\ref{1.2}).
Once Theorem \ref{Theo 1.1} is available, Theorem \ref{Theo 1.2} is no surprise. Indeed, the assertions concerning the limit of $u_N(z)$ are well known, and the theorem on the lower limits of model spaces on page 35 of \cite{Nik} implies that $P_{u_N}$ converges strongly to $I$ if $u_N(z) \to 0$ and to $P_B$ if $u_N(z) \to B(z)$. Formulas~(\ref{1.3}) and~(\ref{1.4}) then result from Theorem~\ref{Theo 1.1} and the continuity of the determinant on $I$ minus the trace ideal.
\section{Three Examples}
As already said, for $u(z)=z^N$ the term (\ref{1.2}) is simply $G(a)^N$. For general inner functions $u$, it is less harmless. It suffices to illustrate things in the simple case where $v_+(z)=1-vz$ with $|v| <1$. We put \[G_u(v)=\prod_{\alpha \in \sigma(u)} v_+(\alpha)=\prod_{\alpha \in \sigma(u)} (1-v\alpha).\]
{\bf Example 1.} Let $\alpha_j=1-1/j^2$ and $u_N(z)=\prod_{j=1}^N B_{\alpha_j}(z)$. Then \begin{eqnarray*} \log G_{u_N}(v) & = & \sum_{j=1}^N \log (1-v\alpha_j)= \sum_{j=1}^N \log\left(1-v+\frac{v}{j^2}\right)\\ & = & N \log (1-v)+\sum_{j=1}^N \log \left(1+\frac{v}{1-v}\,\frac{1}{j^2}\right)\\ & = & N \log (1-v)+\sum_{j=1}^\infty \log \left(1+\frac{v}{1-v}\,\frac{1}{j^2}\right)+O\left(\frac{1}{N}\right) \end{eqnarray*} and hence \begin{eqnarray*} G_{u_N}(v) & = & (1-v)^N \prod_{j=1}^\infty\left(1+\frac{v}{1-v}\,\frac{1}{j^2}\right)\left(1+O\left(\frac{1}{N}\right)\right)\\ & = & (1-v)^N \:\frac{\sinh\left(\pi \sqrt{\frac{v}{1-v}}\right)}{\pi \sqrt{\frac{v}{1-v}}}\left(1+O\left(\frac{1}{N}\right)\right). \end{eqnarray*}
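A quick numerical sanity check of this expansion is easy to set up. The following Python sketch (restricted to real $v\in(0,1)$; the particular values of $v$ and $N$ are arbitrary choices) compares the exact product with the asymptotic expression on a logarithmic scale to avoid underflow:
\begin{verbatim}
import numpy as np

v, N = 0.4, 20000
alphas = 1 - 1 / np.arange(1, N + 1) ** 2          # alpha_j = 1 - 1/j^2
q = v / (1 - v)

# log of the exact product  prod_{j<=N} (1 - v*alpha_j)
log_exact = np.sum(np.log(1 - v * alphas))

# log of the asymptotics  (1-v)^N * sinh(pi*sqrt(q)) / (pi*sqrt(q))
log_asym = N * np.log(1 - v) + np.log(np.sinh(np.pi * np.sqrt(q))
                                      / (np.pi * np.sqrt(q)))

print(np.exp(log_exact - log_asym))                # close to 1, error O(1/N)
\end{verbatim}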
{\bf Example 2.} Now take $\alpha_j=1-1/j$ and $u_N(z)=\prod_{j=1}^N B_{\alpha_j}(z)$. This time, with $q:=v/(1-v)$, \begin{eqnarray*} \log G_{u_N}(v) & = & N \log (1-v)+\sum_{j=1}^N \log\left(1+\frac{q}{j}\right)\\ & = & N\log(1-v)+\sum_{j=1}^N \left(\log\left(1+\frac{q}{j}\right)-\frac{q}{j}\right)+\sum_{j=1}^N\frac{q}{j}, \end{eqnarray*} and this equals \[N\log(1-v)+\sum_{j=1}^\infty \left(\log\left(1+\frac{q}{j}\right)-\frac{q}{j}\right)+O\left(\frac{1}{N}\right)+ q\left(\log N + C + O\left(\frac{1}{N}\right)\right),\] where $C=0.5772\ldots$ is Euler's constant. It follows that \[G_{u_N}(v)=(1-v)^N \,N^q\,e^{qC}\prod_{j=1}^\infty \left(1+\frac{q}{j}\right)e^{-q/j}\left(1+O\left(\frac{1}{N}\right)\right),\] and taking into account that \[\prod_{j=1}^\infty \left(1+\frac{q}{j}\right)e^{-q/j}=\frac{e^{-qC}}{\Gamma(q+1)},\] we arrive at the formula \[G_{u_N}(v)=\frac{(1-v)^N\, N^{v/(1-v)}}{\Gamma\left(\frac{1}{1-v}\right)}\left(1+O\left(\frac{1}{N}\right)\right).\]
{\bf Example 3.} The previous two examples raise the question whether the limits of $G_{u_{N+1}}(v)/G_{u_N}(v)$ and $G_{u_N}(v)^{1/N}$ always exist. Surprisingly, the answer is NO. Since $G_{u_{N+1}}(v)/G_{u_N}(v)=1-v\alpha_{N+1}$, this is clear for the quotient. To give a counterexample for the root, we construct a sequence $\{u_N\}$ with a subsequence $\{u_{N_i}\}$ such that $G_{u_{N_i}}(v)^{1/N_i}$ alternately assumes two different values. We take $u_N(z)=\prod_{j=1}^N B_{\alpha_j}(z)$ where $\alpha_j =r_j z_j$, $r_j \in (0,1)$, $z_j \in \mathbb{T}$, and $\sum_{j=1}^\infty (1-r_j) < \infty$. Then \begin{eqnarray*} G_{u_N}(v) & = & \prod_{j=1}^N (1-vr_jz_j)=\prod_{j=1}^N (1-vz_j+vz_j(1-r_j))\\ & = & \prod_{j=1}^N (1-vz_j)\prod_{j=1}^N \left(1+\frac{vz_j}{1-vz_j}(1-r_j)\right)\\ & = & \prod_{j=1}^N (1-vz_j)\prod_{j=1}^\infty \left(1+\frac{vz_j}{1-vz_j}(1-r_j)\right)\:(1+o(1)), \end{eqnarray*} and it is sufficient to choose $\{z_j\}_{j=1}^\infty$ so that the limit of $\prod_{j=1}^N(1-vz_j)^{1/N}$ does not exist. We successively take $z_j=-1$ or $z_j=1$ and denote by $f(N)$ the number of choices of $z_j=1$ after $N$ steps. Here $f: \mathbb{N} \to \mathbb{N}$ may be any function such that \begin{equation} f(N-1) \le f(N) \le f(N-1)+1 \quad \mbox{for} \quad N \ge 2. \label{3.1} \end{equation} Then \[\prod_{j=1}^N(1-vz_j)^{1/N}=(1-v)^{f(N)/N}(1+v)^{(N-f(N))/N}=(1+v)\left(\frac{1-v}{1+v}\right)^{f(N)/N},\] and we are left with searching a function satisfying (\ref{3.1}) such that $f(N)/N$ has no limit as $N \to \infty$. Such functions obviously exist: start with $f(1)=1$, leave $f(N)$ constant until $f(N)/N=1/4$, then increase $f(N)$ successively by $1$ until $f(N)/N=1/2$, after that leave $f(N)$ again constant to reach $f(N)/N=1/4$, then increase $f(N)$ anew by ones until $f(N)/N=1/2$, etc. Here is this function explicitly. Every natural number $N \ge 3$ may uniquely be written as $N=2\cdot 3^k+\ell$ with $k \ge 0$ and $1 \le \ell \le 4\cdot 3^k$. We put \[f(2\cdot 3^k+\ell)=\left\{\begin{array}{lll} 3^k & \mbox{for} & 1 \le \ell \le 2\cdot 3^k,\\ \ell-3^k & \mbox{for} & 2\cdot 3^k \le \ell \le 4\cdot 3^k, \end{array}\right.\] and we also define $f(1)=f(2)=1$. Thus, our choice for $z_1$ is $1$, the following three choices are $z_2=z_3=z_4=-1$, the following two are $z_5=z_6=1$, the following six $z_j$ are $-1$, the next six $z_j$ are $1$, and so on. It can be verified straightforwardly that $f$ satisfies~(\ref{3.1}), and since $f(N)/N=1/2$ for $N=2\cdot 3^k$ and $f(N)/N=1/4$ for $N=4\cdot 3^k$, the limit of $f(N)/N$ does not exist.
Albrecht B\"ottcher
Fakult\"at f\"ur Mathematik
TU Chemnitz
09107 Chemnitz
Germany
{\tt [email protected]}
\end{document}
\begin{document}
\title{Open quantum dynamics with singularities: Master equations and degree of non-Markovianity}
\author{Abhaya S. Hegde}
\email{[email protected]}
\author{K. P. Athulya}
\author{Vijay Pathak}
\affiliation{
School of Physics, Indian Institute of Science Education and Research, Thiruvananthapuram, Vithura, Kerala 695551, India
}
\author{Jyrki Piilo}
\affiliation{
Turku Centre for Quantum Physics, Department of Physics and Astronomy, University of Turku, FI-20014, Turun yliopisto, Finland
}
\affiliation{
Laboratory of Quantum Optics, Department of Physics and Astronomy, University of Turku, FI-20014, Turun yliopisto, Finland
}
\author{Anil Shaji}
\email{[email protected]}
\affiliation{
School of Physics, Indian Institute of Science Education and Research, Thiruvananthapuram, Vithura, Kerala 695551, India
}
\date{\today}
\begin{abstract}
Master equations describing open quantum dynamics are typically first-order differential equations. When such dynamics brings the trajectories in state space of more than one initial state to the same point at finite instants in time, the generator of the corresponding master equation becomes singular while the dynamical map becomes non-invertible. The first-order, time-local, homogeneous master equations then fail to describe the dynamics beyond the singular point. Retaining time-locality in the master equation necessitates a reformulation in terms of higher-order differential equations. We formulate a method to eliminate the divergent behavior of the generator by using a combination of higher-order derivatives of the generator with suitable weights and illustrate it with several examples. We also present a detailed study of the central spin model and we propose the average rate of information inflow in non-Markovian processes as a quantity that captures a different aspect of non-Markovian dynamics.
\end{abstract}
\maketitle
\section{\label{sec:intro}Introduction}
Almost all realistic quantum systems are open systems with their dynamics determined by interactions with the environment also. Although the evolution of the system in the presence of its environment does not follow unitary dynamics, the combined evolution of the system and environment is unitary in nature. The reduced dynamics of the system of interest is then obtained by tracing over the environmental degrees of freedom from the time-evolved combined density matrix as $\rho_{S}(t)=\text{Tr}_{E}[\rho_{SE}(t)]$. The reduced system dynamics induced by the joint evolution of the system and its environment can be modeled by a dynamical map given by $\rho_{S}(t)=\mathcal{E}_{t} \rho_{S}(0)$~\cite{Sudarshan1961, Choi1972, Choi1975, Erling1963}. While the dynamical maps describe changes in the state of the open system across finite-time intervals akin to the unitary time-evolution operator for closed systems, continuous-time description of open quantum evolution is typically formulated in terms of quantum master equations~\cite{BreuerBook, Breuer2009}. Open quantum systems endowed with a large separation in timescales of the system and environment are modeled using the Markov approximation and their dynamics is described by a Markovian master equation. The quantum master equation under the Markov approximation can be written in the Gorini-Kossakowski-Sudarshan-Lindblad (GKSL) form that corresponds to completely positive and trace preserving open quantum system dynamics~\cite{GKS1976, Lindblad1976}.
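For later reference, the GKSL form mentioned above can be written, in standard notation that is not specific to this paper, as \[ \frac{d\rho_{S}}{dt}=-\frac{i}{\hbar}[H,\rho_{S}]+\sum_{k}\gamma_{k}\Big(L_{k}\rho_{S} L_{k}^{\dagger}-\frac{1}{2}\big\{L_{k}^{\dagger}L_{k},\rho_{S}\big\}\Big), \] with a Hermitian operator $H$, jump operators $L_{k}$, and nonnegative rates $\gamma_{k}$. In the time-dependent, local-in-time master equations discussed below, $H$, $L_{k}$, and $\gamma_{k}$ may depend on time, and the rates $\gamma_{k}(t)$ need not remain nonnegative at all times.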
There are processes for which the Markovian approximation is not valid and we have to turn to non-Markovian dynamics. Time-dependent, local-in-time, master equations of GKSL form can be formulated for the non-Markovian case as well~\cite{Breuer1999, Silvan2016, Sabrina2014, Rivas_2012}. In this paper, we present several physically realizable non-Markovian cases for which forcing the description of the system dynamics into a time-local master equation leads to a singular generator. The propagation of states after the singularity cannot be done formally using the time-local master equation. Motivated by the rapid developments in the ability to study open quantum dynamics experimentally, we address this gap in the formalism and in the process also propose a minor but useful modification to one of the standard ways of quantifying information back flow and non-Markovianity.
We investigate how processes in which the trajectories of distinct states diverge after a singularity can be mathematically described. Note that the trajectories we consider in the following are in the space of all possible quantum states of the system of interest. A suitable parametrization of the state space, for instance, with the Bloch ball of states of a single qubit, will allow us to visualize these trajectories as well. We see how a general master equation for such dynamics that holds true for all time can be constructed in certain cases. Specifically, we propose higher-order master equations to weed out the singularities in a manner that their solutions reduce to that of the traditional first-order equation at all other points. The proposed higher-order equations naturally take care of propagating the state through the singularities. Dynamics with singular points are typically non-Markovian. Different approaches to characterize the non-Markovianity resulting from the divergent behavior of generators were studied in~\cite{Hall2007, Hall2014, DivisibilityPRA2011, DivisibilityPRA2019, DivisibilityPRL2018} and a measure to characterize the nature and degree of the singularity was proposed in~\cite{Hou2012}. Depending on the nature of the singularity in the generator of the first-order master equation, we arrive at different forms of equivalent higher-order master equations that avoid the singular behavior. We are interested in exploring the connection, if any, between the nature of the singularity and the nature of the non-Markovianity in the system. This however requires a comparison of the degree of non-Markovianity in different processes. There are several proposed measures of non-Markovianity available in the literature~\cite{Li2018,Li2019,Li2020,Wolf2008,
RHP2010, Vasile2011,
Lu2010, Lorenzo2013, Alipour2012, breuer_measure_2009, Laine2010, Liu2013,ColloquiumNM, Daniel2017, rivas_huelga_plenio_2014}, but they do not typically allow for a direct comparison between processes as explained later on. The characterization of the singularity in~\cite{Hou2012} also is not suitable for comparison of different processes. To circumvent these difficulties, we introduce a quantity to capture the persistence of information inflow which, in turn can lead to meaningful comparison of different non-Markovian processes. In addition to using this quantity to compare the singular processes, we extend its applicability and demonstrate its utility in comparing generic non-Markovian processes as well.
This paper is structured as follows. In Sec.~\ref{sec:def}, we introduce the relevant definitions and the problem. We reinforce the issues of singular dynamics with an illustrative example in Sec.~\ref{sec:spin_model}. A discussion on possible avenues to resolve the issue is presented in Sec.~\ref{sec:met}. In Sec.~\ref{sec:Solution}, we apply our results to the example presented in Sec.~\ref{sec:spin_model}. We comment on different classes of examples in Sec.~\ref{sec:more_ex} using our methods. A new quantity that enables comparison of the observed non-Markovianity in different processes is proposed in Sec.~\ref{sec:measures}. Section~\ref{sec:disc} contains a brief discussion and our conclusion.
\section{\label{sec:def}Dynamical Maps and Master Equations}
The dynamics of an open quantum system that is initially in a product state with its environment can be expressed in terms of the completely positive and trace preserving (CPTP) dynamical maps $\mathcal{E}_t$. The open system we will be considering is a single qubit. Using the left-right vectorization formalism~\cite{Milz_2017} to write the equations of motion for the open dynamics of the qubit, we represent its quantum states, $\rho_t$, by real vectors and the quantum dynamical maps $\mathcal{E}_t$ as real four-dimensional matrices. Since the density operators of a qubit live in the four-dimensional real linear space of Hermitian qubit operators, any quantum state can be written as $\rho = (\mathbb{I}+\vec{r} \cdot \vec{\sigma})/2$, where $|\vec{r}| \leq 1$ and $\vec{\sigma} = (\sigma_x, \sigma_y, \sigma_z)$ is a vector of Pauli operators. The condition $|\vec{r}| \leq 1$ enforces positivity of $\rho$ and the states of the qubit can be represented as points in the Bloch sphere. The vector $(1, \vec{r})$ furnishes the real, four-dimensional representation of the quantum state. The affine form of $\mathcal{E}_t$ acting on the state $(1, \vec{r})$ is
\begin{equation}
\label{map}
\mathcal{E}_t = \begin{pmatrix} 1 & \vec{0} \\ \vec{s} & T \end{pmatrix},
\end{equation}
with $\vec{s}$ a translation vector and $T$ a real three-dimensional matrix. The Bloch sphere vectors transform as $\vec{r}' \equiv \mathcal{E}_t(\vec{r}) = T\vec{r}+\vec{s}$.
The state of the system at time $t$ is given by the dynamical map as
\begin{equation}
\label{dyn_map}
\rho_t = \mathcal{E}_t [\rho_0],
\end{equation}
with $\rho_0 \equiv \rho_{t=0}$. When $\mathcal{E}_t$ is an invertible map, one finds its time-local generator as
\begin{equation}
\label{generator_map}
\mathcal{L}_t = \dot{\mathcal{E}}_{t}^{\mathstrut}\mathcal{E}_{t}^{-1}.
\end{equation}
Assuming the semigroup property $\mathcal{E}_{t+s} = \mathcal{E}_t\mathcal{E}_s$, we can write a time-local master equation $\dot{\rho}_t = \mathcal{L}_t [\rho_t]$ in the well-known GKSL form (choosing $\hbar = 1$)
\begin{equation}
\label{GKSLeq}
\dot{\rho}_{t} = -i[H, \rho_t] + \sum_{i=1}^{3}\gamma_{i}\bigg(L_{i}^{\mathstrut} \rho_{t}^{\mathstrut} L_{i}^\dagger - \frac{1}{2}\Big\{L_{i}^\dagger L_{i}^{\mathstrut}, \rho_{t}^{\mathstrut}\Big\}\bigg)
\end{equation}
where $\text{tr}(L_i)=0$ and $\text{tr}(L_iL_j)=\text{tr}(L_jL_i)=\delta_{ij}$. In other words, the Lindblad operators $L_i$ are traceless and orthonormal. The dynamics described by the semigroup master equation is Markovian. The Markovian master equation may be generalized by introducing time-dependent Lindblad-like operators and time-dependent decay rates $\gamma_i(t)$ in Eq.~\eqref{GKSLeq}. This results in a GKSL form for generators $\mathcal{L}_t$ of open dynamics that are not Markovian in general,
\begin{align}
\label{quasi_GKSL}
\dot{\rho}_t = & -i[H(t), \rho_t] \nonumber \\
& + \sum_{i=1}^{3} \gamma_i(t) \left[L_{i}^{\mathstrut}(t) \rho_{t}^{\mathstrut} L_{i}^{\dagger}(t) - \frac{1}{2}\left\{L_{i}^{\dagger}(t) L_{i}^{\mathstrut}(t), \rho_{t}^{\mathstrut}\right\}\right].
\end{align}
The presence of negative rates $\gamma_i(t) < 0$ for some $i$ and $t$ can be regarded as non-Markovian behavior~\cite{Hall2014, ColloquiumNM, Daniel2017, rivas_huelga_plenio_2014}.
Since the time-local master equation is first-order in time, knowing the state at time $t$ allows one to uniquely determine the state at all later times $t' > t$. In particular, it follows that if the trajectories of two states $\rho_1(t)$ and $\rho_2(t)$ intersect at some time $t = t_{c}$, i.e., $\rho_1(t_c) = \rho_2(t_c)$, the trajectories will move together for all subsequent times, i.e., $\rho_{1}(t') = \rho_{2}(t')$ for $t' > t_{c}$. Since any such merging of trajectories is irreversible, the dynamical map in Eq.~\eqref{dyn_map} becomes noninvertible in all such cases and thus the generator as defined in Eq.~\eqref{generator_map} ceases to exist. However, there are several examples of physically valid processes in which the trajectories of multiple states converge at distinct points in time and then are again separate for $t > t_{c}$. Moreover, the trajectories of qubit dynamics visualized on the Bloch sphere for all such processes are analytic, even at those instants of time when the inverse dynamical map does not exist. Clearly, the first-order equation fails to describe the dynamics of these processes. We illustrate such a process using the central spin model in the next section and then propose a way of describing such dynamics using higher-order differential equations.
\section{\label{sec:spin_model}Example: Central Spin Model}
To illustrate the problem at hand, we examine here a central spin model used to simulate the interaction of a single electron spin confined to a quantum dot with a bath of nuclear spins~\cite{Breuer2004}. Consider a bath consisting of $N$ spin-$\frac{1}{2}$ particles coupled to a central spin-$\frac{1}{2}$ particle. The interaction Hamiltonian is
\begin{equation}
\label{spin_ham}
H = \sum_{k=1}^{N}A_k \sigma_z \otimes \sigma_z^{(k)}, \quad A_k = \frac{A}{\sqrt{N}}
\end{equation}
such that each spin in the bath interacts with the central spin via the Pauli $\sigma_z$ operator. Note that we have scaled the coupling constant appearing in the Hamiltonian by a factor of $1/\sqrt{N}$, which keeps the total interaction energy between the central spin and the surrounding spins constant irrespective of $N$. We will see later on that this choice is required if we are to compare different non-Markovian processes. We begin with an initial product state for the total system of $N+1$ particles, namely $\eta_{0} = \rho_{0} \otimes \mathbb{I}/2^N$. The final state of the central spin after tracing out the bath of $N$ surrounding spins at time $t$ is
\begin{align}
\rho_{t} &= \text{Tr}_{\text{E}}\left(e^{-iHt}\eta_{0}e^{iHt}\right) \nonumber \\
& =
\begin{pmatrix}
\rho_{11} & \cos^N\left(\frac{2At}{\sqrt{N}}\right)\rho_{12} \\
\cos^N\left(\frac{2At}{\sqrt{N}}\right)\rho_{21} &\rho_{22}
\end{pmatrix}
\label{sys_state}
\end{align}
with $\rho_{ij}$, $i,j \in \{1, 2\}$, denoting the elements of $\rho_{0}$. The master equation typically used to describe this process is~\cite{Hou2012}
\begin{equation}
\label{spin_mod}
\dot{\rho}_{t} = A\sqrt{N}\tan\left(\frac{2At}{\sqrt{N}}\right)\left(\sigma_z\rho_{t}\sigma_z-\rho_{t}\right).
\end{equation}
The rate appearing in the equation above is proportional to $\tan\left(2At/\sqrt{N}\right)$ and the equation is singular at $t = \sqrt{N}(2k+1)\pi/4A$ for $k = 0,\, 1,\, 2,\, \cdots$. However, this model is known to be exactly solvable for all $N$~\cite{Breuer2004}. Moreover, it is easy to see that the dynamical map corresponding to this process,
\begin{equation}
\label{spin_map}
\mathcal{E}_{t}^{\text{spin}} = \operatorname{diag}\left(1,\, \cos^{N}\bigg(\frac{2At}{\sqrt{N}}\bigg), \, \cos^{N}\bigg(\frac{2At}{\sqrt{N}}\bigg), \, 1\right)
\end{equation}
is a well-defined diagonal matrix for all $t$. The trajectories of a pair of initial states of the central spin are plotted on the Bloch sphere in Fig.~\ref{fig:sing_behav}. We see that the two trajectories intersect at $t=t_c$; the map becomes many-to-one at this point and its inverse ${\mathcal E}_t^{-1}$ ceases to exist. The master equation~\eqref{spin_mod} fails to describe the observed trajectory beyond this (first) singular point since beyond $t_c$ the first-order differential equation yields identical evolution for both intersecting trajectories. The dynamical map nevertheless outputs the correct final state for all times and yields diverging trajectories after $t=t_c$ as shown in the figure. The failure of the master equation to predict the evolution beyond $t_c$ prompts us to explore the existence of an alternate differential equation that is consistent with the dynamics given by the map while, at the same time, not exhibiting singularities.
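As a concrete illustration of this failure, the following minimal numerical sketch (our own illustration, not part of the original analysis) propagates the $x$ component of the Bloch vector with the exact map element and, separately, by integrating the singular first-order equation, assuming $N=1$ and $A=1/2$ so that $\omega = 2A/\sqrt{N} = 1$ and the first singularity sits at $t_c = \pi/2$:
\begin{verbatim}
# Minimal illustrative sketch (not the authors' code): compare propagation of
# the x component of the Bloch vector with the exact map element cos(t)
# against direct integration of the singular first-order equation
# dx/dt = -tan(t) x, which diverges at t = pi/2.
import numpy as np
from scipy.integrate import solve_ivp

x0 = 1.0 / np.sqrt(2.0)                # example initial x component
t_grid = np.linspace(0.0, np.pi, 201)
x_exact = np.cos(t_grid) * x0          # propagation with the dynamical map

def rhs(t, x):
    # first-order master equation for the x component; singular at t = pi/2
    return -np.tan(t) * x

sol = solve_ivp(rhs, (0.0, np.pi), [x0], t_eval=t_grid,
                rtol=1e-10, atol=1e-12)

# The integrator either stops near pi/2 or no longer tracks cos(t) x0 on the
# far side of the singularity, while the map remains well defined throughout.
print("integration over [0, pi] successful?", sol.success)
n = len(sol.t)
print("max |ode - exact| on the computed range:",
      np.max(np.abs(sol.y[0] - x_exact[:n])))
\end{verbatim}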
\begin{figure}
\caption{ Trajectories of two initially distinct states are shown at different times on the Bloch sphere. The figure corresponds to the evolution given by the Hamiltonian in Eq.~\eqref{spin_ham} with $N=1$.}
\label{fig:sing_behav}
\end{figure}
\section{\label{sec:met}Higher Order Master Equations}
Since any nondiagonal dynamical map can be made diagonal by a suitable choice of operator basis~\cite{nielsen2002quantum}, we explore the case of a general diagonal map. For the sake of simplicity we will stick to unital maps for which $\vec{s} = 0$ in Eq.~\eqref{map}. We point out that our arguments can also be extended to non-unital maps in a straightforward manner, as shown in examples later (see Sec. \ref{subsec:non_uni} below). Choosing the affine map in Eq.~\eqref{map} as a diagonal matrix that describes the transformations of a state in each subspace, we define
\begin{equation}
\label{map_form}
T = \operatorname{diag}{(f_{x}(t),\, f_{y}(t),\, f_{z}(t))}.
\end{equation}
Writing the initial state in the vectorized form $\rho_0 = (1, x, y, z)^T$, the action of this unital map corresponds to the master equation:
\begin{align*}
\frac{d \rho}{dt} &= \dot{\mathcal{E}}_t \rho_0 \\
& = {\left(
\begin{array}{cccc}
1 & 0 & 0 & 0 \\
0 & \frac{\dot{{f_x}}}{{f_x}} & 0 & 0 \\
0 & 0 & \frac{\dot{{f_y}}}{{f_y}} & 0 \\
0 & 0 & 0 & \frac{\dot{{f_z}}}{{f_z}} \\
\end{array}
\right)\cdot \left(
\begin{array}{c}
1 \\
{f_x}\cdot x \\
{f_y}\cdot y \\
{f_z}\cdot z \\
\end{array}
\right)} \\
& = \dot{\mathcal{E}}_{t}{\mathcal{E}_{t}}^{-1}\rho_t \equiv \mathcal{L}_t\rho_t.
\end{align*}
Here $\mathcal{L}_t$ would be indeterminate if any of the $1/f_i$ were singular. In such a case, we seek well-defined higher-order derivatives to obtain a valid description for the evolution of states. Suppose a function $f_i$ in the map has a zero at $t_{c}$ while $\dot{f}_i(t_c)$ is nonzero; then $\dot{f}_i/f_i$ diverges at $t_{c}$. We can, however, Taylor expand $f_{i}(t)$ and its derivatives around the critical time $t_{c}$, using $f_{i}(t_c) = 0$. If $\ddot{f}_{i}(t_c)$ is also zero while $\dot{f}_{i}(t_c)$ is nonzero, then
\[ \lim_{t \to t_c}\dfrac{\ddot{f}_{i}(t)}{f_{i}(t)} = \dfrac{\dddot{f}_{i}(t_c)}{\dot{f}_{i}(t_c)} \]
is a well defined quantity at $t_c$ as well. Since ${\mathcal E}$ and ${\mathcal L}$ are both diagonal, it is straightforward to see that
\[ \frac{d^2 \rho}{dt^2} = {\mathcal L}_t^{(2)} \rho_t \]
is a differential equation for $\rho(t)$ devoid of the singularities that beset the first-order equation. Here we have defined higher-order generators as
\begin{equation}
\label{higher_order_gen}
\mathcal{L}^{(n)}_t \equiv \mathcal{E}^{(n)}_t\mathcal{E}^{-1}_t = \frac{d^n\mathcal{E}_t}{dt^n}\mathcal{E}^{-1}_t,
\end{equation}
with ${\mathcal L}_t^{(1)} \equiv {\mathcal L}_t$. If the order of derivatives considered above does not lead to a nonsingular equation, we extend the same method to higher derivatives until we obtain a nonzero finite value for the ratio.
Note that this method may still not yield a finite value for some cases even if we consider all orders of derivatives. In such cases we find that a combination of different order generators with suitable weights of the form
\begin{equation}
\label{gen_eqn}
\sum_n p_n \rho_t^{(n)} = 0
\end{equation}
would yield a non-diverging time-local master equation that holds for all time. The coefficients $p_n$ can be obtained from the higher derivatives of the generator ${\mathcal L}_t^{(n)}$ as described in the next section. The exact dynamics can be found by solving these differential equations which require specifying more initial conditions than that for the traditional master equation. Our approach is valid for nondiagonal maps as well. For reasons of mathematical complexity and the lack of experimental literature requiring the usage of time-dependent Lindblad (or jump) operators, the singularities present in such dynamics are left unexplored in this paper.
\section{\label{sec:Solution}Master equation for the spin model}
The concept of higher-order equations can be nicely illustrated considering the example of the central spin model described earlier. It also offers a viable experimental setup to validate our findings. In general, characterizing the dynamics observed in an experiment requires an accurate description of the decay rates. Using the techniques of quantum process tomography, one may infer the relevant rates with sufficient accuracy as described below.
In terms of traceless operators $F_{\alpha}$, Eq.~\eqref{GKSLeq} can be rewritten as
\begin{align}
\label{lindblad}
\dot{\rho} =& -i [H(t),\, \rho_t] \nonumber \\
&+ \frac{1}{2} \sum_{\alpha,\, \beta=1}^{d^2-1}c_{\alpha\beta}(t)\left([F_{\alpha}\rho_t,\, F_{\beta}^{\dagger}] + [F_{\alpha},\, \rho_t F_{\beta}^\dagger]\right).
\end{align}
We choose $F_{\alpha}$ to be Pauli operators (up to a normalization constant) and take the Hamiltonian to be $H = h_{\alpha}\sigma_{\alpha}$. Substituting these in Eq.~\eqref{lindblad} yields a traceless matrix for $\dot{\rho}$. Since Pauli matrices form a basis for $2 \times 2$ matrices, we can express $\dot{\vec{\mathbf{r}}} \equiv (\dot{x},\,\dot{y},\,\dot{z})$ in terms of the nine Kossakowski coefficients $c_{\alpha\beta}$ and three parameters of the Hamiltonian.
From the experimentally observed data, we can determine the values of $(\dot{x},\, \dot{y},\, \dot{z})$ at each time $t$ using
$\dot{f} = \lim\limits_{h \rightarrow 0} [f(t+h)-f(t)]/h$ for $f \equiv (x,\, y,\, z)$.
Corresponding to 12 unknowns (nine from $c_{\alpha\beta}$ and three more from $h_i$) and three known quantities ($\dot{x},\, \dot{y},\, \dot{z}$), we can set up 12 independent linear equations by choosing four linearly independent initial states. For example, the set of states $\rho_1 = \ket{0}\bra{0}, \, \rho_2 = \ket{1}\bra{1}, \, \rho_3 = \ket{+}\bra{+}$, and $\rho_4 = \ket{-}\bra{-}$ where $\ket{+} \equiv (\ket{0}+\ket{1})/\sqrt{2}$, and $\ket{-} \equiv (\ket{0}+i\ket{1})/\sqrt{2}$, furnishes one such choice.
Determining all the unknowns involves solving the resulting linear equations. The first-order traditional master equation so obtained from the experimental data can now be used to locate the singular points.
The quantum process tomography~\cite{Bellomo_2010_tomography,Bennink_2019_tomography, Boulant_2003_tomography,Howard_2006_tomography,Ben_2020_tomography} described above leads to the equation of motion given in Eq.~\eqref{spin_mod} and the corresponding dynamical map given in Eq.~\eqref{spin_map}. The generator of the dynamics is singular when one or more of the elements of the diagonal dynamical map goes to zero. By inspection, we see that these points correspond to the zeros of $\cos^N(\omega t)$ where $\omega \equiv 2A/\sqrt{N}$. As mentioned earlier, despite the singularities in ${\mathcal L}_t^{\rm spin} = \dot{\mathcal E}_t^{\rm spin} ({\mathcal E}_t^{\rm spin})^{-1}$, the dynamical map in Eq.~\eqref{spin_map} is analytic for all $t$. In order to construct a higher-order differential equation that avoids the singular behavior, we therefore start from the dynamical map $\rho_t = {\mathcal E}_t \rho_0$, where we have taken ${\mathcal E}_t^{\rm spin} \equiv {\mathcal E}_t$ for simplicity. We consider higher derivatives of the equation involving the dynamical map,
\begin{equation}
\label{dyn_der}
\rho_t^{(k)} = {\mathcal E}_t^{(k)} \rho_0,
\end{equation}
with the equation for $\rho_t^{(1)}$ being the same as Eq.~\eqref{spin_mod}. The strategy we adopt is as follows. The terms that appear in ${\mathcal E}_t^{(k)}$ are derivatives of $\cos^N(\omega t)$, which in turn are functions of $\sin(\omega t)$ and $\cos (\omega t)$. Computing a sufficient number of derivatives as in Eq.~\eqref{dyn_der} allows us to invert these functions and write them in terms of $\rho_t^{(k)}$; the next suitable higher derivative of $\rho_t$ can then be expressed fully in terms of its lower derivatives, leading to a higher-order dynamical equation of the form given in Eq.~\eqref{gen_eqn}.
The $x$ component of the Bloch vector representing $\rho_t$ is transformed by the dynamical map as $\rho_{t,x} = \cos^{N}(\omega t)\rho_{0,x}$. Since the $y$ component also follows the same pattern and since the map is diagonal, we focus on obtaining a higher-order differential equation for $\rho_{t,x}$ without loss of generality. The equation so obtained also applies to the full density matrix. Exploiting the properties of the derivatives of $\sin(\omega t)$ and $\cos(\omega t)$, we expand $\cos^{N}(\omega t)$ as a binomial sum of exponentials that, upon simplification, turns into a sum of cosines:
\begin{equation}
\cos^N(\omega t) = \frac{1}{2^N} \sum_{j=0}^{N}\binom{N}{j}e^{i(N-2j) \omega t}.
\end{equation}
For even $N$ we obtain a binomial sum of cosines as follows:
\begin{equation}
\cos^{2m}(\omega t) = \frac{1}{4^m} \binom{2m}{m} + \frac{1}{2^{2m-1}}\sum_{j=1}^m\binom{2m}{m+j}\cos(2j \omega t).
\end{equation}
Odd-order derivatives of $\rho_{t,x}$ contain $m$ terms each containing $\sin(2j\omega t)$ for $j=1, \ldots , m$. The first $m$ odd-order derivatives can be collected and rewritten as a system of linear equations of the form
\[
\begin{bmatrix}
a_{11} & \cdots & a_{1m} \\
\cdots & \cdots & \cdots \vphantom{\vdots} \\
a_{m1} & \cdots & a_{mm}
\end{bmatrix}
\begin{bmatrix}
\sin(2 \omega t) \rho_{0,x} \\ \vdots \\ \sin(2m \omega t)\rho_{0,x}
\end{bmatrix}
=
\begin{bmatrix}
\rho_{t,x}^{(1)} \\ \vdots \\ \rho_{t,x}^{(2m-1)}
\end{bmatrix}
\]
where $a_{ij}$ denotes the coefficients gathered from odd differentiations,
\begin{equation}
a_{ij} = (-1)^{i} \frac{1}{2^{2m-1}} \binom{2m}{m+j} (2j \omega)^{2i-1}.
\end{equation}
The superscript on $\rho_{t,x}$ denotes the order of the time derivative. Factoring the nonzero quantity $-(1/2^{2m-1})\binom{2m}{m+j}(2j\omega)$ out of column $j$ leaves entries $\left[-(2j\omega)^2\right]^{i-1}$, so $A$ is a column-scaled Vandermonde matrix in the distinct quantities $(2j\omega)^2$ and its determinant is always nonzero. This system of linear equations can therefore be inverted so as to express $\sin(2j \omega t) \rho_{0,x}$ in terms of $d^{k}\rho_{t,x}/dt^{k}$ for $k=1,\ 3,\ \ldots,\ 2m-1$. The right hand side of the equation for the $(2m +1)$th derivative of $\rho_{t,x}$ is then completely determined by $\sin(2j \omega t) \rho_{0,x}$ for $j=1,\ldots, m$, which in turn can now be written in terms of the odd derivatives of $\rho_{t,x}$. This leads to a differential equation of order $2m+1$ of the form $\sum_{j=0}^{m} p_{2j+1} \rho_t^{(2j+1)} = 0$. Here we have used the fact that both $\rho_{t,x}$ and $\rho_{t,y}$ have the same dynamics to write the differential equation for the full density matrix.
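The construction can be checked symbolically. The following short sketch (our own illustration, shown for $N=4$, i.e., $m=2$) builds the matrix $A=[a_{ij}]$, expresses $\rho_{t,x}^{(2m+1)}$ through the lower odd derivatives, and verifies that the resulting constant-coefficient equation annihilates $\cos^{N}(\omega t)$:
\begin{verbatim}
# Illustrative check (ours, not from the paper) of the even-N construction,
# shown for N = 2m = 4.  A is the matrix of coefficients a_ij defined in the
# text (i, j below are 0-based, so i+1 and j+1 play the roles of the text's
# indices); v collects the coefficients of sin(2 j w t) rho_{0,x} in the
# (2m+1)-th derivative of rho_{t,x}.
import sympy as sp

t, w = sp.symbols('t omega', positive=True)
m = 2
N = 2 * m

b = [sp.binomial(N, m + j) / sp.Integer(2)**(N - 1) for j in range(1, m + 1)]
A = sp.Matrix(m, m, lambda i, j:
              (-1)**(i + 1) * b[j] * (2 * (j + 1) * w)**(2 * (i + 1) - 1))
v = sp.Matrix(1, m, lambda _, j:
              (-1)**(m + 1) * b[j] * (2 * (j + 1) * w)**(2 * m + 1))

# rho^{(2m+1)} = (v A^{-1}) (rho^{(1)}, rho^{(3)}, ..., rho^{(2m-1)})^T
coeffs = sp.simplify(v * A.inv())
print("coefficients of the odd derivatives:", coeffs)
# for m = 2 this gives (-64 w^4, -20 w^2), i.e.
# rho^(5) + 20 w^2 rho^(3) + 64 w^4 rho^(1) = 0

# verify that the equation annihilates the map element cos^N(w t)
f = sp.cos(w * t)**N
residual = sp.diff(f, t, 2 * m + 1) \
    - sum(coeffs[k] * sp.diff(f, t, 2 * k + 1) for k in range(m))
print("residual:", sp.simplify(residual))   # 0
\end{verbatim}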
For odd $N$ we can do a similar analysis starting from
\begin{equation}
\cos^{2m+1}(\omega t) = \frac{1}{2^{2m}}\sum_{j=0}^{m} \! \binom{2m+1}{j} \! \cos[(2m-2j+1)\omega t].
\end{equation}
The first $m+1$, odd-order derivatives $(\rho_{t,x}^{(1)}, \ldots, \rho_{t,x}^{(2m+1)})^T$ can be equated to
\[
\begin{bmatrix}
a_{11} & \cdots & a_{1,m+1} \\
\cdots & \cdots & \cdots \vphantom{\vdots} \\
a_{m+1,1} & \cdots & a_{m+1,m+1}
\end{bmatrix}
\begin{bmatrix}
\sin( \omega t) \rho_{0,x} \\ \vdots \\ \sin[(2m+1) \omega t]\rho_{0,x}
\end{bmatrix}
\]
with
\begin{equation}
a_{ij} = (-1)^{i} \frac{1}{2^{2m}}\binom{2m+1}{m+j} [(2j-1)\omega]^{2i-1}.
\end{equation}
This system of linear equations again yields $\sin[(2j+1) \omega t]$ for $j=0,\ 1,\ \ldots,\ m$ in terms of the odd-order derivatives of $\rho_{t,x}$. Differentiating $\rho_{t,x}$ twice more leads to a master equation as desired. Note that when $N \rightarrow \infty$, $\cos^N(\omega t) \rightarrow e^{-2A^2t^2}$ and the singular behavior is pushed to $t \rightarrow \infty$. A Markovian, first-order, dephasing master equation is obtained in this limit, with many states being mapped to the same state on the $z$ axis of the Bloch sphere only asymptotically.
For example, a third order master equation is obtained for $N=2$ and $\omega = 1$ in the central spin model. The corresponding dynamical map is $\mathcal{E}(t) = \operatorname{diag}\left(1,\,\cos^2(t),\, \cos^2(t),\, 1\right)$. Rewriting $\cos^2(t)$ as $[1+\cos(2t)]/2$ leads to $\dot{\rho}_t = -\sin(2t)\rho_0$, $\ddot{\rho}_t = -2\cos(2t)\rho_0$ and $\dddot{\rho_t} = +4\sin(2t)\rho_0$. Combining these derivatives we see that
\begin{equation}
\label{spin_2}
4\dot{\rho}_t + \dddot{\rho_t} = 0.
\end{equation}
This higher-order master equation for the central spin model with $N=2$ is numerically solved for a pure initial state $r_0 = \big( 1/2,\, 1/\sqrt{2},\, 1/2 \big)$ as shown in Fig.~\ref{fig:num_soln}. While the first-order equation~\eqref{spin_mod} is singular at $\pi/2$ and hence is unable to propagate the solution beyond that point, we see that the dynamics obtained from Eq.~\eqref{spin_2} is smooth at all times, just as desired.
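For completeness, a minimal numerical sketch (ours) of this comparison, assuming the same parameters $N=2$, $\omega=1$ and the initial Bloch vector $(1/2,\, 1/\sqrt{2},\, 1/2)$, integrates Eq.~\eqref{spin_2} for the $x$ component and compares it against the exact map element $\cos^{2}(t)$:
\begin{verbatim}
# Illustrative sketch (ours): solve 4 rho' + rho''' = 0 for the x component of
# the Bloch vector and compare with the exact map element cos^2(t) x0.
# Initial derivatives follow from rho_x(t) = cos^2(t) x0 evaluated at t = 0.
import numpy as np
from scipy.integrate import solve_ivp

x0 = 1.0 / np.sqrt(2.0)

def rhs(t, y):
    # y = (rho_x, rho_x', rho_x'');  rho_x''' = -4 rho_x'
    return [y[1], y[2], -4.0 * y[1]]

y0 = [x0, 0.0, -2.0 * x0]          # cos^2 and its first two derivatives at 0, times x0
t_grid = np.linspace(0.0, 2.0 * np.pi, 400)
sol = solve_ivp(rhs, (0.0, 2.0 * np.pi), y0, t_eval=t_grid,
                rtol=1e-10, atol=1e-12)

x_exact = np.cos(t_grid)**2 * x0
print("max deviation from cos^2(t) x0:", np.max(np.abs(sol.y[0] - x_exact)))
# the third-order equation propagates smoothly through t = pi/2 and 3 pi/2,
# where the first-order rate (proportional to tan(t)) is singular
\end{verbatim}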
\begin{figure}
\caption{ Numerical solutions of the higher-order master equation \eqref{spin_2} (dashed curves in white) are plotted component-wise along with the elements of dynamical map from Eq.~\eqref{spin_map} with $N=2$ (colored solid curves). This plot displays the evolution of each component of the Bloch vector for the initial pure state $\vec{r}_0 = \big( 1/2,\, 1/\sqrt{2},\, 1/2 \big)$. Solutions given by higher-order equations exactly agree with that of the dynamical map and the dashed white curves fall exactly on top of the solid colored ones. This is unlike the solution of the first-order equation which blows up at $\pi/2$ and cannot be propagated further. Time on the $x$-axis is shown in units of $1/\omega$.}
\label{fig:num_soln}
\end{figure}
It is important to note that the higher-order equations can also be obtained by directly using the diverging generator and its derivatives leading to an equation that closely resembles Eq.~\eqref{gen_eqn}. We use $\rho_{t}^{(n)} = \mathcal{L}^{(n)}_t\rho_t$ in Eq.~\eqref{gen_eqn} so that $\sum_n p_n\mathcal{L}_t^{(n)}\rho_t = 0$ holds true for all $\rho_t$, which in turn yields,
\begin{equation}
\label{generator_equation}
\sum_{n} p_n \mathcal{L}^{(n)}_t = 0.
\end{equation}
For the central spin model it is possible to start from $\rho_t^{(1)} = {\mathcal L}_t \rho_t$ instead of Eq.~\eqref{dyn_der} and arrive at Eq.~\eqref{generator_equation} without considering the dynamical map. However the steps involved will be more complicated when using the generator rather than the map because of the $\rho_t$ appearing on the right hand side. While using Eq.~\eqref{dyn_der} makes it simpler to see how the higher-order equation is obtained, it also gives the impression that knowledge of the full dynamics in terms of the map at all times is necessary for obtaining the higher-order equation. We point out here that this is not the case and starting from the (singular) generator obtained using the process tomography steps outlined at the beginning of this section, one can directly obtain the higher-order master equation.
We illustrate this approach for the central spin model with $N=2$ and $\omega=1$. As noted previously, the $x$ and $y$ components of Bloch vector undergo the same dynamics and so we consider only the $x$ component, $\rho_{t,x}$. Denoting the $x$ component of generator by $\mathcal{L}_{t,x}$, we have,
\begin{align*}
\mathcal{L}_{t,x} \rho_{t,x} &= -2 \tan(t) \rho_{t,x},\\
\mathcal{L}^{(2)}_{t,x} \rho_{t,x} &\equiv \left(\dot{\mathcal{L}}_{t,x} + \mathcal{L}_{t,x}^2 \right) \rho_{t,x} = 2\left[\tan^2(t) - 1\right] \rho_{t,x}, \\
\mathcal{L}^{(3)}_{t,x} \rho_{t,x} &\equiv \left(\ddot{\mathcal{L}}_{t,x} + 3\mathcal{L}_{t,x}\dot{\mathcal{L}}_{t,x} + \mathcal{L}_{t,x}^3 \right) \rho_{t,x} = 8\tan(t)\rho_{t,x}.
\end{align*}
From the equations above, as expected, we recover Eq.~\eqref{spin_2} in the form $4\mathcal{L}_t+\mathcal{L}^{(3)}_t = 0$.
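This identity is easy to verify symbolically; a short check (ours, with $\omega = 1$) reads:
\begin{verbatim}
# Quick symbolic check (ours) of the generator route for N = 2, omega = 1:
# with L = -2 tan(t), the combinations L^(2) = L' + L^2 and
# L^(3) = L'' + 3 L L' + L^3 reproduce the expressions above, and
# 4 L + L^(3) vanishes identically.
import sympy as sp

t = sp.symbols('t')
L = -2 * sp.tan(t)
L2 = sp.simplify(sp.diff(L, t) + L**2)          # equivalent to 2 tan(t)^2 - 2
L3 = sp.simplify(sp.diff(L, t, 2) + 3 * L * sp.diff(L, t) + L**3)
                                                # equivalent to 8 tan(t)
print("L^(2) =", L2)
print("L^(3) =", L3)
print("4 L + L^(3) =", sp.simplify(4 * L + L3))   # 0
\end{verbatim}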
From either of the methods we described above to obtain the higher-order equations, it is clear that their order is $N+1$ for even $N$ and $N+2$ for odd $N$. Consequently, we would need as many specified initial conditions to overcome the issue of singularity. In other words, it is mandatory to know the history of the particle to determine the further evolution of a state. As this feature suggests the presence of memory effects to varying extents, it is then natural to speculate if a correspondence between the number of bath spins and the degree of non-Markovianity can be established. More on this is discussed in Sec.~\ref{sec:measures}.
\section{\label{sec:more_ex} Higher order master equations for other types of singular open dynamics}
The singular behavior for the first-order master equation of the central spin model is not unique to this model. We present several examples of CPTP maps with singularities, the first-order master equations, and their corresponding higher-order master equations whose solutions are free of singularities. As before, we phrase our discussion in terms of dynamical maps because of the simplicity and clarity afforded by this approach. Having the dynamical maps at hand also helps in verifying that the solutions of the higher-order master equations that are obtained indeed do reproduce the dynamics faithfully. We reiterate that as with the central spin model, the (singular) generator is sufficient to obtain the corresponding higher-order equations and knowledge of the full dynamics in terms of the dynamical map for all $t$ is not needed. We categorize the examples considered based on the dynamical map being unital or not.
\subsection{Unital dynamical maps}
We continue with the central spin model and consider a case where the locations of the singularities of the first-order master equation can be moved around by changing the model parameters. This means that the difficulties encountered in the numerical propagation of the first-order equation can be modulated and for certain choices of model parameters such solutions can become impracticable or even impossible to obtain. In this case, if one were to take the restricted point of view of an observer who has access only to the central spin and does process tomography to determine the form of the generator, the dynamical map for all times remains inaccessible to the observer since even numerical integration of the obtained first-order master equation may be precluded. Proceeding to construct the higher-order master equation then appears to be the only path forward for this restricted observer in order to gain predictive power over its evolution.
We consider a central spin under the influence of two environment spins with unequal interaction strengths as given by the Hamiltonian,
\begin{equation*}
H = \frac{\omega_{1}}{2} (\sigma_z \otimes \mathbb{I} \otimes \sigma_z) + \frac{\omega_{2}}{2} (\sigma_z \otimes \sigma_z \otimes \mathbb{I}),
\end{equation*}
and $\mathcal{E}_t = \operatorname{diag}(1,\, \cos( \omega_{1} t)\cos(\omega_{2} t),\,\cos( \omega_{1} t)\cos( \omega_{2} t),\, 1)$ is the map describing the reduced dynamics of the first qubit. The generator will include two tangent functions, each with a different argument. It is possible to change the locations of the singularities by altering the interaction strengths. In addition, if we increase the number of environment spins, the number of tangent functions in the generator will also increase. When singularities are closely spaced, propagating the first-order differential equation beyond them without accumulating significant errors becomes increasingly difficult. For the case of two environment spins, the equations of motion for the $x$ and $y$ components of the Bloch vector of the state of the central spin are again the same (the dynamics of the $z$ component does not exhibit any singular behavior). The higher-order equation for $\rho_{t,x}$ is
\begin{equation}
\rho_{t,x}^{(4)} + 2(\omega_{1}^2+\omega_{2}^2) \rho_{t,x}^{(2)}+ (\omega_{1}^2-\omega_{2}^2)^2\rho_{t,x}=0.
\end{equation}
Solving this fourth-order equation yields the correct dynamics.
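As a quick sanity check (ours), one can verify symbolically that the map element $\cos(\omega_1 t)\cos(\omega_2 t)$ indeed satisfies this fourth-order equation:
\begin{verbatim}
# Symbolic check (ours) that cos(w1 t) cos(w2 t) satisfies
# x'''' + 2 (w1^2 + w2^2) x'' + (w1^2 - w2^2)^2 x = 0.
import sympy as sp

t, w1, w2 = sp.symbols('t omega_1 omega_2', positive=True)
x = sp.cos(w1 * t) * sp.cos(w2 * t)
residual = (sp.diff(x, t, 4)
            + 2 * (w1**2 + w2**2) * sp.diff(x, t, 2)
            + (w1**2 - w2**2)**2 * x)
print(sp.simplify(residual))   # 0
\end{verbatim}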
As a second example consider the dynamical map given below which is CPTP for all $\gamma, \omega \geq 0$ and has no inverse at $\omega t = (m+\frac{1}{2})\pi, m \in \mathbb{Z}$ due to the singular nature of the dynamics of the $x$ and $y$ components of the Bloch vector:
\begin{equation}
\label{ex1}
\mathcal{E}_{t} =
\left(
\begin{array}{cccc}
1 & 0 & 0 & 0 \\
0 & e^{-\gamma t} \cos (\omega t) & 0 & 0 \\
0 & 0 & e^{-\gamma t} \cos (\omega t) & 0 \\
0 & 0 & 0 & e^{-\gamma t} \\
\end{array}
\right).
\end{equation}
The functions appearing in this dynamical map are non-periodic and the singularities in the dynamics occur at periodic intervals of $\pi/\omega$.
The traditional master equation for the above map is
\begin{align}
\dot{\rho}_t = \frac{1}{4}
\bigg\{&\gamma (\sigma_x \rho_t \sigma_x - \rho_t)
+ \gamma (\sigma_y \rho_t \sigma_y - \rho_t) \nonumber \\
+ &\left[\gamma + 2\omega \tan(\omega t)\right](\sigma_z \rho_t \sigma_z - \rho_t)\bigg\}.
\end{align}
For simplicity, assume that $\omega=\gamma=1$.
The higher-order master equation for this example looks like
\begin{equation}
\label{eq_ex2}
\rho_t^{(4)}+M\rho_t = 0,
\end{equation}
where $M = \operatorname{diag}\left(0,\, 4,\, 4,\, -1\right).$
Choosing different values of $\omega$ and $\gamma$ results in a master equation of a different order from the one above.
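A similar short check (ours, for $\omega=\gamma=1$) confirms Eq.~\eqref{eq_ex2}: the map elements $e^{-t}\cos(t)$ and $e^{-t}$ satisfy the corresponding components of the fourth-order equation,
\begin{verbatim}
# Symbolic check (ours) of Eq. (eq_ex2) for omega = gamma = 1:
# e^{-t} cos(t) obeys f'''' + 4 f = 0 and e^{-t} obeys g'''' - g = 0.
import sympy as sp

t = sp.symbols('t')
f = sp.exp(-t) * sp.cos(t)
g = sp.exp(-t)
print(sp.simplify(sp.diff(f, t, 4) + 4 * f))   # 0
print(sp.simplify(sp.diff(g, t, 4) - g))       # 0
\end{verbatim}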
It would be misleading to dismiss the singularities in the first-order equations as manually avoidable by choosing to ``jump'' over those discrete points while regularizing the traditional master equations, either by analytically integrating the rates or via forceful numerical techniques. Although one may try to ``escape'' the singular points by carefully choosing the integration limits, this relies on exact knowledge of the locations of the singularities. However, one can come up with examples where it is impossible to obtain all singular points analytically. The advantage of using higher-order equations is further emphasized by the fact that it is not necessary to know when singularities occur, as shown in the next example. Returning to the generic form in Eq.~\eqref{map_form} for the diagonal unital map, consider the following choice:
\begin{subequations}
\begin{align}
\label{exp_decay}
f_{x}(t) =
f_{y}(t) &= \frac{1}{6}(2+4e^{-\gamma t}-3\sin^2(\omega t)), \\
f_{z}(t) &= \frac{1}{3}(4e^{-\gamma t}-1).
\end{align}
\end{subequations}
This dynamical map is constructed in such a way that it is not possible to obtain all the singular points analytically. In addition to $\gamma t = \log(4)$, at which $f_z$ vanishes for any $\gamma \geq 0$, the dynamics exhibits singular behavior whenever the following transcendental equation holds true:
$\gamma t = \log 4 - \log(3 \sin^2(\omega t) - 2)$.
The traditional master equation is
\begin{align}
\dot{\rho}_t = & \frac{\gamma}{4-e^{\gamma t}}
\bigg[\left(\sigma_x\rho_t\sigma_x-\rho_t\right) + \left(\sigma_y\rho_t\sigma_y-\rho_t\right)\bigg] \nonumber \\
& + \left(\frac{\gamma}{e^{\gamma t}-4}+\frac{4\gamma + 3\omega e^{\gamma t}\sin(2\omega t)}{8+e^{\gamma t}[1+3\cos(2\omega t)]}\right) \nonumber \\
& \times \left(\sigma_z\rho_t\sigma_z-\rho_t\right).
\end{align}
The higher-order equation provides a reliable description since it naturally gets rid of all the singularities regardless of our knowledge of their whereabouts. For $\gamma = \omega = 1$ we obtain the following higher-order master equation, which holds for all times,
\begin{align}
\rho^{(5)}_t = 4\rho^{(1)}_t - 3\rho^{(3)}_t.
\end{align}
In this last example for unital maps, we demonstrate singularities arising from zeros, at discrete times, in all three diagonal elements of the dynamical map. For $1/n_1 + 1/n_2 + 1/n_3 \leq 1$ and $a_1,\, a_2,\, a_3 \geq 0$, the following choice of diagonal elements of the map from Eq.~\eqref{map_form} stays CPTP:
\begin{subequations}
\begin{align}
f_{x}(t) &= 1-2\left(\frac{1-e^{-a_1t}}{n_1} + \frac{1-e^{-a_2t}}{n_2}\right), \\
f_{y}(t) &= 1-2\left(\frac{1-e^{-a_1t}}{n_2} + \frac{1-e^{-a_3t}}{n_3}\right), \\
f_{z}(t) &= 1-2\left(\frac{1-e^{-a_2t}}{n_2} + \frac{1-e^{-a_3t}}{n_3}\right).
\end{align}
\end{subequations}
The constants $a_j$ and $n_j$ determine when the singularities occur and we can identify one set of singular points observed for each component of the Bloch vector of the state of the system qubit at times
\[ t_j=\frac{1}{a_j}\ln\left(\frac{1}{1-\frac{n_j}{4}}\right), \quad j=1,2,3.\]
The rates appearing in the traditional master equation are given by \[ \gamma_x = \frac{1}{4}\left(\frac{\dot{f}_{x}}{f_{x}} - \frac{\dot{f}_{y}}{f_{y}} - \frac{\dot{f}_{z}}{{f_{z}}}\right)\]
and its cyclic permutations among $x,\, y,\, z$. For this map, singularities occur in all three Bloch vector components at distinct times determined by the constants $a_j$ and $n_j$. The higher-order equation without singularities that holds for all times is given by
\begin{equation}
M_{1} \rho^{(3)}_t + M_{2} \rho^{(2)}_t + M_{3} \rho^{(1)}_t = 0,
\end{equation}
where
\begin{eqnarray*}
M_{1} & = & \operatorname{diag}\left(0,\, 1,\, 1,\, 1\right), \\
M_{2} & = & \operatorname{diag}\left(0,\, a_1+a_2,\, a_1+a_3,\, a_2+a_3\right), \\
M_{3} & = & \operatorname{diag}\left(0,\, a_1a_2,\, a_1a_3,\, a_2a_3\right).
\end{eqnarray*}
\subsection{\label{subsec:non_uni}Non-unital dynamical maps}
The Jaynes-Cummings Hamiltonian in the interaction picture for a two-level atom coupled to a quantized electromagnetic field is given by \begin{equation} \hat{\mathcal{H}}_{\mathrm{JC}}= \omega\left(a \sigma_{+}+a^{\dagger} \sigma_{-}\right). \end{equation}
This model corresponds to a non-unital dynamical CPTP map~\cite{BreuerBook}:
\begin{equation}
\label{JC_map}
\mathcal{E}_{\mathrm{JC}}(t) =
\begin{bmatrix*}[c]
1 & 0 & 0 & 0 \\
0 & f(t) & 0 & 0 \\
0 & 0 & f(t) & 0 \\
f^2(t)-1 & 0 & 0 & f^2(t) \\
\end{bmatrix*}.
\end{equation}
The corresponding generator is:
\begin{equation}
\label{JC_gen}
\mathcal{L}_{\mathrm{JC}}(t) =
\begin{bmatrix*}[c]
0 & 0 & 0 & 0 \\
0 & \dot{f}/f & 0 & 0 \\
0 & 0 & \dot{f}/f & 0 \\
2\dot{f}/f & 0 & 0 & 2\dot{f}/f \\
\end{bmatrix*}.
\end{equation}
This example is presented to show that our method can be applied to non-unital maps also. For a real function $f$, the time evolution corresponds to a time-local Lindblad-like master equation~\cite{Hall2007},
\begin{equation}
\label{JC_model}
\dot{\rho}(t) = -\frac{\dot{f}(t)}{f(t)}\big[2 \sigma_- \rho_t \sigma_+ - \sigma_+\sigma_-\rho_t - \rho_t\sigma_+\sigma_-\big].
\end{equation}
where $\sigma_+ = \ket{e}\bra{g}$, and $\sigma_- = \ket{g}\bra{e}$. Choosing $f(t) = \cos(\omega t)$ corresponds to the Jaynes-Cummings model on resonance, describing the interaction of an atom with a cavity field. We see that Eq.~\eqref{JC_model} is singular just like Eq.~\eqref{spin_mod} because of the $\tan(\omega t)$ term. However, the regularized, higher-order master equation in this case will be different from the spin model ($N=1$) described earlier which has a second-order master equation. Noticing that
\begin{equation}
\label{JC_higher_gen}
\mathcal{L}^{(4)} + 4\omega^2 \mathcal{L}^{(2)} = \operatorname{diag}\left(0,\, -3\omega^4,\, -3\omega^4,\, 0\right),
\end{equation}
a straightforward calculation reveals that
\begin{equation}
\label{JC_corrected}
M_1 \rho^{(4)}_t + M_2 \rho^{(2)}_t + M_3 \rho_t = 0,
\end{equation}
where
\begin{align*}
M_1 &= \operatorname{diag}\left(0,\, 1,\, 1,\, 1\right), \\
M_2 &= \operatorname{diag}\left(0,\, 4\omega^2,\, 4\omega^2,\, 4\omega^2\right), \\
M_3 &= \operatorname{diag}\left(0,\, 3\omega^4,\, 3\omega^4,\, 0\right).
\end{align*}
We can equivalently rewrite Eq.~\eqref{JC_higher_gen} as,
\begin{equation}
\label{JC_higher_equiv}
\rho^{(4)}_t+4\omega^2 \rho^{(2)}_t = \frac{3}{2} \omega^4 (\sigma_z \rho_t \sigma_z - \rho_t).
\end{equation}
The right hand side of Eq.~\eqref{JC_higher_equiv} has a different set of operators compared to Eq.~\eqref{JC_model}; it resembles a dephasing term with $\sigma_z$ operators rather than the $\sigma_\pm$ appearing in the first-order master equation. This highlights the fact that the higher-order master equations may have a substantially different form from the first-order ones in general. However, the presence of the higher derivatives means that these equations do not lend themselves to the usual interpretation of rates or Lindblad operators. For instance, in the present case, the operator $M_3$ acting on the state $\rho_t$ cannot be understood as a generator of time translations in the same manner as $\mathcal{L}_{\text{JC}}$. The meaning imparted by the extra terms present in higher-order equations appears to be context-dependent and thus inferring their exact meaning is beyond the scope of this study. It may be noted that the dynamics described by Eqs.~\eqref{JC_model} and \eqref{JC_higher_equiv} is the same except at the singular points.
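The consistency of Eq.~\eqref{JC_corrected} with the map in Eq.~\eqref{JC_map} can be verified directly on the Bloch components; a short symbolic sketch (ours, with $f(t)=\cos(\omega t)$) is:
\begin{verbatim}
# Symbolic check (ours) of Eq. (JC_corrected) on the Bloch components of the
# map (JC_map) with f(t) = cos(w t): the x (and y) component evolves as
# f(t) x0 and the z component as f(t)^2 (z0 + 1) - 1.
import sympy as sp

t, w, x0, z0 = sp.symbols('t omega x_0 z_0', real=True)
f = sp.cos(w * t)
x = f * x0
z = f**2 * (z0 + 1) - 1

res_x = sp.diff(x, t, 4) + 4 * w**2 * sp.diff(x, t, 2) + 3 * w**4 * x
res_z = sp.diff(z, t, 4) + 4 * w**2 * sp.diff(z, t, 2)   # M_3 has a vanishing z entry
print(sp.simplify(res_x), sp.simplify(res_z))            # 0 0
\end{verbatim}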
\section{\label{sec:measures}Comparing non-Markovian processes}
The necessity to explore higher-order differential equations for a clear description of singular processes naturally raises the question of the relationship, if any, between the extent of non-Markovianity and the order of the equations, or essentially, the nature of the singularities. This prompts us to seek a means of comparing different singular non-Markovian processes using existing measures of non-Markovianity. Non-Markovianity manifests itself in various ways such that there is no single measure or set of instructions by which a comparison of its ``degree'' can be conclusively done. Multiple measures have been developed as indicators of non-Markovian dynamics in the past, based on, for example, the nearest approximation to Markovian channels~\cite{Wolf2008}, entanglement between system and ancilla along with the deviations from the divisibility of dynamical maps~\cite{RHP2010}, non-monotonic behavior of fidelity~\cite{Vasile2011}, quantum Fisher information~\cite{Lu2010}, the volume of accessible states~\cite{Lorenzo2013}, non-zero quantum discord~\cite{Alipour2012}, and the behavior of trace distance~\cite{breuer_measure_2009, Laine2010, Liu2013}. There have been multiple studies to investigate the inflow of information and some of these studies have also considered those cases when the map is non-invertible~\cite{DivisibilityPRA2011,DivisibilityPRA2019,DivisibilityPRL2018}. Our interest, however, is in the relationship between the nature of singularities and the extent of non-Markovianity from the perspective of information inflow. We shall mainly focus on the trace distance measure defined in~\cite{breuer_measure_2009} owing to its quantitative nature and applicability to experimental realizations~\cite{Liu2011}.
A quantum process is non-Markovian if there is an initial pair of states $\rho_1(0)$ and $\rho_2(0)$ such that the trace distance $D(\rho_1(t),\, \rho_2(t))$ starts to increase for some time $t > 0$. A measure of non-Markovianity introduced by Breuer, Laine and Piilo~\cite{breuer_measure_2009} defined in terms of this property is
\begin{equation}
\label{BLP}
\mathcal{N}(\mathcal{E}_t) = \max\limits_{\rho_{1,2}(0)} \int_{t,\sigma>0}dt \ \sigma(\rho_1(0),\, \rho_2(0),\, t),
\end{equation}
where
\begin{equation}
\label{rate_trace_dist}
\sigma(\rho_1(0),\, \rho_2(0),\, t) = \frac{dD(\mathcal{E}_t\rho_1(0),\, \mathcal{E}_t\rho_2(0))}{dt},
\end{equation}
denotes the time derivative of the trace distance of the evolved pair of states. The trace distance for states $\rho_1$ and $\rho_2$, in turn is given by
\begin{equation}
D(\rho_1,\, \rho_2) = \frac{1}{2}\text{Tr}\,|{\rho_1-\rho_2}|,
\end{equation}
where the modulus of an operator $A$ is $|A| = \sqrt{A^{\dagger}A}$.
The integral over time in Eq.~\eqref{BLP} extends over all intervals in which $\sigma(t)>0$. The maximum is taken over all pairs of initial states $\rho_{1,2}(0)$. Note that the Breuer-Laine-Piilo (BLP) measure, $\mathcal{N}(\mathcal{E}_t)$ is a positive functional of the dynamical map $\mathcal{E}_t$ and that it acts as a measure for the maximal total inflow of information from the environment back to the open system. By construction, all Markovian processes have $\mathcal{N}(\mathcal{E}_t) = 0$.
For the spin model, $\sigma(t)$ is positive at periodic intervals and $\mathcal{N}(\mathcal{E}_t^{\text{spin}})$ adds up to infinity for any $N$ when the contributions from all the periods are added up. Therefore this measure cannot be used to compare the degree of non-Markovian behavior corresponding to different values of $N$. Analysis of other measures of non-Markovianity like the one quantified based on the change in Bloch sphere volume $V(t)$ of the set of accessible states of the evolved system~\cite{Lorenzo2013} also reveals a similar behavior independent of $N$ precluding the comparison that we seek. The divergent behavior of the BLP and related measures is not unique to the central spin model we consider.
Information inflow from the environment to the system is an unmistakable signature of non-Markovian evolution. In order to explore the exchange of information between the two in the central spin model, we look at the mutual information between the central spin and its environment of spins. Using the von Neumann entropy $S$ of a state $\rho$, calculated as $S(\rho) = -\text{Tr}(\rho \log \rho)$, the mutual information $\mathcal{I}$ is evaluated as
\[
\mathcal{I}(\rho_{\text{sys}}, \rho_{\text{env}}) = S(\rho_{\text{sys}}) + S(\rho_{\text{env}}) - S(\rho_{\text{joint}}),
\]
where $\rho_{\text{sys}}$ is the state of the system as in Eq.~\eqref{sys_state}, $\rho_{\text{env}} = \mathbb{I}/2^N$ is the bath state, and $\rho_{\text{joint}} = U\eta_0U^\dagger$ for $U = e^{-iH_{\text{spin}}t}$, all evaluated at time $t$.
This mutual information is plotted for different values of $N$ in Fig.~\ref{fig:mutinf}. We see from the oscillatory behavior of the mutual information that information is delocalized between the system and the environment and then localized back in the respective components in an alternating manner. The rate at which this exchange occurs depends on the number of environment spins, $N$. The time taken by the information, once delocalized, to again return to the central spin scales as $\sqrt{N}$. Note that this scaling is connected to the choice we made in Eq.~\eqref{spin_ham} for the Hamiltonian where the coupling between the central spin and the environment spins scaled as $1/\sqrt{N}$. We emphasize that this is different from the example considered in Ref.~\cite{breuer_measure_2009} wherein the interaction Hamiltonian was not scaled with the number of spins in the environment. This choice resulted in a process that had no Markovian limit as a function of $N$. However, in our case, we recover the expected case of Markovian evolution as $N \to \infty$ with the delocalized information never returning to the central qubit.
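A self-contained numerical sketch (ours, not the code used to generate the figure) that reproduces this behavior for a small bath, assuming the system starts in $\ket{+}$ and using $N=2$ bath spins with $A=0.5$, is given below.
\begin{verbatim}
# Illustrative computation (ours) of the mutual information between the
# central spin and its bath for the Hamiltonian of Eq. (spin_ham),
# with N = 2 bath spins, A = 0.5, and the system initially in |+>.
import numpy as np
from scipy.linalg import expm

N, A = 2, 0.5
sz, I2 = np.diag([1.0, -1.0]), np.eye(2)

def kron_chain(ops):
    out = np.array([[1.0]])
    for op in ops:
        out = np.kron(out, op)
    return out

# H = sum_k (A/sqrt(N)) sigma_z (system) x sigma_z^(k)
H = sum(kron_chain([sz] + [sz if j == k else I2 for j in range(N)])
        * (A / np.sqrt(N)) for k in range(N))

rho0 = 0.5 * np.array([[1.0, 1.0], [1.0, 1.0]])       # system state |+><+|
eta0 = np.kron(rho0, np.eye(2**N) / 2**N)             # bath maximally mixed

def entropy(rho):
    vals = np.linalg.eigvalsh(rho)
    vals = vals[vals > 1e-12]
    return float(-np.sum(vals * np.log2(vals)))

def mutual_information(t):
    U = expm(-1j * H * t)
    joint = U @ eta0 @ U.conj().T
    d = 2**N
    arr = joint.reshape(2, d, 2, d)
    sys = np.trace(arr, axis1=1, axis2=3)             # trace out the bath
    env = np.trace(arr, axis1=0, axis2=2)             # trace out the system
    return entropy(sys) + entropy(env) - entropy(joint)

for t in np.linspace(0.0, np.pi * np.sqrt(N), 5):
    print(f"t = {t:5.2f}   I(sys:env) = {mutual_information(t):.4f}")
\end{verbatim}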
\begin{figure}
\caption{Mutual information between the central spin and environment is plotted for varying number of spins in the bath. The number of spins in the bath are placed as labels next to each curve in the plot. The higher the number of interacting spins, the longer the interval between vanishing of mutual information, eventually reaching infinity for large $N$. Here we have chosen $A = 0.5$ with $A_k = 0.5/\sqrt{N}$. Time on the $x$-axis is shown in units of $1/\omega$.}
\label{fig:mutinf}
\end{figure}
The dynamics of the mutual information highlights an aspect of non-Markovian evolution that is not typically addressed by the various known measures of non-Markovianity. While the amount of inflow of information from the environment is captured by a measure of non-Markovianity like the BLP measure, we see that central spin models with different $N$ are also characterized by the time scales at which the inflow happens. Non-Markovianity is indeed recognized as a feature that makes mathematical descriptions of physical phenomena rather difficult. In the absence of a comprehensive, all-encompassing understanding of non-Markovian quantum evolution, we are led to consider the possibility that more than one measure may be necessary for capturing different aspects of non-Markovianity. We consider whether persistence of information exchange is an aspect of non-Markovianity that can be quantified in a manner that complements the existing measures. In addition to the central spin model, several processes allow the identification of `cycles' in their evolution such that the contribution of further dynamics to the BLP measure after the first cycle is redundant. Taking a cue from this, we propose supplementing the BLP measure with another quantity that determines a characteristic time $\tau$ to which the integral defining the BLP measure in Eq.~\eqref{BLP} can be limited. The average rate of inflow of information over one such cycle can then be used as an effective quantifier that allows us to compare the degree of non-Markovianity of different processes belonging to the same family. In other words, the ratio $\mathcal{N}(\mathcal{E}_t)/\tau$ with ${\mathcal N}({\mathcal E}_t)$ redefined as
\begin{equation}
\label{BLP2}
\mathcal{N}(\mathcal{E}_t) = \max\limits_{\rho_{1,2}(0)} \int_{t,\sigma>0}^\tau dt \ \sigma(\rho_1(0),\, \rho_2(0),\, t),
\end{equation}
becomes the figure-of-merit we explore in the subsequent discussion.
Finding an optimal pair of states that maximize the integral under consideration in Eq.~\eqref{BLP2} is made easier with the help of theorems proved in Ref.~\cite{Jyrki2012}, which state that an optimal pair of states must be orthogonal to each other and are restricted to the boundary of the state space. For qubit systems, this choice reduces to finding the optimal pair of pure, mutually orthogonal states that lie on the surface of the Bloch sphere. For all the examples discussed below, we have found the optimal pair of states by discretizing the surface of the Bloch sphere and evolving the antipodal states by the chosen dynamical map. The maximum of the sum of trace distances between evolved states over all the time intervals in $[0,\, \tau]$ for which $\sigma > 0$ is then divided by $\tau$ for determining the quantity of interest,
\begin{equation}
\mathcal{M}_{\tau}(\mathcal{E}_t) := \frac{\mathcal{N}(\mathcal{E}_t)}{\tau}.
\end{equation}
This rate of information inflow can be applied to any generic non-Markovian process.
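As an illustration of this procedure (ours, exploiting the diagonal form of the central spin map so that only the polar angle of the antipodal pair matters), the following sketch estimates $\mathcal{M}_{\tau}$ for the map in Eq.~\eqref{spin_map}, taking the period of the map as $\tau$:
\begin{verbatim}
# Illustrative estimate (ours) of M_tau for the central spin map of
# Eq. (spin_map), taking tau equal to the period of the map.  For a pair of
# antipodal pure states r and -r the trace distance is
# D(t) = sqrt(c(t)^2 (x^2 + y^2) + z^2) with c(t) = cos^N(omega t).
import numpy as np

N, A = 1, 0.5
omega = 2.0 * A / np.sqrt(N)
T = 2.0 * np.pi * np.sqrt(N) if N % 2 else np.pi * np.sqrt(N)   # period, used as tau

ts = np.linspace(0.0, T, 4001)
c = np.cos(omega * ts)**N

best = 0.0
for theta in np.linspace(0.0, np.pi, 61):       # polar angle of the pure state r
    D = np.sqrt(c**2 * np.sin(theta)**2 + np.cos(theta)**2)
    dD = np.diff(D)
    inflow = dD[dD > 0].sum()                   # total increase of D over [0, T]
    best = max(best, inflow)

print("M_tau ~", best / T,
      "  compare 1/(pi sqrt(N)) =", 1.0 / (np.pi * np.sqrt(N)))
\end{verbatim}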
The purpose of cutoff time $\tau$ is to identify the time limit by which a pre-determined amount of information flows into the system from its environment. The interval $\tau$ varies greatly depending on the required proximity to the initial state. This statement is equivalent to choosing an error tolerance $\epsilon > 0$ for comparing the similarity of the dynamical map at a later time $\mathcal{E}_{t}$ with the initial map $\mathcal{E}_{0} = \mathbb{I}$. It is well known that in finite dimensional state spaces, all norms are equivalent~\cite{Conway1985}. Without loss of generality, we employ the $\mathcal{L}^{1}$-norm for measuring the distance between the dynamical maps. In other words, we need the first occurrence of time $\tau_\epsilon$ for which $\|\mathcal{E}_{\tau_\epsilon} - \mathcal{E}_{0}\|_{1} =
\sum_{i,j}|(\mathcal{E}_{\tau_\epsilon})_{ij} - (\mathcal{E}_{0})_{ij}| \leq \epsilon$, where $i$ and $j$ denote row and column indices, respectively, and $(d\mathcal{E}_t/dt)|_{\tau_\epsilon} < 0$ so as to select only those times for which the map is returning. Choosing a sufficiently small tolerance typically leads to longer recurrence times. Although, for the purpose of comparing different non-Markovian processes belonging to the same family, the first occurrence of information inflow up to the prescribed tolerance level is sufficient, one might as well choose any such occurrence as long as comparisons are done on an equal footing.
We will demonstrate the discussion above using the example described in Eq.~\eqref{ex1}, namely, $\mathcal{E}_t = \operatorname{diag} (1, \, e^{-\gamma t}\cos(\omega t),\, e^{-\gamma t}\cos(\omega t),\, e^{-\gamma t})$.
Consider two such processes with $\omega_1 = 100, \, \omega_2 = 50$ and $\gamma_1 = \gamma_2 = 1 \equiv \gamma$. Any general non-Markovian process, especially one with non-Markovian decay, need not bring the dynamical map as close to the identity matrix as desired and thus the tolerance level for comparison must be carefully chosen. We can mitigate this problem by choosing the first local minimum for each of the processes as the respective tolerance limit and then choosing the maximum of the two to ensure that both processes witness the norm reaching the assigned limit. For the processes at hand, we fix a tolerance level of $\epsilon = 0.5$. We wish to find the time $\tau$ for which $\|\mathcal{E}_\tau - \mathbb{I}\|_1 \leq 0.5$. We determine that $\tau_{0.5}$ is $0.0568$ and $0.1169$ for the first and second processes, respectively, as is evident from Fig.~\ref{fig:non_periodic_case}. The quantity $\mathcal{M}_{\tau}^1$ for the first process turns out to be $30.1507$ and $\mathcal{M}_{\tau}^2$ is $14.3495$ for the second, which is consistent with the observation that the process with faster oscillations turns out to be more non-Markovian than the one with slower oscillations.
\begin{figure}
\caption{$\mathcal{L}^1$ norm for the dynamical map from Eq.~\eqref{ex1} for the initial time and intermediate time is plotted as a function of time. The tolerance level is fixed at 0.5. The first arrivals of information inflow to the required tolerance are denoted by $\tau_{0.5}^{1}$ and $\tau_{0.5}^{2}$ for different oscillation frequencies, respectively. The lesser time for the recurrence of information inflow indicates a higher degree of non-Markovianity. Note that time has the units of $1/\gamma$ in this figure.}
\label{fig:non_periodic_case}
\end{figure}
Defining a process-independent cutoff time $\tau$ for a non-periodic process is a challenging task. Hence, one may naively assign an infinite-time period for all such processes, allowing the BLP measure to also accumulate to infinity over an unbounded time interval. It is easy to see that $\mathcal{M}_{\tau}(\mathcal{E}_t)$ for any process is a bounded quantity. The key point is that the BLP measure is limited by the maximum difference in the trace distance for a pair of states and thus is always bounded by 1 for qubits. Since this increase in trace distance happens over a finite time, the proposed measure will always have a finite limiting value. However, it may not be straightforward to obtain the measure value in such cases.
For the spin model, choosing tolerance limits of $\epsilon = 10^{-2}$ and $10^{-3}$ leads to cutoff times $\tau = 6.184$ and $6.252$, respectively, both of which are close to $2\pi$. The corresponding measure values turn out to be $0.3226$ and $0.3198$. One may as well choose $\epsilon = 0$, indicating complete inflow of information, resulting in a cutoff time equal to the period of the process, which is $2\pi$. In addition to the generic procedure for finding the cutoff time $\tau$ for any process, periodic and quasi-periodic processes offer simpler ways of fixing it.
\textbf{Periodic Cases:}
All periodic processes repeat their dynamics after their respective time periods $T$ and thus naturally furnish a time $\tau$ until which the BLP measure must be calculated. The complete dynamics of the system is captured by the dynamical map $\mathcal{E}_t$ whose period shall then ensure that all the states on the Bloch sphere revisit their initial configuration corresponding to $t = 0$ exactly and any dynamics beyond this period is redundant for eliciting the degree of non-Markovian behavior. Note that multiple pairs of states might revisit their initial configurations even before one cycle of the dynamical map is complete. By choosing the period of the map we are insisting that all states return to their positions in state space. The initial configurations are typically ones in which there are no system-environment correlations, particularly if one considers only completely positive dynamical maps. Since all system states have reset their correlations, if any, with the environment at intervals defined by the period of the map, we can use $T$ as the upper limit of the integral in~Eq.~\eqref{BLP2}. The integral itself will have the same value if integrated over any interval of length $T$. The average rate of information inflow is then defined as,
\begin{equation}
\label{measure}
\mathcal{M}_\tau(\mathcal{E}_t) = \frac{1}{T} \max_{\rho_{1,2}(0)} \int\limits_{\substack{0 \\ \sigma > 0}}^{T} dt \, \sigma(\rho_1(0),\, \rho_2(0),\, t).
\end{equation}
We demonstrate the utility of Eq.~\eqref{measure} by applying it to the spin model described in Sec.~\ref{sec:spin_model}. Extension to other periodic cases is straightforward. For the spin model, the time period of the map depends on $N$. We find that $T = 2 \pi \sqrt{N}$ for odd $N$ and $\pi \sqrt{N}$ for even $N$ and $\sigma > 0$ in the interval $[\pi\sqrt{N}/2, \, \pi\sqrt{N}]$ for all $N$ and additionally in the interval $[3\pi\sqrt{N}/2, \, 2\pi\sqrt{N}]$ for odd $N$. The average rate of information inflow for this example is $\mathcal{M}_\tau(\mathcal{E}^{\text{spin}}_t) = 1/(\pi \sqrt{N})$. We see that $\mathcal{M}_\tau(\mathcal{E}^{\text{spin}}_t)$ is able to distinguish between central spin models with different number of bath spins and allow comparisons among them in terms of their degree of non-Markovianity. This is unlike the previously proposed measure of singular behavior from Ref.~\cite{Hou2012} where the value of the measure is $1/2$ irrespective of $N$. In our discussion of the dynamics of mutual information earlier, we noted that Markovian evolution is expected as $N \rightarrow \infty$. We see that as expected, $\mathcal{M}_\tau(\mathcal{E}^{\text{spin}}_t)$ converges to zero as $N$ becomes large as shown in Fig.~\ref{fig:blp_spin}, indicating Markovian limiting behavior.
We would like to highlight that the scaling constant directly affects the decay rate. Suppose the interaction strength in the Hamiltonian of the central spin model in Eq.~\eqref{spin_ham} is $B$. The corresponding average inflow rate from Eq.~\eqref{measure} is then proportional to $B$. In the discussion above, we have taken the interaction strength to be $A/\sqrt{N}$ with $A=1/2$.
\begin{figure}\label{fig:blp_spin}
\end{figure}
\textbf{Quasi-periodic cases:}
In what follows, we supplement our proposed modification of the BLP measure with an example in which we find $\tau$ even though the map is only approximately periodic. Consider the following 3-spin model with the Hamiltonian,
\begin{equation*}
H = \frac{1+\pi}{4} (\sigma_z \otimes \mathbb{I} \otimes \sigma_z) +\frac{1-\pi}{4} (\sigma_z \otimes \sigma_z \otimes \mathbb{I})
\end{equation*}
and the corresponding map describing the reduced dynamics of the first qubit, $\mathcal{E}_t = \operatorname{diag}(1,\, [\cos(t) + \cos(\pi t)]/2,\, [\cos(t) + \cos(\pi t)]/2,\, 1)$. Clearly, there does not exist a period for this map since the two frequencies that appear ($1$ and $\pi$ in this case) are incommensurate. We propose two different approaches for finding a suitable time $\tau$.
For all quasi-periodic processes, the general method can be understood as a corollary of the Poincar\'e recurrence theorem, which states that for any finite-dimensional system with a time-independent Hamiltonian there exist times $T$ at which the state vector $\ket{\psi(T)}$ returns arbitrarily close to the initial state $\ket{\psi(0)}$~\cite{Bocchieri1957}. Proceeding with the method for obtaining $\tau$ described earlier, we fix the error limit $\epsilon$ to $0.1$ for the three-spin example considered above, which results in $\tau_{0.1} = 5.92$. Integrating Eq.~\eqref{measure} over all the intervals up to $\tau_{0.1}$ in which the trace distance between a pair of states is increasing, we find the modified measure $\mathcal{M}_{\tau_{0.1}}$ to be $0.5204$. Similarly, $\tau_{0.01} = 43.95$ and $\tau_{0.001} = 43.98$ yield the measure values $0.5130$ and $0.5128$, respectively. Notably, these values are quite similar across the different tolerance levels. Since higher accuracy can only be achieved after longer times, the BLP measure value accumulated by the optimal pair of states also grows proportionally. Thus we conjecture that the measure values remain almost the same for smaller tolerance values as well.
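As an illustration of this numerical procedure (our own sketch, not the original code), the following minimal Python script estimates $\tau_\epsilon$ and the resulting average inflow rate for the map above. It assumes, for simplicity, that the optimal pair is an antipodal pair of equatorial states, so that their trace distance is $D(t)=|c(t)|$ with $c(t)=[\cos(t)+\cos(\pi t)]/2$, and that recurrence is declared once $D(t)$ returns within $\epsilon$ of its initial value $1$; the numbers it produces may therefore differ slightly from the values quoted in the text, which depend on the precise recurrence criterion used.

\begin{verbatim}
import numpy as np

def coherence_factor(t):
    # Decoherence factor of the reduced map E_t = diag(1, c(t), c(t), 1).
    return 0.5 * (np.cos(t) + np.cos(np.pi * t))

def average_inflow_rate(eps, t_max=100.0, dt=1e-4):
    """Estimate the cutoff time tau_eps and the average inflow rate,
    assuming (illustratively) that the optimal BLP pair is an antipodal
    equatorial pair with trace distance D(t) = |c(t)|, D(0) = 1."""
    t = np.arange(0.0, t_max, dt)
    D = np.abs(coherence_factor(t))
    # First time, after leaving the initial neighbourhood, at which D
    # returns within eps of its initial value D(0) = 1.
    left = np.argmax(D < 1.0 - eps)
    rec = left + np.argmax(D[left:] >= 1.0 - eps)
    tau = t[rec]
    # BLP-type integral: accumulate only the increases of D up to tau.
    dD = np.diff(D[: rec + 1])
    return tau, np.sum(dD[dD > 0.0]) / tau

if __name__ == "__main__":
    for eps in (1e-1, 1e-2, 1e-3):
        tau, rate = average_inflow_rate(eps)
        print(f"eps = {eps:g}: tau ~ {tau:.2f}, average inflow rate ~ {rate:.4f}")
\end{verbatim}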
The presence of quasi-periodicity allows us to adopt an alternative procedure, which is as simple as rationalizing the irrational frequencies that appear so that the resulting terms of the dynamical map have a well-defined common period. Since the rationals are dense in $\mathbb{R}$, we are always guaranteed to find a rational approximation of any irrational number to the needed accuracy. For the case at hand, choosing $\frac{22}{7}$ as the approximation of $\pi$ yields the period $14\pi$. The proposed measure $\mathcal{N}(\mathcal{E}_t)/\tau$ then has the value $0.5129$, which is close to the values obtained by the other method.
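To see where the value $14\pi$ comes from, note that with $\pi \approx 22/7$ the two oscillating terms have periods
\begin{equation*}
T_1 = 2\pi, \qquad T_2 = \frac{2\pi}{22/7} = \frac{7\pi}{11}, \qquad \text{so that} \qquad 14\pi = 7\,T_1 = 22\,T_2
\end{equation*}
is the smallest common period of the approximated map.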
In the sense of information inflow, we may conclude that certain processes are more non-Markovian than others, as quantified by the measure we introduced. In essence, the proposed addition to the BLP measure captures the differences in the degree of non-Markovianity between any two processes, as advertised.
\section{\label{sec:disc}Discussion and Conclusion}
State preparation or initialization of a quantum system is a ubiquitous and important step in practically all experiments exploring the quantum realm. Initialization is an important step in running any algorithm on a quantum information processor, and it is called for in most other applicable quantum technologies as well. Whether in the context of initializing an ensemble of identical quantum systems that are in different states into a common initial state, or in the context of driving a single quantum system in an arbitrary state deterministically into a specific initial state, the preparation device has to induce dynamics on the system that realize a many-to-one map of the kind we have discussed at length. During initialization, the quantum system of interest undergoes open quantum dynamics in contact with a preparation device that serves as its immediate environment. Our analysis shows that the preparation step can very well correspond to a singular point in the dynamics. Unless the strong assumption is made that after initialization the system and the preparation device are in a completely uncorrelated product state, further evolution of the system state may depend on the state from which the initialization process started. Note that, in fact, for all preparations to yield identical subsequent dynamics, the preparation device must return to the same quantum state after initialization, irrespective of the system state that was prepared.
In this paper we have explored in detail how such singular behavior in open quantum dynamics can be described mathematically using master equations with higher-order time derivatives. We see that such singular behavior may be much more common than previously imagined in the context of state preparations, lending added significance to our results. Our construction not only provides a means of propagating system states across the singular points of the normal first-order master equations, it also highlights the role that the environment can play in endowing various trajectories in state space that meet at the singular point with independent and distinct subsequent evolution. It may even be possible to observe subtle variations in subsequent trajectories of the same initial state in quantum process tomography experiments arising from differences in the starting point of state initialization and residual correlations that may exist between the system and state preparation device.
It is interesting to note that from the various examples we have considered wherein higher-order master equations turned out to be useful, there is no particular discernible pattern for the structure of such equations. While a detailed characterization of the families of higher-order equations that may appear is beyond the scope of the present work, one way of understanding the possible origin of this variety is the following. Since the trajectories of multiple states coincide at the singular points, it is safe to say that at these points, relevant information that determines the future of each trajectory no longer resides in the state of the system. Given that quantum information can lie delocalized across multiple subsystems, this information can either lie delocalized across the system and its environment, be contained entirely in the state of the environment or both. The trajectories separating again can then be attributed to this information flowing back. The information inflow need not always produce a change in the state of the system that is first-order in time. It may affect higher-order time derivatives due to the interplay between the system-environment dynamics and the flow of information from the environment and/or from the delocalized form back to the system state.
In a different context, this idea was presented in~\cite{Erika2012} where, using the Jaynes-Cummings model, an example was constructed in which two different system-environment interactions can lead to identical master equations but different trajectories for a qubit. The difference between the two Hamiltonians involves an instantaneous switch in the parameters that happens at a singular point in the dynamics such that the first-order master equation remains the same. However, two different solutions of the same master equation starting from the same initial state are obtained with and without the switch. From our point of view, the switch corresponds to rapidly changing the system-environment parameters exactly when the information that determines and differentiates subsequent dynamics of the qubit is not available in its state. The effect of the switch is to modify higher-order time derivatives of the system state, and so it does not appear in the first-order master equation. The trajectories followed by the same initial state with and without the switch are both solutions of the first-order equation, but looking at the overall evolution it is easy to distinguish the two, as expected.
The role of information inflow from the environment into the system that disambiguates trajectories after singular points in the context of the experimentally implementable central spin model led us to the question of non-Markovian behavior in such models. With the aim of comparing the degree of non-Markovianity across different instances of the central spin model, we introduced the typical time-scale for information inflow as a quantity that captures a different aspect of non-Markovian behavior compared to the standard approaches to quantifying such behavior. The average rate of information inflow introduced by combining this quantity with a well-established non-Markovianity measure helped us compare central spin models with different numbers of environment spins with regard to the degree of non-Markovianity in the evolution of the central spin. We also explored the limiting case of Markovian behavior that emerges when the number of environment spins becomes very large. We then showed that the notion of an average rate of information inflow can be extended to generic non-Markovian open evolution as well, and its applicability need not be limited to examples with singular behavior. We discussed these extensions for various types of non-Markovian dynamics possible for a single qubit.
\begin{acknowledgments}
The authors are thankful to Erika Andersson for directing attention to an important reference. Anil Shaji acknowledges the support of the SERB, Govt.~of India through grant no.~EMR/2016/007221 and the QuEST program of the Department of Science and Technology through project No.~Q113 under Theme~4. Jyrki Piilo acknowledges the support from Magnus Ehrnrooth Foundation. Vijay Pathak acknowledges the support of CSIR through fellowship (09/997(0040)/2015 EMR-I). Abhaya S. Hegde acknowledges the support of DST through INSPIRE fellowship (INSPIRE No. DST/INSPIRE-SHE/IISER-T/2008).
\end{acknowledgments}
\end{document} |
\begin{document}
\title{One-Shot Hybrid State Redistribution}
\author{Eyuri Wakakuwa} \email{[email protected]} \affiliation{Department of Communication Engineering and Informatics, Graduate School of Informatics and Engineering, The University of Electro-Communications, Tokyo 182-8585, Japan} \affiliation{Department of Computer Science, Graduate School of Information Science and Technology, The University of Tokyo, Bunkyo-ku, Tokyo 113-8656, Japan } \orcid{0000-0002-2445-2701} \author{Yoshifumi Nakata} \affiliation{Yukawa Institute for Theoretical Physics, Kyoto university, Kitashirakawa Oiwakecho, Sakyo-ku, Kyoto, 606-8502, Japan} \affiliation{Photon Science Center, Graduate School of Engineering, The University of Tokyo, Bunkyo-ku, Tokyo 113-8656, Japan } \affiliation{JST, PRESTO, 4-1-8 Honcho, Kawaguchi, Saitama, 332-0012, Japan} \email{[email protected]} \orcid{0000-0003-0290-4698} \author{Min-Hsiu Hsieh} \email{[email protected]} \affiliation{Centre for Quantum Software \& Information (UTS:QSI), University of Technology Sydney, Sydney NSW, Australia} \affiliation{Hon Hai (Foxconn) Research Institute, Taipei, Taiwan} \orcid{0000-0003-1985-4623} \maketitle
\begin{abstract}
We consider state redistribution of a ``hybrid'' information source that has both classical and quantum components. The sender transmits classical and quantum information at the same time to the receiver, in the presence of classical and quantum side information both at the sender and at the receiver. The available resources are shared entanglement, and noiseless classical and quantum communication channels. We derive one-shot direct and converse bounds for these three resources, represented in terms of the smooth conditional entropies of the source state. Various coding theorems for two-party source coding problems are systematically obtained by reduction from our results, including ones that have not been addressed in the previous literature. \end{abstract}
\begin{figure*}
\caption{The task of state redistribution for the classical-quantum hybrid source is depicted. The black dots and the circles represent classical and quantum parts of the information source, respectively. The wavy line represents the entanglement resource. }
\label{fig:A}
\end{figure*}
\section{Introduction}
Quantum state redistribution is a task in which the sender aims at transmitting quantum states to the receiver, in the presence of quantum side information both at the sender and at the receiver. The costs of quantum communication and entanglement required for state redistribution have been analyzed in \cite{yard2009optimal,devetak2008exact,ming08} for the asymptotic scenario of infinitely many copies and vanishingly small error, and in \cite{berta2016smooth,1409.4352,anshu2017one} for the one-shot scenario. Various coding theorems for two-party quantum source coding problems are obtained by reduction from these results as special cases, such as the Schumacher compression \cite{schumacher95}, quantum state merging \cite{horo07} and the fully-quantum Slepian-Wolf \cite{ADHW2009, datta2011apex}. However, some of the well-known coding theorems cannot be obtained from those results, such as the (fully-classical) Slepian-Wolf (see e.g.~\cite{cover05}) and the classical data compression with quantum side information \cite{devetak2003classical}. This is because the results in \cite{yard2009optimal,devetak2008exact,ming08,berta2016smooth} only cover the fully quantum scenario, in which the information to be transmitted and the available resources are both quantum.
In this paper, we generalize the one-shot state redistribution theorem in \cite{berta2016smooth} to a ``hybrid'' situation. That is, we consider the task of state redistribution in which the information to be transmitted and the side information at the parties have both classical and quantum components. Not only quantum communication and shared entanglement, but also classical communication is available as a resource. Our goal is to derive trade-off relations among the costs of the three resources required for achieving the task within a small error. Our main result consists of direct and converse bounds for a rate triplet to be achievable, in terms of the smooth conditional entropies of the source state and the error tolerance. For most of the special cases that have been analyzed in the previous literature, the two bounds match in the asymptotic limit of infinitely many copies and vanishingly small error, providing the full characterization of the achievable rate region. Our result can be viewed as a one-shot generalization of the classically-assisted state redistribution protocol, proposed in \cite{min08}.
One-shot coding theorems for most redistribution-type protocols, not only for quantum or classical information sources but also for hybrid ones, are systematically obtained from our result by reduction. In this sense, our result completes the one-shot capacity theorems of the redistribution-type protocols in a standard setting. As examples, we show that the coding theorems for fully quantum state redistribution, the fully quantum Slepian-Wolf, quantum state splitting, quantum state merging, classical data compression with quantum side information, quantum data compression with classical side information, the fully classical Slepian-Wolf, and quantum state redistribution with classical side information only at the decoder \cite{anshu2018noisy} can be recovered. The last one would further lead to the family of quantum protocols in the presence of classical side information only at the decoder, along the same lines as the one without classical side information \cite{ADHW2009,devetak2004family}. In addition, our result also covers some redistribution-type protocols that have not been addressed in the previous literature.
We note that the cost of resources in the hybrid redistribution-type protocols cannot be fully analyzed by simply plugging the hybrid source and the hybrid channel into the fully quantum setting. This is because interconversion of classical and quantum communication channels requires the use of entanglement resource, which is not allowed e.g. in the fully classical scenario.
This paper is organized as follows. In \rSec{prelimi}, we introduce notations and definitions that will be used throughout this paper. In \rSec{mainresults}, we provide the formulation of the problem and present the main results. The results are applied in \rSec{specialcases} to special cases, and compared with the results in the previous literature. The proofs of the direct part and the converse part are provided in \rSec{direct} and \rSec{converse}, respectively. Conclusions are given in \rSec{conclusion}. The properties of the smooth entropies used in the proofs are summarized in \rApp{propSmEn}.
\section{Preliminaries} \lsec{prelimi}
We summarize notations and definitions that will be used throughout this paper.
\subsection{Notations}
We denote the set of linear operators on a Hilbert space $\ca{H}$ by $\ca{L}(\ca{H})$. For normalized density operators and sub-normalized density operators, we use the following notations, respectively: \begin{align} & \ca{S}_=(\ca{H}) = \{\rho \in \ca{L}(\ca{H}) : \rho \geq 0, \mathrm{Tr} [\rho]=1 \}, \\ & \ca{S}_{\leq}(\ca{H}) = \{\rho \in \ca{L}(\ca{H}) : \rho \geq 0, \mathrm{Tr} [\rho] \leq 1 \}. \end{align} A Hilbert space associated with a quantum system $A$ is denoted by ${\mathcal H}^A$, and its dimension is denoted by $d_A$. A system composed of two subsystems $A$ and $B$ is denoted by $AB$. When $M$ and $N$ are linear operators on ${\mathcal H}^A$ and ${\mathcal H}^B$, respectively, we denote $M\otimes N$ as $M^A\otimes N^B$ for clarity.
In the case of pure states, we abbreviate $|\psi\rangle^A\otimes|\phi\rangle^B$ as $|\psi\rangle^A|\phi\rangle^B$.
We denote $|\psi\rangle\!\langle\psi|$ simply by $\psi$.
For $\rho^{AB} \in \ca{L}(\ca{H}^{AB})$, $\rho^{A}$ represents ${\rm Tr}_B[\rho^{AB}]$. The identity operator is denoted by $I$. We denote $(M^A\otimes I^B)\ket{\psi}^{AB}$ as $M^A\ket{\psi}^{AB}$ and $(M^A\otimes I^B)\rho^{AB}(M^A\otimes I^B)^{\dagger}$ as $M^A\rho^{AB}M^{A\dagger}$.
When ${\mathcal E}$ is a supermap from $\ca{L}(\ca{H}^{A})$ to $\ca{L}(\ca{H}^{B})$, we denote it by $\ca{E}^{A \rightarrow B}$. When $A = B$, we use $\ca{E}^{A}$ for short. We also denote $({\mathcal E}^{A \rightarrow B} \otimes{\rm id}^C)(\rho^{AC})$ by ${\mathcal E}^{A \rightarrow B} (\rho^{AC})$. When a supermap is given by a conjugation of a unitary $U^A$ or a linear operator $W^{A \rightarrow B}$, we especially denote it by its calligraphic font such as $ \ca{U}^{A}(X^A):= (U^{A }) X^A (U^{A })^{\dagger} $ and $ \ca{W}^{A \rightarrow B}(X^A):= (W^{A \rightarrow B}) X^A (W^{A \rightarrow B})^{\dagger} $.
The maximally entangled state between $A$ and $A'$, where $\ca{H}^{A} \cong \ca{H}^{A'}$, is defined by \alg{ \ket{\Phi}^{AA'}:=\frac{1}{\sqrt{d_A}}\sum_{\alpha=1}^{d_A}\ket{\alpha}^A\ket{\alpha}^{A'} } with respect to a fixed orthonormal basis $\{\ket{\alpha}\}_{\alpha=1}^{d_A}$. The maximally mixed state on $A$ is defined by $\pi^A:=I^A/d_A$.
For any linear CP map $\ca{T}^{A\rightarrow B}$, there exists a finite dimensional quantum system $E$ and a linear operator $W_{\ca{T}}^{A\rightarrow BE}$ such that $\ca{T}^{A\rightarrow B}(\cdot)={\rm Tr}_E[W_{\ca{T}}(\cdot)W_{\ca{T}}^\dagger]$. The operator $W_{\ca{T}}$ is called a Stinespring dilation of $\ca{T}^{A\rightarrow B}$ \cite{stinespring1955positive}, and the linear CP map defined by ${\rm Tr}_B[W_{\ca{T}}(\cdot)W_{\ca{T}}^\dagger]$ is called a {\it complementary map} of $\ca{T}^{A\rightarrow B}$. With a slight abuse of notation, we denote the complementary map by $\ca{T}^{A\rightarrow E}$.
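As a concrete numerical illustration of these definitions (our own sketch, not part of the original text), the following minimal Python script builds a Stinespring dilation of the single-qubit amplitude-damping channel from its Kraus operators and evaluates both the channel and its complementary map; the choice of channel, the damping parameter, and all variable names are ours.

\begin{verbatim}
import numpy as np

# Kraus operators of the single-qubit amplitude-damping channel (damping gamma).
gamma = 0.3
K = [np.array([[1, 0], [0, np.sqrt(1 - gamma)]], dtype=complex),
     np.array([[0, np.sqrt(gamma)], [0, 0]], dtype=complex)]

dA, dB, dE = 2, 2, len(K)

# Stinespring isometry W : H^A -> H^B (x) H^E,  W|psi> = sum_k (K_k|psi>)(x)|k>^E.
# Output index convention: row (b, e) -> b*dE + e.
W = np.zeros((dB * dE, dA), dtype=complex)
for e, Ke in enumerate(K):
    for b in range(dB):
        W[b * dE + e, :] = Ke[b, :]

assert np.allclose(W.conj().T @ W, np.eye(dA))       # W is an isometry

rho = np.array([[0.6, 0.2 - 0.1j], [0.2 + 0.1j, 0.4]])  # arbitrary input state

out = (W @ rho @ W.conj().T).reshape(dB, dE, dB, dE)    # state on B (x) E
channel_out = np.einsum('aebe->ab', out)                # Tr_E: the channel T(rho)
complementary_out = np.einsum('aeaf->ef', out)          # Tr_B: complementary map

assert np.allclose(channel_out, sum(Ke @ rho @ Ke.conj().T for Ke in K))
print(np.round(channel_out, 4))
print(np.round(complementary_out, 4))
\end{verbatim}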
\subsection{Norms and Distances}
For a linear operator $X$, the trace norm is defined as $|\! | X |\! |_1 = \mathrm{Tr}[ \sqrt{X^{\dagger}X}]$.
For subnormalized states $\rho,\sigma\in\ca{S}_\leq(\ca{H})$, the trace distance is defined by $\|\rho-\sigma\|_1$. The generalized fidelity and the purified distance are defined by \alg{ \bar{F}(\rho,\sigma) :=
\|\sqrt{\rho}\sqrt{\sigma}\|_1 + \sqrt{(1-{\rm Tr}[\rho])(1-{\rm Tr}[\sigma])} } and \alg{ P(\rho,\sigma) := \sqrt{1-\bar{F}(\rho,\sigma)^2}, \laeq{dfnPD} } respectively (see Lemma 3 in \cite{tomamichel2010duality}). The trace distance and the purified distance are related as
\alg{
\frac{1}{2}\|\rho-\sigma\|_1 \leq P(\rho,\sigma) \leq
\sqrt{2\|\rho-\sigma\|_1} \laeq{relTDPD} } for any $\rho,\sigma\in\ca{S}_\leq(\ca{H})$. The epsilon ball of a subnormalized state $\rho\in\ca{S}_\leq(\ca{H})$ is defined by \begin{align}
\ca{B}^\epsilon(\rho):=\{\tau\in\ca{S}_\leq(\ca{H})|\:P(\rho,\tau)\leq\epsilon\}. \label{eq:epsilon} \end{align}
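The following minimal Python sketch (ours, purely for illustration) evaluates the trace distance, the generalized fidelity and the purified distance for a pair of single-qubit states, one of them subnormalized, and checks the relation \req{relTDPD} numerically; the two test states are arbitrary choices.

\begin{verbatim}
import numpy as np
from scipy.linalg import sqrtm

def trace_norm(X):
    # ||X||_1 = Tr sqrt(X^dagger X) = sum of singular values of X.
    return np.sum(np.linalg.svd(X, compute_uv=False))

def generalized_fidelity(rho, sigma):
    # F_bar = ||sqrt(rho) sqrt(sigma)||_1 + sqrt((1 - Tr rho)(1 - Tr sigma)),
    # valid for subnormalized states.
    overlap = trace_norm(sqrtm(rho) @ sqrtm(sigma))
    slack = np.sqrt(max(0.0, 1 - np.trace(rho).real)
                    * max(0.0, 1 - np.trace(sigma).real))
    return overlap + slack

def purified_distance(rho, sigma):
    return np.sqrt(max(0.0, 1.0 - generalized_fidelity(rho, sigma) ** 2))

rho = np.array([[0.7, 0.2], [0.2, 0.3]], dtype=complex)                # normalized
sigma = 0.9 * np.array([[0.5, -0.1j], [0.1j, 0.5]], dtype=complex)     # subnormalized

tn = trace_norm(rho - sigma)
pd = purified_distance(rho, sigma)
print(f"(1/2)||rho - sigma||_1 = {0.5 * tn:.4f}")
print(f"P(rho, sigma)          = {pd:.4f}")
# Check  (1/2)||rho - sigma||_1  <=  P(rho, sigma)  <=  sqrt(2 ||rho - sigma||_1).
print(0.5 * tn <= pd <= np.sqrt(2 * tn))
\end{verbatim}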
\begin{figure*}
\caption{ The task of state redistribution for the classical-quantum hybrid source is depicted in the diagram. The black lines and the dashed lines represent classical and quantum systems, respectively. }
\label{fig:B}
\end{figure*}
\subsection{One-Shot Entropies}
For any subnormalized state $\rho\in\ca{S}_\leq(\ca{H}^{AB})$ and normalized state $\varsigma\in\ca{S}_=(\ca{H}^{B})$, define \alg{
H_{\rm min}(A|B)_{\rho|\varsigma}
:= \sup \{ \lambda \in \mathbb{R}| 2^{-\lambda} I^A \otimes \varsigma^B \geq \rho^{AB} \} } and \alg{
H_{\rm max}(A|B)_{\rho|\varsigma} := \log{\|\sqrt{\rho^{AB}}\sqrt{I^A\otimes\varsigma^B}\|_1^2}. }
The conditional min- and max- entropies (see e.g.~\cite{T16}) are defined by \begin{align}
H_{\rm min}(A|B)_{\rho}& := \sup_{\sigma^B \in \ca{S}_=(\ca{H}^B)}H_{\rm min}(A|B)_{\rho|\sigma}, \\
H_{\rm max}(A|B)_{\rho}& := \sup_{\sigma^B \in \ca{S}_=(\ca{H}^B)}H_{\rm max}(A|B)_{\rho|\sigma}, \end{align} and the smoothed versions thereof are given by \begin{align}
H_{\rm min}^\epsilon(A|B)_{\rho}& := \sup_{\hat{\rho}^{AB} \in \ca{B}^\epsilon(\rho)}H_{\rm min}(A|B)_{\hat\rho}, \laeq{dfnmine}\\
H_{\rm max}^\epsilon(A|B)_{\rho}& := \inf_{\hat{\rho}^{AB} \in \ca{B}^\epsilon(\rho)}H_{\rm max}(A|B)_{\hat\rho} \laeq{dfnmaxe} \end{align} for $\epsilon\geq0$. In the case where $B$ is a trivial (one-dimensional) system, we simply denote them as $H_{\rm min}^\epsilon(A)_{\rho}$ and $H_{\rm max}^\epsilon(A)_{\rho}$, respectively. We define \alg{ &
H_{*}^{(\iota,\kappa)}(A|B)_{\rho} \nonumber\\ &\quad\quad :=
\max\{H_{\rm min}^{\iota}(A|B)_{\rho}, H_{\rm max}^{\kappa}(A|B)_{\rho}\} \laeq{dfnHstar} } and \alg{ &
\tilde{I}_{\rm min}^{\epsilon}(A:C|B)_{\rho} \nonumber\\ &\quad\quad :=
H_{\rm min}^{\epsilon}(A|B)_{\rho}
-H_{\rm min}^{\epsilon}(A|BC)_{\rho}. \laeq{dfnmimMI} } We will refer to \req{dfnmimMI} as the {\it smooth conditional min mutual information}. For $\tau\in\ca{S}(\ca{H}^A)$, we also use the ``max entropy'' in the version of \cite{renner2008security} (see Section 3.1.1 therein). Taking the smoothing into account, it is defined by \alg{ H_{\rm max'}^\epsilon(A)_\tau := \inf_{\Pi:{\rm Tr}[\Pi\tau]\geq1-\epsilon} \log{{\rm rank}[\Pi]}, \laeq{dfnHrank} } where the infimum is taken over all projections $\Pi$ such that ${\rm Tr}[\Pi\tau]\geq1-\epsilon$. The von Neumann entropies and the quantum mutual information are defined by \alg{ H(A)_\rho & := -{\rm Tr}[\rho^{A}\log{\rho^{A}}], \\
H(A|B)_\rho &:=H(AB)_\rho-H(B)_\rho, \\ I(A:B)_\rho
&:=H(A)_\rho-H(A|B)_\rho. } The properties of the smooth conditional entropies used in this paper are summarized in \rApp{propSmEn}.
\section{Formulation and Results} \lsec{mainresults}
Consider a classical-quantum source state in the form of \alg{ & \Psi_s^{ABCRXYZX'Y'Z'} := \nonumber\\ & \quad \sum_{x,y,z}p_{xyz} \proj{x}^X\otimes\proj{y}^Y\otimes\proj{z}^Z \quad\quad\quad\quad \nonumber \\ &\quad\quad \otimes\proj{\psi_{xyz}}^{ABCR}\otimes\proj{xyz}^{X'Y'Z'}. \!\! \laeq{sourcestate} } Here, $\{p_{xyz}\}_{x,y,z}$ is a probability distribution, $\ket{\psi_{xyz}}$ are pure states, and $\{\ket{x}\}_x$, $\{\ket{y}\}_y$, $\{\ket{z}\}_z$, $\{\ket{xyz}\}_{x,y,z}$ are orthonormal bases. The systems $X'$, $Y'$ and $Z'$ are assumed to be isomorphic to $X$, $Y$ and $Z$, respectively. For the simplicity of notations, we denote $AX$, $BY$, $CZ$, $X'Y'Z'$ and $RX'Y'Z'$ by $\hat{A}$, $\hat{B}$, $\hat{C}$, $T$ and $\hat{R}$, respectively. Accordingly, we also denote the source state by $\Psi_s^{\hat{A}\hat{B}\hat{C}\hat{R}}$.
We consider a task in which the sender transmits $\hat{C}$ to the receiver (see Figure \ref{fig:A} and \ref{fig:B}). The sender and the receiver have access to systems $\hat{A}$ and $\hat{B}$, respectively, as side information.
The system $\hat{R}$ is the reference system that is inaccessible to the sender and the receiver. The available resources for the task are the one-way noiseless classical and quantum channels from the sender to the receiver, and an entangled state shared in advance between the sender and the receiver. We describe the communication resources by a quantum system $Q$ with dimension $2^q$ and a ``classical'' system $M$ with dimension $2^c$. The entanglement resources shared between the sender and the receiver, before and after the protocol, are given by the maximally entangled states $\Phi_{2^{e+e_0}}^{E_AE_B}$ and $\Phi_{2^{e_0}}^{F_AF_B}$
with Schmidt rank $2^{e+e_0}$ and $2^{e_0}$, respectively.
\bdfn{}
A tuple $(c,q,e,e_0)$ is said to be achievable within an error $\delta$ for $\Psi_s$, if there exists a pair of an encoding CPTP map $\ca{E}^{\hat{A}\hat{C}E_A\rightarrow \hat{A}QMF_A}$ and a decoding CPTP map $\ca{D}^{\hat{B}QME_B\rightarrow \hat{B}\hat{C}F_B}$, such that \begin{eqnarray} \!
\left\| \ca{D}\circ\ca{E}(\Psi_s^{\hat{A}\hat{B}\hat{C}\hat{R}}\!\otimes\!\Phi_{2^{e+e_0}}^{E_AE_B})\!-\!\Psi_s^{\hat{A}\hat{B}\hat{C}\hat{R}}\!\otimes\!\Phi_{2^{e_0}}^{F_AF_B}
\right\|_1 \nonumber\\ \leq \delta. \quad\quad \laeq{qeec} \end{eqnarray} \end{dfn}
\noindent Note that, since $M$ is a classical message, the encoding CPTP map $\ca{E}$ must be such that for any input state $\tau$, the output state $\ca{E}^{\hat{A}\hat{C}E_A\rightarrow \hat{A}QMF_A}(\tau)$ is diagonal in $M$ with respect to a fixed orthonormal basis. Note also that we implicitly assume that $c,q,e_0\geq0$, while the net entanglement cost $e$ can be negative.
Our goal is to obtain necessary and sufficient conditions for a tuple $(c,q,e,e_0)$ to be achievable within the error $\delta$ for a given source state $\Psi_s$. The direct and converse bounds are given by the following theorems:
\bthm{direct}{\bf(Direct part.)} A tuple $(c,q,e,e_0)$ is achievable within an error $4\sqrt{12\epsilon+6\delta}+\sqrt{2}\epsilon$ for $\Psi_s$ if $d_C\geq2$ and it holds that \alg{ c+2q &\geq \max\{\tilde{H}_{I}^{(3\epsilon/2,\epsilon/2)},\tilde{H}_{I\! I}^{(\epsilon/2)}\} \nonumber\\ &\quad\quad\quad\quad\quad\quad\quad -\log{(\delta^4/2)}, \laeq{neon00t} \\ c+q+e
&\geq H_{\rm max}^{\epsilon/2}(CZ|BY)_{\Psi_s}-\log{(\delta^2/2)}, \laeq{neon03t} \\ q+e
&\geq H_{\rm max}^{\epsilon/2}(C|BXYZ)_{\Psi_s}-\log{\delta^2}, \laeq{neon04t} \\ e_0 &\geq
\frac{1}{2}(H_{\rm max'}^{\epsilon^2/8}(C)_{\Psi_s}-H_{\rm max}^{3\epsilon/2}(C|BXYZ)_{\Psi_s}) \nonumber\\ &\quad\quad\quad\quad\quad\quad\quad\quad +\log{\delta}, \laeq{neon05t} } where \alg{ \tilde{H}_{I}^{(\iota,\kappa)} := &
H_{*}^{(\iota,\kappa)}(C|AXYZ)_{\Psi_s} \nonumber\\
&\quad+H_{\rm max}^{\kappa}(CZ|BY)_{\Psi_s}, \\ \tilde{H}_{I\! I}^{(\iota)} := &
H_{\rm max}^\iota(C|AXZ)_{\Psi_s} \nonumber\\ &\quad
+H_{\rm max}^\iota(C|BXYZ)_{\Psi_s} } and $H_{*}^{(\iota,\kappa)}$ is defined by \req{dfnHstar}.
In the case where $d_C=1$, a tuple $(c,0,0,0)$ is achievable for $\Psi_s$ within the error $\delta$ if it holds that \alg{
c\geq H_{\rm max}^\epsilon(Z|BY)_{\Psi_s} -\log{\frac{\delta^2}{2}}. } \end{thm}
\bthm{converse}{\bf(Converse part.)} Suppose that a tuple $(c,q,e,e_0)$ is achievable within the error $\delta$ for $\Psi_s$. Then, regardless of the value of $e_0$, it holds that \alg{ c+2q &\geq \max\{\tilde{H}_{I}'^{(\epsilon,\delta)},\tilde{H}_{I\! I}'^{(\epsilon,\delta)}\!-\!\Delta^{(\epsilon,\delta)}\} \!-\!6f(\epsilon), \laeq{convv00}\\ c+q+e & \geq H_{\rm min}^{\epsilon}(BYCZ)_{\Psi_s} \nonumber\\ & \;\quad- H_{\rm min}^{12\epsilon+6\sqrt{\delta}}(BY)_{\Psi_s}-f(\epsilon), \laeq{convv03}\\ q+e &
\geq H_{\rm min}^{\epsilon}(BC|XYZ)_{\Psi_s} \nonumber\\ &\quad\;
-\!H_{\rm min}^{11\epsilon+8\sqrt{\delta}}(B|XYZ)_{\Psi_s}\!-\!2f(\epsilon) \!\!\! \laeq{convv04} } for any $\epsilon>0$. Here, $ f(x):=-\log{(1-\sqrt{1-x^2})} $, \alg{ \tilde{H}_{I}'^{(\epsilon,\delta)} :=&
H_{\rm min}^{\epsilon}(AC|XYZ)_{\Psi_s} \nonumber\\ &\quad -
H_{\rm max}^{\epsilon}(A|XYZ)_{\Psi_s} \nonumber\\ & \quad\quad + H_{\rm min}^{\epsilon}(BYCZ)_{\Psi_s} \nonumber\\ &\quad\quad\quad - H_{\rm min}^{12\epsilon+6\sqrt{\delta}}(BY)_{\Psi_s}, \\ \tilde{H}_{I\! I}'^{(\epsilon,\delta)} :=& H_{\rm min}^{\epsilon}(AXCZ)_{\Psi_s} \nonumber\\ &\quad - H_{\rm max}^{\epsilon}(AXZ)_{\Psi_s} \nonumber\\ &\quad\quad +
H_{\rm min}^{\epsilon}(BC|XYZ)_{\Psi_s} \nonumber\\ &\quad\quad\quad
-H_{\rm min}^{11\epsilon+8\sqrt{\delta}}(B|XYZ)_{\Psi_s} } and \alg{ \Delta^{(\epsilon,\delta)} :=
\sup_{\ca{F}}\tilde{I}_{\rm min}^{7\epsilon+4\sqrt{\delta}}(G_A:Y'|M_AAX'Z')_{\ca{F}(\Psi_s)}. \laeq{dfnDeltaed} } The supremum in \req{dfnDeltaed} is taken over all CPTP maps $\ca{F}:\hat{A}\hat{C}\rightarrow AG_AM_A$ such that $\ca{F}(\tau)$ is diagonal in $M_A$ with a fixed orthonormal basis for any $\tau\in\ca{S}(\ca{H}^{\hat{A}\hat{C}})$, and \begin{eqnarray} \!\! \inf_{\{\omega_{xyz}\}} \! P \! \left( \! \ca{F}(\Psi_s^{\hat{A}\hat{C}\hat{R}}), \sum_{x,y,z}p_{xyz} \psi_{xyz}^{A\hat{R}} \otimes \omega_{xyz}^{G_AM_A} \! \right) \nonumber\\ \leq 2\sqrt{\delta}, \quad\quad \laeq{conditionaldecoupling} \end{eqnarray} where we informally denoted $\psi_{xyz}^{AR}\otimes\proj{xyz}^T$ by $\psi_{xyz}^{A\hat{R}}$. \end{thm}
\noindent The proofs of \rThm{direct} and \rThm{converse} will be provided in \rSec{direct} and \rSec{converse}, respectively.
We also consider an asymptotic scenario of infinitely many copies and vanishingly small error. A rate triplet $(c,q,e)$ is said to be {\it asymptotically achievable} if, for any $\delta>0$ and sufficiently large $n\in\mbb{N}$, there exists $e_0\geq0$ such that the tuple $(nc,nq,ne,ne_0)$ is achievable within the error $\delta$ for the one-shot redistribution of the state $\Psi_s^{\otimes n}$. The achievable rate region is defined as the closure of the set of achievable rate triplets. The following theorem provides a characterization of the achievable rate region:
\bthm{asymptotic}{\bf(Asymptotic limit.)} In the asymptotic limit of infinitely many copies and vanishingly small error, the inner and outer bounds for the achievable rate region are given by \alg{ c+2q &\geq \max\{\tilde{H}_{I},\tilde{H}_{I\!I}\}, \laeq{neon1-2} \\ c+q+e
&\geq H(CZ|BY)_{\Psi_s}, \laeq{neon3-2} \\ q+e
&\geq H(C|BXYZ)_{\Psi_s}, \laeq{neon4-2} \\ e_0 & \geq \frac{1}{2}I(C:BXYZ)_{\Psi_s} \laeq{neon5-2} } and \alg{ c+2q &\geq \max\{\tilde{H}_{I},\tilde{H}_{I\!I}-\tilde{\Delta}\}, \laeq{neon1-2o} \\ c+q+e
&\geq H(CZ|BY)_{\Psi_s}, \laeq{neon3-2o} \\ q+e
&\geq H(C|BXYZ)_{\Psi_s}, \laeq{neon4-2o} } respectively. Here, \alg{ & \tilde{H}_{I}
:=H(C|AXYZ)_{\Psi_s}+H(CZ|BY)_{\Psi_s}, \laeq{katatteru1} \\ & \tilde{H}_{I\! I} :=
H(C|AXZ)_{\Psi_s}+H(C|BXYZ)_{\Psi_s} \laeq{katatteru2} } and \alg{ \tilde{\Delta} := \lim_{\delta\rightarrow0}\lim_{n\rightarrow\infty}\frac{1}{n}\Delta^{(\epsilon,\delta)}(\Psi_s^{\otimes n}), \laeq{dfntildeDelta} } where $\Delta^{(\epsilon,\delta)}$ is defined in \rThm{converse}. \end{thm}
\rThm{asymptotic} immediately follows from the one-shot direct and converse bounds (\rThm{direct} and \rThm{converse}). This is due to the fully-quantum asymptotic equipartition property \cite{tomamichel2009fully},
which implies that the smooth conditional entropies are equal to the von Neumann conditional entropy in the asymptotic limit of infinitely many copies. That is, for any $\rho\in\ca{S}_=(\ca{H}^{PQ})$ and $\epsilon>0$, it holds that \alg{ & \lim_{n\rightarrow\infty}
\frac{1}{n}H_{\rm min}^\epsilon(P^n|Q^n)_{\rho^{\otimes n}} \nonumber\\ &= \lim_{n\rightarrow\infty}
\frac{1}{n}H_{\rm max}^\epsilon(P^n|Q^n)_{\rho^{\otimes n}} \\ &=
H(P|Q)_{\rho}. \laeq{FQAEP} } A simple calculation using this relation and the chain rule of the conditional entropy implies that the R.H.S.s of \req{neon00t}-\req{neon05t} and \req{convv00}-\req{convv04} coincide with those of \req{neon1-2}-\req{neon5-2} and \req{neon1-2o}-\req{neon4-2o}, respectively, in the asymptotic limit of infinitely many copies.
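For instance (a sketch of one such step, added for the reader's convenience), applying \req{FQAEP} to $n$ copies of the source state in \req{neon03t} gives
\begin{equation*}
\lim_{n\rightarrow\infty}\frac{1}{n}H_{\rm max}^{\epsilon/2}\big((CZ)^n\big|(BY)^n\big)_{\Psi_s^{\otimes n}} = H(CZ|BY)_{\Psi_s},
\end{equation*}
while the additive correction $-\log(\delta^2/2)$ vanishes after division by $n$, which reproduces \req{neon3-2}.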
Due to the existence of the term $\tilde{\Delta}$ in Inequality \req{neon1-2o}, the direct and converse bounds in \rThm{asymptotic} do not match in general. In many cases, however, it holds that $\tilde{\Delta}=0$ and thus the two bounds match. This is due to the following lemma about the properties of $\Delta^{(\epsilon,\delta)}$:
\blmm{propDelta} The quantity $\Delta^{(\epsilon,\delta)}$ defined in \rThm{converse} is nonnegative, and is equal to zero if there is no classical side information at the decoder (i.e. ${\rm dim}Y={\rm dim}Y'=1$) or if there is neither quantum message nor quantum side information at the encoder (i.e. ${\rm dim}A={\rm dim}C=1$). The quantity $\tilde{\Delta}$ satisfies the same property due to the definition \req{dfntildeDelta}. \end{lmm}
\noindent A proof of \rLmm{propDelta} will be provided in \rSec{propDelta}. Clarifying the general condition under which $\tilde{\Delta}=0$ holds is left as an open problem.
\begin{rmk} The results presented in this section are applicable to the case where the sender and the receiver can make use of the resource of classical shared randomness. To this end, it is only necessary to incorporate the classical shared randomness as a part of classical side information $X$ and $Y$. \end{rmk}
\begin{figure*}
\caption{ The relations among the special cases of the communication scenario analyzed in \rSec{specialcases} are depicted. ``SI'' and ``SI-D'' stand for ``side information'' and ``side information at the decoder'', respectively. See Table \ref{tb:notations} below for the notations. }
\label{fig:specialcases}
\end{figure*}
\begin{table*}[t] \renewcommand{\arraystretch}{1.5}
\begin{center}
\begin{tabular}{|c|c|c|c|c|c|} \hline
& \multicolumn{3}{c}{information source} & \multicolumn{2}{|c|}{available resources} \\\cline{2-6}
& { \begin{tabular}{c} \!\!\!\!\!\!side information\!\!\!\!\!\! \\[-2mm] \!\!\!\!\!\!\!\!at the encoder\!\!\!\!\!\!\!\! \end{tabular} }
& { \begin{tabular}{c} \!\!\!\!\!\!side information\!\!\!\!\!\! \\[-2mm] \!\!\!\!\!\!\!\!at the decoder\!\!\!\!\!\!\!\! \end{tabular} }
& { \begin{tabular}{c} \!\!\!\!\!\!\!\!information\!\!\!\!\!\!\!\! \\[-2mm] \!\!\!\!\!\!to be transmitted\!\!\!\!\!\! \end{tabular} } & communication & { \begin{tabular}{c} shared \\[-2mm] correlation \end{tabular} } \\ \hline
quantum & $A$ & $B$ & $C$ & $q$ & $e$ \\\hline
classical & $X$ & $Y$ & $Z$ & $c$ &- \\\hline
\end{tabular}
\end{center}
\caption{Notations for the classical and quantum parts of the information source and for the available resources.}
\label{tb:notations} \end{table*}
\section{Reduction to Special Cases} \lsec{specialcases}
In this section, we apply the results presented in \rSec{mainresults} to special cases of source coding (see Figure \ref{fig:specialcases} on the next page). In principle, the results cover all special cases where some of the components $A$, $B$, $C$, $X$, $Y$ or $Z$ are assumed to be one-dimensional, and where $c$, $q$ or $e$ is assumed to be zero.
Among them, we particularly consider the cases with no classical component in the source state and with no side information at the encoder, which have been analyzed in the previous literature. We also consider quantum state redistribution with classical side information at the decoder, which has not been addressed before. We investigate both the one-shot and the asymptotic scenarios. The one-shot direct and converse bounds are obtained from \rThm{direct} and \rThm{converse}, respectively, and the asymptotic rate region is obtained from \rThm{asymptotic}. The analysis presented below shows that, for the tasks that have been analyzed previously, the bounds obtained from our results coincide with the ones in the literature. It should be noted, however, that the coincidence in the one-shot scenario holds only up to changes in the types of entropies and the values of the smoothing parameters. All entropies are for the source state $\Psi_s$. We will use \rLmm{onedimHminmax} in \rApp{propSmEn} for the calculation of entropies.
\subsection{No Classical Component in The Source State}
First, we consider the case where there is no classical component in the source state. It is described by setting $ X=Y=Z=\emptyset $. By imposing several additional assumptions, the scenario reduces to different protocols.
\subsubsection{Fully Quantum State Redistribution} Our hybrid scenario of state redistribution reduces to the fully quantum scenario, by additionally assuming that $ c=0 $. The one-shot direct part is given by \alg{ 2q
&\geq H_*^{(3\epsilon/2,\epsilon/2)}(C|A)+H_{\rm max}^{\epsilon/2}(C|B) \nonumber\\ &\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad -\log{(\delta^4/2)}, \\ \!\!\!q+e &
\geq H_{\rm max}^{\epsilon/2}(C|B) -\log{(\delta^2/2)}, \\ e_0 & \geq
\frac{1}{2}(H_{\rm max'}^{\epsilon^2/8}(C)-H_{\rm max}^{3\epsilon/2}(C|B))+\log{\delta}.\!\!\! } An example of the tuple satisfying the above conditions is \alg{ q &=
\frac{1}{2}(H_*^{(3\epsilon/2,\epsilon/2)}(C|A)+H_{\rm max}^\epsilon(C|B) \nonumber\\ &\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad-\log{(\delta^4/2)}), \laeq{FQSRa1}\\ e &=
\frac{1}{2}(-H_{\rm max}^\epsilon(C|A)
+H_{\rm max}^\epsilon(C|B)+1), \laeq{FQSRa2} \\ e_0 & =
\frac{1}{2}(H_{\rm max'}^{\epsilon^2/8}(C)-H_{\rm max}^{3\epsilon/2}(C|B))+\log{\delta}. } The achievability of $q$ and $e$ given by \req{FQSRa1} and \req{FQSRa2} coincides with the result of \cite{berta2016smooth} (see also \cite{anshu2017one}). The one-shot converse bound is represented as \alg{ 2q &\geq
H_{\rm min}^\epsilon(AC)- H_{\rm max}^\epsilon(A) +H_{\rm min}^\epsilon(BC) \nonumber \\ & \quad\quad\quad\quad\quad -H_{\rm min}^{12\epsilon+6\sqrt{\delta}}(B)-8f(\epsilon), \laeq{FQSRconv1}\\ q+e &\geq
H_{\rm min}^\epsilon(BC)-H_{\rm min}^{12\epsilon+6\sqrt{\delta}}(B)-f(\epsilon). } The condition \req{FQSRconv1} in the above coincides with Inequality (104) in \cite{berta2016smooth}. The rate region for the asymptotic scenario is obtained from \rThm{asymptotic}, which yields \alg{ 2q
&\geq H(C|A)+H(C|B), \\ q+e &
\geq H(C|B). } A simple calculation implies that the above rate region is equal to the one obtained in Refs.~\cite{devetak2008exact,yard2009optimal}.
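One way to carry out this calculation (our sketch of the step referred to above) uses the purity of the source state on $ABCR$, which gives $H(CA)=H(BR)$ and $H(A)=H(BCR)$, so that
\alg{
H(C|A)+H(C|B) &= H(BR)-H(BCR)+H(BC)-H(B) \nonumber\\
&= I(C:R|B),
}
matching the quantum communication cost $q=\frac{1}{2}I(C:R|B)$ identified in these references.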
\subsubsection{Fully Quantum Slepian-Wolf} The fully-quantum Slepian-Wolf protocol is obtained by setting $ A=\emptyset $, $ c=0 $. The one-shot direct part obtained from \rThm{direct} reads \alg{ 2q
&\geq H_*^{(3\epsilon/2,\epsilon/2)}(C)+H_{\rm max}^{\epsilon/2}(C|B) \nonumber\\ & \quad\quad\quad\quad\quad\quad\quad\quad\quad\quad -\log{(\delta^4/2)}, \\ \!\!q+e
&\geq H_{\rm max}^{\epsilon/2}(C|B)-\log{(\delta^2/2)}, \\ e_0 & \geq
\frac{1}{2}(H_{\rm max'}^{\epsilon^2/8}(C)-H_{\rm max}^{3\epsilon/2}(C|B))+\log{\delta}.\!\!\! } An example of the rate triplet $(q,e,e_0)$ satisfying the above inequalities is \alg{ q &=
\frac{1}{2}(H_*^{(3\epsilon/2,\epsilon/2)}(C)+H_{\rm max}^{\epsilon/2}(C|B) \nonumber\\ & \quad\quad\quad\quad\quad\quad\quad\quad\quad\quad -\log{(\delta^4/2)}), \\ e&=
\frac{1}{2}(-H_*^{(3\epsilon/2,\epsilon/2)}(C)+H_{\rm max}^{\epsilon/2}(C|B)+1),\!\! \\ e_0 & =
\frac{1}{2}(H_{\rm max'}^{\epsilon^2/8}(C)-H_{\rm max}^{3\epsilon/2}(C|B))+\log{\delta}. } The result is equivalent to the one given by \cite{datta2011apex} (see Theorem 8 therein), with respect to $q$ and $e$. Note, however, that our achievability bound requires the use of initial entanglement resource of $e+e_0$ ebits, whereas the one by \cite{datta2011apex} does not. The one-shot converse bound is obtained from \rThm{converse}, which yields \alg{ 2q &\geq
H_{\rm min}^\epsilon(C)+ H_{\rm min}^\epsilon(BC)
\nonumber\\
&\quad\quad\quad\quad
-H_{\rm min}^{12\epsilon+6\sqrt{\delta}}(B)-6f(\epsilon), \\ q+e &\geq
H_{\rm min}^\epsilon(BC)-H_{\rm min}^{12\epsilon+6\sqrt{\delta}}(B)-f(\epsilon). } From \rThm{asymptotic}, the two-dimensional achievable rate region for the asymptotic scenario is given by \alg{ 2q
&\geq H(C)+H(C|B), \\ q+e
&\geq H(C|B), } which coincides with the result obtained in \cite{ADHW2009}. It should be noted that various coding theorems for quantum protocols are obtained from that for the fully quantum Slepian-Wolf protocol, which is referred to as the family of quantum protocols \cite{ADHW2009,devetak2004family}.
\subsubsection{Quantum State Splitting} The task in which $ B=\emptyset $, $ c=0 $ is called quantum state splitting. The one-shot direct part is represented as \alg{ 2q
&\geq H_*^{(3\epsilon/2,\epsilon/2)}(C|A)+H_{\rm max}^{\epsilon/2}(C) \nonumber\\ & \quad\quad\quad\quad\quad\quad\quad\quad\quad -\log{(\delta^4/2)}, \\ q+e & \geq H_{\rm max}^{\epsilon/2}(C) -\log{(\delta^2/2)}, \\ e_0 & \geq \frac{1}{2}(H_{\rm max'}^{\epsilon^2/8}(C)-H_{\rm max}^{3\epsilon/2}(C))+\log{\delta}. \laeq{deltae0} } Note that if a triplet $(q,e,e_0)$ is achievable, then $(q,e+e_0,0)$ is also achievable. Thus, an example of an achievable rate pair $(q,e)$ is \alg{ q &=
\frac{1}{2}(H_*^{(3\epsilon/2,\epsilon/2)}(C|A)+H_{\rm max}^{\epsilon/2}(C)-\log{(\delta^4/2)}), \\ e &=
\frac{1}{2}(-H_*^{(3\epsilon/2,\epsilon/2)}(C|A) +H_{\rm max}^{\epsilon/2}(C)+1) + \delta e_0, } where we have denoted the R.H.S. of \req{deltae0} by $\delta e_0$. This coincides with Lemma 3.5 in \cite{berta11}, up to an extra term $\delta e_0$. The one-shot converse bound is given by \alg{ 2q &\geq
H_{\rm min}^\epsilon(AC)- H_{\rm max}^\epsilon(A) +H_{\rm min}^\epsilon(C)
\nonumber\\
&\quad\quad +\log{(1-22\epsilon-16\sqrt{\delta})}-6f(\epsilon), \\ q+e &\geq
H_{\rm min}^\epsilon(C)
+\log{(1-22\epsilon-16\sqrt{\delta})}-f(\epsilon). } The rate region for the asymptotic scenario yields \alg{ 2q
&\geq H(C|A)+H(C), \\ q+e & \geq H(C). } An example of a rate pair satisfying this condition is \alg{ q &=
\frac{1}{2}(H(C)+H(C|A)), \\ e & =
\frac{1}{2}(H(C)-H(C|A)). }
This result coincides with Equality (6.1) in \cite{ADHW2009}, under the correspondence $|\Psi_s\rangle^{ACR}=U_\ca{N}^{R'\rightarrow AC}\ket{\varphi}^{R'R}$ with $U_\ca{N}^{R'\rightarrow AC}$ being some isometry.
\subsubsection{Quantum State Merging} Quantum state merging is a task in which $ A=\emptyset $, $ q=0 $. The one-shot direct part is given by \alg{ c
&\geq H_*^{(3\epsilon/2,\epsilon/2)}(C)+H_{\rm max}^{\epsilon/2}(C|B)-\log{(\delta^4/2)}, \\ e
&\geq H_{\rm max}^{\epsilon/2}(C|B)-\log{\delta^2}, \laeq{entoneSM} \\ e_0 & \geq
\frac{1}{2}(H_{\rm max'}^{\epsilon^2/8}(C)-H_{\rm max}^{3\epsilon/2}(C|B))+\log{\delta}. } The achievability of the entanglement cost \req{entoneSM} is equal to the one given by \cite{DBWR2010} (see Theorem 5.2 therein). The one-shot converse bound is obtained from \rThm{converse}, which yields \alg{ c &\geq H_{\rm min}^\epsilon(C)+H_{\rm min}^{\epsilon}(BC) \nonumber\\ &\quad\quad\quad\quad-H_{\rm min}^{12\epsilon+6\sqrt{\delta}}(B)-6f(\epsilon),\! \\ e &\geq H_{\rm min}^{\epsilon}(BC)-H_{\rm min}^{11\epsilon+8\sqrt{\delta}}(B)-2f(\epsilon). } The rate region for the asymptotic setting is obtained from \rThm{asymptotic} as \alg{ c
&\geq H(C)+H(C|B), \\ e
&\geq H(C|B). } This rate region is equivalent to the results in \cite{horo05,horo07}. Note, however, that the protocols in \cite{horo05,horo07} are more efficient than ours, in that the catalytic use of entanglement resource is not required.
\subsection{No Side Information At The Encoder}
Next, we consider scenarios in which there is no classical or quantum side information at the encoder. This corresponds to the case where $A=X=\emptyset$. We consider three scenarios by imposing several additional assumptions.
\subsubsection{Classical Data Compression with Quantum Side Information at The Decoder} The task of classical data compression with quantum side information was analyzed in \cite{devetak2003classical}. This is obtained by additionally setting $ Y=C=\emptyset $, $ q=e=e_0=0 $. The one-shot direct and converse bounds are given by \alg{ c
&\geq H_{\rm max}^\epsilon(Z|B) -\log{\frac{\delta^2}{2}}, \\ c &\geq H_{\rm min}^\epsilon(BZ) - H_{\rm min}^{12\epsilon+6\sqrt{\delta}}(B) -f(\epsilon), } respectively. This result is equivalent to the one obtained in \cite{renes2012one} (see also \cite{tomamichel2013hierarchy}). In the asymptotic limit, the achievable rate region is given by $ c
\geq H(Z|B) $, which coincides with the result by \cite{devetak2003classical}.
\subsubsection{Quantum Data Compression with Classical Side Information at The Decoder}
The task of quantum data compression with classical side information at the decoder was analyzed in \cite{khanian2020distributed}. This is obtained by imposing additional assumptions $ Z=B=\emptyset $, $ c=0 $. In the entanglement ``unconsumed'' scenario ($e=0$), the direct bounds for the one-shot case are given by \alg{ q &\geq \frac{1}{2}(
H_*^{(3\epsilon/2,\epsilon/2)}(C)+H_{\rm max}^{\epsilon/2}(C|Y)) -\log{\frac{\delta^4}{2}}, \\ e_0 & \geq
\frac{1}{2}(H_{\rm max'}^{\epsilon^2/8}(C)-H_{\rm max}^{3\epsilon/2}(C|Y))+\log{\delta}. } Note that the entanglement is used only catalytically. Thus, in the asymptotic regime, the achievable quantum communication rate in the entanglement unassisted scenario $(e=e_0=0)$ is obtained due to the cancellation lemma (Lemma 4.6 in \cite{deve08}), which reads \alg{ q &\geq \frac{1}{2}(
H(C)+H(C|Y)). \laeq{QcompCsideDA} } In the case where the unlimited amount of entanglement is available, the converse bounds on the quantum communication cost in the one-shot and the asymptotic scenarios read \alg{ q &\geq \frac{1}{2}(
H_{\rm min}^\epsilon(C)+H_{\rm min}^\epsilon(C|Y)-\Delta^{\epsilon,\delta}) -6f(\epsilon), \\ q &\geq \frac{1}{2}(
H(C)+H(C|Y)-\tilde{\Delta}) \nonumber\\ &\quad\quad\quad +\frac{1}{2}\log{(1-22\epsilon-16\sqrt{\delta})}. \laeq{QcompCsideCA} } The asymptotic result \req{QcompCsideDA} coincides with Theorem 7 in \cite{khanian2020distributed}, and \req{QcompCsideCA} is similar to Theorem 5 therein. It is left open, however, whether the quantity $\tilde{\Delta}$ is equal to the function $I_{(n,\delta)}$ that appears in Theorem 5 of \cite{khanian2020distributed} (see Definition 2 therein).
\subsubsection{Fully Classical Slepian-Wolf} In the fully classical scenario, the Slepian-Wolf problem is given by $ B=C=\emptyset $ in addition to $X=A=\emptyset$, and $ q=e=e_0=0 $. The one-shot achievability is given by \alg{
c\geq H_{\rm max}^\epsilon(Z|Y) -\log{\frac{\delta^2}{2}}, } and the one-shot converse bound reads \alg{ c\geq H_{\rm min}^\epsilon(YZ) - H_{\rm min}^{12\epsilon+6\sqrt{\delta}}(Y) -f(\epsilon), } which are equivalent to the result obtained in \cite{renner2005simple}. It is easy to show that the well-known achievable rate region $ c
\geq H(Z|Y) $ follows from \rThm{asymptotic}.
\subsection{Quantum State Redistribution with Classical Side Information at The Decoder}
We consider a scenario in which $X=Z=\emptyset$ and $c=0$. This scenario can be regarded as a generalization of the fully quantum state redistribution, that incorporates classical side information at the decoder \cite{anshu2018noisy}. The one-shot direct bound is represented by \alg{ 2q &\geq \max\{\tilde{H}_{I}^{(3\epsilon/2,\epsilon/2)},\tilde{H}_{I\! I}^{(\epsilon/2)}\}-\log{(\delta^4/2)}, \\ q+e
&\geq H_{\rm max}^{\epsilon/2}(C|BY)-\log{(\delta^2/2)}, \\ e_0 & \geq
\frac{1}{2}(H_{\rm max'}^{\epsilon^2/8}(C)-H_{\rm max}^{3\epsilon/2}(C|BY))+\log{\delta}, } where \alg{ & \tilde{H}_{I}^{(3\epsilon/2,\epsilon/2)}
:=H_{*}^{(3\epsilon/2,\epsilon/2)}(C|AY)+H_{\rm max}^{\epsilon/2}(C|BY), \\ & \tilde{H}_{I\! I}^{(\epsilon/2)} :=
H_{\rm max}^{\epsilon/2}(C|A)+H_{\rm max}^{\epsilon/2}(C|BY). } The converse bound is also obtained from \rThm{converse}. The inner and outer bounds for the achievable rate region in the asymptotic limit is given by \alg{ 2q &\geq \tilde{H}_{I\!I}, \\ q+e
&\geq H(C|BY), \\ e_0 &\geq \frac{1}{2}I(C:BY), } and \alg{ 2q &\geq \max\{\tilde{H}_{I},\tilde{H}_{I\!I}-\tilde{\Delta}\}, \\ q+e
&\geq H(C|BY), } respectively, where \alg{ & \tilde{H}_{I}
:=H(C|AY)+H(C|BY), \\ & \tilde{H}_{I\! I} :=
H(C|A)+H(C|BY). } We may also obtain its descendants by further assuming $A=\emptyset$ or $B=\emptyset$, which yield generalizations of the fully quantum Slepian-Wolf and quantum state splitting, respectively.
It is expected that various quantum communication protocols with classical side information only at the decoder are obtained by reduction from the above result, similarly to the family of quantum protocols \cite{ADHW2009,devetak2004family}. We, however, leave this problem as future work.
\section{Proof of The Direct Part (\rThm{direct})} \lsec{direct}
We prove \rThm{direct} based on the following propositions:
\bprp{direct} A tuple $(c,q,e,e_0)$ is achievable within the error $4\sqrt{12\epsilon+6\delta}$ for $\Psi_s$ if $d_C\geq2$ and it holds that \alg{ c+q-e
&\geq H_{\rm max}^\epsilon(CZ|AX)_{\Psi_s}-\log{\frac{\delta^2}{2}}, \laeq{neon1} \\ q-e
&\geq H_{\rm max}^\epsilon(C|AXYZ)_{\Psi_s}-\log{\delta^2}, \laeq{neon2} \\ c+q+e
&\geq H_{\rm max}^\epsilon(CZ|BY)_{\Psi_s}-\log{\frac{\delta^2}{2}}, \laeq{neon3} \\ q+e
&\geq H_{\rm max}^\epsilon(C|BXYZ)_{\Psi_s}-\log{\delta^2}, \laeq{neon4} \\ e_0 &=\frac{1}{2}(\log{d_C}-q-e). \laeq{neon5} } In the case where $d_C=1$ and $q=e=e_0=0$, the classical communication rate $c$ is achievable within the error $\delta$ if it holds that \begin{eqnarray} c\geq
\max\{H_{\rm max}^\epsilon(Z|AX)_{\Psi_s},H_{\rm max}^\epsilon(Z|BY)_{\Psi_s}\} \nonumber\\ - \log{\frac{\delta^2}{2}}. \quad\quad \laeq{matahi} \end{eqnarray} \end{prp}
\bprp{direct2} A tuple $(c,q,e,e_0)$ is achievable within an error $4\sqrt{12\epsilon+6\delta}$ for $\Psi_s$ if $d_C\geq2$ and it holds that \alg{ c+2q &\geq \max\{\tilde{H}_{I}^{(\epsilon)},\tilde{H}_{I\! I}^{(\epsilon)}\}-\log{(\delta^4/2)}, \laeq{neon00} \\ \!\!c+q+e
&\geq H_{\rm max}^\epsilon(CZ|BY)_{\Psi_s}-\log{(\delta^2/2)},\!\!\! \laeq{neon03} \\ q+e
&\geq H_{\rm max}^\epsilon(C|BXYZ)_{\Psi_s}-\log{\delta^2}, \laeq{neon04} \\ e_0 &\geq
\frac{1}{2}(\log{d_C}-H_{\rm max}^\epsilon(C|BXYZ)_{\Psi_s}) \nonumber\\ &\quad\quad\quad\quad\quad\quad\quad\quad\quad +\log{\delta}, \laeq{neon05} } where \alg{ \tilde{H}_{I}^{(\epsilon)} :=
&H_{*}^\epsilon(C|AXYZ)_{\Psi_s} \nonumber\\ &
+H_{\rm max}^\epsilon(CZ|BY)_{\Psi_s}, \\ \tilde{H}_{I\! I}^{(\epsilon)} := &
H_{\rm max}^\epsilon(C|AXZ)_{\Psi_s} \nonumber\\ &
+H_{\rm max}^\epsilon(C|BXYZ)_{\Psi_s} } and \alg{ &
H_{*}^\epsilon(C|AXYZ)_{\rho} := \nonumber\\ &\quad
\max\{H_{\rm min}^\epsilon(C|AXYZ)_{\rho}, H_{\rm max}^\epsilon(C|AXYZ)_{\rho}\}. }
In the case where $d_C=1$, a tuple $(c,0,0,0)$ is achievable for $\Psi_s$ within the error $\delta$ if it holds that \alg{
c\geq H_{\rm max}^\epsilon(Z|BY)_{\Psi_s} -\log{\frac{\delta^2}{2}}. \laeq{mongen} } \end{prp}
Proofs of \rPrp{direct} and \rPrp{direct2} will be given in the following subsections. In \rSec{PBD}, we prove the {\it partial bi-decoupling theorem}, which is a generalization of the bi-decoupling theorem \cite{ming08,berta2016smooth}. Based on this result, we prove \rPrp{direct} in \rSec{prfPrpdirect}. We adopt the idea that a protocol for state redistribution can be constructed by sequentially combining protocols for the (fully quantum) reverse Shannon theorem and the (fully quantum) Slepian-Wolf problem. In \rSec{prfthmdirect}, we extend the rate region in \rPrp{direct} by incorporating teleportation and dense coding, thereby proving \rPrp{direct2}. Finally, we prove \rThm{direct} from \rPrp{direct2} in \rSec{seru}.
\subsection{Partial Bi-Decoupling} \lsec{PBD}
The idea of the bi-decoupling theorem was first introduced in \cite{ming08}, and was improved in \cite{berta2016smooth} to fit better into the framework of one-shot information theory. The approach in \cite{berta2016smooth} is based on the decoupling theorem in \cite{DBWR2010}. In this subsection, we generalize those results by using the direct part of randomized partial decoupling \cite{wakakuwa2021one} to incorporate the hybrid communication scenario.
\subsubsection{Direct Part of Partial Decoupling}
We first present the direct part of randomized partial decoupling (Theorem 3 in \cite{wakakuwa2021one}). Let $\Psi^{\hat{C}\hat{S}}$ be a subnormalized state in the form of \begin{align} \Psi^{\hat{C}\hat{S}}=\sum_{j,k=1}^J\outpro{j}{k}^{Z}\otimes\psi_{jk}^{CS}\otimes\outpro{j}{k}^{Z'}.\laeq{romanof} \end{align} Here, $Z$ and $Z'$ are $J$-dimensional quantum systems with a fixed orthonormal basis $\{\ket{j}\}_{j=1}^J$, $\hat{C}\equiv ZC$, $\hat{S}\equiv Z'S$ and $\psi_{jk}\in\ca{L}(\ca{H}^{C}\otimes\ca{H}^{S})$ for each $j$ and $k$.
Note that the positive-semidefiniteness of $\Psi^{\hat{C}\hat{S}}$ implies $\psi_{jj}\geq0$ for all $j$ and the subnormalization condition implies $\sum_{j=1}^J{\rm Tr}[\psi_{jj}]\leq1$.
Consider a random unitary $U$ on $\hat{C}$ in the form of \begin{align} U:=\sum_{j=1}^J\outpro{j}{j}^{Z} \otimes U_j^{C}, \laeq{RUrpd} \end{align} where $U_j \sim {\sf H}_j$ for each $j$, and ${\sf H}_j$ is the Haar measure on the unitary group on $\ca{H}^{C}$. The averaged state obtained after the action of the random unitary $U$ is given by \alg{ \Psi_{\rm av}^{\hat{C}\hat{S}} & :=\mbb{E}_{U} [ U^{\hat{C}} ( \Psi^{\hat{C}\hat{S}} ) U^{\dagger {\hat{C}}}] \laeq{SEK} \\ &= \sum_{j=1}^Jp_j\proj{j}^{Z}\otimes\pi^C\otimes\psi_{j}^{S}\otimes\proj{j}^{Z'}, \laeq{SEK2} } where $p_j:={\rm Tr}[\psi_{jj}]$ and $\psi_{j}:=p_j^{-1}\psi_{jj}$. Consider also the permutation group $\mbb{P}$ on $[1,\cdots,J]$, and define a unitary $G_\sigma$ for any $\sigma\in\mbb{P}$ by \alg{ G_\sigma:=\sum_{j=1}^J\outpro{\sigma(j)}{j}^{Z}. \label{eq:RPrpd} } We assume that the permutation $\sigma$ is chosen at random according to the uniform distribution on $\mbb{P}$.
\begin{figure}
\caption{ The situation of partial decoupling is depicted. }
\label{fig:partialdecoupling}
\end{figure}
Suppose that the state $\Psi^{\hat{C}\hat{S}}$ is transformed by unitaries $U$ and $G_\sigma$, and then is subject to the action of a quantum channel (linear CP map) $\ca{T}^{\hat{C}\rightarrow E}$ (see Figure \ref{fig:partialdecoupling}).
The final state is represented as
\alg{
&
\ca{T}^{{\hat{C}} \rightarrow E} ( (G_\sigma^Z U^{\hat{C}}) \Psi^{\hat{C}\hat{S}} (G_\sigma^Z U^{\hat{C}})^\dagger )
\nonumber\\
&\quad\quad
=
\ca{T}^{{\hat{C}} \rightarrow E} \circ \ca{G}_\sigma^Z \circ \ca{U}^{\hat{C}} ( \Psi^{\hat{C}\hat{S}} ).
} We consider how close the final state is, on average over all $U$, to the averaged final state $\ca{T}^{{\hat{C}} \rightarrow E} \circ \ca{G}_\sigma^Z ( \Psi_{\rm av}^{\hat{C}\hat{S}} )$, for typical choices of the permutation $\sigma$. The following theorem is the direct part of the randomized partial decoupling theorem, which provides an upper bound on the average distance between $\ca{T}^{\hat{C} \rightarrow E} \circ \ca{G}_\sigma^Z \circ \ca{U}^{\hat{C}} ( \Psi^{\hat{C}\hat{S}} )$ and $\ca{T}^{\hat{C} \rightarrow E} \circ \ca{G}_\sigma^Z ( \Psi_{\rm av}^{\hat{C}\hat{S}} )$. Although the original version in \cite{wakakuwa2021one} is applicable to any $J\geq1$, in this paper we assume that $J\geq2$.
\blmm{SmoothExMarkov} {\bf(Corollary of Theorem 3 in \cite{wakakuwa2021one})} Consider a subnormalized state $\Psi^{\hat{C}\hat{S}}\in\ca{S}_\leq(\ca{H}^{\hat{C}\hat{S}})$ that is decomposed as \req{romanof}. Let $\ca{T}^{\hat{C} \rightarrow E}$ be a linear trace non-increasing CP map with the complementary channel $\ca{T}^{\hat{C}\rightarrow F}$. Let $U$ and $G_\sigma$ be random unitaries given by (\ref{eq:RUrpd}) and (\ref{eq:RPrpd}), respectively, and fix arbitrary $\epsilon,\mu\geq0$. It holds that \begin{align}
&\mbb{E}_{\sigma,U } \left[ \left\| \ca{T}^{\hat{C} \rightarrow E} \circ \ca{G}_\sigma^Z \circ \ca{U}^{\hat{C}} ( \Psi^{\hat{C}\hat{S}} ) \right.\right.\nonumber\\ &\quad\quad\quad\quad\quad\quad \left.\left.-\ca{T}^{\hat{C} \rightarrow E} \circ \ca{G}_\sigma^Z ( \Psi_{\rm av}^{\hat{C}\hat{S}} )
\right\|_1 \right]\nonumber\\ &\leq \begin{cases} 2^{-\frac{1}{2}H_I} + 2^{-\frac{1}{2}H_{I\!I}} +4(\epsilon+\mu+\epsilon\mu) & \! (d_C\geq2), \\ 2^{-\frac{1}{2}H_I} +4(\epsilon+\mu+\epsilon\mu) & \! (d_C=1), \end{cases} \! \laeq{SmExMa} \end{align} where $ \Psi_{\rm av}^{\hat{C}\hat{S}}:=\mbb{E}_{U}[ \ca{U}^{\hat{C}} ( \Psi^{\hat{C}\hat{S}} )] $. The exponents $H_I$ and $H_{I\!I}$ are given by \alg{ & \!\! H_I= \log{(J-1)}+
H_{\rm min}^\epsilon(\hat{C}|\hat{S})_{\Psi} \nonumber\\ & \quad\quad\quad\quad\quad\quad\quad\quad
-H_{\rm max}^\mu(\hat{C}|F)_{\ca{C}(\tau)},
\\ & \!\! H_{I\!I}=
H_{\rm min}^\epsilon(\hat{C}|\hat{S})_{\ca{C}(\Psi)}-H_{\rm max}^\mu(C|FZ)_{\ca{C}(\tau)}. }
Here, $\ca{C}$ is the completely dephasing operation on $Z$ with respect to the basis $\{|j\rangle\}_{j=1}^J$, and $\tau$ is the Choi-Jamiolkowski state of $\ca{T}^{\hat{C}\rightarrow F}$ defined by $\tau^{\hat{C}F}:=\ca{T}^{\hat{C}'\rightarrow F}(\Phi^{\hat{C}\hat{C}'})$. The state $\Phi^{\hat{C}\hat{C}'}$ is the maximally entangled state in the form of \alg{
|\Phi\rangle^{\hat{C}\hat{C}'}=\frac{1}{\sqrt{J}}\sum_{j=1}^J\ket{jj}^{ZZ'}|\Phi_r\rangle^{CC'}. \laeq{maxentdfn} } \end{lmm}
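As with the unitaries above, the completely dephasing operation $\ca{C}$ is easy to realize numerically: it is the pinching map in the fixed basis of $Z$. The following NumPy sketch is an illustration only (the factor ordering $\hat{C}=Z\otimes C$ is a convention of the sketch); any further systems would simply be tensored onto the $C$ factor.
\begin{verbatim}
import numpy as np

def dephase_Z(rho, J, d_C):
    # C(rho) = sum_j (|j><j|^Z (x) I^C) rho (|j><j|^Z (x) I^C),
    # the completely dephasing (pinching) map on the Z factor of Z (x) C.
    out = np.zeros_like(rho, dtype=complex)
    for j in range(J):
        P = np.zeros((J, J))
        P[j, j] = 1.0
        Pj = np.kron(P, np.eye(d_C))
        out += Pj @ rho @ Pj
    return out
\end{verbatim}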
\subsubsection{Partial Decoupling under Partial Trace}
We apply \rLmm{SmoothExMarkov} to a particular case where the channel $\ca{T}$ is the partial trace (see Figure \ref{fig:partialdecouplingPT}).
\begin{figure}
\caption{ The situation of partial decoupling under partial trace is depicted. }
\label{fig:partialdecouplingPT}
\end{figure}
\blmm{tus} Consider the same setting as in \rLmm{SmoothExMarkov}, and suppose that $Z=Z_LZ_R$, $C=C_LC_R$. We assume that $Z_L$ and $Z_R$ are equipped with fixed orthonormal bases $\{\ket{z_L}\}_{z_L=1}^{J_L}$ and $\{\ket{z_R}\}_{z_R=1}^{J_R}$, respectively, so that $J=J_LJ_R$ and the orthonormal basis of $Z$ is given by $\{\ket{z_L}\ket{z_R}\}_{z_L,z_R}$. Fix arbitrary $\epsilon\geq0$ and $\delta>0$. If $d_C\geq2$ and \alg{ \log{\frac{d_{C_L}^2}{d_{Z_R}d_{C}}} &
\leq H_{\rm min}^\epsilon(\hat{C}|\hat{S})_{\Psi}+\log{\frac{\delta^2}{2}}, \laeq{ppt1} \\ \log{\frac{d_{C_L}^2}{d_{C}}} &
\leq H_{\rm min}^\epsilon(\hat{C}|\hat{S})_{\ca{C}(\Psi)}+\log{\delta^2}, \laeq{ppt2} } then it holds that \alg{ &
\mbb{E}_{\sigma,U } \left\| {\rm Tr}_{Z_RC_R} \circ \ca{G}_\sigma^Z \circ \ca{U}^{\hat{C}} ( \Psi^{\hat{C}\hat{S}} ) \right. \nonumber\\ &\quad\quad \left. -{\rm Tr}_{Z_RC_R} \circ \ca{G}_\sigma^Z ( \Psi_{\rm av}^{\hat{C}\hat{S}} )
\right\|_1 \leq 4\epsilon+2\delta, \laeq{ppt} } where $ \Psi_{\rm av}^{\hat{C}\hat{S}}:=\mbb{E}_{U \sim {\sf H}_{\times}}[ \ca{U}^{\hat{C}} ( \Psi^{\hat{C}\hat{S}} )] $. The same statement also holds in the case of $d_C=1$, in which case the condition \req{ppt2} can be removed. \end{lmm}
\begin{prf} We apply \rLmm{SmoothExMarkov} by the correspondence $\mu=0$, $E=Z_LC_L$, $F=Z_RC_R$, $J=d_Z$ and $\ca{T}^{\hat{C}\rightarrow Z_LC_L}={\rm id}^{Z_LC_L}\otimes{\rm Tr}_{Z_RC_R}$. It follows that Ineq.~\req{ppt} holds if $d_C\geq2$ and \alg{ & \log{(d_Z-1)}+
H_{\rm min}^\epsilon(\hat{C}|\hat{S})_{\Psi} \nonumber\\ &\quad\quad
-H_{\rm max}(\hat{C}|Z_R'C_R')_{\ca{C}(\tau)} +\log{\delta^2}\geq0, \laeq{PDB-Prf-1} \\ &
H_{\rm min}^\epsilon(\hat{C}|\hat{S})_{\ca{C}(\Psi)}-H_{\rm max}(C|Z_R'C_R'Z)_{\ca{C}(\tau)} \nonumber\\ &\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad +\log{\delta^2}\geq 0. \laeq{PDB-Prf-2} } Here, $\tau$ is the Choi-Jamiolkowski state of the complementary channel of $\ca{T}^{\hat{C}\rightarrow Z_LC_L}$, and is given by \alg{ \!\! \tau^{\hat{C}Z_R'C_R'}=\pi^{Z_L}\otimes\pi^{C_L}\otimes\Phi^{Z_RZ_R'}\otimes\Phi^{C_RC_R'}. \!\! } Using the additivity of the max conditional entropy (\rLmm{addcondmax} in \rApp{propSmEn}), the entropies are calculated to be \alg{ &
H_{\rm max}(\hat{C}|Z_R'C_R')_{\ca{C}(\tau)} \nonumber\\ &\quad\quad =\log{d_{Z_L}}+\log{d_{C_L}}-\log{d_{C_R}},
\\ &
H_{\rm max}(C|Z_R'C_R'Z)_{\ca{C}(\tau)} \nonumber\\ &\quad\quad =\log{d_{C_L}}-\log{d_{C_R}}. } Thus, Inequalities \req{PDB-Prf-1} and \req{PDB-Prf-2} are equivalent to \alg{ & \log{(d_Z-1)}+
H_{\rm min}^\epsilon(\hat{C}|\hat{S})_{\Psi} \nonumber\\ &\quad\quad\quad\quad\quad -\log{\frac{d_{Z_L}d_{C_L}}{d_{C_R}}} +\log{\delta^2}\geq0, \\ &
H_{\rm min}^\epsilon(\hat{C}|\hat{S})_{\ca{C}(\Psi)}-\log{\frac{d_{C_L}}{d_{C_R}}}+\log{\delta^2}\geq 0. } Noting that $d_Z=d_{Z_L}d_{Z_R}$, $d_C=d_{C_L}d_{C_R}$ and that $(d_Z-1)/d_Z\geq1/2$, the above two inequalities follow from \req{ppt1} and \req{ppt2}, respectively. Thus, the proof in the case of $d_C\geq2$ is done. The proof for the case of $d_C=1$ proceeds along the same line.
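For completeness, we spell out how the first of the two inequalities above follows from \req{ppt1}; the second follows from \req{ppt2} in the same way. Using $\log{(d_Z-1)}\geq\log{d_Z}-1$ (which holds since $(d_Z-1)/d_Z\geq1/2$) together with $d_Z=d_{Z_L}d_{Z_R}$ and $d_C=d_{C_L}d_{C_R}$, we have \alg{ & \log{(d_Z-1)}+H_{\rm min}^\epsilon(\hat{C}|\hat{S})_{\Psi} -\log{\frac{d_{Z_L}d_{C_L}}{d_{C_R}}} +\log{\delta^2} \nonumber\\ &\quad\geq H_{\rm min}^\epsilon(\hat{C}|\hat{S})_{\Psi} +\log{\frac{\delta^2}{2}} -\log{\frac{d_{C_L}^2}{d_{Z_R}d_{C}}} \geq 0, } where the last inequality is \req{ppt1}.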
$\blacksquare$ \end{prf}
\begin{figure*}
\caption{ The situation of partial bi-decoupling is depicted. As represented by the rotary, we consider two cases where $S_1C_2$ or $S_2C_1$ are traced out. }
\label{fig:partialbidecoupling}
\end{figure*}
\subsubsection{Partial Bi-Decoupling Theorem}
Based on \rLmm{tus}, we introduce a generalization of the ``bi-decoupling theorem''~\cite{ming08,berta2016smooth} that played a crucial role in the proof of the direct part of one-shot fully quantum state redistribution. We consider the case where systems $C$ and $S$ are composed of three subsystems. The following lemma provides a sufficient condition under which a {\it single} pair of $\sigma$ and $U$ simultaneously achieves partial decoupling of a state, from the viewpoint of two different choices of subsystems (see Figure \ref{fig:partialbidecoupling} on the next page).
\blmm{tus2} {\bf (Partial bi-decoupling.)} Consider the same setting as in \rLmm{SmoothExMarkov}, assume $Z=Z_LZ_R$, $C=C_1C_2C_3$, $S=S_1S_2S_3$, and fix arbitrary $\epsilon\geq0$ and $\delta>0$. If $d_C\geq2$ and \alg{ \log{\frac{d_{C_1}^2}{d_{Z_R}d_{C}}}
&\leq H_{\rm min}^\epsilon(\hat{C}|Z'S_2S_3)_{\Psi}+\log{\frac{\delta^2}{2}}, \laeq{PBD1}\\ \log{\frac{d_{C_1}^2}{d_{C}}}
&\leq H_{\rm min}^\epsilon(\hat{C}|Z'S_2S_3)_{\ca{C}(\Psi)}\!+\!\log{\delta^2}\!,\! \laeq{PBD2}\\ \log{\frac{d_{C_2}^2}{d_{Z_R}d_{C}}}
&\leq H_{\rm min}^\epsilon(\hat{C}|Z'S_1S_3)_{\Psi}+\log{\frac{\delta^2}{2}}, \laeq{PBD3}\\ \log{\frac{d_{C_2}^2}{d_{C}}}
&\leq H_{\rm min}^\epsilon(\hat{C}|Z'S_1S_3)_{\ca{C}(\Psi)}\!+\!\log{\delta^2}\!,\! \laeq{PBD4} } there exist $\sigma$ and $U$ such that \begin{align} &
\left\| {\rm Tr}_{Z_RC_2C_3} \circ \ca{G}_\sigma^Z \circ \ca{U}^{\hat{C}} ( \Psi^{\hat{C}S_2S_3Z'} ) \right. \nonumber \\ & \quad\quad \left. -{\rm Tr}_{Z_RC_2C_3} \circ \ca{G}_\sigma^Z ( \Psi_{\rm av}^{\hat{C}S_2S_3Z'} )
\right\|_1 \leq 12\epsilon+6\delta, \laeq{PBD5}
\\ &
\left\| {\rm Tr}_{Z_RC_1C_3} \circ \ca{G}_\sigma^Z \circ \ca{U}^{\hat{C}} ( \Psi^{\hat{C}S_1S_3Z'} ) \right. \nonumber \\ & \quad\quad \left.
-{\rm Tr}_{Z_RC_1C_3} \circ \ca{G}_\sigma^Z ( \Psi_{\rm av}^{\hat{C}S_1S_3Z'} )
\right\|_1 \leq 12\epsilon+6\delta. \laeq{PBD6} \end{align} The same statement also holds if $d_C=1$, in which case the conditions \req{PBD2} and \req{PBD4} can be removed. \end{lmm}
\begin{prf} Suppose that $d_C\geq2$ and the inequalities \req{PBD1}-\req{PBD4} are satisfied. We apply \rLmm{tus} under the correspondence $C_R=C_\alpha C_3$, $S=S_\alpha S_3$ and $C_L=C_{\bar{\alpha}}$, where $(\alpha,\bar{\alpha})\in\{(1,2),(2,1)\}$. It follows that \alg{ & \mbb{E}_{\sigma,U}
\left\| {\rm Tr}_{Z_RC_\alpha C_3} \circ \ca{G}_\sigma^Z \circ \ca{U}^{\hat{C}} ( \Psi^{\hat{C}S_\alpha S_3Z'} ) \right. \nonumber \\ & \quad\quad\quad \left.
-{\rm Tr}_{Z_RC_\alpha C_3} \circ \ca{G}_\sigma^Z ( \Psi_{\rm av}^{\hat{C}S_\alpha S_3Z'} )
\right\|_1 \leq 4\epsilon+2\delta. } By Markov's inequality, for each $\alpha$ the probability over $\sigma$ and $U$ that the corresponding trace-norm term exceeds $3(4\epsilon+2\delta)=12\epsilon+6\delta$ is at most $1/3$. By the union bound, with probability at least $1/3$ neither term exceeds this value, so there exist $\sigma$ and $U$ that satisfy both \req{PBD5} and \req{PBD6}. This completes the proof in the case of $d_C\geq2$; the proof in the case of $d_C=1$ proceeds along the same lines.
$\blacksquare$ \end{prf}
\subsection{Proof of \rPrp{direct}} \lsec{prfPrpdirect}
To prove \rPrp{direct}, we follow the lines of the proof of the direct part of the fully quantum state redistribution protocol in \cite{ming08}. The key idea is that a protocol for state redistribution can be constructed by sequentially combining a protocol for the fully quantum reverse Shannon task with one for the fully quantum Slepian-Wolf task. We generalize this idea to the ``hybrid'' scenario (see Figure \ref{fig:stateredistributiondirect} on page \pageref{fig:stateredistributiondirect}). We only consider the case where $d_C\geq2$; the proof for the case of $d_C=1$ is obtained along the same lines.
\subsubsection{Application of The Partial Bi-Decoupling Theorem} \lsec{consED}
Consider the ``purified'' source state \alg{ & \ket{\Psi}^{ABCRXYZT}:=
\nonumber\\ & \quad\quad \sum_{x,y,z}\sqrt{p_{xyz}} \ket{x}^X\ket{y}^Y\ket{z}^Z \ket{\psi_{xyz}}^{ABCR}\ket{xyz}^T, \laeq{psourcestate} } where we denoted $X'Y'Z'$ simply by $T$. Let $C$ be isomorphic to $C_1C_2C_3$ and $Z$ to $Z_LZ_R$. Fix arbitrary $\epsilon>0$ and $\delta>0$. We apply \rLmm{tus2} under the following correspondence: \alg{ S_1=\hat{A}, \quad S_2=\hat{B}, \quad S_3=RX'Y'. \laeq{corresp} } Note that $\hat{R}=RX'Y'Z'$. It follows that if the dimensions of $C_1$ and $C_2$ are sufficiently small (see \rSec{evalent} for the details), there exist $\sigma$ and $U$ that satisfy \alg{ &
\left\| {\rm Tr}_{Z_RC_2C_3} \circ \ca{G}_\sigma^Z \circ \ca{U}^{\hat{C}} ( \Psi^{\hat{C}\hat{B}\hat{R}} ) \right. \nonumber \\ & \left.\quad\quad\quad -{\rm Tr}_{Z_RC_2C_3} \circ \ca{G}_\sigma^Z ( \Psi_{\rm av}^{\hat{C}\hat{B}\hat{R}} )
\right\|_1 \leq 12\epsilon+6\delta, \laeq{derby1} \\ &
\left\| {\rm Tr}_{Z_RC_1C_3} \circ \ca{G}_\sigma^Z \circ \ca{U}^{\hat{C}} ( \Psi^{\hat{C}\hat{A}\hat{R}} ) \right. \nonumber \\ & \left.\quad\quad\quad
-{\rm Tr}_{Z_RC_1C_3} \circ \ca{G}_\sigma^Z ( \Psi_{\rm av}^{\hat{C}\hat{A}\hat{R}} )
\right\|_1 \leq 12\epsilon+6\delta. \laeq{derby2} } Let $\ket{\Psi_{\sigma,1}}^{C_1Z_L\hat{B}\hat{R}D_A}$ be a purification of ${\rm Tr}_{Z_RC_2C_3} \circ \ca{G}_\sigma^Z ( \Psi_{\rm av}^{\hat{C}\hat{B}\hat{R}} )$ with $D_A$ being the purifying system. Similarly, let $\ket{\Psi_{\sigma,2}}^{C_2Z_L\hat{A}\hat{R}D_B}$ be a purification of ${\rm Tr}_{Z_RC_1C_3} \circ \ca{G}_\sigma^Z ( \Psi_{\rm av}^{\hat{C}\hat{A}\hat{R}} )$ with $D_B$ being the purifying system. Due to Uhlmann's theorem (\cite{uhlmann1976transition}; see also e.g. Chapter 9 in \cite{wildetext}), there exist linear isometries \alg{ V^{D_A\rightarrow Z_RC_2C_3\hat{A}}, \quad W^{Z_RC_1C_3\hat{B}\rightarrow D_B} } such that \begin{eqnarray}
\left\| \ca{G}_\sigma^Z \circ \ca{U}^{\hat{C}} ( \Psi^{\hat{C}\hat{A}\hat{B}\hat{R}} ) - \ca{V}^{D_A\rightarrow Z_RC_2C_3\hat{A}}(\Psi_{\sigma,1})
\right\|_1 \nonumber \\ \leq 2\sqrt{12\epsilon+6\delta}, \quad\quad \laeq{200a-11} \end{eqnarray} \begin{eqnarray}
\left\| \ca{W}^{Z_RC_1C_3\hat{B}\rightarrow D_B} \circ \ca{G}_\sigma^Z \circ \ca{U}^{\hat{C}} ( \Psi^{\hat{C}\hat{A}\hat{B}\hat{R}} ) - \Psi_{\sigma,2}
\right\|_1 \nonumber\\ \leq 2\sqrt{12\epsilon+6\delta}. \quad\quad \laeq{200a-22} \end{eqnarray} We particularly choose $C_1$, $C_2$, $C_3$ and $Z_R$ so that they satisfy the isomorphism \alg{ C_1\cong E_B, C_2\cong F_A, C_3\cong Q, Z_R\cong M. \laeq{yukue} } In addition, we introduce systems $C''$, $Z''$, $A_1$ and $B_2$ such that \alg{ C''\cong C, Z''\cong Z, A_1\cong E_A, B_2\cong F_B. } We consider the purifying systems to be $D_A\equiv Z_R\hat{C}''\hat{A}A_1$ and $D_B\equiv Z_R\hat{C}''\hat{B}B_2$, where $\hat{C}''=C''Z''$.
\subsubsection{Explicit Forms of The Purifications}
To obtain explicit forms of the purifications $\Psi_{\sigma,1}$ and $\Psi_{\sigma,2}$, we define a state $\Psi_{\sigma}$ by \alg{ \ket{\Psi_{\sigma}}^{\hat{A}\hat{B}\hat{C}''\hat{R}Z} &:=\sum_{x,y,z}\sqrt{p_{xyz}} \ket{x}^X\ket{y}^Y\ket{\sigma(z)}^Z\ket{z}^{Z''} \nonumber \\ &\quad\quad\quad \otimes\ket{\psi_{xyz}}^{ABC''R}\ket{xyz}^T\!. \!\! \laeq{dfnPsisigma} } From the definition \req{sourcestate} of the source state $\Psi_s$, \req{psourcestate} of the purified source state $\Psi$ and \req{dfnPsisigma} of the state $\Psi_\sigma$, it is straightforward to verify that the states are related simply by \alg{ \ket{\Psi_{\sigma}}^{\hat{A}\hat{B}\hat{C}''\hat{R}Z} = G_\sigma^Z\circ P^{Z''\rightarrow Z''Z} \ket{\Psi}^{\hat{A}\hat{B}\hat{C}''\hat{R}} \!\!\! \laeq{psisigmaGPpsi} } and \alg{ {\rm Tr}_Z\otimes\ca{C}^T(\Psi_{\sigma}^{\hat{A}\hat{B}\hat{C}''\hat{R}Z}) & = \Psi_s^{\hat{A}\hat{B}\hat{C}''\hat{R}} \\ & = \ca{C}^T(\Psi^{\hat{A}\hat{B}\hat{C}''\hat{R}}). \laeq{btf} } Here, Let $P^{Z''\rightarrow Z''Z}$ be a linear isometry defined by \alg{ P^{Z''\rightarrow Z''Z} := \sum_z\ket{z}^{Z''}\ket{z}^{Z}\bra{z}^{Z''}, } and $\ca{C}$ be the completely dephasing operation on $T$ with respect to the basis $\{\ket{xyz}\}_{x,y,z}$. The state $\Psi_{\sigma}$ is simply represented as \alg{ \ket{\Psi_{\sigma}}^{\hat{A}\hat{B}\hat{C}''\hat{R}Z} = \sum_{z}\sqrt{p_{z}} \ket{\sigma(z)}^Z \ket{\psi_z}^{\hat{A}\hat{B}\hat{C}''RX'Y'}\ket{z}^{Z'} \!\!.\! } where \alg{ & \ket{\psi_z}^{\hat{A}\hat{B}\hat{C}''RX'Y'} \nonumber\\ &\quad:=\sum_{x,y}\sqrt{\frac{p_{xyz}}{p_z}} \ket{x}^X\ket{y}^Y\ket{z}^{Z''} \nonumber\\ &\quad\quad\quad\quad \otimes\ket{\psi_{xyz}}^{ABC''R}\ket{x}^{X'}\ket{y}^{Y'}. } It is convenient to note that \alg{ \psi_z^{\hat{A}\hat{B}RX'Y'}=\sum_{x,y}\sqrt{\frac{p_{xyz}}{p_z}} \:\psi_{xyz}^{ABR}\otimes\proj{x}^{X'}\!\otimes\proj{y}^{Y'}. }
Due to \req{psourcestate} and \req{SEK2}, the averaged state in \req{derby1} is calculated to be \begin{align} \Psi_{\rm av}^{\hat{C}\hat{B}\hat{R}} \!=\!\sum_{z}p_z \proj{z}^{Z} \!\otimes\! \pi^{C} \!\otimes\! \psi_{z}^{\hat{B}RX'Y'}\!\otimes\!\proj{z}^{Z'}, \! \end{align} where $p_z=\sum_{x,y}p_{xyz}$. It follows that \begin{align} & {\rm Tr}_{Z_RC_2 C_3} \circ \ca{G}_\sigma^Z ( \Psi_{\rm av}^{\hat{C}\hat{B}\hat{R}} ) \nonumber\\ &\quad =\sum_zp_z {\rm Tr}_{Z_R}[\proj{\sigma(z)}] \otimes\pi^{C_1} \nonumber\\ &\quad\quad\quad\quad\quad\quad \otimes\psi_{z}^{\hat{B}RX'Y'}\otimes\proj{z}^{Z'}. \end{align} Thus, a purification $\Psi_{\sigma,1}$ of this state is given by \alg{ \!\! \ket{\Psi_{\sigma,1}}^{\hat{A}\hat{B}\hat{C}''\hat{R}A_1C_1Z}=\ket{\Psi_{\sigma}}^{\hat{A}\hat{B}\hat{C}''\hat{R}Z}\ket{\phi_1}^{A_1C_1}, \!\! } where $\phi_1$ is the maximally entangled state of Schmidt rank $d_{C_1}$. In the same way, the purification $\Psi_{\sigma,2}$ is given by \alg{ \!\!\!\! \ket{\Psi_{\sigma,2}}^{\hat{A}\hat{B}\hat{C}''\hat{R}B_2C_2Z} =\ket{\Psi_{\sigma}}^{\hat{A}\hat{B}\hat{C}''\hat{R}Z}\ket{\phi_2}^{B_2C_2}\!,\!\! \laeq{gb} } with $\phi_2$ being the maximally entangled state of Schmidt rank $d_{C_2}$. Substituting these to \req{200a-11} and \req{200a-22}, we arrive at \alg{ &
\left\|
\Psi^{\hat{C}\hat{A}\hat{B}\hat{R}} - (\ca{G}_\sigma^Z \circ \ca{U}^{\hat{C}})^\dagger\circ\ca{V}(\Psi_\sigma^{\hat{A}\hat{B}\hat{C}''\hat{R}}\otimes\phi_1^{A_1C_1})
\right\|_1 \nonumber \\ &\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad \leq 2\sqrt{12\epsilon+6\delta}, \laeq{200a-3-2-1} \\ &
\left\| \ca{W} \circ \ca{G}_\sigma^Z \circ \ca{U}^{\hat{C}} ( \Psi^{\hat{C}\hat{A}\hat{B}\hat{R}} ) - \Psi_{\sigma}^{\hat{A}\hat{B}\hat{C}''\hat{R}Z}\otimes\phi_2^{B_2C_2}
\right\|_1 \nonumber\\ &\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad \leq 2\sqrt{12\epsilon+6\delta}. \laeq{200a-3-2-2} }
Inequality \req{200a-3-2-1} implies that the operation $(\ca{G}_\sigma^Z \circ \ca{U}^{\hat{C}})^\dagger\circ\ca{V}$ is a reverse Shannon protocol for the state $\Psi^{\hat{C}\hat{A}(\hat{B}\hat{R})} $, up to the action of a linear isometry $G_\sigma^Z\circ P^{Z''\rightarrow Z''Z}$ by which $\Psi_\sigma$ is obtained from $\Psi$ as \req{psisigmaGPpsi}. Similarly, Inequality \req{200a-3-2-2} implies that the operation $\ca{W}\circ\ca{G}_\sigma^Z \circ \ca{U}^{\hat{C}}$ is a Slepian-Wolf protocol for the state $\Psi^{\hat{C}\hat{B}(\hat{A}\hat{R})} $, up to the action of $G_\sigma^Z\circ P^{Z''\rightarrow Z''Z}$ (see Figure \ref{fig:stateredistributiondirect} in page \pageref{fig:stateredistributiondirect}). We combine the two protocols to cancel out $(\ca{G}_\sigma^Z \circ \ca{U}^{\hat{C}})^\dagger$ and $\ca{G}_\sigma^Z \circ \ca{U}^{\hat{C}}$. Due to the triangle inequality, it follows from \req{200a-3-2-1} and \req{200a-3-2-2} that \begin{eqnarray}
\left\| \ca{W} \circ \ca{V}
( \Psi_\sigma^{\hat{A}\hat{B}\hat{C}''\hat{R}}\otimes\phi_1^{A_1C_1} ) - \Psi_{\sigma}^{\hat{A}\hat{B}\hat{C}''\hat{R}Z}\otimes\phi_2^{B_2C_2}
\right\|_1
\nonumber\\ \leq 4\sqrt{12\epsilon+6\delta}. \quad\quad\quad \laeq{200a-3} \end{eqnarray}
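In more detail, writing $\ca{K}:=\ca{G}_\sigma^Z \circ \ca{U}^{\hat{C}}$ and suppressing the system labels of \req{200a-3-2-1} and \req{200a-3-2-2}, we have $\ca{W}\circ\ca{V}=\ca{W}\circ\ca{K}\circ\ca{K}^\dagger\circ\ca{V}$, and hence \alg{ & \left\| \ca{W} \circ \ca{V} ( \Psi_\sigma\otimes\phi_1 ) - \Psi_{\sigma}\otimes\phi_2 \right\|_1 \nonumber\\ &\leq \left\| \ca{W}\circ\ca{K}\bigl( \ca{K}^\dagger\circ\ca{V}( \Psi_\sigma\otimes\phi_1 ) - \Psi \bigr) \right\|_1 \nonumber\\ &\quad + \left\| \ca{W}\circ\ca{K}( \Psi ) - \Psi_{\sigma}\otimes\phi_2 \right\|_1 \nonumber\\ &\leq \left\| \ca{K}^\dagger\circ\ca{V}( \Psi_\sigma\otimes\phi_1 ) - \Psi \right\|_1 +2\sqrt{12\epsilon+6\delta} \nonumber\\ &\leq 4\sqrt{12\epsilon+6\delta}, } where the first inequality is the triangle inequality, the second uses the monotonicity of the trace norm under the trace-preserving CP map $\ca{W}\circ\ca{K}$ together with \req{200a-3-2-2}, and the last one is \req{200a-3-2-1}.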
\subsubsection{Construction of The Encoding and Decoding Operations}
Define a partial isometry \alg{ & \!\!V_\sigma^{\hat{A}\hat{C}''A_1\rightarrow \hat{A}ZC_2C_3} \nonumber\\ &\!\!\; := V^{Z_RA_1\hat{A}\hat{C}''\rightarrow Z_RC_2C_3\hat{A}} \circ G_\sigma^Z\circ P^{Z''\rightarrow Z''Z}. \laeq{dfnVsigma} } Applying the map ${\rm Tr}_Z\otimes\ca{C}^T$ to Inequality \req{200a-3}, and using \req{psisigmaGPpsi} and \req{btf}, it follows that \alg{ &
\left\| {\rm Tr}_Z\circ \ca{W} \circ \ca{V}_\sigma ( \Psi_s^{\hat{A}\hat{B}\hat{C}''\hat{R}}\otimes\phi_1^{A_1C_1})
\right.
\nonumber\\
&
\quad\quad\quad\quad\quad
\left. - \Psi_s^{\hat{A}\hat{B}\hat{C}''\hat{R}}\otimes\phi_2^{B_2C_2}
\right\|_1 \leq 4\sqrt{12\epsilon+6\delta}. \laeq{200a-4} } We construct a protocol for state redistribution as follows: In the first step, the sender performs the following encoding operation: \alg{ & \ca{E}^{\hat{A}\hat{C}''A_1\rightarrow \hat{A}Z_RC_2C_3} \nonumber\\ &\quad = {\rm Tr}_{Z_L}\circ \ca{V}_\sigma^{\hat{A}\hat{C}''A_1\rightarrow \hat{A}ZC_2C_3} \circ \ca{C}^{Z''}, \laeq{dfnVsignaenc} } where $\ca{C}^{Z''}$ is the completely dephasing operation on $Z''$ with respect to the basis $\{\ket{z_L}\ket{z_R}\}_{z_L,z_R}$. The sender then sends the classical system $Z_R\cong M$ and the quantum system $C_3\cong Q$ to the receiver, who performs the decoding operation defined by \alg{ & \ca{D}^{Z_RC_1C_3\hat{B}\rightarrow B_2\hat{B}\hat{C}''} \nonumber\\ &\quad = {\rm Tr}_{Z_R}\circ \ca{W}^{Z_RC_1C_3\hat{B}\rightarrow Z_RB_2\hat{B}\hat{C}''}. } Noting that ${\rm Tr}_Z={\rm Tr}_{Z_L}\otimes{\rm Tr}_{Z_R}$, we obtain from \req{200a-4} that \begin{eqnarray}
\left\| \ca{D} \circ \ca{E}
(\Psi_s^{\hat{A}\hat{B}\hat{C}''\hat{R}}\otimes\phi_1^{A_1C_1}) - \Psi_s^{\hat{A}\hat{B}\hat{C}''\hat{R}}\otimes\phi_2^{B_2C_2}
\right\|_1 \nonumber\\ \leq 4\sqrt{12\epsilon+6\delta}. \quad\quad\quad \laeq{200a-4-2} \end{eqnarray} From \req{dfnVsigma} and \req{dfnVsignaenc}, it is straightforward to verify that $\ca{E}(\tau)$ is diagonal in $Z_R$ for any input state $\tau$. Thus, the pair $(\ca{E},\ca{D})$ is a state redistribution protocol for the state $\Psi_s$ within the error $4\sqrt{12\epsilon+6\delta}$.
\subsubsection{Evaluation of Entropies} \lsec{evalent}
We analyze conditions on the size of systems $C_1$ and $C_2$, in order that inequalities \req{derby1} and \req{derby2} are satisfied. We use the partial bi-decoupling theorem (\rLmm{tus2}) under the correspondence \req{corresp}, which reads \alg{ S_1=\hat{A}, \quad S_2=\hat{B}, \quad S_3=RX'Y'. \laeq{corresp2} } It follows that inequalities \req{derby1} and \req{derby2} are satisfied if it holds that \alg{ \log{\frac{d_{C_1}^2}{d_{Z_R}d_{C}}}
&\leq H_{\rm min}^\epsilon(\hat{C}|\hat{B}\hat{R})_{\Psi}+\log{\frac{\delta^2}{2}}, \laeq{brd1}\\ \log{\frac{d_{C_1}^2}{d_{C}}}
&\leq H_{\rm min}^\epsilon(\hat{C}|\hat{B}\hat{R})_{\ca{C}(\Psi)}+\log{\delta^2}, \laeq{brd2}\\ \log{\frac{d_{C_2}^2}{d_{Z_R}d_{C}}}
&\leq H_{\rm min}^\epsilon(\hat{C}|\hat{A}\hat{R})_{\Psi}+\log{\frac{\delta^2}{2}}, \laeq{brd3}\\ \log{\frac{d_{C_2}^2}{d_{C}}}
&\leq H_{\rm min}^\epsilon(\hat{C}|\hat{A}\hat{R})_{\ca{C}(\Psi)}+\log{\delta^2}. \laeq{brd4} } Using the duality of the smooth conditional entropy (\rLmm{duality}), and noting that $\Psi^{\hat{A}\hat{B}\hat{C}}=\Psi_s^{\hat{A}\hat{B}\hat{C}}$, the min entropies in the first and the third inequalities are calculated to be \alg{
H_{\rm min}^\epsilon(\hat{C}|\hat{B}\hat{R})_{\Psi} &= -
H_{\rm max}^\epsilon(\hat{C}|\hat{A})_{\Psi} \\ & = -
H_{\rm max}^\epsilon(CZ|AX)_{\Psi_s}, \\
H_{\rm min}^\epsilon(\hat{C}|\hat{A}\hat{R})_{\Psi} &= -
H_{\rm max}^\epsilon(\hat{C}|\hat{B})_{\Psi} \\ &= -
H_{\rm max}^\epsilon(CZ|BY)_{\Psi_s}. } Similarly, due to \rLmm{condminCQCQ} and \rLmm{SE11d} in \rApp{propSmEn}, and noting that $\ca{C}(\Psi)=\Psi_s$ because of \req{sourcestate} and \req{psourcestate}, we have \alg{ &
H_{\rm min}^\epsilon(\hat{C}|\hat{B}\hat{R})_{\ca{C}(\Psi)} \nonumber\\ & =
H_{\rm min}^\epsilon(C|BRXYZ)_{\ca{C}(\Psi)} \\ & =
-H_{\rm max}^\epsilon(C|AXYZ)_{\Psi_s}
}
and
\alg{
&
H_{\rm min}^\epsilon(\hat{C}|\hat{A}\hat{R})_{\ca{C}(\Psi)} \nonumber\\ & =
H_{\rm min}^\epsilon(C|ARXYZ)_{\ca{C}(\Psi)} \\& =
-H_{\rm max}^\epsilon(C|BXYZ)_{\Psi_s}. } In addition, the isomorphism \req{yukue} implies \alg{ & \log{d_{C_1}}=e+e_0, \; \log{d_{C_2}}=e_0, \\ & \; \log{d_{C_3}}=q, \; \log{d_{Z_R}}=c. } Substituting these relations to \req{brd1}-\req{brd4}, and noting that $d_C=d_{C_1}d_{C_2}d_{C_3}$, we arrive at \alg{ c+q-e
&\geq H_{\rm max}^\epsilon(CZ|AX)_{\Psi_s}-\log{\frac{\delta^2}{2}}, \laeq{direct1'} \\ q-e
&\geq H_{\rm max}^\epsilon(C|AXYZ)_{\Psi_s}-\log{\delta^2}, \\ c+q+e
&\geq H_{\rm max}^\epsilon(CZ|BY)_{\Psi_s}-\log{\frac{\delta^2}{2}}, \\ q+e
&\geq H_{\rm max}^\epsilon(C|BXYZ)_{\Psi_s}-\log{\delta^2} } and $ q+e+2e_0 =\log{d_C} $. Combining these all together, we obtain the set of Ineqs.~\req{neon1}-\req{neon5} as a sufficient condition for the tuple $(c,q,e)$ to be achievable within the error $4\sqrt{12\epsilon+6\delta}$.
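For concreteness, the first of these conditions is obtained from \req{brd1} as follows: using $\log{d_{C_1}}=e+e_0$, $\log{d_{Z_R}}=c$ and $\log{d_C}=q+e+2e_0$, we have \alg{ \log{\frac{d_{C_1}^2}{d_{Z_R}d_{C}}} &= 2(e+e_0)-c-(q+e+2e_0) \nonumber\\ &= e-c-q, } so that, together with $H_{\rm min}^\epsilon(\hat{C}|\hat{B}\hat{R})_{\Psi}=-H_{\rm max}^\epsilon(CZ|AX)_{\Psi_s}$ derived above, Inequality \req{brd1} is equivalent to $c+q-e\geq H_{\rm max}^\epsilon(CZ|AX)_{\Psi_s}-\log{\frac{\delta^2}{2}}$, which is \req{direct1'}; the remaining conditions follow in the same way.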
$\blacksquare$
\subsection{Proof of \rPrp{direct2} from \rPrp{direct}} \lsec{prfthmdirect}
We prove \rPrp{direct2} based on \rPrp{direct} by (i) modifying the first inequality \req{neon1}, and (ii) extending the rate region by incorporating teleportation and dense coding.
\subsubsection{Modification of Inequalities \req{neon1} and \req{matahi}}
We argue that the smooth conditional max entropy on the R.H.S. of Inequality \req{neon1} is modified to be $H_{\rm max}^\epsilon(C|AXZ)_{\Psi_s}$. Consider a ``modified'' redistribution protocol as follows: At the beginning of the protocol, the sender prepares a copy of $Z$, which we denote by $\tilde{Z}$. The sender then uses $X\tilde{Z}$ as the classical part of the side information, instead of $X$ alone, and applies the protocol presented in \rSec{consED}. The smooth max entropy corresponding to the first term in \req{neon1} is then given by (see \rLmm{condmaxCQCQ}) \alg{
H_{\rm max}^\epsilon(CZ|AX\tilde{Z})_{\Psi_s}=H_{\rm max}^\epsilon(C|AXZ)_{\Psi_s}. \laeq{dfnH1} }
For the same reason, the term $H_{\rm max}^\epsilon(Z|AX)_{\Psi_s}$ in the condition \req{matahi} is modified to be $H_{\rm max}^\epsilon(Z|AX\tilde{Z})_{\Psi_s}$, which is no greater than zero (see \rLmm{onedimHminmax} and \rLmm{condmaxCQCQ}). It should be noted that the entropies in the other three inequalities are unchanged by this modification.
\subsubsection{Extension of The Rate Region by Teleportation and Dense Coding}
To complete the proof of \rPrp{direct2}, we extend the achievable rate region given in \rPrp{direct} by incorporating teleportation and dense coding. More precisely, we apply the following lemma, which follows from teleportation and dense coding (see the next subsection for a proof):
\blmm{TPDCextension} Suppose that a rate tuple $(\hat{c},\hat{q},\hat{e},\hat{e}_0)$ is achievable within the error $\delta$. Then, for any $\lambda,\mu\geq 0$ and $e_0\geq0$ such that \alg{ -\frac{\hat{c}}{2}\leq\lambda-\mu \leq\hat{q}, \quad \hat{e}_0 \leq e_0, } the tuple $(c,q,e,e_0):=(\hat{c}+2\lambda-2\mu,\hat{q}-\lambda+\mu,\hat{e}+\lambda+\mu, e_0)$ is also achievable within the error $\delta$. \end{lmm}
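The resource bookkeeping in \rLmm{TPDCextension} is elementary arithmetic; the following Python snippet (an illustrative sketch of that arithmetic only, not of the underlying protocols) returns the extended rate tuple and checks the stated constraints.
\begin{verbatim}
def extend_rate_tuple(c_hat, q_hat, e_hat, e0_hat, lam, mu, e0):
    # Combine teleportation (lam) and dense coding (mu) with an achievable
    # tuple (c_hat, q_hat, e_hat, e0_hat), as in the lemma above.
    assert lam >= 0 and mu >= 0 and e0_hat <= e0
    assert -c_hat / 2 <= lam - mu <= q_hat
    return (c_hat + 2*lam - 2*mu, q_hat - lam + mu, e_hat + lam + mu, e0)
\end{verbatim}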
\noindent {\bf Proof of \rPrp{direct2}:} Due to \rPrp{direct} and \rLmm{TPDCextension}, a tuple $(c,q,e,e_0)$ is achievable within the error $\delta$ if there exists $\lambda,\mu\geq 0$ and $\hat{e}_0\leq e_0$ such that the tuple \alg{ & (\hat{c},\hat{q},\hat{e},\hat{e}_0) := \nonumber\\ &\; (c-2\lambda+2\mu,q+\lambda-\mu,e-\lambda-\mu,\hat{e}_0) \!\! \laeq{cqehat} } satisfies \alg{ \hat{c}+\hat{q}-\hat{e} &\geq H_1, \\ \hat{q}-\hat{e} &\geq H_2, \\ \hat{c}+\hat{q}+\hat{e} &\geq H_3, \\ \hat{q}+\hat{e} &\geq H_4, \\ \hat{q}+\hat{e}+2\hat{e}_0 &= \log{d_C} } and $\hat{c},\hat{q}\geq0$. Here, we have denoted the R.H.S.s of Inequalities \req{neon2}-\req{neon4} by $H_2$, $H_3$ and $H_4$, respectively, and that of \req{dfnH1} by $H_1$. Substituting \req{cqehat} to these inequalities yields \alg{ c+q-e &\geq H_1-2\mu, \laeq{neon1-4} \\ q-e &\geq H_2-2\lambda, \laeq{neon2-4} \\ c+q+e &\geq H_3+2\lambda, \laeq{neon3-4} \\ q+e &\geq H_4+2\mu, \laeq{neon4-4} \\ q+e+2\hat{e}_0 &= \log{d_C}+2\mu \laeq{neon4-5} } and \alg{ c-2\lambda+2\mu & \geq0, \laeq{mois1}\\ q+\lambda-\mu &\geq0. \laeq{mois2} } Thus, it suffices to prove that, for any tuple $(c,q,e,e_0)$ satisfying Inequalities \req{neon00}-\req{neon05}, there exist $\hat{e}_0\leq e_0$ and $\lambda,\mu\geq 0$ such that the above inequalities hold. This is proved by noting that the inequality \req{neon00} is expressed as \alg{ c+q+e-H_3 & \geq \max\{H_2,H_2'\}-q+e, \laeq{dirprp1}\\ q+e-H_4 & \geq H_1-c-q+e, \laeq{dirprp2} } where \alg{
H_2':=H_{\rm min}^\epsilon(C|AXYZ)_{\Psi_s}-\log{\delta^2}. } The L.H.S. of \req{dirprp1} and \req{dirprp2} are nonnegative because of Inequalities \req{neon03} and \req{neon04}. Thus, there exists $\lambda,\mu\geq0$ such that $2\lambda$ and $2\mu$ are in between both sides in \req{dirprp1} and \req{dirprp2}, respectively. This implies \req{neon1-4}-\req{neon4-4}. We particularly choose \alg{ \mu=\frac{1}{2}(q+e-H_4), \quad \hat{e}_0= \frac{1}{2}(\log{d_C}-H_4). } A simple calculation leads to \req{neon4-5}. Noting that $H_3\geq H_4$ by the data processing inequality, it follows from \req{neon3-4} that \alg{ c-2\lambda\geq H_3-q-e \geq H_4-q-e=-2\mu, } which implies \req{mois1}. Inequality \req{mois2} is obtained by combining \req{neon4-4} with $ 2\lambda\geq \max\{H_2,H_2'\}-q+e $. Note that \alg{ & H_2'+H_4 \nonumber\\ & =
H_{\rm min}^\epsilon(C|AXYZ)_{\Psi_s} \nonumber\\ &\quad\quad +
H_{\rm max}^\epsilon(C|BXYZ)_{\Psi_s} -2\log{\delta^2} \\ & =
H_{\rm min}^\epsilon(C|AXYZ)_{\Psi_s} \nonumber\\ &\quad\quad -
H_{\rm min}^\epsilon(C|ARXYZ)_{\Psi_s} -2\log{\delta^2} \\ & \geq0, } where the third line follows from \rLmm{SE11d}. This completes the proof of \rPrp{direct2}.
$\blacksquare$
\subsubsection{Proof of \rLmm{TPDCextension} (see also Section IV in \cite{ming08})}
We first consider the case where $\lambda-\mu\geq0$, and prove that the tuple $(c,q,e,e_0,\delta):=(\hat{c}+2\lambda-2\mu,\hat{q}-\lambda+\mu,\hat{e}+\lambda+\mu,\hat{e}_0,\delta)$ is achievable if a rate tuple $(\hat{c},\hat{q},\hat{e},\hat{e}_0,\delta)$ is achievable and $\hat{c},\hat{q}\geq0$. Suppose that there exists a protocol $(\ca{E},\ca{D})$ with the classical communication cost $\hat{c}$, the quantum communication cost $\hat{q}$, the net entanglement cost $\hat{e}$ and the catalytic entanglement cost $\hat{e}_0$ that achieves the state redistribution of the state $\Psi_s$ within the error $\delta$. We construct a protocol $(\ca{E}',\ca{D}')$ such that the $\lambda-\mu$ qubits of quantum communication in the protocol $(\ca{E},\ca{D})$ are simulated by quantum teleportation, consuming $\lambda-\mu$ ebits of additional shared entanglement and $2\lambda-2\mu$ bits of classical communication. The net costs of the resources are given by $\hat{c}+2\lambda-2\mu$, $\hat{q}-\lambda+\mu$, $\hat{e}+\lambda-\mu$ and the catalytic entanglement cost is $\hat{e}_0$, which implies achievability of the tuple $(\hat{c}+2\lambda-2\mu,\hat{q}-\lambda+\mu,\hat{e}+\lambda+\mu,\hat{e}_0,\delta)$, since $\hat{e}+\lambda-\mu\leq\hat{e}+\lambda+\mu$ and the surplus $2\mu$ ebits of shared entanglement can simply be discarded.
Second, we consider the case where $\lambda-\mu\leq0$.
Suppose that there exists a protocol $(\ca{E},\ca{D})$ with the classical communication cost $\hat{c}$, the quantum communication cost $\hat{q}$ and the net entanglement cost $\hat{e}$ that achieves the state redistribution of the state $\Psi_s$ within the error $\delta$. We construct a protocol $(\ca{E}'',\ca{D}'')$ such that the $2\mu-2\lambda$ bits of classical communication in $(\ca{E},\ca{D})$ are simulated by dense coding, consuming $\mu-\lambda$ ebits of shared entanglement and $\mu-\lambda$ qubits of quantum communication. The net costs of the resources are given by $\hat{c}-2\mu+2\lambda$, $\hat{q}+\mu-\lambda$, $\hat{e}+\mu-\lambda$ and the catalytic entanglement cost is $\hat{e}_0$, which implies achievability of the tuple $(\hat{c}-2\mu+2\lambda,\hat{q}+\mu-\lambda,\hat{e}+\mu+\lambda,\hat{e}_0,\delta)$, since the surplus $2\lambda$ ebits of shared entanglement can again simply be discarded.
$\blacksquare$
\subsection{Proof of \rThm{direct} from \rPrp{direct2}} \lsec{seru}
The achievability for the case of $d_C=1$ immediately follows from the condition \req{mongen} in \rPrp{direct2}. Thus, we only consider the case where $d_C\geq2$.
Let $\Pi$ be a projection onto a subspace $\ca{H}^{C_\Pi}\subseteq\ca{H}^C$ such that ${\rm dim}[\ca{H}^{C_\Pi}]=2^{H_{\rm max'}^{\epsilon^2/8}(C)_{\Psi_s}}$ and that ${\rm Tr}[\Pi\Psi_s^C]\geq1-\epsilon^2/8$. Such a projection exists due to the definition of $H_{\rm max'}$ given by \req{dfnHrank}. Consider the ``modified'' source state defined by \alg{ \Psi_{s,\Pi}^{\hat{A}\hat{B}\hat{C}_\Pi\hat{R}} := \Pi(\Psi_s^{\hat{A}\hat{B}\hat{C}\hat{R}})\Pi. } From the gentle measurement lemma (see \rLmm{gentlemeasurement} in \rApp{extUhlmann}), it holds that \alg{ P(\Psi_s,\Psi_{s,\Pi}) \leq \frac{\epsilon}{2}, \quad
\|\Psi_s-\Psi_{s,\Pi}\|_1 \leq \frac{\epsilon}{\sqrt{2}}. \laeq{GMPsis} } Thus, due to the definitions of the smooth entropies \req{dfnmine} and \req{dfnmaxe}, we have \alg{
H_{\rm max}^{\epsilon/2}(CZ|BY)_{\Psi_s} &
\geq H_{\rm max}^\epsilon(C_\Pi Z|BY)_{\Psi_{s,\Pi}} \\ &
\geq H_{\rm max}^{3\epsilon/2}(CZ|BY)_{\Psi_s}, \\
H_{\rm min}^{3\epsilon/2}(C|AXYZ)_{\Psi_s} &
\geq H_{\rm min}^\epsilon(C_\Pi |AXYZ)_{\Psi_{s,\Pi}} } and so forth.
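One way to realize a projection with the properties required of $\Pi$ above numerically is to keep the dominant eigenvectors of $\Psi_s^C$ until their total weight reaches $1-\epsilon^2/8$. The following NumPy sketch is an illustration of this idea only (it constructs some projector with the required weight; the rank is governed in general by the definition \req{dfnHrank}).
\begin{verbatim}
import numpy as np

def dominant_projector(rho_C, eps):
    # Projector onto the span of the largest eigenvectors of rho_C,
    # keeping just enough of them so that Tr[P rho_C] >= 1 - eps**2 / 8.
    vals, vecs = np.linalg.eigh(rho_C)
    order = np.argsort(vals)[::-1]  # largest eigenvalues first
    weight, keep = 0.0, []
    for i in order:
        keep.append(i)
        weight += vals[i]
        if weight >= 1 - eps**2 / 8:
            break
    V = vecs[:, keep]
    return V @ V.conj().T
\end{verbatim}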
Suppose that the tuple $(c,q,e,e_0)$ satisfies Inequalities \req{neon00t}-\req{neon05t} in \rThm{direct}. It follows that \alg{ c+2q &\geq \max\{\tilde{H}_{I}^{(\epsilon,\epsilon)},\tilde{H}_{I\! I}^{(\epsilon)}\}_{\Psi_{s,\Pi}}-\log{(\delta^4/2)}, \laeq{neon00mod} \\ c+q+e
&\geq H_{\rm max}^\epsilon(C_\Pi Z|BY)_{\Psi_{s,\Pi}}-\log{(\delta^2/2)}, \laeq{neon03mod} \\ q+e
&\geq H_{\rm max}^\epsilon(C_\Pi |BXYZ)_{\Psi_{s,\Pi}}-\log{\delta^2}, \laeq{neon04mod} \\ e_0 &\geq
\frac{1}{2}(\log{d_{C_\Pi}}-H_{\rm max}^\epsilon(C_\Pi|BXYZ)_{\Psi_{s,\Pi}}) \nonumber\\ &\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad+\log{\delta}. \laeq{neon05mod} } Thus, due to \rPrp{direct2}, the tuple $(c,q,e,e_0)$ is achievable within an error $4\sqrt{12\epsilon+6\delta}$ for the state $\Psi_{s,\Pi}$. That is, there exists a pair of an encoding CPTP map $\ca{E}_\Pi^{\hat{A}\hat{C}_\Pi E_A\rightarrow \hat{A}QMF_A}$ and a decoding CPTP map $\ca{D}_\Pi^{\hat{B}QME_B\rightarrow \hat{B}\hat{C}_\Pi F_B}$, such that \alg{ &
\left\| \ca{D}_\Pi\circ\ca{E}_\Pi(\Psi_{s,\Pi}^{\hat{A}\hat{B}\hat{C}_\Pi\hat{R}}\otimes\Phi_{2^{e+e_0}}^{E_AE_B}) \right. \nonumber\\ &\quad \left.-\Psi_{s,\Pi}^{\hat{A}\hat{B}\hat{C}_\Pi\hat{R}}\otimes\Phi_{2^{e_0}}^{F_AF_B}
\right\|_1 \leq 4\sqrt{12\epsilon+6\delta}. \laeq{gcodeforpsipi} } Define an encoding map $\ca{E}^{\hat{A}\hat{C} E_A\rightarrow \hat{A}QMF_A}$ and a decoding map $\ca{D}^{\hat{B}QME_B\rightarrow \hat{B}\hat{C} F_B}$ for the state $\Psi_s$ by \alg{ & \ca{E}^{\hat{A}\hat{C} E_A\rightarrow \hat{A}QMF_A} (\tau) \nonumber\\ &\quad = \ca{E}_\Pi(\Pi^C\tau\Pi^C) +{\rm Tr}[(I^C-\Pi^C)\tau]\xi_0, } where $\xi_0$ is an arbitrary fixed state on $\hat{A}QMF_A$, and $\ca{D}=\ca{D}_\Pi$. Note that the system $C_\Pi$ is naturally embedded into $C$. By the triangle inequality, we have \alg{ &
\left\| \ca{D}\circ\ca{E}(\Psi_{s}^{\hat{A}\hat{B}\hat{C}\hat{R}}\otimes\Phi_{2^{e+e_0}}^{E_AE_B})-\Psi_{s}^{\hat{A}\hat{B}\hat{C}\hat{R}}\otimes\Phi_{2^{e_0}}^{F_AF_B}
\right\|_1 \nonumber\\ & \leq
\left\| \ca{D}\circ\ca{E}(\Psi_{s}^{\hat{A}\hat{B}\hat{C}\hat{R}}\otimes\Phi_{2^{e+e_0}}^{E_AE_B}) \right. \nonumber\\ & \quad\quad\quad\quad \left.-\ca{D}\circ\ca{E}(\Psi_{s,\Pi}^{\hat{A}\hat{B}\hat{C}\hat{R}}\otimes\Phi_{2^{e+e_0}}^{E_AE_B})
\right\|_1 \nonumber\\ &\quad +
\left\| \ca{D}\circ\ca{E}(\Psi_{s,\Pi}^{\hat{A}\hat{B}\hat{C}\hat{R}}\otimes\Phi_{2^{e+e_0}}^{E_AE_B})\right. \nonumber\\ &\quad\quad\quad\quad\quad\quad\quad\quad\left. -\Psi_{s,\Pi}^{\hat{A}\hat{B}\hat{C}\hat{R}}\otimes\Phi_{2^{e_0}}^{F_AF_B}
\right\|_1 \nonumber\\ & \quad +
\left\| \Psi_{s,\Pi}^{\hat{A}\hat{B}\hat{C}\hat{R}}\otimes\Phi_{2^{e_0}}^{F_AF_B}-\Psi_{s}^{\hat{A}\hat{B}\hat{C}\hat{R}}\otimes\Phi_{2^{e_0}}^{F_AF_B}
\right\|_1 \\ & \leq
\left\| \ca{D}_\Pi\circ\ca{E}_\Pi(\Psi_{s,\Pi}^{\hat{A}\hat{B}\hat{C}\hat{R}}\otimes\Phi_{2^{e+e_0}}^{E_AE_B})\right.\nonumber\\ & \quad\quad\quad\quad\quad\quad\quad\quad \left. -\Psi_{s,\Pi}^{\hat{A}\hat{B}\hat{C}\hat{R}}\otimes\Phi_{2^{e_0}}^{F_AF_B}
\right\|_1 \nonumber\\ &\quad\quad + 2
\left\| \Psi_{s}^{\hat{A}\hat{B}\hat{C}\hat{R}}-\Psi_{s,\Pi}^{\hat{A}\hat{B}\hat{C}\hat{R}}
\right\|_1 \laeq{monn}\\ &\leq 4\sqrt{12\epsilon+6\delta} + \sqrt{2} \epsilon. } Here, Inequality \req{monn} follows from $\ca{D}_\Pi\circ\ca{E}_\Pi(\Psi_{s,\Pi}^{\hat{A}\hat{B}\hat{C}\hat{R}}\otimes\Phi_{2^{e+e_0}}^{E_AE_B})=\ca{D}\circ\ca{E}(\Psi_{s,\Pi}^{\hat{A}\hat{B}\hat{C}\hat{R}}\otimes\Phi_{2^{e+e_0}}^{E_AE_B})$ and the monotonicity of the trace distance, and the last line from \req{GMPsis} and \req{gcodeforpsipi}. Hence, the tuple $(c,q,e,e_0)$ is achievable within an error $4\sqrt{12\epsilon+6\delta}+\sqrt{2}\epsilon$ for the state $\Psi_s$, which completes the proof of \rThm{direct}.
$\blacksquare$
\begin{figure*}
\caption{ The purified picture of the task is depicted in the diagram. The black lines and the dashed lines represent classical and quantum systems, respectively. }
\label{fig:C}
\end{figure*}
\section{Proof of The Converse Part\\ (\rThm{converse} and \rLmm{propDelta})} \lsec{converse}
We prove the one-shot converse bound (\rThm{converse}). The proof proceeds as follows: First, we construct quantum states that describe the state transformation in a redistribution protocol in a ``purified picture''. Second, we prove four entropic inequalities that hold for those states. Finally, we prove that the four inequalities imply the three inequalities in \rThm{converse}, thereby completing the proof of the converse bound. We also analyze the properties of the function $\Delta^{(\epsilon,\delta)}$, and prove \rLmm{propDelta}.
\subsection{Construction of States}
Let $U_\ca{E}^{\hat{A}\hat{C}E_A\rightarrow \hat{A}QMF_A\hat{G}_A}$ and $U_\ca{D}^{\hat{B}QME_B\rightarrow \hat{B}\hat{C}F_B\hat{G}_B}$ be the Stinespring dilations of the encoding operation $\ca{E}$ and the decoding operation $\ca{D}$, respectively, i.e., \alg{ \ca{E}={\rm Tr}_{\hat{G}_A}\circ\ca{U}_\ca{E}, \quad \ca{D}={\rm Tr}_{\hat{G}_B}\circ\ca{U}_\ca{D}. } We define the ``purified'' source state $\ket{\Psi}$ by \alg{ & \ket{\Psi}^{ABCRXYZT}:=
\nonumber\\ & \quad\quad \sum_{x,y,z}\sqrt{p_{xyz}} \ket{x}^X\ket{y}^Y\ket{z}^Z \ket{\psi_{xyz}}^{ABCR}\ket{xyz}^T, \laeq{psourcestateeee} } and consider the states \alg{ & \ket{\tilde{\Psi}}^{\hat{A}QMF_A\hat{G}_A\hat{B}\hat{R}E_B}:=U_\ca{E}\ket{\Psi}^{\hat{A}\hat{B}\hat{C}\hat{R}}\ket{\Phi_{2^{e+e_0}}}^{E_AE_B}, \laeq{dfnOmega} \\ & \ket{\Psi_f}^{\hat{A}\hat{B}\hat{C}\hat{R}F_AF_B\hat{G}_A\hat{G}_B}:=U_\ca{D}\ket{\tilde{\Psi}}. \laeq{dfnPsif} } The state $\tilde{\Psi}$ is a purification of the state after the encoding operation, and $\Psi_f$ is the one after the decoding operation. See Figure \ref{fig:C} for the diagram.
Due to the relation \req{relTDPD} between the trace distance and the purified distance, the condition \req{qeec} implies that \alg{ P \left( \ca{C}^T(\Psi_f)^{\hat{A}\hat{B}\hat{C}\hat{R}F_AF_B},\Psi_s^{\hat{A}\hat{B}\hat{C}\hat{R}}\otimes\Phi_{2^{e_0}}^{F_AF_B} \right) \leq 2\sqrt{\delta}, \laeq{qeed} }
with $\ca{C}^T$ being the completely dephasing operation on $T$ with respect to the basis $\{\ket{xyz}\}$. Due to an extension of Uhlmann's theorem (see \rLmm{extUhlmann1} in \rApp{extUhlmann}), there exists a pure state $|\Gamma\rangle^{\hat{A}\hat{B}\hat{C}\hat{G}_A\hat{G}_B\hat{R}}$, which is represented in the form of \alg{
|\Gamma\rangle &=\sum_{x,y,z}\sqrt{p_{xyz}}\ket{x}^X\ket{y}^Y\ket{z}^Z \nonumber\\ &\quad\quad \ket{\psi_{xyz}}^{ABCR} \ket{\phi_{xyz}}^{\hat{G}_A\hat{G}_B}\ket{xyz}^T, \laeq{dfntipsi} } such that \begin{eqnarray} P\left( \Psi_f^{\hat{A}\hat{B}\hat{C}\hat{R}F_AF_B\hat{G}_A\hat{G}_B}, \Gamma^{\hat{A}\hat{B}\hat{C}\hat{G}_A\hat{G}_B\hat{R}}\otimes\Phi_{2^{e_0}}^{F_AF_B} \right) \nonumber\\ \leq 2\sqrt{\delta}. \quad\quad \laeq{qeef} \end{eqnarray} Using this state, we define \alg{ & \ket{\tilde{\Gamma}}^{\hat{A}QMF_A\hat{G}_A\hat{B}\hat{R}E_B} \nonumber\\ &\quad := U_{\ca{D}}^\dagger \ket{\Gamma}^{\hat{A}\hat{B}\hat{C}\hat{G}_A\hat{G}_B\hat{R}}\ket{\Phi_{2^{e_0}}}^{F_AF_B}. \laeq{dfntildePsi} } Due to the isometric invariance of the purified distance, it follows from \req{qeef} and \req{dfnPsif} that \alg{ P\left( \tilde{\Psi}^{\hat{A}QMF_A\hat{G}_A\hat{B}\hat{R}E_B}, \tilde{\Gamma}^{\hat{A}QMF_A\hat{G}_A\hat{B}\hat{R}E_B} \right) \leq 2\sqrt{\delta}. \laeq{qeef2} } Relations among the states defined as above are depicted in Figure \ref{fig:D}. Some useful properties of these states are presented in the following, and will be used in the proof of the converse part.
\begin{figure*}
\caption{ Relations among the states $\tilde{\Psi}$, $\tilde{\Gamma}$, $\Psi$, $\Gamma$ and $\Psi_s$ are depicted. }
\label{fig:D}
\end{figure*}
\subsubsection{Decomposition of $U_{\ca{E}}$ and $U_{\ca{D}}$}
Since $M$ is a classical system, we may, without loss of generality, assume that $U_\ca{E}$ and $U_\ca{D}$ are decomposed as \alg{ & U_\ca{E} = \sum_m \ket{m}^M\ket{m}^{M_A} \!\otimes v_{m}^{\hat{A}\hat{C}E_A\rightarrow \hat{A}QF_A\tilde{G}_A}, \laeq{dfnUE}\\ & U_{\ca{D}} = \sum_m\ket{m}^{M_B}\bra{m}^M\!\otimes u_m^{\hat{B}QE_B\rightarrow \hat{B}\hat{C}F_BG_B} \!.\! \laeq{dfnUD} } Here, $M_A$ and $M_B$ are quantum systems isomorphic to $M$ with the fixed orthonormal basis $\{\ket{m}\}_m$, the operators $u_m$ are linear isometries, and $\hat{G}_A$ and $\hat{G}_B$ are such that $\hat{G}_A\equiv \tilde{G}_AM_A$ and $\hat{G}_B\equiv G_BM_B$. It follows that \alg{ U_{\ca{D}}\circ U_{\ca{E}} = \sum_m \ket{m}^{M_A}\ket{m}^{M_B}\otimes(u_m\circ v_m). \laeq{dfnUDE} } Since $Z$ is a classical system, we may further assume that $v_m$ are decomposed as \alg{ v_{m} := \sum_z \ket{z}^{Z''}\bra{z}^{Z} \otimes v_{m,z}^{\hat{A}CE_A\rightarrow \hat{A}QF_AG_A}, \laeq{dfnUDE2} } where $Z''$ is a system isomorphic to $Z$ with the fixed orthonormal basis $\{\ket{z}\}_z$ and $\tilde{G}_A\equiv G_AZ''$. The operators $v_{m,z}$ are linear operators such that $\sum_m v_{m,z}^\dagger v_{m,z}=I$ for all $z$. It should be noted that $\hat{G}_A=G_AM_AZ''$.
\subsubsection{Properties of $\tilde{\Psi}$ and $\Psi_f$}
Since $\ket{\tilde{\Psi}}$ is defined as \req{dfnOmega} by $U_{\ca{E}}$ that is in the form of \req{dfnUE}, it is decomposed into \alg{ \ket{\tilde{\Psi}} = \sum_m\sqrt{q_m}\ket{m}^{M}\ket{m}^{M_A}\ket{\tilde{\Psi}_{m}}, } with some probability distribution $\{q_m\}_m$ and pure states $\{\ket{\tilde{\Psi}_{m}}\}_m$. Thus, we have \alg{ \ca{C}^M(\tilde{\Psi}) = \sum_{m}q_{m}\proj{m}^M\!\otimes\!\proj{m}^{M_A}\!\otimes\!\proj{\tilde{\Psi}_{m}}, \laeq{tildemsimmz2} } where $\ca{C}^M$ is the completely dephasing operation on $M$ with respect to the basis $\{\ket{m}\}_m$. Similarly, due to \req{dfnUDE}, \req{dfnOmega} and \req{dfnPsif}, the state $\ket{\Psi_f}$ is decomposed into \alg{ \ket{\Psi_f} = \sum_{m}\sqrt{q_m}\ket{m}^{M_A}\ket{m}^{M_B}\ket{\Psi_{f,m}}. \laeq{tildemsimmz} } From \req{dfnUDE2}, it holds that $\bra{z_1}^{Z'}\bra{z_2}^{Z''}\ket{\Psi_f}\propto\delta_{z_1,z_2}$. Thus, the states $\ket{\Psi_{f,m}}$ are further decomposed into \alg{ \ket{\Psi_{f,m}} =
\sum_{z}\sqrt{q_{z|m}}\ket{z}^{Z''}\ket{\Psi_{f,m,z}}\ket{z}^{Z'}. \laeq{tildemsimmz3} }
\subsubsection{Properties of $\Gamma$}
From the definition \req{dfntipsi}, it follows that \alg{ & \ca{C}^T(\Gamma) = \nonumber\\ &\quad \sum_{x,y,z}p_{xyz}\proj{xyz}^{XYZ} \otimes \proj{\psi_{xyz}}^{ABCR} \nonumber\\ &\quad\quad\quad \otimes \proj{\phi_{xyz}}^{\hat{G}_A\hat{G}_B} \otimes \proj{xyz}^T \laeq{dfntipsiii} } and that \alg{ {\rm Tr}_T(\Gamma) & = \sum_{x,y,z}p_{xyz}\proj{xyz}^{XYZ} \nonumber\\ &\quad \otimes \proj{\psi_{xyz}}^{ABCR} \otimes \proj{\phi_{xyz}}^{\hat{G}_A\hat{G}_B}. \laeq{dfntipsiii2} } Both states are ensembles of pure states on $ABCR\hat{G}_A\hat{G}_B$, classically labelled by $xyz$ on $XYZ$ or $T$, that are decoupled between $ABCR$ and $\hat{G}_A\hat{G}_B$. It follows from \req{dfntipsiii} that \alg{ {\rm Tr}_{\hat{G}_A\hat{G}_B}\otimes\ca{C}^T(\Gamma) = \Psi_s^{\hat{A}\hat{B}\hat{C}\hat{R}}. \laeq{PTgammaf} }
Due to \req{tildemsimmz}, \req{tildemsimmz3} and \rLmm{extUhlmann2} in \rApp{extUhlmann}, we may, without loss of generality, assume that $\ket{\phi_{xyz}}$ is in the form of \alg{ \ket{\phi_{xyz}}^{\hat{G}_A\hat{G}_B} = \ket{\phi_{xyz}'}^{G_AM_A\hat{G}_B}\ket{z}^{Z''} \laeq{mardock} } and \alg{ & \ket{\phi_{xyz}'}^{G_AM_A\hat{G}_B} \nonumber\\ & \quad := \sum_m
\sqrt{p_{m|xyz}}\ket{m}^{M_A}\ket{m}^{M_B}\ket{\phi_{m,xyz}}^{G_AG_B}. } Substituting this to \req{dfntipsiii}, we have \alg{ & \ca{C}^T(\Gamma)^{AG_AM_AXYZT} \nonumber\\ & =\sum_{x,y,z}p_{xyz}\proj{xyz}^{XYZ} \otimes \proj{z}^{Z''} \nonumber\\ & \quad\quad \otimes \psi_{xyz}^{A} \otimes \phi_{xyz}^{G_AM_A} \otimes \proj{xyz}^T \!.\! \laeq{fstar} }
Thus, the state $\ca{C}^T(\Gamma)$ given by \req{fstar} is classically coherent in $ZZ''$. Denoting $p_{xyz}p_{m|xyz}$ by $p_{m,xyz}$, it follows from \req{dfntipsiii} that \alg{ & \ca{C}^T\circ\ca{C}^{M_A}(\Gamma^{AG_AM_A\hat{G}_BT}) \nonumber\\ & =\sum_{m,x,y,z}p_{m,xyz} \: \psi_{xyz}^{A} \otimes \proj{m}^{M_A}\otimes\proj{m}^{M_B} \nonumber\\ &\quad\quad\quad\quad \otimes \proj{\phi_{m,xyz}}^{G_AG_B} \otimes \proj{xyz}^T, \laeq{dfntipsiii55} } with $\ca{C}^{M_A}$ being the completely dephasing operation on $M_A$ with respect to the basis $\{\ket{m}\}_m$. It should also be noted that \alg{ & \Gamma^{AG_AM_AXYZ} \nonumber\\ & \quad =\sum_{m,x,y,z}p_{m,xyz} \proj{m}^{M_A} \!\otimes\! \psi_{xyz}^{A} \nonumber\\ & \quad\quad\quad\quad \otimes \phi_{m,xyz}^{G_A} \!\otimes\! \proj{xyz}^{XYZ}. \laeq{sstar} }
\subsection{Inequalities for Proving \rThm{converse}}
As an intermediate goal for the proof of \rThm{converse}, we prove that the following four inequalities hold for the states $\Psi_s$ and $\Gamma$ defined by \req{sourcestate} and \req{dfntipsi}, respectively: \alg{ & c+q-e \geq H_{\rm min}^{\epsilon}(AXCZ)_{\Psi_s} - H_{\rm max}^{\epsilon}(AXZ)_{\Psi_s} \nonumber\\ &\quad\quad\quad\quad
-H_{\rm min}^{7\epsilon+2\sqrt{\delta}}(G_A|M_AAXZ)_{\Gamma}-4f(\epsilon), \laeq{convv1} \\ & q-e
\geq H_{\rm min}^{\epsilon}(AC|XYZ)_{\Psi_s} -
H_{\rm max}^{\epsilon}(A|XYZ)_{\Psi_s} \nonumber\\ &\quad\quad\quad\quad -
H_{\rm min}^{5\epsilon+2\sqrt{\delta}}(G_AM_A|XYZ)_{\Gamma} -3f(\epsilon), \laeq{convv000} \\ & c+q+e \geq H_{\rm min}^{\epsilon}(BYCZ)_{\Psi_s} - H_{\rm min}^{12\epsilon+6\sqrt{\delta}}(BY)_{\Psi_s} \nonumber\\ &\quad\quad\quad\quad +
H_{\rm min}^{5\epsilon+2\sqrt{\delta}}(G_AM_A|XYZ)_{\Gamma} -f(\epsilon), \laeq{convv3} \\ & q+e
\geq H_{\rm min}^{\epsilon}(BC|XYZ)_{\Psi_s}
-H_{\rm min}^{11\epsilon+8\sqrt{\delta}}(B|XYZ)_{\Psi_s} \nonumber\\ &\quad\quad\quad\quad
+H_{\rm min}^{7\epsilon+6\sqrt{\delta}}(G_A|M_AAXYZ)_{\Gamma} -2f(\epsilon), \laeq{convv4} } where $ f(x):=-\log{(1-\sqrt{1-x^2})} $. The proof of these inequalities will be given in the following subsections. We will extensively use the properties of the smooth conditional entropies, which are summarized in \rApp{propSmEn}.
\subsubsection{Proof of Inequality \req{convv1}}
We start with \alg{ & e+e_0+H_{\rm min}^{\epsilon}(AXCZ)_{\Psi_s} \nonumber\\ & =e+e_0+H_{\rm min}^{\epsilon}(AXCZ)_{\Psi} \laeq{convmod4-0}\\ &\leq H_{\rm min}^{\epsilon}(AXCZE_A)_{\Psi_s\otimes\Phi_{2^{e+e_0}}} \laeq{convmod4-1}\\ &= H_{\rm min}^{\epsilon}(AXF_A\hat{G}_AQM)_{\tilde{\Psi}} \laeq{convmod4-2}\\ &\leq H_{\rm max}^{\epsilon}(QM) +
H_{\rm min}^{4\epsilon}(AXF_A\hat{G}_A|QM)_{\tilde{\Psi}} \nonumber\\ &\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad +2f(\epsilon) \laeq{convmod4-3}\\ &\leq c+q +
H_{\rm min}^{4\epsilon}(AXF_A\hat{G}_A|M)_{\tilde{\Psi}}+2f(\epsilon), \laeq{convmod4-3-2} } where \req{convmod4-0} follows from $\Psi_s^{\hat{A}\hat{C}}=\Psi^{\hat{A}\hat{C}}$; \req{convmod4-1} from the superadditivity of the smooth conditional min entropy for product states (\rLmm{SE3}); \req{convmod4-2} from the fact that $\ket{\tilde{\Psi}}$ is obtained from $\ket{\Psi}\ket{\Phi_{2^{e+e_0}}}$ by an isometry $U_{\ca{E}}$ as \req{dfnOmega}, under which the smooth conditional entropy is invariant (\rLmm{invCEiso}); \req{convmod4-3} from the chain rule \req{CRminmaxmin}; and \req{convmod4-3-2} from the dimension bound (\rLmm{SE4}).
The third term in \req{convmod4-3-2} is further calculated as \alg{ &
H_{\rm min}^{4\epsilon}(AXF_A\hat{G}_A|M)_{\tilde{\Psi}} \laeq{convmod4-4}\\
&\leq H_{\rm min}^{4\epsilon}(AXF_A\hat{G}_A|M)_{\ca{C}^M(\tilde{\Psi})} \laeq{convmod4-5}\\ &=
H_{\rm min}^{4\epsilon}(AXZ''F_AG_AM_A|M)_{\ca{C}^M(\tilde{\Psi})} \laeq{convmod4-5-2}\\ &=
H_{\rm min}^{4\epsilon}(AXZ''F_AG_A|M_A)_{\tilde{\Psi}} \laeq{convmod4-6}\\
&\leq H_{\rm min}^{4\epsilon+2\sqrt{\delta}}(AXZ''F_AG_A|M_A)_{\tilde{\Gamma}} \laeq{convmod4-7}\\ &=
H_{\rm min}^{4\epsilon+2\sqrt{\delta}}(AXZ''F_AG_A|M_A)_{\Gamma\otimes\Phi_{2^{e_0}}} \laeq{convmod4-8}\\
&\leq H_{\rm min}^{4\epsilon+2\sqrt{\delta}}(AXZ''G_A|M_A)_{\Gamma}+e_0 \laeq{convmod4-9}\\ &=
H_{\rm min}^{4\epsilon+2\sqrt{\delta}}(AXZG_A|M_A)_{\Gamma}+e_0 \laeq{convmod4-9-2}\\
&\leq H_{\rm max}^{\epsilon}(AXZ|M_A)_{\Gamma} \nonumber\\ &\quad\quad
+H_{\rm min}^{7\epsilon+2\sqrt{\delta}}(G_A|M_AAXZ)_{\Gamma}+e_0+2f(\epsilon) \laeq{convmod4-10}\\ &\leq H_{\rm max}^{\epsilon}(AXZ)_{\Gamma} \nonumber\\ &\quad\quad
+H_{\rm min}^{7\epsilon+2\sqrt{\delta}}(G_A|M_AAXZ)_{\Gamma}+e_0+2f(\epsilon) \laeq{convmod4-11}\\ &= H_{\rm max}^{\epsilon}(AXZ)_{\Psi_s} \nonumber\\ &\quad\quad
+H_{\rm min}^{7\epsilon+2\sqrt{\delta}}(G_A|M_AAXZ)_{\Gamma}+e_0+2f(\epsilon). \laeq{convmod4-12} } Here, \req{convmod4-5} follows from the monotonicity of the smooth conditional entropy (\rLmm{Hminmonotonicity}); \req{convmod4-5-2} from $\hat{G}_A\equiv G_AM_AZ''$; \req{convmod4-6} from \rLmm{condminCQCQ} and the fact that $M_A$ is a classical copy of $M$ as \req{tildemsimmz2}; \req{convmod4-7} from the continuity of the smooth conditional entropy (\rLmm{SE12}) and the fact that $\tilde{\Gamma}$ and $\tilde{\Psi}$ are $2\sqrt{\delta}$-close with each other as \req{qeef2}; \req{convmod4-8} from the fact that $\tilde{\Gamma}$ is converted to $\Gamma$ by $U_{\ca{D}}$ as \req{dfntildePsi}, which does not change the reduced state on $AXZ''F_AG_AM_A$;
\req{convmod4-9} from the dimension bound (\rLmm{SE4}); \req{convmod4-9-2} from the fact that $Z''$ is a classical copy of $Z$, due to \req{fstar}; \req{convmod4-10} from the chain rule \req{CRminmaxmin}; \req{convmod4-11} from the fact that conditioning reduces the entropy due to the monotonicity of the smooth conditional entropy (\rLmm{Hminmonotonicity}); and \req{convmod4-12} from the fact that $\Gamma^{AXZ}=\Psi_s^{AXZ}$.
Combining these inequalities, we obtain \alg{ & e+e_0+H_{\rm min}^{\epsilon}(AXCZ)_{\Psi_s} \nonumber\\ & \leq c+q+2f(\epsilon) + H_{\rm max}^{\epsilon}(AXZ)_{\Psi_s} \nonumber\\ &\quad
+H_{\rm min}^{7\epsilon+2\sqrt{\delta}}(G_A|M_AAXZ)_{\Gamma}+e_0+2f(\epsilon), } which implies \req{convv1}.
\subsubsection{Proof of Inequality \req{convv000}}
We have \alg{ &
e_0+H_{\rm min}^{2\epsilon+2\sqrt{\delta}}(\hat{A}\hat{G}_A|T)_{\ca{C}^T(\Gamma)} \laeq{convmod1-0}\\ & =
e_0+H_{\rm min}^{2\epsilon+2\sqrt{\delta}}(\hat{B}\hat{C}R\hat{G}_B|T)_{\ca{C}^T(\Gamma)} \laeq{convmod1-2}\\ &
\geq H_{\rm min}^{2\epsilon+2\sqrt{\delta}}(\hat{B}\hat{C}RF_B\hat{G}_B|T)_{\ca{C}^T(\Gamma)\otimes\Phi_{2^{e_0}}} \laeq{convmod1-3}\\ & =
H_{\rm min}^{2\epsilon+2\sqrt{\delta}}(\hat{B}RE_BQM|T)_{\ca{C}^T(\tilde{\Gamma})} \laeq{convmod1-4}\\ &
\geq H_{\rm min}^{\epsilon+2\sqrt{\delta}}(\hat{B}RE_BM|T)_{\ca{C}^T(\tilde{\Gamma})} \nonumber\\ & \quad\quad +
H_{\rm min}(Q|\hat{B}RE_BMT)_{\ca{C}^T(\tilde{\Gamma})} -f(\epsilon) \laeq{convmod1-5}\\ &
\geq H_{\rm min}^{\epsilon+2\sqrt{\delta}}(\hat{B}RE_BM|T)_{\ca{C}^T(\tilde{\Gamma})} - q -f(\epsilon). \laeq{convmod1-6} } Here, \req{convmod1-2} is from the fact that $\Gamma$ is a pure state on $\hat{A}\hat{B}\hat{C}\hat{R}\hat{G}_A\hat{G}_B$ as \req{dfntipsi}, which is transformed by $\ca{C}^T$ to an ensemble of classically-labelled pure states, to which \rLmm{SE11} is applicable; \req{convmod1-3} from the dimension bound (\rLmm{SE4}); \req{convmod1-4} from the fact that $\tilde{\Gamma}$ is obtained from $\Gamma\otimes\Phi_{2^{e_0}}$ by an isometry as \req{dfntildePsi} under which the smooth conditional entropy is invariant (\rLmm{invCEiso}); \req{convmod1-5} from the chain rule \req{CRminminmin}; and \req{convmod1-6} from the dimension bound (\rLmm{SE2}).
The first term in \req{convmod1-6} is further calculated to be \alg{ &
H_{\rm min}^{\epsilon+2\sqrt{\delta}}(\hat{B}RE_BM|T)_{\ca{C}^T(\tilde{\Gamma})} \\
&\geq H_{\rm min}^{\epsilon}(\hat{B}RE_BM|T)_{\ca{C}^T(\tilde{\Psi})} \laeq{convmod2-2}\\ &=
H_{\rm min}^{\epsilon}(\hat{A}F_A\hat{G}_AQ|T)_{\ca{C}^T(\tilde{\Psi})} \laeq{convmod2-3}\\ &=
H_{\rm min}^{\epsilon}(\hat{A}F_A\hat{G}_AQM|T)_{\ca{C}^T\otimes\ca{C}^M(\tilde{\Psi})} \laeq{convmod2-4}\\
&\geq H_{\rm min}^{\epsilon}(\hat{A}F_A\hat{G}_AQM|T)_{\ca{C}^T(\tilde{\Psi})} \laeq{convmod2-5}\\ &=
H_{\rm min}^{\epsilon}(\hat{A}\hat{C}E_A|T)_{\ca{C}^T(\Psi)\otimes\Phi_{2^{e+e_0}}} \laeq{convmod2-6}\\
&\geq H_{\rm min}^{\epsilon}(\hat{A}\hat{C}|T)_{\ca{C}^T(\Psi)}+e+e_0 \laeq{convmod2-7}\\ &=
H_{\rm min}^{\epsilon}(AC|XYZ)_{\Psi_s}+e+e_0. \laeq{convmod2-8} } Inequality \req{convmod2-2} is from the continuity of the smooth conditional entropy (\rLmm{SE12}) and the fact that $\tilde{\Gamma}$ and $\tilde{\Psi}$ are $2\sqrt{\delta}$-close with each other as \req{qeef2}; \req{convmod2-3} from \rLmm{SE11} and the fact that $\tilde{\Psi}$ is a pure state on $\hat{A}\hat{B}\hat{R}QMF_A\hat{G}_AE_B$ as \req{dfnOmega}, which is transformed by $\ca{C}^T$ to an ensemble of classically-labelled pure states; \req{convmod2-4} from $\hat{G}_A=G_AM_AZ''$ and the fact that $M$ is a classical copy of $M_A$ as \req{tildemsimmz2}; \req{convmod2-5} from the monotonicity of the smooth conditional min entropy under unital maps (\rLmm{Hminmonotonicity}); \req{convmod2-6} from the isometric invariance of the smooth conditional entropy (\rLmm{invCEiso}) and the fact that $\tilde{\Psi}$ is obtained by an isometry $U_{\ca{E}}$ from $\Psi$ as \req{dfnOmega}; \req{convmod2-7} from the superadditivity of the smooth conditional entropy (\rLmm{SE3}); and \req{convmod2-8} from $\ca{C}^T(\Psi)=\Psi_s$ and the property of the smooth conditional entropy for CQ states (\rLmm{condminCQCQ}).
The second term in \req{convmod1-0} is bounded as \alg{ &
H_{\rm min}^{2\epsilon+2\sqrt{\delta}}(\hat{A}\hat{G}_A|T)_{\ca{C}^T(\Gamma)} \nonumber\\ &=
H_{\rm min}^{2\epsilon+2\sqrt{\delta}}(AG_AM_A|XYZ)_{\Gamma} \laeq{convmod3-1}\\
&\leq H_{\rm max}^{\epsilon}(A|XYZ)_{\Gamma} \nonumber\\ &\quad +
H_{\rm min}^{5\epsilon+2\sqrt{\delta}}(G_AM_A|AXYZ)_{\Gamma} +2f(\epsilon) \laeq{convmod3-2}\\ &=
H_{\rm max}^{\epsilon}(A|XYZ)_{\Psi_s} \nonumber\\ &\quad\quad +
H_{\rm min}^{5\epsilon+2\sqrt{\delta}}(G_AM_A|XYZ)_{\Gamma} +2f(\epsilon). \laeq{convmod3-3} } Here, \req{convmod3-1} follows from $\hat{G}_A\equiv G_AM_AZ''$ and the fact that $\ca{C}^T(\Gamma)$ is classically coherent in $XX'$ and in $ZZ''$ because of \req{fstar}; \req{convmod3-2} from the chain rule \req{CRminmaxmin}; and \req{convmod3-3} from $\Gamma^{AXYZ}=\Psi_s^{AXYZ}$ and the fact that the system $A$ in the conditioning part is decoupled from $G_AM_A$ when conditioned by $XYZ$ as \req{dfntipsiii2} in addition to \rLmm{SE1}.
Combining these all together, we arrive at \alg{ &
\! e_0\!+\!H_{\rm max}^{\epsilon}(A|XYZ)_{\Psi_s}\! \nonumber\\
&\quad+\!H_{\rm min}^{5\epsilon+2\sqrt{\delta}}(G_AM_A|XYZ)_{\Gamma} \!+\!2f(\epsilon) \! \nonumber\\ &
\geq H_{\rm min}^{\epsilon}(AC|XYZ)_{\Psi_s} +e+e_0-q-f(\epsilon). } This completes the proof of Ineq.~\req{convv000}.
\subsubsection{Proof of Inequality \req{convv3}}
We first calculate \alg{ & H_{\rm min}^{\epsilon}(BYCZ)_{\Psi_s} \nonumber\\ &= H_{\rm min}^{\epsilon}(BYCZ)_{\Gamma} \laeq{convmod5-2}\\ &\leq H_{\rm min}^{12\epsilon+4\sqrt{\delta}}(BYCZF_B\hat{G}_B)_{\Gamma\otimes\Phi_{2^{e_0}}} \nonumber\\ &\quad\quad -
H_{\rm min}^{5\epsilon+2\sqrt{\delta}}(F_B\hat{G}_B|BYCZ)_{\Gamma\otimes\Phi_{2^{e_0}}} +f(\epsilon) \laeq{convmod5-3}\\ &\leq H_{\rm min}^{12\epsilon+4\sqrt{\delta}}(BYCZF_B\hat{G}_B)_{\Gamma\otimes\Phi_{2^{e_0}}} \nonumber\\ &\quad\quad -
e_0-H_{\rm min}^{5\epsilon+2\sqrt{\delta}}(\hat{G}_B|BYCZ)_{\Gamma} +f(\epsilon) \laeq{convmod5-4} \\ &= H_{\rm min}^{12\epsilon+4\sqrt{\delta}}(BYE_BQM)_{\tilde{\Gamma}} \nonumber\\ &\quad\quad -
e_0-H_{\rm min}^{5\epsilon+2\sqrt{\delta}}(\hat{G}_B|BYCZ)_{\Gamma} +f(\epsilon). \laeq{convmod5-4-2} } Here, \req{convmod5-2} follows from $\Psi_s^{BYCZ}=\Gamma^{BYCZ}$; \req{convmod5-3} from the chain rule \req{CRminminmin}; \req{convmod5-4} from the superadditivity of the smooth conditional entropy for product states (\rLmm{SE3}); and \req{convmod5-4-2} from the fact that $\tilde{\Gamma}$ is obtained by an isometry $U_{\ca{D}}^\dagger$ from $\Gamma\otimes\Phi_{2^{e_0}}$ as \req{dfntildePsi}.
The first term in \req{convmod5-4-2} is further calculated to be \alg{ & H_{\rm min}^{12\epsilon+4\sqrt{\delta}}(BYE_BQM)_{\tilde{\Gamma}} \laeq{convmod5-5}\\ &\leq H_{\rm min}^{12\epsilon+6\sqrt{\delta}}(BYE_BQM)_{\tilde{\Psi}} \laeq{convmod5-6}\\ &\leq H_{\rm min}^{12\epsilon+6\sqrt{\delta}}(BYE_B)_{\tilde{\Psi}}+c+q \laeq{convmod5-7}\\ &= H_{\rm min}^{12\epsilon+6\sqrt{\delta}}(BYE_B)_{\Psi\otimes\Phi_{e+e_0}}+c+q \laeq{convmod5-8}\\ &\leq H_{\rm min}^{12\epsilon+6\sqrt{\delta}}(BY)_{\Psi_s}+e+e_0+c+q, \laeq{convmod5-9} } where \req{convmod5-6} follows from the continuity of the smooth conditional entropy (\rLmm{SE12}) and the fact that $\tilde{\Gamma}$ and $\tilde{\Psi}$ are $2\sqrt{\delta}$-close with each other as \req{qeef2}; \req{convmod5-7} from the dimension bound (\rLmm{SE4}); \req{convmod5-8} from the fact that $\tilde{\Psi}$ is converted to $\Psi\otimes\Phi_{e+e_0}$ by an operation $U_{\ca{E}}$ by Alice as \req{dfnOmega}, which does not change the reduced state on $BYE_B$; and \req{convmod5-9} from the dimension bound (\rLmm{SE4}) and $\Psi_s^{BY}=\Psi^{BY}$.
For the third term in \req{convmod5-4-2}, we have \alg{ &
H_{\rm min}^{5\epsilon+2\sqrt{\delta}}(\hat{G}_B|BYCZ)_{\Gamma} \laeq{convmod5-10}\\
&\geq H_{\rm min}^{5\epsilon+2\sqrt{\delta}}(\hat{G}_B|BCXYZ)_{\Gamma} \laeq{convmod5-11}\\ &=
H_{\rm min}^{5\epsilon+2\sqrt{\delta}}(\hat{G}_B|XYZ)_{\Gamma} \laeq{convmod5-12}\\ &=
H_{\rm min}^{5\epsilon+2\sqrt{\delta}}(\hat{G}_A|XYZ)_{\Gamma} \laeq{convmod5-13}\\ &=
H_{\rm min}^{5\epsilon+2\sqrt{\delta}}(G_AM_A|XYZ)_{\Gamma} \laeq{convmod5-14} } Here, \req{convmod5-11} is from the monotonicity of the smooth conditional entropy (\rLmm{Hminmonotonicity}); \req{convmod5-12} from the fact that $\Gamma$ is decoupled between $BC$ and $\hat{G}_B$ when conditioned by $XYZ$ as \req{dfntipsiii2}, and the property of the smooth conditional entropy (\rLmm{SE1}); \req{convmod5-13} from \rLmm{SE11} and the fact that $\Gamma^{\hat{G}_A\hat{G}_BXYZ}$ is an ensemble of classically-labelled pure states on $\hat{G}_A\hat{G}_B$ as \req{dfntipsiii2}; and \req{convmod5-14} from $\hat{G}_A\equiv G_AM_AZ''$, \rLmm{condminCQCQ} and the fact that $Z''$ is a classical copy of $Z$ due to \req{fstar}.
Combining these all together, we arrive at \alg{ & H_{\rm min}^{\epsilon}(BYCZ)_{\Psi_s} \nonumber\\ & \leq H_{\rm min}^{12\epsilon+6\sqrt{\delta}}(BY)_{\Psi_s} + e+c+q+f(\epsilon) \nonumber\\ &\quad\quad
-H_{\rm min}^{5\epsilon+2\sqrt{\delta}}(G_AM_A|XYZ)_{\Gamma}, } which implies Ineq.~\req{convv3}.
\subsubsection{Proof of Inequality \req{convv4}}
We have \alg{ &
e+e_0+H_{\rm min}^{11\epsilon+8\sqrt{\delta}}(B|XYZ)_{\Psi_s} \\ &=
e+e_0+H_{\rm min}^{11\epsilon+8\sqrt{\delta}}(ACR|XYZ)_{\Psi_s} \laeq{Vconv1-0}\\ &=
e+e_0+H_{\rm min}^{11\epsilon+8\sqrt{\delta}}(\hat{A}\hat{C}R|T)_{\Psi_s} \laeq{Vconv1-0-2}\\ &
=e+e_0+H_{\rm min}^{11\epsilon+8\sqrt{\delta}}(\hat{A}\hat{C}R|T)_{\ca{C}^T(\Psi)} \laeq{Vconv1-1} \\ &
\geq H_{\rm min}^{11\epsilon+8\sqrt{\delta}}(\hat{A}\hat{C}E_AR|T)_{\ca{C}^T(\Psi)\otimes\Phi_{2^{e+e_0}}} \laeq{dorodoro}\\ & =
H_{\rm min}^{11\epsilon+8\sqrt{\delta}}(\hat{A}QMF_A\hat{G}_AR|T)_{\ca{C}^T(\tilde{\Psi})} \laeq{Vconv2-2}\\ &
\geq H_{\rm min}^{10\epsilon+8\sqrt{\delta}}(\hat{A}MF_A\hat{G}_AR|T)_{\ca{C}^T(\tilde{\Psi})} \nonumber\\ &\quad +
H_{\rm min}(Q|\hat{A}MF_A\hat{G}_ART)_{\ca{C}^T(\tilde{\Psi})} -f(\epsilon) \laeq{Vconv3-2}\\ &
\geq H_{\rm min}^{10\epsilon+8\sqrt{\delta}}(\hat{A}MF_A\hat{G}_AR|T)_{\ca{C}^T(\tilde{\Psi})} -q -f(\epsilon) \laeq{Vconv3-2-2} \\ & =
H_{\rm min}^{10\epsilon+8\sqrt{\delta}}(\hat{B}E_BQ|T)_{\ca{C}^T(\tilde{\Psi})} -q -f(\epsilon), \laeq{Vconv3-2-3} } where \req{Vconv1-0} follows from \rLmm{SE11}; \req{Vconv1-0-2} from \rLmm{condminCQCQ} and the fact that $T=X'Y'Z'$ is a classical copy of $XYZ$; \req{Vconv1-1} from $\Psi_s=\ca{C}^T(\Psi)$, \req{dorodoro} from the dimension bound (\rLmm{SE4}), \req{Vconv2-2} from the fact that $\tilde{\Psi}$ is obtained from $\Psi\otimes\Phi_{2^{e+e_0}}$ by applying the isometry $U_{\ca{E}}$ as \req{dfnOmega}, under which the smooth conditional entropy is invariant (\rLmm{invCEiso}), \req{Vconv3-2} from the chain rule \req{CRminminmin}, \req{Vconv3-2-2} from the dimension bound (\rLmm{SE2}), and \req{Vconv3-2-3} from \rLmm{SE11} and the fact that $\tilde{\Psi}$ is a pure state on $\hat{A}\hat{B}\hat{R}F_A\hat{G}_AQME_B$ as \req{dfnOmega}, which is converted by $\ca{C}^T$ to an ensemble of classically-labelled pure states.
The first term in \req{Vconv3-2-3} is further calculated to be \alg{ &
H_{\rm min}^{10\epsilon+8\sqrt{\delta}}(\hat{B}E_BQ|T)_{\ca{C}^T(\tilde{\Psi})} \\ & =
H_{\rm min}^{10\epsilon+8\sqrt{\delta}}(\hat{B}E_BQ|T)_{\ca{C}^T\otimes\ca{C}^M(\tilde{\Psi})} \laeq{ttt00}\\ &
\geq H_{\rm min}^{10\epsilon+8\sqrt{\delta}}(\hat{B}E_BQ|TM)_{\ca{C}^T\otimes\ca{C}^M(\tilde{\Psi})} \laeq{ttt1}\\ & =
H_{\rm min}^{10\epsilon+8\sqrt{\delta}}(\hat{B}E_BQM|TM_A)_{\ca{C}^T\otimes\ca{C}^{M_A}(\tilde{\Psi})} \laeq{ttt2}\\ & =
H_{\rm min}^{10\epsilon+8\sqrt{\delta}}(\hat{B}\hat{C}F_B\hat{G}_B|TM_A)_{\ca{C}^T\otimes\ca{C}^{M_A}(\Psi_f)} \laeq{ttt0}\\ &
\geq H_{\rm min}^{10\epsilon+6\sqrt{\delta}}(\hat{B}\hat{C}F_B\hat{G}_B|TM_A)_{\ca{C}^T\otimes\ca{C}^{M_A}(\Gamma)\otimes\Phi_{2^{e_0}}} \laeq{ttt0-2}\\ &
\geq H_{\rm min}^{7\epsilon+6\sqrt{\delta}}(\hat{G}_B|TM_A)_{\ca{C}^T\otimes\ca{C}^{M_A}(\Gamma)\otimes\Phi_{2^{e_0}}} \nonumber\\ &\quad\quad +
H_{\rm min}^{\epsilon}(\hat{B}\hat{C}F_B|T\hat{G}_BM_A)_{\ca{C}^T\otimes\ca{C}^{M_A}(\Gamma)\otimes\Phi_{2^{e_0}}} \nonumber\\ &\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad -f(\epsilon). \laeq{ttt} } Inequality \req{ttt00} is due to the fact that $\ca{C}^M$ does not change the reduced state on $\hat{B}E_BQT$; \req{ttt1} from the monotonicity of the conditional entropy (\rLmm{Hminmonotonicity}); \req{ttt2} from the property of the conditional entropy for classical-quantum states (\rLmm{condminCQCQ}) and the fact that $M_A$ is a classical copy of $M$ as \req{tildemsimmz2}; \req{ttt0} from the fact that $\Psi_f$ is obtained from $\tilde{\Psi}$ by the isometry $U_{\ca{D}}$ as \req{dfnPsif}, under which the smooth conditional entropy is invariant; \req{ttt0-2} from the continuity (\rLmm{SE12}) and the fact that $\Gamma\otimes\Phi_{2^{e_0}}$ is $2\sqrt{\delta}$-close to $\Psi_f$ as \req{qeef}; and \req{ttt} from the chain rule \req{CRminminmin}.
The second term in \req{ttt} is further calculated as \alg{ &
H_{\rm min}^{\epsilon}(\hat{B}\hat{C}F_B|T\hat{G}_BM_A)_{\ca{C}^T\otimes\ca{C}^{M_A}(\Gamma)\otimes\Phi_{2^{e_0}}} \laeq{rashi1}\\
&\geq H_{\rm min}^{\epsilon}(\hat{B}\hat{C}|T\hat{G}_BM_A)_{\ca{C}^T\otimes\ca{C}^{M_A}(\Gamma)}+e_0 \laeq{rashi2-0}\\
&\geq H_{\rm min}^{\epsilon}(\hat{B}\hat{C}|T\hat{G}_BM_A)_{\ca{C}^T(\Gamma)}+e_0 \laeq{rashi2-0-2}\\ &=
H_{\rm min}^{\epsilon}(\hat{B}\hat{C}|T)_{\ca{C}^T(\Gamma)}+e_0 \laeq{rashi2}\\ &=
H_{\rm min}^{\epsilon}(\hat{B}\hat{C}|T)_{\Psi_s}+e_0 \laeq{rashi3-2}\\ &=
H_{\rm min}^{\epsilon}(BC|XYZ)_{\Psi_s}+e_0, \laeq{rashi4} } where \req{rashi2-0} follows from the superadditivity of the smooth conditional entropy (\rLmm{SE3}); \req{rashi2-0-2} from the monotonicity of the smooth conditional entropy (\rLmm{Hminmonotonicity}); \req{rashi2} from \rLmm{SE1} and the fact that the state $\ca{C}^T(\Gamma)$ is decoupled between $\hat{B}\hat{C}$ and $\hat{G}_BM_A$ when conditioned by $T$ as \req{dfntipsiii}; \req{rashi3-2} from Equality \req{PTgammaf}; and \req{rashi4} from \rLmm{condminCQCQ}.
The first term in \req{ttt} is calculated as \alg{ &
H_{\rm min}^{7\epsilon+6\sqrt{\delta}}(\hat{G}_B|TM_A)_{\ca{C}^T\otimes\ca{C}^{M_A}(\Gamma)} \laeq{kota1}\\ & =
H_{\rm min}^{7\epsilon+6\sqrt{\delta}}(G_A|TM_A)_{\Gamma} \laeq{kota2}\\ & =
H_{\rm min}^{7\epsilon+6\sqrt{\delta}}(G_A|M_AXYZ)_{\Gamma} \laeq{kota3}\\ & =
H_{\rm min}^{7\epsilon+6\sqrt{\delta}}(G_A|M_AAXYZ)_{\Gamma}, \laeq{kota4} } where \req{kota2} is from $\hat{G}_B=G_BM_B$, Equality \req{dfntipsiii55} and \rLmm{SE11}; \req{kota3} from \rLmm{condminCQCQ} and the fact that $T=X'Y'Z'$ is a copy of $XYZ$ as \req{dfntipsi}; and \req{kota4} from \rLmm{SE1} and the fact that the state $\Gamma$ is decoupled between $A$ and $G_A$ when conditioned by $M_AXYZ$ as \req{sstar}.
Combining these all together, we arrive at \alg{ &
e+e_0+H_{\rm min}^{11\epsilon+8\sqrt{\delta}}(B|XYZ)_{\Psi_s} \nonumber\\ &\geq -q
+H_{\rm min}^{7\epsilon+6\sqrt{\delta}}(G_A|M_AAXYZ)_{\Gamma} \nonumber\\ &\quad
+H_{\rm min}^{\epsilon}(BC|XYZ)_{\Psi_s}+e_0 -2f(\epsilon). } This completes the proof of Inequality \req{convv4}.
$\blacksquare$
\subsection{Proof of \rThm{converse} from Inequalities \req{convv1}-\req{convv4}}
Since $\Gamma$ is diagonal in $M_AXYZ$ as \req{sstar}, and due to the properties of the smooth conditional entropies for classical-quantum states (\rLmm{SE1}), we have \alg{ &
H_{\rm min}^{5\epsilon+2\sqrt{\delta}}(G_AM_A|XYZ)_{\Gamma} \geq0, \\ &
H_{\rm min}^{7\epsilon+6\sqrt{\delta}}(G_A|M_AAXYZ)_{\Gamma} \geq 0. } Thus, Inequalities \req{convv3} and \req{convv4} imply Inequalities \req{convv03} and \req{convv04} in \rThm{converse}, respectively. Summing up both sides in \req{convv000} and \req{convv3} yields \alg{ c+2q &
\geq H_{\rm min}^{\epsilon}(AC|XYZ)_{\Psi_s} -
H_{\rm max}^{\epsilon}(A|XYZ)_{\Psi_s} \nonumber\\ &\quad +H_{\rm min}^{\epsilon}(BYCZ)_{\Psi_s} \nonumber\\ &\quad - H_{\rm min}^{12\epsilon+6\sqrt{\delta}}(BY)_{\Psi_s} -4f(\epsilon) \nonumber\\ &= \tilde{H}_{I}'^{(\epsilon,\delta)}-4f(\epsilon). \laeq{doso1} } Similarly, combining Inequalities \req{convv1} and \req{convv4}, we obtain \alg{ c+2q &\geq H_{\rm min}^{\epsilon}(AXCZ)_{\Psi_s} - H_{\rm max}^{\epsilon}(AXZ)_{\Psi_s} \nonumber\\ &\quad\quad +
H_{\rm min}^{\epsilon}(BC|XYZ)_{\Psi_s} \nonumber\\ &\quad\quad
-H_{\rm min}^{11\epsilon+8\sqrt{\delta}}(B|XYZ)_{\Psi_s} \nonumber\\ &\quad\quad
-H_{\rm min}^{7\epsilon+2\sqrt{\delta}}(G_A|M_AAXZ)_{\Gamma} \nonumber\\ &\quad\quad
+H_{\rm min}^{7\epsilon+6\sqrt{\delta}}(G_A|M_AAXYZ)_{\Gamma} -6f(\epsilon) \\ &= \tilde{H}_{I\!I}'^{(\epsilon,\delta)} - \Delta_\Gamma'^{(\epsilon,\delta)} -6f(\epsilon), \laeq{doso2} } where we have defined \alg{ \Delta_\Gamma'^{(\epsilon,\delta)} := &
H_{\rm min}^{7\epsilon+2\sqrt{\delta}}(G_A|M_AAXZ)_{\Gamma} \nonumber\\ & -
H_{\rm min}^{7\epsilon+6\sqrt{\delta}}(G_A|M_AAXYZ)_{\Gamma}. } In the following, we prove that \alg{ \Delta_\Gamma'^{(\epsilon,\delta)} \leq \Delta^{(\epsilon,\delta)}. } Combining this with \req{doso2} in addition to \req{doso1}, we arrive at Inequality \req{convv00} in \rThm{converse}.
We start by noting that \alg{ \Delta_\Gamma'^{(\epsilon,\delta)} & =
H_{\rm min}^{7\epsilon+2\sqrt{\delta}}(G_A|M_AAX'Z')_{\Gamma} \nonumber\\ & \quad\quad -
H_{\rm min}^{7\epsilon+6\sqrt{\delta}}(G_A|M_AAX'Y'Z')_{\Gamma} \\ & \leq
\tilde{I}_{\rm min}^{7\epsilon+4\sqrt{\delta}}(G_A:Y'|M_AAX'Z')_{\tilde{\Psi}} } The first line follows from \rLmm{condminCQCQ} and the fact that $XYZ$ is a copy of $X'Y'Z'$ as \req{dfntipsi}, and
the second line from the continuity bounds for the smooth conditional entropy (\rLmm{SE12}) and the definition of the smooth conditional min mutual information \req{dfnmimMI}. Hence, it suffices to prove that there exists an operation $\ca{F}:\hat{A}\hat{C}\rightarrow AG_AM_A$ satisfying \alg{ \ca{F}(\Psi_s^{\hat{A}\hat{C}\hat{R}})=\tilde{\Psi}^{AG_AM_A\hat{R}}, \quad \ca{C}^{M_A}\circ\ca{F}=\ca{F} \laeq{fpsiseqtildepsi} } and that $\tilde{\Psi}$ satisfies the condition \begin{eqnarray} \! \inf_{\{\omega_{xyz}\}} \! P \! \left( \! \tilde{\Psi}^{AG_AM_A\hat{R}}, \sum_{x,y,z}p_{xyz} \psi_{xyz}^{A\hat{R}} \otimes \omega_{xyz}^{G_AM_A} \! \right) \nonumber\\ \leq 2\sqrt{\delta}. \quad\quad \laeq{conditionaldecoupling2} \end{eqnarray}
Recall that the state $\ket{\tilde{\Psi}}$ is obtained by an encoding isometry $U_\ca{E}^{\hat{A}\hat{C}E_A\rightarrow \hat{A}QMF_A\hat{G}_A}$ from $\ket{\Psi}\ket{\Phi_{2^{e+e_0}}}$ as \req{dfnOmega}, where $\hat{G}_A=G_AM_AZ''$. We define an operation $\ca{F}:\hat{A}\hat{C}\rightarrow AG_AM_A$ by \alg{ \ca{F}(\tau) := {\rm Tr}_{QMF_AXZ''}\circ\ca{U}_{\ca{E}}(\tau\otimes\pi_{2^{e+e_0}}^{E_A}). } Since $U_\ca{E}$ is in the form of \req{dfnUE}, this implies \req{fpsiseqtildepsi}. To obtain the decoupling condition \req{conditionaldecoupling2}, note that, since $\tilde{\Psi}$ is converted by an operation by Bob to $\Psi_f$ as \req{dfnPsif}, it holds that $\tilde{\Psi}^{AG_AM_A\hat{R}}=\Psi_f^{AG_AM_A\hat{R}}$. Thus, tracing out $\hat{B}\hat{C}F_AF_B\hat{G}_BXZ''$ in \req{qeef}, we obtain \alg{ P\left( \tilde{\Psi}^{A\hat{R}G_AM_A}, \Gamma^{A\hat{R}G_AM_A} \right) \leq 2\sqrt{\delta}. } Due to \req{dfntipsi}, the state $\Gamma$ is in the form of \alg{ \! \Gamma^{A\hat{R}G_AM_AT} = \!\sum_{x,y,z}p_{xyz} \psi_{xyz}^{A\hat{R}} \otimes \phi_{xyz}^{G_AM_A} \!\otimes \proj{xyz}^T. \! } Tracing out $T$, this implies \req{conditionaldecoupling2} and completes the proof of Inequality \req{convv00}.
$\blacksquare$
\subsection{Property of $\Delta^{(\epsilon,\delta)}$ (Proof of \rLmm{propDelta})} \lsec{propDelta}
Due to the definition of the smooth conditional min mutual information \req{dfnmimMI} and \req{dfnDeltaed}, it is straightforward to verify that $\Delta^{\epsilon,\delta}\geq0$. The equality holds if $Y'\cong Y$ is a one-dimensional system, that is, if there is no classical side information at the decoder. In the case where there is neither quantum message nor quantum side information at the encoder, i.e. \alg{ d_A=d_C=1, \quad \hat{A}=X, \quad \hat{C}=Z, \laeq{ssff} } the source state $\Psi_s$ is represented as \alg{ \! \Psi_s^{XZ\hat{B}\hat{R}} \!=\! \sum_{x,y,z} p_{xyz} \proj{x}^X \!\!\otimes\! \proj{y}^Y \!\!\otimes\! \proj{z}^Z \!\!\otimes \psi_{xyz}^{B\hat{R}}. \! } Thus, for any CPTP map $\ca{F}:XZ\rightarrow G_AM_A$, we have \alg{ \ca{F}(\Psi_s)^{G_AM_A\hat{R}} = \sum_{x,y,z} p_{xyz} \omega_{xz}^{G_AM_A} \otimes \psi_{xyz}^{\hat{R}}, } where $\omega_{xz}:=\ca{F}(\proj{x}^X \otimes\proj{z}^Z)$. It follows that \alg{ & \ca{F}(\Psi_s)^{G_AM_AX'Y'Z'} \nonumber\\ &\; = \sum_{x,z} p_{xz} \omega_{xz}^{G_AM_A} \otimes \proj{xz}^{X'Z'} \nonumber\\ &\quad\quad \otimes \left(
\sum_y p_{y|xz} \proj{y}^{Y'} \right), }
and consequently, $\tilde{I}_{\rm min}^{7\epsilon+4\sqrt{\delta}}(G_A:Y'|M_AX'Z')=0$. This implies $\Delta^{\epsilon,\delta}=0$, and completes the proof of \rLmm{propDelta}.
$\blacksquare$
\section{Conclusion} \lsec{conclusion}
In this paper, we investigated the state redistribution of classical and quantum hybrid sources in the one-shot scenario. We analyzed the costs of classical communication, quantum communication and entanglement. We obtained the direct bound and the converse bound for those costs in terms of smooth conditional entropies. In most of the cases that have been analyzed in the previous literature, the two bounds coincide in the asymptotic limit of infinitely many copies and vanishingly small error. Various coding theorems for two-party source coding tasks are systematically obtained by reduction from our results, including ones that have not been analyzed in the previous literature.
A detailed investigation of the protocols that are covered by our result but have not been addressed in the previous literature is left for future work. Another direction is to explore the family of quantum communication protocols in the presence of classical side information only at the decoder. It would also be beneficial to analyze the relation between our results and the one-shot bounds for entanglement-assisted communication of classical and quantum messages via a noisy quantum channel \cite{wakakuwa2020randomized}.
\onecolumn
\appendix
\section{Definitions and Properties of Smooth Entropies} \lapp{propSmEn}
In this appendix, we summarize the properties of the smooth conditional entropies that are used in the main text. For the properties of the purified distance used in some of the proofs, see \rApp{extUhlmann}.
\subsection{Basic Properties}
\blmm{duality} {\bf (duality: see e.g. \cite{tomamichel2010duality})}
For any subnormalized pure state $|\psi\rangle$ on system $ABC$, and for any $\epsilon>0$,
$H_{\rm max}^\epsilon(A|B)_\psi= -
H_{\rm min}^\epsilon(A|C)_\psi$. \end{lmm}
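As a simple illustration (this example is not used in the sequel), let $\ket{\psi}^{ABC}=\ket{\Phi_d}^{AB}\otimes\ket{0}^{C}$, where $\ket{\Phi_d}$ is a maximally entangled state of Schmidt rank $d$. In the non-smoothed case $\epsilon\to0$, we have $H_{\rm min}(A|C)_\psi=H_{\rm min}(A)_\psi=\log d$, so the duality gives $H_{\rm max}(A|B)_\psi=-\log d$, reflecting the perfect correlation between $A$ and $B$.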
\begin{lmm}\label{lmm:Hminmonotonicity} {\bf(monotonicity: Theorem 18 in \cite{tomamichel2010duality} and Theorem 6.2 in \cite{T16})}
For any $\rho^{AB} \in \ca{S}_\leq(\ca{H}^{AB})$, $0\leq\epsilon\leq\sqrt{{\rm Tr}[\rho]}$, any unital CPTP map $\ca{E}:A\rightarrow C$ and any CPTP map $\ca{F}:B\rightarrow D$, it holds that $H_{\rm min}^\epsilon(A|B)_\rho\leq H_{\rm min}^\epsilon(C|D)_{\ca{E}\otimes\ca{F}(\rho)}$. \end{lmm}
\begin{lmm}[isometric invariance: Lemma 13 in \cite{tomamichel2010duality}]\label{lmm:invCEiso}
For any $\epsilon\geq0$, $\rho^{AB} \in \ca{S}_\leq(\ca{H}^{AB})$ and any linear isometries $U:A\rightarrow C$ and $V:B\rightarrow D$, $H_{\rm min}^\epsilon(A|B)_\rho=H_{\rm min}^\epsilon(C|D)_{\ca{U}\otimes\ca{V}(\rho)}$. \end{lmm}
\begin{lmm}[additivity: see Section I C in \cite{konig2009operational}]\label{lmm:addcondmax} For any $\rho\in\ca{S}(\ca{H}^{AB})$ and $\sigma\in\ca{S}(\ca{H}^{CD})$, it holds that \alg{
\!\! H_{\rm max}(AC|BD)_{\rho\otimes\sigma} =
H_{\rm max}(A|B)_{\rho} +
H_{\rm max}(C|D)_{\sigma} . \!\! } \end{lmm}
\begin{lmm}\label{lmm:SE3} {\bf (superadditivity: Lemma A.2 in \cite{DBWR2010})} For any states $\rho^{AB}$, $\sigma^{CD}$ and any $\epsilon,\epsilon'\geq0$, it holds that \alg{
\!\! H_{\rm min}^{\epsilon+\epsilon'}(AC|BD)_{\rho\otimes\sigma}
\geq H_{\rm min}^{\epsilon}(A|B)_{\rho}
\!+\! H_{\rm min}^{\epsilon'}(C|D)_{\sigma}. \!\!\! }
\end{lmm}
\begin{lmm}[chain rule: see \cite{vitanov2013chain}]\label{lmm:chainrule} For any $\epsilon>0$, $\epsilon',\epsilon''\geq0$ and $\rho\in\ca{S}_\leq(\ca{H}^{ABC})$, it holds that \alg{
H_{\rm min}^{\epsilon+\epsilon'+2\epsilon''}(AB|C)_\rho &
\geq H_{\rm min}^{\epsilon'}(B|C)_\rho +
H_{\rm min}^{\epsilon''}(A|BC)_\rho -f(\epsilon), \laeq{CRminminmin}\\
H_{\rm min}^{\epsilon'}(AB|C)_\rho &
\leq H_{\rm max}^{\epsilon''}(B|C)_\rho +
H_{\rm min}^{\epsilon+\epsilon'+2\epsilon''}(A|BC)_\rho +2f(\epsilon), \laeq{CRminmaxmin} } where \alg{ f(\epsilon):=-\log{(1-\sqrt{1-\epsilon^2})}. } \end{lmm}
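Note that the correction term $f(\epsilon)$ is only logarithmic in $1/\epsilon$: since $1-\sqrt{1-\epsilon^{2}}\approx\epsilon^{2}/2$ for small $\epsilon$, one has $f(\epsilon)\approx\log(2/\epsilon^{2})=2\log(1/\epsilon)+1$. This rough estimate is given here only as an illustration.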
\begin{lmm}\label{lmm:SE2} {\bf(dimension bounds: Corollary of Lemma 20 in \cite{tomamichel2010duality})} For any state $\rho^{AB}$ and $\epsilon\geq0$, it holds that \alg{
H_{\rm min}^\epsilon(A|B)_\rho &\geq -\log{d_A}, \\
H_{\rm max}^\epsilon(A|B)_\rho &\leq \log{d_A}. } \end{lmm}
\begin{lmm}[dimension bound: Lemma 21 in \cite{datta2011apex}]\label{lmm:SE4} For any state $\rho^{ABC}$ and $\epsilon>0$, it holds that \alg{
H_{\rm min}^{\epsilon}(AB|C)_{\rho}
\leq H_{\rm min}^{\epsilon}(A|C)_{\rho} + \log{d_B}. } \end{lmm}
\blmm{SE12}{\bf (continuity)} For any $\epsilon,\delta\geq0$, any $\rho^{AB}$ and $\sigma^{AB}\in\ca{B}^\delta(\rho)$, it holds that \alg{ &
H_{\rm min}^{\epsilon+\delta}(A|B)_\rho
\geq H_{\rm min}^{\epsilon}(A|B)_\sigma. \laeq{mincont}
} \end{lmm}
\begin{prf} Let $\hat{\sigma}^{AB}\in\ca{B}^{\epsilon}(\sigma)$ be such that $H_{\rm min}^{\epsilon}(A|B)_\sigma=H_{\rm min}(A|B)_{\hat{\sigma}}$. Due to the triangle inequality for the purified distance, it holds that \alg{ P(\rho,\hat{\sigma}) \leq P(\rho,\sigma) + P(\sigma,\hat{\sigma}) \leq \epsilon+\delta, } which implies $\hat{\sigma}\in\ca{B}^{\epsilon+\delta}(\rho)$. Thus, we obtain Inequality \req{mincont} as \alg{
H_{\rm min}^{\epsilon}(A|B)_\sigma
=H_{\rm min}(A|B)_{\hat{\sigma}} \leq
\sup_{\hat{\rho}\in\ca{B}^{\epsilon+\delta}(\rho)}H_{\rm min}(A|B)_{\hat{\rho}} =
H_{\rm min}^{\epsilon+\delta}(A|B)_\rho. }
$\blacksquare$ \end{prf}
\blmm{onedimHminmax}{\bf (one-dimensional system)} Suppose that $d_A=1$. For any $\epsilon\geq0$ and $\rho\in\ca{S}(\ca{H}^{AB})$, it holds that \alg{ & 0
\leq H_{\rm min}^\epsilon(A|B)_\rho \leq -\log{(1-2\epsilon)}, \laeq{katt1}\\ & 0
\geq H_{\rm max}^\epsilon(A|B)_\rho \geq \log{(1-2\epsilon)}. \laeq{katt2} } \end{lmm}
\begin{prf} Since $d_A=1$, there exists a fixed vector $\ket{e}\in\ca{H}^A$ such that $I^A=\proj{e}$ and that any $\tilde{\rho}\in\ca{S}_\leq(\ca{H}^{AB})$ is represented as $\proj{e}^A\otimes\tilde{\rho}^B$. Due to the definition of the smooth conditional min entropy, we have \alg{
H_{\rm min}^\epsilon(A|B)_\rho &
\geq H_{\rm min}(A|B)_\rho \\ & =
\sup_{\sigma^B \in \ca{S}_=(\ca{H}^B)}H_{\rm min}(A|B)_{\rho|\sigma} \\ &
\geq H_{\rm min}(A|B)_{\rho^{AB}|\rho^B} \\ &=
\sup \{ \lambda \in \mathbb{R}| 2^{-\lambda} I^A \otimes \rho^B \geq \rho^{AB} \} \\ &=
\sup \{ \lambda \in \mathbb{R}| 2^{-\lambda} \proj{e}^A \otimes \rho^B \geq \proj{e}^A\otimes\rho^{B} \} \\ &=0. } This implies the first inequality in \req{katt1}. To prove the second inequality in \req{katt1}, let $\hat{\rho}\in\ca{B}^\epsilon(\rho)$ and $\sigma^B \in \ca{S}_=(\ca{H}^B)$ be such that \alg{
H_{\rm min}^\epsilon(A|B)_\rho =
H_{\rm min}(A|B)_{\hat{\rho}} =
H_{\rm min}(A|B)_{\hat{\rho}|\sigma}. } By definition, it holds that \alg{
2^{-H_{\rm min}^\epsilon(A|B)_\rho} I^A \otimes \sigma^B \geq \hat{\rho}^{AB}, } which is equivalent to \alg{
2^{-H_{\rm min}^\epsilon(A|B)_\rho} \proj{e}^A \otimes \sigma^B \geq \proj{e}^A\otimes\hat{\rho}^{B}. } By taking the trace in both sides, we obtain \alg{
2^{-H_{\rm min}^\epsilon(A|B)_\rho} \geq {\rm Tr}[\hat{\rho}]. } The R.H.S. of the above inequality is evaluated as \alg{ {\rm Tr}[\hat{\rho}] =
\|\hat{\rho}\|_1 \geq
\|\rho\|_1-\|\rho-\hat{\rho}\|_1 \geq 1-2\epsilon, } where the last inequality follows from \req{relTDPD} and the condition $\hat{\rho}\in\ca{B}^\epsilon(\rho)$. This implies the second inequality in \req{katt1}. Inequality \req{katt2} follows due to the duality relation (\rLmm{duality}).
$\blacksquare$ \end{prf}
\subsection{Classical-Quantum States}
\begin{lmm}[Lemma A.5 in \cite{DBWR2010}]\label{lmm:condminCQ} For any state $\rho^{ABK} \in \ca{S}_=(\ca{H}^{ABK})$ in the form of \alg{ \rho^{ABK}=\sum_kp_k\rho_k^{AB}\otimes\proj{k}^K, \laeq{cqsmoothstate} } where $\rho_k \in \ca{S}_=(\ca{H}^{AB})$, $\inpro{k}{k'}=\delta_{k,k'}$ and $\{p_k\}_k$ is a normalized probability distribution, it holds that \alg{
H_{\rm min}(A|BK)_\rho=-\log\left(\sum_kp_k\cdot2^{-H_{\rm min}(A|B)_{\rho_k}}\right). } \end{lmm}
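As an illustrative example (with a trivial system $B$, and given here only as a sanity check), let $K$ be a uniformly distributed bit, let $\rho_0^{A}$ be the maximally mixed state on a $d$-dimensional system $A$, and let $\rho_1^{A}$ be a pure state. The above formula then gives \alg{ H_{\rm min}(A|K)_\rho=-\log\left(\frac{1}{2}\cdot2^{-\log d}+\frac{1}{2}\cdot2^{0}\right) =\log2-\log\left(1+\frac{1}{d}\right), } which approaches one bit as $d\to\infty$.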
\begin{lmm}[Lemma A.7 in \cite{DBWR2010}]\label{lmm:condminCQCQ} For any state $\rho^{ABK_1K_2} \in \ca{S}_\leq(\ca{H}^{ABK_1K_2})$ in the form of \alg{ \rho^{ABK_1K_2}=\sum_kp_k\rho_k^{AB}\otimes\proj{k}^{K_1}\otimes\proj{k}^{K_2}, \laeq{rhoclk1k2} } where $\inpro{k}{k'}=\delta_{k,k'}$, and for any $\epsilon\geq0$, it holds that \alg{
H_{\rm min}^\epsilon(AK_1|BK_2)_\rho=H_{\rm min}^\epsilon(A|BK_2)_\rho=H_{\rm min}^\epsilon(A|BK_1)_\rho. } \end{lmm}
\begin{lmm}[Lemma 29 in \cite{wakakuwa2021one}]\label{lmm:condmaxCQCQ} In the same setting as in \rLmm{condminCQCQ}, it holds that \alg{
H_{\rm max}^\epsilon(AK_1|BK_2)_\rho=H_{\rm max}^\epsilon(A|BK_2)_\rho=H_{\rm max}^\epsilon(A|BK_1)_\rho. } \end{lmm}
\blmm{SE1} Consider a state in the form of \alg{ \rho^{ACK}=\sum_kp_k\rho_k^{A}\otimes\sigma_k^C\otimes\proj{k}^K. } For any $\epsilon>0$, it holds that \alg{
H_{\rm min}^{\epsilon}(A|CK)_{\rho} =
H_{\rm min}^{\epsilon}(A|K)_{\rho} \geq 0. } \end{lmm}
\begin{prf} It is straightforward to verify that there exists a quantum operation $\ca{E}:K\rightarrow CK$ such that $\rho^{ACK}=\ca{E}(\rho^{AK})$. Due to the monotonicity of the smooth conditional min entropy under operations on the conditioning system, we have \alg{ &
H_{\rm min}^{\epsilon}(A|K)_{\rho^{AK}}
\leq H_{\rm min}^{\epsilon}(A|CK)_{\rho^{ACK}} =
H_{\rm min}^{\epsilon}(A|CK)_{\ca{E}(\rho^{AK})}
\leq H_{\rm min}^{\epsilon}(A|K)_{\rho^{AK}}, }
which implies $H_{\rm min}^{\epsilon}(A|CK)_{\rho}=H_{\rm min}^{\epsilon}(A|K)_{\rho}$. The non-negativity follows due to \rLmm{condminCQ} as \alg{
H_{\rm min}^{\epsilon}(A|K)_{\rho}
\geq H_{\rm min}(A|K)_{\rho} = -\log\left(\sum_kp_k\cdot2^{-H_{\rm min}(A)_{\rho_k}}\right) \geq -\log\left(\sum_kp_k\right) =0, } which completes the proof.
$\blacksquare$ \end{prf}
\subsection{Classically-labelled Pure States}
\blmm{SE11d} Consider a state in the form of \alg{ \rho^{ABCK}=\sum_kp_k\proj{\psi_k}^{ABC}\otimes\proj{k}^K. \laeq{CQpurestate} } For any $\epsilon>0$, it holds that \alg{ &
H_{\rm max}^{\epsilon}(A|BK)_{\rho} =
-H_{\rm min}^{\epsilon}(A|CK)_{\rho}. \laeq{minsymd} } \end{lmm}
\begin{prf} It is straightforward to verify that a purification of the state $\rho$, defined by \req{CQpurestate}, is given by \alg{ \ket{\psi_\rho}^{ABCKK'}=\sum_k\sqrt{p_k}\ket{\psi_k}^{ABC}\ket{k}^K\ket{k}^{K'}. \laeq{CQpurestate2} } Due to the duality of the smooth conditional entropies (\rLmm{duality}), we have \alg{
H_{\rm max}^{\epsilon}(A|BK)_{\rho} =
H_{\rm max}^{\epsilon}(A|BK)_{\psi_\rho} =
-H_{\rm min}^{\epsilon}(A|CK')_{\psi_\rho} =
-H_{\rm min}^{\epsilon}(A|CK)_{\rho}, } which completes the proof.
$\blacksquare$ \end{prf}
\blmm{SE11} Consider the same setting as in \rLmm{SE11d}. For any $\epsilon>0$, it holds that \alg{ &
H_{\rm min}^{\epsilon}(A|K)_{\rho} =
H_{\rm min}^{\epsilon}(B|K)_{\rho}. \laeq{minsym} } \end{lmm}
\begin{prf} To prove \req{minsym}, let $\hat{\rho}^{AK}\in\ca{B}^\epsilon(\rho)$ and $\varsigma\in\ca{S}_=(\ca{H}^K)$ be such that \alg{
H_{\rm min}^{\epsilon}(A|K)_{\rho} =
H_{\rm min}(A|K)_{\hat{\rho}} =
H_{\rm min}(A|K)_{\hat{\rho}|\varsigma}. } With $\ca{C}$ being the completely dephasing operation on $K$ with respect to the basis $\{\ket{k}\}_k$, it holds that \alg{ P(\ca{C}(\hat{\rho}),\ca{C}(\rho)) \leq P(\hat{\rho},\rho) \leq \epsilon. } In addition, if \alg{ 2^{-\lambda} I^A \otimes \varsigma^K \geq \hat{\rho}^{AK}, \laeq{mincond1} } then \alg{ 2^{-\lambda} I^A \otimes \ca{C}(\varsigma)^K \geq {\rm id}^A\otimes\ca{C}^K(\hat{\rho}^{AK}). } Thus, without loss of generality, we may assume that both $\hat{\rho}^{AK}$ and $\varsigma$ are diagonal in $\{\ket{k}\}_k$. That is, we may assume that $\hat{\rho}^{AK}$ and $\varsigma$ are in the form of \alg{ \hat{\rho}^{AK} = \sum_k \hat{p}_k \hat{\rho}_k^A\otimes\proj{k}^K, \quad \varsigma = \sum_kq_k\proj{k}. } Suppose that the Schmidt decomposition of $\ket{\psi_k}$ is given by \alg{ \ket{\psi_k} =
\sum_j\sqrt{\mu_{j|k}}\ket{e_{j|k}}^A\ket{f_{j|k}}^B. } Define linear operators $v_k:\ca{H}^A\rightarrow\ca{H}^B$ and $V:\ca{H}^A\otimes\ca{H}^K\rightarrow\ca{H}^B\otimes\ca{H}^K$ by \alg{
v_k:=\sum_j\ket{f_{j|k}}^B\bra{e_{j|k}}^A \quad (\forall k) } and $V:=\sum_kv_k\otimes\proj{k}^K$. It is straightforward to verify that $\rho^{BK}=V\rho^{AK}V^\dagger$. Thus, due to the monotonicity of the purified distance under trace non-increasing CP maps (Lemma 7 in \cite{tomamichel2010duality}), it holds that \alg{ P(\rho^{BK},V\hat{\rho}^{AK}V^\dagger) \leq P(\rho^{AK},\hat{\rho}^{AK}) \leq \epsilon. } Applying $V$ to both sides of condition \req{mincond1}, it follows that \alg{ 2^{-\lambda} V(I^A\otimes\varsigma^K)V^\dagger
\geq V\hat{\rho}^{AK}V^\dagger. } Noting that $I^B\geq (v_k^\dagger v_k)^B$, this implies that \alg{ 2^{-\lambda} I^B\otimes\varsigma^K
\geq V\hat{\rho}^{AK}V^\dagger. } Thus, we arrive at \alg{
H_{\rm min}^{\epsilon}(A|K)_{\rho}
\leq H_{\rm min}^{\epsilon}(B|K)_{\rho}. } By exchanging the roles of $A$ and $B$, we also obtain the converse inequality. This completes the proof of Equality \req{minsym}. \end{prf}
\section{Properties of The Purified Distance} \lapp{extUhlmann}
We summarize the properties of the purified distance, used in \rApp{propSmEn} to prove the properties of the smooth conditional entropies.
\blmm{purifiedDmonot} {\bf (monotonicity: Lemma 7 in \cite{tomamichel2010duality})} For any subnormalized states $\rho,\sigma\in\ca{S}_\leq(\ca{H})$ and for any completely positive trace non-increasing map $\ca{E}$, it holds that $P(\rho,\sigma)\geq P(\ca{E}(\rho),\ca{E}(\sigma))$.
Consequently, for any linear isometry $\ca{U}$, it holds that $P(\rho,\sigma)= P(\ca{U}(\rho),\ca{U}(\sigma))$. \end{lmm}
\blmm{extUhlmann0} For any normalized state $\rho$ on system $A$ and any normalized pure state $\ket{\phi}$ on system $AB$, the purified distance satisfies \alg{ P(\rho^A,\phi^A) =
\min_{|\psi\rangle^{AB}} P(\proj{\psi},\proj{\phi}) =
\sqrt{1-\max_{|\psi\rangle^{AB}}|\langle\psi|\phi\rangle|^2}, } where the minimum and the maximum are taken over all purifications $\ket{\psi}$ of $\rho$. \end{lmm}
\begin{prf} Follows from Definition 4 and Lemma 8 in \cite{tomamichel2010duality}.
$\blacksquare$ \end{prf}
\blmm{extUhlmann1} Consider a state $\Gamma$ on $KAB$ and a pure state $\ket{\Psi}$ on $KABCD$ in the form of \alg{ \ket{\Psi}=\sum_k\sqrt{p_k}\ket{k}^K\ket{\psi_k}^{ABCD}, \quad\quad \Gamma=\sum_kp_k\proj{k}^K\otimes\proj{\gamma_k}^{AB}. } There exists a set of pure states $\{\ket{\phi_k}\}_k$ on $CD$ such that, for the state \alg{ \ket{\tilde{\Gamma}}&=\sum_k\sqrt{p_k}\ket{k}^K\ket{\gamma_k}^{AB}\ket{\phi_k}^{CD}, \laeq{kilkil} } it holds that \alg{ \!\!\! P\left( \proj{\tilde{\Gamma}}\!,\proj{\Psi} \right) = P\left( \Gamma^{KAB} ,\: \ca{C}^K\!\circ\!{\rm Tr}_{CD}(\proj{\Psi}) \right), \!\! } where $\ca{C}$ is the completely dephasing operation on $K$ with respect to the basis $\{\ket{k}\}_k$. \end{lmm}
\begin{prf} It is straightforward to verify that a purification of the state $\ca{C}^K\!\circ\!{\rm Tr}_{CD}(\proj{\Psi})$ is given by \alg{ \ket{\Psi_p}=\sum_k\sqrt{p_k}\ket{k}^K\ket{\psi_k}^{ABCD}\ket{k}^{K'}, } and that any purification of the state $\Gamma^{KAB}$ to the system $KABCDK'$ is in the form of \alg{ \ket{\Gamma_p}=\sum_k\sqrt{p_k}\ket{k}^K\ket{\gamma_k}^{AB}\ket{\xi_k}^{CDK'}, } with $\{\ket{\xi_k}\}_k$ being a set of orthogonal states. A simple calculation yields \alg{
|\langle\Psi_p|\Gamma_p\rangle| = \sum_kp_k
|(\langle\psi_k|^{ABCD}\langle k|^{K'})(|\gamma_k\rangle^{AB}|\xi_k\rangle^{CDK'})|. }
The maximum of the above quantity over all orthogonal $\{\ket{\xi_k}\}_k$ is achieved by $\{\ket{\xi_k}\}_k$ that is decomposed into $|\xi_k\rangle^{CDK'}=|\phi_k\rangle^{CD}|k\rangle^{K'}$. Using this $\{|\phi_k\rangle\}_k$, we define a state $|\tilde{\Gamma}\rangle$ by \alg{ \ket{\tilde{\Gamma}}:=\sum_k\sqrt{p_k}\ket{k}^K\ket{\gamma_k}^{AB}\ket{\phi_k}^{CD} \laeq{kilkilff} } and a purification of $\Gamma^{KAB}$ by \alg{ \ket{\Gamma_p^*}:=\sum_k\sqrt{p_k}\ket{k}^K\ket{\gamma_k}^{AB}\ket{\phi_k}^{CD}\ket{k}^{K'}. \laeq{kilkili} } It follows that \alg{ & \max_{\{\xi_k\}_k}
|\langle\Psi_p|\Gamma_p\rangle| =
|\langle\Psi_p|\Gamma_p^*\rangle| \\ & = \sum_kp_k
|\langle\psi_k|^{ABCD}|\gamma_k\rangle^{AB}|\phi_k\rangle^{CD}| \laeq{bilkili} \\
&=|\langle\Psi|\tilde{\Gamma}\rangle|. } In addition, the states $\ket{\Psi_p}$ and $\ket{\Gamma_p^*}$ are obtained by a linear isometry $P^{K\rightarrow KK'}:=\sum_k\ket{k}^K\ket{k}^{K'}\bra{k}$ from $\ket{\Psi}$ and $\ket{\tilde{\Gamma}}$ as \alg{ \ket{\Psi_p}=P^{K\rightarrow KK'}\ket{\Psi}, \quad \ket{\Gamma_p^*}=P^{K\rightarrow KK'}\ket{\tilde{\Gamma}}. } Thus, due to the properties of the purified distance (\rLmm{extUhlmann0} and \rLmm{purifiedDmonot}), it follows that \alg{ P\left( \proj{\tilde{\Gamma}},\proj{\Psi} \right) = P\left( \proj{\Gamma_p^*},\proj{\Psi_p} \right) = P\left( \Gamma^{KAB} ,\: \ca{C}^K\!\circ\!{\rm Tr}_{CD}(\proj{\Psi}) \right), } which completes the proof.
$\blacksquare$ \end{prf}
\blmm{extUhlmann2} Consider the same setting as in \rLmm{extUhlmann1}, and assume that $C$ and $D$ are composite systems $C_0M_C$ and $D_0M_D$, respectively, where $M_C$ and $M_D$ are isomorphic quantum systems with an orthonormal basis $\{\ket{m}\}_m$. In addition, suppose that the state $\Psi$ is classically coherent in $M_CM_D$, i.e., that \alg{
\|
\langle m|^{M_C}\langle m'|^{M_D}\ket{\Psi}\| \propto \delta_{m,m'}. }
Then, without loss of generality, we may assume that the states $|\phi_k\rangle$ are classically coherent in $M_CM_D$. \end{lmm}
\begin{prf} It is straightforward to verify that the state $\Psi$ is classically coherent in $M_CM_D$ if and only if all $\psi_k$ are classically coherent in $M_CM_D$. Consequently, the maximum of each term in \req{bilkili} is achieved by $\phi_k$ that is classically coherent in $M_CM_D$, which completes the proof.
$\blacksquare$ \end{prf}
\blmm{gentlemeasurement} {\bf (gentle measurement: Lemma 5 in \cite{ogawa2002new} and Corollary of Lemma 7 in \cite{berta2010uncertainty})} Let $\epsilon\in(0,1]$, $\rho\in\ca{S}(\ca{H})$ and $\Lambda\in\ca{L}(\ca{H})$ be such that $0\leq\Lambda\leq I$ and ${\rm Tr}[\Lambda\rho]\geq1-\epsilon$. It holds that \alg{ &
\|\rho-\sqrt{\Lambda}\rho\sqrt{\Lambda}\|_1 \leq 2\sqrt{\epsilon}, \quad P( \rho, \sqrt{\Lambda}\rho\sqrt{\Lambda} ) \leq \sqrt{2\epsilon}. } \end{lmm}
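For a rough quantitative feel (an illustration only), if ${\rm Tr}[\Lambda\rho]=0.98$, i.e.\ $\epsilon=0.02$, the lemma gives $\|\rho-\sqrt{\Lambda}\rho\sqrt{\Lambda}\|_1\leq2\sqrt{0.02}\approx0.28$ and $P(\rho,\sqrt{\Lambda}\rho\sqrt{\Lambda})\leq\sqrt{0.04}=0.2$; the disturbance thus scales as the square root of the failure probability.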
\begin{figure*}
\caption{ The construction of encoding and decoding operations in the proof of the direct part is depicted. (i) is obtained by cancelling out $G_\sigma U$ and $(G_\sigma U)^\dagger$, corresponding to Inequality \req{200a-3} obtained from \req{200a-11} and \req{200a-22}. (ii) follows from the fact that the state $\ket{\Psi_\sigma}$ is obtained from $\ket{\Psi}$ by applying $P$ and $G_\sigma$, due to \req{psisigmaGPpsi}. In (iii), we trace out $Z\equiv Z_LZ_R$ and apply the completely dephasing operation $\ca{C}$ to $X'Y'Z'$. See Inequalities \req{200a-4} and \req{200a-4-2} that are obtained from \req{200a-3}. Note that the source state $\Psi_s$ is obtained from $\ket{\Psi}$ and $\ket{\Psi_\sigma}$ as \req{btf}. }
\label{fig:stateredistributiondirect}
\end{figure*}
\end{document}
\begin{document}
\title{An extended Hilbert scale and its applications}
\author[V. Mikhailets]{Vladimir Mikhailets}
\address{Institute of Mathematics of the National Academy of Sciences of Ukraine, 3 Tereshchen\-kivs'ka, Kyiv, 01024, Ukraine}
\email{[email protected]}
\author[A. Murach]{Aleksandr Murach}
\address{Institute of Mathematics of the National Academy of Sciences of Ukraine, 3 Tereshchen\-kivs'ka, Kyiv, 01024, Ukraine}
\email{[email protected]}
\author[T. Zinchenko]{Tetiana Zinchenko}
\address{5d Mittelstr., Oranienburg, 16515, Germany}
\email{[email protected]}
\subjclass[2010]{46B70, 46E35, 47A40}
\keywords{Hilbert scale, interpolation space, interpolation with function parameter, interpolational inequality, generalized Sobolev space, spectral expansion}
\thanks{This work is supported by the European Union's Horizon 2020 research and innovation programme under the Marie Sk{\l}odowska-Curie grant agreement No 873071 (SOMPATY: Spectral Optimization: From Mathematics to Physics and Advanced Technology).}
\begin{abstract} We propose a new viewpoint on Hilbert scales extending them by means of all Hilbert spaces that are interpolation ones between spaces on the scale. We prove that this extension admits an explicit description with the help of $\mathrm{OR}$-varying functions of the operator generating the scale. We also show that this extended Hilbert scale is obtained by the quadratic interpolation (with function parameter) between the above spaces and is closed with respect to the quadratic interpolation between Hilbert spaces. We give applications of the extended Hilbert scale to interpolational inequalities, generalized Sobolev spaces, and spectral expansions induced by abstract and elliptic operators. \end{abstract}
\maketitle
\section{Introduction}\label{sec1}
Hilbert scales (above all, the Sobolev scale) play an important role in mathematical analysis and the theory of differential equations; see, e.g., the classical monographs \cite{Berezansky68, Hermander85iii, Lax06, LionsMagenes72}, surveys \cite{Agranovich94, Agranovich97, Eidelman94}, and recent book \cite{KoshmanenkoDudkin16}. Such scales are built with respect to an arbitrarily chosen Hilbert space $H$ and a positive definite self-adjoint unbounded operator $A$ acting in this space. As a result, we obtain the Hilbert scale $\{H^{s}_{A}:s\in\mathbb{R}\}$, where $H^{s}_{A}$ is the completion of the domain of $A^{s}$ in the norm $\|A^{s}u\|_{H}$ of a vector~$u$. This scale has the following fundamental property: if $0<\theta<1$, then the mapping $\{H^{s_0}_{A},H^{s_1}_{A}\}\mapsto H^{s}_{A}$, with $s_0<s_1$ and $s:=(1-\theta)s_0+\theta s_1$, is an exact interpolation functor of type $\theta$ \cite[Theorem~9.1]{KreinPetunin66}. Concerning a linear operator $T$ bounded on both spaces $H^{s_0}_{A}$ and $H^{s_1}_{A}$, this means that $T$ is also bounded on $H^{s}_{A}$ and that the norms of $T$ on these spaces satisfy the inequality \begin{equation*}
\|T:H^{s}_{A}\to H^{s}_{A}\|\leq
\|T:H^{s_0}_{A}\to H^{s_0}_{A}\|^{1-\theta}\,
\|T:H^{s_1}_{A}\to H^{s_1}_{A}\|^{\theta}. \end{equation*} (An analogous property is fulfilled for bounded linear operators that act on pairs of different spaces belonging to two Hilbert scales.) Hence, every space $H^{s}_{A}$ subject to $s_0<s<s_1$ is an interpolation space between $H^{s_0}_{A}$ and $H^{s_1}_{A}$. However, the class of such interpolation Hilbert spaces is far broader than the section $\{H^{s}_{A}:s_0\leq s\leq s_1\}$ of the Hilbert scale.
It is therefore natural to consider the extension of this scale by means of all Hilbert spaces that are interpolation ones between some spaces $H^{s_0}_{A}$ and $H^{s_1}_{A}$, where the numbers $s_0<s_1$ range over $\mathbb{R}$. Such an extended Hilbert scale is an object of our investigation. We will show that this scale admits a simple explicit description with the help of $\mathrm{OR}$-varying functions of $A$, is obtained by the quadratic interpolation (with function parameter) between the spaces $H^{s_0}_{A}$ and $H^{s_1}_{A}$, and is closed with respect to the quadratic interpolation between Hilbert spaces. These and some other properties of the extended Hilbert scale are considered in Section~\ref{sec2} of this paper; they are proved in Section~\ref{sec3}. Note that the above interpolation and interpolational properties of Hilbert scales are studied in articles \cite{Ameur04, Ameur19, Donoghue67, Fan11, FoiasLions61, Krein60a, KreinPetunin66, Lions58, MikhailetsMurach08MFAT1, Ovchinnikov84, Pustylnik82} (see also monographs \cite[Chapter~1]{LionsMagenes72}, \cite[Section 1.1]{MikhailetsMurach14}, and \cite[Chapters 15 and 30]{Simon19}). Among them, of fundamental importance for our investigation is Ovchinnikov's result \cite[Theorem 11.4.1]{Ovchinnikov84} on an explicit description (with respect to equivalence of norms) of all Hilbert spaces that are interpolation ones between arbitrarily chosen compatible Hilbert spaces.
The next sections are devoted to various applications of the extended Hilbert scale. Section~\ref{sec3b} considers interpolational inequalities that connect the norms in spaces on the scale to each other, as well as the norms of linear operators acting between extended Hilbert scales. From the viewpoint of inequalities for norms of vectors, this scale can be interpreted as a variable Hilbert scale investigated in \cite{Hegland95, Hegland10, HeglandAnderssen11, MatheTautenhahn06}; the latter appears naturally in the theory of ill-posed problems (see, e.g., \cite{HeglandHofmann11, JinTautenhahn11, MathePereverzev03, TautenhahnHamarikHofmannShao13}). Section~\ref{sec4} gives applications of the extended Hilbert scale to function or distribution spaces, which are used specifically in the theory of pseudodifferential operators. We show that the extended Hilbert scale generated by some elliptic operators consists of generalized Sobolev spaces whose regularity order is a function $\mathrm{OR}$-varying at infinity. These spaces form the extended Sobolev scale considered in \cite{MikhailetsMurach13UMJ3, MikhailetsMurach15ResMath1} and \cite[Section~2.4.2]{MikhailetsMurach14}. It has important applications to elliptic operators \cite{Murach09UMJ3, MurachZinchenko13MFAT1, ZinchenkoMurach12UMJ11, ZinchenkoMurach14JMathSci} and elliptic boundary-value problems \cite{AnopDenkMurach20arxiv, AnopKasirenko16MFAT, AnopMurach14MFAT, AnopMurach14UMJ, KasirenkoMurach18UMJ11}. Among them are applications to the investigation of various types of convergence of spectral expansions induced by elliptic operators. This topic is examined in the last Section~\ref{sec6}. Its results are based on theorems on the convergence---in a space with two norms---of the spectral expansion induced by an abstract normal operator and on the degree of this convergence. These theorems are proved in Section~\ref{sec6a}.
\section{Basic results}\label{sec2}
Let $H$ be a separable infinite-dimensional complex Hilbert space, with $(\cdot,\cdot)$ and $\|\cdot\|$ respectively denoting the inner product and the corresponding norm in~$H$. Let $A$ be a positive definite self-adjoint unbounded linear operator in~$H$. The positive definiteness of $A$ means that there exists a number $r>0$ such that $(Au,u)\geq r(u,u)$ for every $u\in\mathrm{Dom}\,A$. As usual, $\mathrm{Dom}\,A$ denotes the domain of $A$. Without loss of generality we suppose that the lower bound $r=1$.
For every $s\in\mathbb{R}$, the self-adjoint operator $A^{s}$ in $H$ is well defined with the help of the spectral decomposition of~$A$. The domain $\mathrm{Dom}\,A^{s}$ of $A^{s}$ is dense in $H$; moreover, $\mathrm{Dom}\,A^{s}=H$ whenever $s\leq0$. Let $H^{s}_{A}$ denote the completion of $\mathrm{Dom}\,A^{s}$ with respect to the norm $\|u\|_{s}:=\|A^{s}u\|$ and the corresponding inner product $(u_{1},u_{2})_{s}:=(A^{s}u_{1},A^{s}u_{2})$, with $u,u_{1},u_{2}\in\mathrm{Dom}\,A^{s}$. The Hilbert space $H^{s}_{A}$ is separable. As usual, we retain designations $(\cdot,\cdot)_{s}$ and $\|\cdot\|_{s}$ for the inner product and the corresponding norm in this space. Note that the linear manifold $H^{s}_{A}$ coincides with $\mathrm{Dom}\,A^{s}$ whenever $s\geq0$ and that $H^{s}_{A}\supset H$ whenever $s<0$. The set $H^{\infty}_{A}:=\bigcap_{\lambda>0}H^{\lambda}_{A}$ is dense in every space $H^{s}_{A}$, with $s\in\mathbb{R}$.
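If, for instance, the operator $A$ has purely discrete spectrum (an assumption made only in this illustrative example) with an orthonormal eigenbasis $(e_{k})_{k=1}^{\infty}$ and eigenvalues $\lambda_{k}\geq1$, then for every $u=\sum_{k}c_{k}e_{k}\in\mathrm{Dom}\,A^{s}$ we have
\begin{equation*}
\|u\|_{s}^{2}=\|A^{s}u\|^{2}=\sum_{k=1}^{\infty}\lambda_{k}^{2s}\,|c_{k}|^{2},
\end{equation*}
and $H^{s}_{A}$ can be identified with the space of all series $u=\sum_{k}c_{k}e_{k}$ for which the right-hand side is finite.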
The class $\{H^{s}_{A}:s\in\mathbb{R}\}$ is called the Hilbert scale generated by $A$ or, simply, $A$-scale (see., e.g., \cite[Section~9, Subsection~1]{KreinPetunin66}). If $s_{0},s_{1}\in\mathbb{R}$ and $s_{0}<s_{1}$, then the identity mapping on $\mathrm{Dom}\,A^{s_{1}}$ extends uniquely to a continuous embedding operator $H^{s_{1}}_{A}\hookrightarrow H^{s_{0}}_{A}$, the embedding being normal. Therefore, interpreting $H^{s_{1}}_{A}$ as a linear manifold in $H^{s_{0}}_{A}$, we obtain the normal pair $[H^{s_{0}}_{A},H^{s_{1}}_{A}]$ of Hilbert spaces. This means that $H^{s_{1}}_{A}$ is dense in $H^{s_{0}}_{A}$ and that $\|u\|_{s_{0}}\leq\|u\|_{s_{1}}$ for every $u\in H^{s_{1}}_{A}$.
\begin{main-definition} \emph{The extended Hilbert scale generated by $A$} or, simply, \emph{the extended $A$-scale} consists of all Hilbert spaces each of which is an interpolation space for a certain pair $[H^{s_{0}}_{A},H^{s_{1}}_{A}]$ where $s_{0}<s_{1}$ (the real numbers $s_{0}$ and $s_{1}$ may depend on the interpolation Hilbert space). \end{main-definition}
We will give an explicit description of this scale and prove its important interpolation properties.
Beforehand, let us recall the definition of an interpolation space in the case considered. Suppose $H_{0}$ and $H_{1}$ are Hilbert spaces such that $H_{1}$ is a linear manifold in $H_{0}$ and that the embedding $H_{1}\hookrightarrow H_{0}$ is continuous. A~Hilbert space $X$ is called an interpolation space for the pair $[H_{0},H_{1}]$ (or, in other words, an interpolation space between $H_{0}$ and $H_{1}$) if $X$ satisfies the following two conditions: \begin{enumerate} \item [(i)] $X$ is an intermediate space for this pair, i.e. $X$ is a linear manifold in $H_{0}$ and the continuous embeddings $H_{1}\hookrightarrow X\hookrightarrow H_{0}$ hold; \item [(ii)] for every linear operator $T$ given on $H_{0}$, the following implication is true: if the restriction of $T$ to $H_{j}$ is a bounded operator on $H_{j}$ for each $j\in\{0,1\}$, then the restriction of $T$ to $X$ is a bounded operator on $X$. \end{enumerate}
Property (ii) implies the following inequality for norms of operators: \begin{equation*}
\|T:X\to X\|\leq c\,\max\bigl\{\,\|T:H_{0}\to H_{0}\|,\,
\|T:H_{1}\to H_{1} \|\,\bigr\}, \end{equation*} where $c$ is a certain positive number which does not depend on $T$ (see, e.g., \cite[Theorem 2.4.2]{BerghLefstrem76}). If $c=1$, the interpolation space $X$ is called exact.
Both properties (i) and (ii) are invariant under replacing the norm in $X$ with an equivalent norm. Therefore, it makes sense to describe the interpolation spaces for the pair $[H_{0},H_{1}]$ up to equivalence of norms.
As is known \cite[Theorem 9.1]{KreinPetunin66}, every space $H^{s}_{A}$ is an interpolation one for the pair $[H^{s_{0}}_{A},H^{s_{1}}_{A}]$ whenever $s_{0}\leq s\leq s_{1}$. To give a description of all interpolation Hilbert spaces for this pair, we need more general functions of $A$ than power functions used in the definition of $H^{s}_{A}$.
Choosing a Borel measurable function $\varphi:[1,\infty)\to(0,\infty)$ arbitrarily and using the spectral decomposition of~$A$, we define the self-adjoint operator $\varphi(A)>0$ which acts in~$H$. Recall that $\mathrm{Spec}\,A\subseteq[1,\infty)$ according to our assumption. Here and below, $\mathrm{Spec}\,A$ denotes the spectrum of $A$, and $\varphi(A)>0$ means that $(\varphi(A)u,u)>\nobreak0$ for every $u\in\mathrm{Dom}\,\varphi(A)\setminus\{0\}$. Let $H^{\varphi}_{A}$ denote the completion of the domain $\mathrm{Dom}\,\varphi(A)$ of $\varphi(A)$ with respect to the norm $\|u\|_{\varphi}:=\|\varphi(A)u\|$ of $u\in\mathrm{Dom}\,\varphi(A)$.
The space $H^{\varphi}_{A}$ is Hilbert and separable. Indeed, this norm is induced by the inner product $(u_{1},u_{2})_{\varphi}:=(\varphi(A)u_{1},\varphi(A)u_{2})$ of $u_{1},u_{2}\in\mathrm{Dom}\,\varphi(A)$. Besides, endowing the linear space $\mathrm{Dom}\,\varphi(A)$ with the norm $\|\cdot\|_{\varphi}$ and considering the isometric operator \begin{equation}\label{f2.1} \varphi(A):\mathrm{Dom}\,\varphi(A)\to H, \end{equation}
we infer the separability of $\mathrm{Dom}\,\varphi(A)$ (in the norm $\|\cdot\|_{\varphi}$) from the separability of~$H$. Therefore, the space $H_{A}^{\varphi}$ is separable as well. In the sequel we use the same designations $(\cdot,\cdot)_{\varphi}$ and $\|\cdot\|_{\varphi}$ for the inner product and the corresponding norm in the whole Hilbert space~$H^{\varphi}_{A}$.
Operator \eqref{f2.1} extends uniquely (by continuity) to an isometric isomorphism \begin{equation}\label{f2.2} B:H_{A}^{\varphi}\leftrightarrow H. \end{equation} The equality $B(H_{A}^{\varphi})=H$ follows from the fact that the range of $\varphi(A)$ coincides with $H$ whenever $0\not\in\mathrm{Spec}\,\varphi(A)$ and that the range is narrower than $H$ but is dense in~$H$ whenever $0\in\mathrm{Spec}\,\varphi(A)$. Hence, $(u_{1},u_{2})_{\varphi}=(Bu_{1},Bu_{2})$ for every $u_{1},u_{2}\in H_{A}^{\varphi}$. Besides, $H_{A}^{\varphi}=\mathrm{Dom}\,\varphi(A)$ if and only if $0\not\in\mathrm{Spec}\,\varphi(A)$.
Remark that we use the same designation $H^{\varphi}_{A}$ both in the case where $\varphi$ is a function and in the case where $\varphi$ is a number. This will not lead to ambiguity because we will always specify what $\varphi$ means, a function or number. Of course, this remark also concerns the designations of the norm and inner product in $H^{\varphi}_{A}$.
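For example, if $\varphi(t)=t^{s}$ with $s\in\mathbb{R}$, then $\varphi(A)=A^{s}$, and hence $H^{\varphi}_{A}=H^{s}_{A}$ with equality of norms; thus the spaces $H^{\varphi}_{A}$ contain the $A$-scale as a special case.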
We need the Hilbert spaces $H^{\varphi}_{A}$ such that $\varphi$ ranges over a certain function class $\mathrm{OR}$. By definition, this class consists of all Borel measurable functions $\varphi:\nobreak[1,\infty)\rightarrow(0,\infty)$ for which there exist numbers $a>1$ and $c\geq1$ such that $c^{-1}\leq\varphi(\lambda t)/\varphi(t)\leq c$ for all $t\geq1$ and $\lambda\in[1,a]$ (the numbers $a$ and $c$ may depend on $\varphi$). Such functions were introduced by V.~G.~Avakumovi\'c \cite{Avakumovic36} in 1936, are called OR-varying (or O-regularly varying) at infinity and have been well investigated \cite{BinghamGoldieTeugels89, BuldyginIndlekoferKlesovSteinebach18, Seneta76}.
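A typical example of a function of class $\mathrm{OR}$ (given here only for illustration) is $\varphi(t)=t^{s}(1+\log t)^{r}$ with arbitrary $s,r\in\mathbb{R}$. Indeed, for all $t\geq1$ and $\lambda\in[1,2]$,
\begin{equation*}
\frac{\varphi(\lambda t)}{\varphi(t)}=
\lambda^{s}\biggl(\frac{1+\log\lambda+\log t}{1+\log t}\biggr)^{r},
\end{equation*}
and the second factor lies between $(1+\log2)^{-|r|}$ and $(1+\log2)^{|r|}$; hence the condition in the definition of $\mathrm{OR}$ is satisfied with $a=2$ and $c=2^{|s|}(1+\log2)^{|r|}$.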
The class $\mathrm{OR}$ admits the following simple description \cite[Theorem~A.1]{Seneta76}: $\varphi\in\mathrm{OR}$ if and only if \begin{equation*} \varphi(t)= \exp\Biggl(\beta(t)+ \int\limits_{1}^{t}\frac{\gamma(\tau)}{\tau}\;d\tau\Biggr), \quad t\geq1, \end{equation*} for some bounded Borel measurable functions $\beta,\gamma:[1,\infty)\to\mathbb{R}$.
This class has the following important property \cite[Theorem~A.2(a)]{Seneta76}: for every $\varphi\in\mathrm{OR}$ there exist real numbers $s_{0}$ and $s_{1}$, with $s_{0}\leq s_{1}$, and positive numbers $c_{0}$ and $c_{1}$ such that \begin{equation}\label{f2.3} c_{0}\lambda^{s_{0}}\leq\frac{\varphi(\lambda t)}{\varphi(t)}\leq c_{1}\lambda^{s_{1}}\quad\mbox{for all}\quad t\geq1\quad\mbox{and}\quad\lambda\geq1. \end{equation} Let $\varphi\in\mathrm{OR}$; considering the left-hand side of the inequality \eqref{f2.3} in the $t=1$ case, we conclude that $\varphi(\lambda)\geq\mathrm{const}\cdot e^{-\lambda}$ whenever $\lambda\geq1$. Hence, the identity mapping on $\mathrm{Dom}\,\varphi(A)$ extends uniquely to a continuous embedding operator $H^{\varphi}_{A}\hookrightarrow H^{1/\exp}_{A}$. This will be shown in the first two paragraphs of the proof of Theorem~\ref{th2.6}, in which we put $\varphi_{1}(t):=\varphi(t)$ and $\varphi_{0}(t):=e^{-t}$. Here, of course, $H^{1/\exp}_{A}$ denotes the Hilbert space $H^{\chi}_{A}$ parametrized with the function $\chi(t):=e^{-t}$ of $t\geq1$. Therefore, we will interpret $H^{\varphi}_{A}$ as a linear manifold in $H^{1/\exp}_{A}$.
Thus, all the spaces $H^{\varphi}_{A}$ parametrized with $\varphi\in\mathrm{OR}$ and, hence, all the spaces from the extended $A$-scale lie in the same space $H^{1/\exp}_{A}$, which enables us to compare them.
\begin{theorem}\label{th2.1} A Hilbert space $X$ belongs to the extended $A$-scale if and only if $X=\nobreak H^{\varphi}_{A}$ up to equivalence of norms for certain $\varphi\in\mathrm{OR}$. \end{theorem}
\begin{remark}\label{rem2.2} We cannot transfer from the extended $A$-scale to a wider class of spaces by means of interpolation Hilbert spaces between any spaces from this scale. Namely, suppose that certain Hilbert spaces $H_{0}$ and $H_{1}$ belong to the extended $A$-scale and satisfy the continuous embedding $H_{1}\hookrightarrow H_{0}$. Then every Hilbert space $X$ which is an interpolation one for the pair $[H_{0},H_{1}]$ belongs to this scale as well. Indeed, for each $j\in\{0,1\}$, the space $H_{j}$ is an interpolation one for a certain pair $[H^{s_{j,0}}_{A},H^{s_{j,1}}_{A}]$, where $s_{j,0}<s_{j,1}$. Besides, both $H^{s_{j,0}}_{A}$ and $H^{s_{j,1}}_{A}$ are interpolation spaces for the pair $[H^{s_{0}}_{A},H^{s_{1}}_{A}]$ provided that $s_{0}:=\min\{s_{0,0},s_{1,0}\}$ and $s_{1}:=\max\{s_{0,1},s_{1,1}\}$. Hence, the above-mentioned space $X$ is an interpolation one for the latter pair, which follows directly from the given definition of an interpolation space. Thus, $X$ belongs to the extended $A$-scale. \end{remark}
We will also give an explicit description (up to equivalence of norms) of all Hilbert spaces that are interpolation ones for the given pair $[H^{s_{0}}_{A},H^{s_{1}}_{A}]$, where $s_{0}<s_{1}$. Considering $\varphi\in\mathrm{OR}$, we put \begin{gather}\label{f2.4} \sigma_{0}(\varphi):=\sup\{s_{0}\in\mathbb{R}\mid\mbox{the left-hand inequality in \eqref{f2.3} holds}\},\\ \label{f2.5} \sigma_{1}(\varphi):=\inf\{s_{1}\in\mathbb{R}\mid\mbox{the right-hand inequality in \eqref{f2.3} holds}\}. \end{gather} Evidently, $-\infty<\sigma_{0}(\varphi)\leq\sigma_{1}(\varphi)<\infty$. The numbers $\sigma_{0}(\varphi)$ and $\sigma_{1}(\varphi)$ are equal to the lower and the upper Matuszewska indices of $\varphi$, respectively (see \cite{Matuszewska64} and \cite[Theorem~2.2.2]{BinghamGoldieTeugels89}).
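For instance (the computation is elementary and given only for illustration), if $\varphi(t)=t^{s}(1+\log t)^{r}$ with $s\in\mathbb{R}$ and $r>0$, then $\sigma_{0}(\varphi)=\sigma_{1}(\varphi)=s$; here the supremum in \eqref{f2.4} is attained (one may take $s_{0}=s$ and $c_{0}=1$), whereas the infimum in \eqref{f2.5} is not, because the quotient $\varphi(\lambda)/\varphi(1)=\lambda^{s}(1+\log\lambda)^{r}$ is not dominated by any constant multiple of $\lambda^{s}$.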
\begin{theorem}\label{th2.3} Let $s_{0},s_{1}\in\mathbb{R}$ and $s_{0}<s_{1}$. A Hilbert space $X$ is an interpolation space for the pair $[H^{s_{0}}_{A},H^{s_{1}}_{A}]$ if and only if $X=H^{\varphi}_{A}$ up to equivalence of norms for a certain function parameter $\varphi\in\mathrm{OR}$ that satisfies condition~\eqref{f2.3}. \end{theorem}
\begin{remark}\label{rem2.4} Of course, we mean in Theorem~\ref{th2.3} that the positive numbers $c_{0}$ and $c_{1}$ in condition \eqref{f2.3} depend neither on $t$ nor on $\lambda$. Evidently, this condition is equivalent to the following pair of conditions: \begin{enumerate} \item [$\mathrm{(i)}$] $s_{0}\leq\sigma_{0}(\varphi)$ and, moreover, $s_{0}<\sigma_{0}(\varphi)$ if the supremum in $\eqref{f2.4}$ is not attained; \item [$\mathrm{(ii)}$] $\sigma_{1}(\varphi)\leq s_{1}$ and, moreover, $\sigma_{1}(\varphi)<s_{1}$ if the infimum in $\eqref{f2.5}$ is not attained. \end{enumerate} \end{remark}
It is important for applications that the extended $A$-scale can be obtained by means of the quadratic interpolation (with function parameter) between spaces from $A$-scale. Before we formulate a relevant theorem, we will recall the definition of the quadratic interpolation between Hilbert spaces. This interpolation is a natural generalization of the classical interpolation method by J.-L.~Lions \cite{Lions58} and S.~G.~Krein \cite{Krein60a} (see also the book \cite[Chapter~1, Sections 2 and~5]{LionsMagenes72} and survey \cite[Section~9]{KreinPetunin66}) to the case where a general enough function is used, instead of the number $\theta\in(0,\,1)$, as an interpolation parameter. The generalization first appeared in C.~Foia\c{s} and J.-L.~Lions' paper \cite[Section~3.4]{FoiasLions61}. We mainly follow monograph \cite[Section~1.1]{MikhailetsMurach14} (see also \cite[Section~2.1]{MikhailetsMurach08MFAT1}).
Let $\mathcal{B}$ denote the set of all Borel measurable functions $\psi:(0,\infty)\rightarrow(0,\infty)$ such that $\psi$ is bounded on each compact interval $[a,b]$, with $0<a<b<\infty$, and that $1/\psi$ is bounded on every set $[r,\infty)$, with $r>0$. We arbitrarily choose a function $\psi\in\mathcal{B}$ and a regular pair $\mathcal{H}:=[H_{0},H_{1}]$ of separable complex Hilbert spaces. The regularity of this pair means that $H_{1}$ is a dense linear manifold in $H_{0}$ and that the embedding $H_{1}\hookrightarrow H_{0}$ is continuous. For $\mathcal{H}$ there exists a positive definite self-adjoint linear operator $J$ in $H_{0}$ such that $\mathrm{Dom}\,J=H_{1}$ and that $\|Ju\|_{H_{0}}=\|u\|_{H_{1}}$ for every $u\in H_{1}$. The operator $J$ is uniquely determined by the pair $\mathcal{H}$ and is called the generating operator for this pair.
Using the spectral decomposition of $J$, we define the self-adjoint operator $\psi(J)$ in $H_{0}$. Let $[H_{0},H_{1}]_{\psi}$ or, simply, $\mathcal{H}_{\psi}$ denote the domain of $\psi(J)$ endowed with the inner product $(u_{1},u_{2})_{\mathcal{H}_{\psi}}:=(\psi(J)u_{1},\psi(J)u_{2})_{H_{0}}$ and the corresponding norm $\|u\|_{\mathcal{H}_{\psi}}=\|\psi(J)u\|_{H_{0}}$, with $u,u_{1},u_{2}\in\mathcal{H}_{\psi}$. The space $\mathcal{H}_{\psi}$ is Hilbert and separable.
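To relate these notions to the $A$-scale (the identification below is an elementary observation made for illustration), consider the regular pair $[H^{s_{0}}_{A},H^{s_{1}}_{A}]$ with $s_{0}<s_{1}$. Its generating operator can be identified with the operator $A^{s_{1}-s_{0}}$ (more precisely, with its closure in $H^{s_{0}}_{A}$), since $\|A^{s_{1}-s_{0}}u\|_{s_{0}}=\|A^{s_{1}}u\|=\|u\|_{s_{1}}$ for every $u\in H^{\infty}_{A}$. Choosing $\psi(\tau):=\tau^{\theta}$ with $0<\theta<1$, we then obtain
\begin{equation*}
\bigl[H^{s_{0}}_{A},H^{s_{1}}_{A}\bigr]_{\psi}=
H^{(1-\theta)s_{0}+\theta s_{1}}_{A}
\quad\mbox{with equality of norms},
\end{equation*}
which agrees with the classical interpolation property of the $A$-scale mentioned in Section~\ref{sec1}.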
A function $\psi\in\mathcal{B}$ is called an interpolation parameter if the following condition is fulfilled for all regular pairs $\mathcal{H}=[H_{0},H_{1}]$ and $\mathcal{G}=[G_{0},G_{1}]$ of separable complex Hilbert spaces and for an arbitrary linear mapping $T$ given on $H_{0}$: if the restriction of $T$ to $H_{j}$ is a bounded operator $T:H_{j}\rightarrow G_{j}$ for each $j\in\{0,1\}$, then the restriction of $T$ to $\mathcal{H}_{\psi}$ is also a bounded operator $T:\mathcal{H}_{\psi}\to\mathcal{G}_{\psi}$. If $\psi$ is an interpolation parameter, we will say that the Hilbert space $\mathcal{H}_{\psi}$ is obtained by the quadratic interpolation with the function parameter~$\psi$ of the pair $\mathcal{H}$ (or, in other words, between the spaces $H_{0}$ and $H_{1}$). In this case, the dense continuous embeddings $H_{1}\hookrightarrow\mathcal{H}_{\psi}\hookrightarrow H_{0}$ hold true.
A function $\psi\in\mathcal{B}$ is an interpolation parameter if and only if $\psi$ is pseudoconcave in a neighbourhood of infinity. The latter property means that there exist a number $r>0$ and a concave function $\psi_{1}:(r,\infty)\rightarrow(0,\infty)$ such that both functions $\psi/\psi_{1}$ and $\psi_{1}/\psi$ are bounded on $(r,\infty)$. This key fact follows from J.~Peetre's \cite{Peetre66, Peetre68} description of all interpolation functions for the weighted $L_{p}(\mathbb{R}^{n})$-type spaces (the description is also set forth in the monograph \cite[Theorem 5.4.4]{BerghLefstrem76}).
The above-mentioned interpolation property of the extended $A$-scale is formulated as follows:
\begin{theorem}\label{th2.5} Let $\varphi\in\mathrm{OR}$, and let real numbers $s_{0}<s_{1}$ be taken from condition~\eqref{f2.3}. Put \begin{equation}\label{f2.6} \psi(\tau):= \begin{cases} \;\tau^{-s_{0}/(s_{1}-s_{0})}\,\varphi(\tau^{1/(s_{1}-s_{0})}) &\text{whenever}\quad\tau\geq1, \\ \;\varphi(1) & \text{whenever}\quad0<\tau<1. \end{cases} \end{equation} Then the function $\psi$ belongs to $\mathcal{B}$ and is an interpolation parameter, and \begin{equation}\label{f2.7} \bigl[H^{s_{0}}_{A},H^{s_{1}}_{A}\bigr]_{\psi}=H^{\varphi}_{A} \quad\mbox{with equality of norms}. \end{equation} \end{theorem}
For instance, considering the function $\varphi(t):=1+\log t$ of $t\geq1$ from the class $\mathrm{OR}$, we can take $s_{0}:=0$ and $s_{1}:=\varepsilon$ for every $\varepsilon>0$ and put $\psi(\tau):=1+\varepsilon^{-1}\log\tau$ whenever $\tau\geq1$ in the interpolation formula \eqref{f2.7}.
Note that, if $s_{0}<\sigma_{0}(\varphi)$ and $s_{1}>\sigma_{1}(\varphi)$, the numbers $s_{0}$ and $s_{1}$ will satisfy the condition of Theorem~\ref{th2.5} whatever $\varphi\in\mathrm{OR}$.
The extended $A$-scale is closed with respect to the quadratic interpolation (with function parameter). This follows directly from the next two results.
\begin{theorem}\label{th2.6} Let $\varphi_{0},\varphi_{1}:[1,\infty)\to(0,\infty)$ be Borel measurable functions. Suppose that the function $\varphi_{0}/\varphi_{1}$ is bounded on $[1,\infty)$. Then the pair $[H^{\varphi_{0}}_{A},H^{\varphi_{1}}_{A}\bigr]$ is regular. Let $\psi\in\mathcal{B}$, and put \begin{equation}\label{f2.8} \varphi(t):=\varphi_{0}(t)\,\psi \biggl(\frac{\varphi_{1}(t)}{\varphi_{0}(t)}\biggr) \quad\mbox{whenever}\quad t\geq1. \end{equation} Then \begin{equation}\label{f2.9} \bigl[H^{\varphi_{0}}_{A},H^{\varphi_{1}}_{A}\bigr]_{\psi}=H^{\varphi}_{A} \quad\mbox{with equality of norms}. \end{equation} \end{theorem}
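As a simple consistency check (not needed in what follows), note that formula \eqref{f2.8} is compatible with Theorem~\ref{th2.5}: taking $\varphi_{0}(t):=t^{s_{0}}$, $\varphi_{1}(t):=t^{s_{1}}$ and letting $\psi$ be given by \eqref{f2.6}, we get, for every $t\geq1$,
\begin{equation*}
\varphi_{0}(t)\,\psi\biggl(\frac{\varphi_{1}(t)}{\varphi_{0}(t)}\biggr)=
t^{s_{0}}\,\psi\bigl(t^{s_{1}-s_{0}}\bigr)=
t^{s_{0}}\cdot t^{-s_{0}}\varphi(t)=\varphi(t),
\end{equation*}
so that \eqref{f2.9} turns into \eqref{f2.7} in this case.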
\begin{proposition}\label{prop2.7} Let $\varphi_{0},\varphi_{1}\in\mathrm{OR}$ and $\psi\in\mathcal{B}$. Suppose that the function $\varphi_{0}/\varphi_{1}$ is bounded in a neighbourhood of infinity and that $\psi$ is an interpolation parameter. Then the function \eqref{f2.8} belongs to the class $\mathrm{OR}$. \end{proposition}
This proposition is contained in \cite[Theorem~5.2]{MikhailetsMurach15ResMath1}.
As to Theorem~\ref{th2.6}, it is necessary to note that its hypothesis allows us to consider $H^{\varphi_{1}}_{A}$ and $H^{\varphi}_{A}$ as linear manifolds in $H^{\varphi_0}_{A}$. Indeed, since the functions $\varphi_{0}/\varphi_{1}$ and $\varphi_{0}/\varphi$ are bounded on $[1,\infty)$, the identity mappings on $\mathrm{Dom}\,\varphi_{1}(A)$ and on $\mathrm{Dom}\,\varphi(A)$ extend uniquely to some continuous embedding operators $H^{\varphi_{1}}_{A}\hookrightarrow H^{\varphi_{0}}_{A}$ and $H^{\varphi}_{A}\hookrightarrow H^{\varphi_{0}}_{A}$ respectively (see the first two paragraphs of the proof of Theorem~\ref{th2.6}). Thus, we may speak of the regularity of the pair $[H^{\varphi_{0}}_{A},H^{\varphi_{1}}_{A}]$ and compare the spaces $[H^{\varphi_{0}}_{A},H^{\varphi_{1}}_{A}]_{\psi}$ and $H^{\varphi}_{A}$ in \eqref{f2.9}.
\section{Proofs of basic results}\label{sec3}
We will prove Theorems \ref{th2.1}, \ref{th2.3}, \ref{th2.5}, and \ref{th2.6} in the reverse order, which is stipulated by a remarkable result by Ovchinnikov \cite[Theorem 11.4.1]{Ovchinnikov84}. This result explicitly describes (up to equivalence of norms) all the Hilbert spaces that are interpolation ones for an arbitrary compatible pair of Hilbert spaces. As to our consideration, Ovchinnikov's theorem can be formulated as follows:
\begin{proposition}\label{prop3.1} Let $\mathcal{H}:=[H_{0},H_{1}]$ be a regular pair of separable complex Hilbert spaces. A Hilbert space $X$ is an interpolation space for $\mathcal{H}$ if and only if $X=\mathcal{H}_{\psi}$ up to equivalence of norms for a certain interpolation parameter $\psi\in\mathcal{B}$. \end{proposition}
Note that all exact interpolation Hilbert spaces for $\mathcal{H}$ were characterized (isometrically) by Donoghue \cite{Donoghue67}.
Let us turn to the proofs of the theorems formulated in Section~\ref{sec2}.
\begin{proof}[Proof of Theorem $\ref{th2.6}$]
We first show that the pair $[H^{\varphi_{0}}_{A},H^{\varphi_{1}}_{A}]$ is regular. It follows from the hypothesis of the theorem that $\mathrm{Dom}\,\varphi_{1}(A)\subseteq\mathrm{Dom}\,\varphi_{0}(A)$ and that $\|u\|_{\varphi_{0}}\leq\varkappa^{-1}\|u\|_{\varphi_{1}}$ for every $u\in\mathrm{Dom}\,\varphi_{1}(A)$, with \begin{equation}\label{f3.2} \varkappa:=\inf_{t\geq1} \frac{\varphi_{1}(t)}{\varphi_{0}(t)}>0. \end{equation} Hence, the identity mapping on $\mathrm{Dom}\,\varphi_{1}(A)$ extends uniquely to a continuous linear operator \begin{equation}\label{f3.3} I:H^{\varphi_{1}}_{A}\to H^{\varphi_{0}}_{A}. \end{equation} Let us prove that this operator is injective.
Suppose that $Iu=0$ for certain $u\in H^{\varphi_{1}}_{A}$. We must prove the equality $u=0$. Choose a sequence $(u_{k})_{k=1}^{\infty}\subset\mathrm{Dom}\,\varphi_{1}(A)$ such that $u_{k}\to u$ in $H^{\varphi_{1}}_{A}$ as $k\to\infty$. Since operator \eqref{f3.3} is bounded, we have the convergence $u_{k}=Iu_{k}\to Iu=0$ in $H^{\varphi_{0}}_{A}$. Hence, the sequence $(u_{k})_{k=1}^{\infty}$ is a Cauchy one in $\mathrm{Dom}\,\varphi_{1}(A)$, and $u_{k}\to0$ in $\mathrm{Dom}\,\varphi_{0}(A)$. Here and below in the proof, the linear space $\mathrm{Dom}\,\varphi_{j}(A)$ is endowed with the norm $\|\cdot\|_{\varphi_{j}}$ for each $j\in\{0,1\}$. Thus, there exists a vector $v\in H$ such that $\varphi_{1}(A)u_{k}\to v$ in $H$, and $\varphi_{0}(A)u_{k}\to0$ in $H$. Besides, \begin{equation*} \varphi_{0}(A)u_{k}= \frac{\varphi_{0}}{\varphi_{1}}(A)\varphi_{1}(A)u_{k}\to \frac{\varphi_{0}}{\varphi_{1}}(A)v\quad\mbox{in}\quad H \end{equation*}
because the function $\varphi_{0}/\varphi_{1}$ is bounded on $[1,\infty)$. Therefore, $(\varphi_{0}/\varphi_{1})(A)v=0$. Hence, $v=0$ as a vector from $H$ because the function $\varphi_{0}/\varphi_{1}$ is positive on $[1,\infty)$. Thus, $\varphi_{1}(A)u_{k}\to0$ in~$H$. Therefore, $\|u_{k}\|_{\varphi_{1}}=\|\varphi_{1}(A)u_{k}\|\to0$, i.e. $u=\lim_{k\to\infty}u_{k}=0$ in $H^{\varphi_{1}}_{A}$. We have proved that the operator \eqref{f3.3} is injective.
Hence, it realizes a continuous embedding $H^{\varphi_{1}}_{A}\hookrightarrow H^{\varphi_{0}}_{A}$. The density of this embedding follows directly from the density of $\mathrm{Dom}\,\varphi_{1}(A)$ in the normed space $\mathrm{Dom}\,\varphi_{0}(A)$. Let us prove the latter density. Choose a vector $u\in\mathrm{Dom}\,\varphi_{0}(A)$ arbitrarily. The domain of the operator $\varphi_{1}(A)(1/\varphi_{0})(A)$ is dense in $H$ because the closure of this operator coincides with the operator $(\varphi_{1}/\varphi_{0})(A)$, whose domain is dense in~$H$. Hence, there exists a sequence \begin{equation*} (v_{k})_{k=1}^{\infty}\subset \mathrm{Dom}\bigl(\varphi_{1}(A)(1/\varphi_{0})(A)\bigr) \end{equation*} such that $v_{k}\to\varphi_{0}(A)u$ in $H$ as $k\to\infty$. Putting \begin{equation*} u_{k}:=(1/\varphi_{0})(A)v_{k}\in\mathrm{Dom}\,\varphi_{1}(A) \end{equation*} for every integer $k\geq1$, we conclude that $\varphi_{0}(A)u_{k}=v_{k}\to\varphi_{0}(A)u$ in $H$. Therefore, $\mathrm{Dom}\,\varphi_{1}(A)\ni u_{k}\to u$ in $\mathrm{Dom}\,\varphi_{0}(A)$. Hence, the set $\mathrm{Dom}\,\varphi_{1}(A)$ is dense in $\mathrm{Dom}\,\varphi_{0}(A)$. Thus, the continuous embedding $H^{\varphi_{1}}_{A}\hookrightarrow H^{\varphi_{0}}_{A}$ is dense, i.e. the pair $[H^{\varphi_{0}}_{A},H^{\varphi_{1}}_{A}\bigr]$ is regular.
Let us build the generating operator for this pair. Choosing $j\in\{0,1\}$ arbitrarily, we have the isometric linear operator \begin{equation*} \varphi_{j}(A):\mathrm{Dom}\,\varphi_{j}(A)\to H. \end{equation*} This operator extends uniquely (by continuity) to an isometric isomorphism \begin{equation}\label{f3.4} B_{j}:H_{A}^{\varphi_{j}}\leftrightarrow H, \quad\mbox{with}\quad j\in\{0,1\} \end{equation} (see the explanation for \eqref{f2.2}). Define the linear operator $J$ in $H_{A}^{\varphi_{0}}$ by the formula $Ju:=B_{0}^{-1}B_{1}u$ for every $u\in\mathrm{Dom}\,J:=H_{A}^{\varphi_{1}}$. Let us prove that $J$ is the generating operator for the pair $[H^{\varphi_{0}}_{A},H^{\varphi_{1}}_{A}]$.
Note first that $J$ sets an isometric isomorphism \begin{equation}\label{f3.5} J=B_{0}^{-1}B_{1}:H_{A}^{\varphi_{1}}\leftrightarrow H_{A}^{\varphi_{0}}. \end{equation} Hence, the operator $J$ is closed in $H_{A}^{\varphi_{0}}$. Besides, $J$ is a positive definite operator in $H_{A}^{\varphi_{0}}$. Indeed, choosing $u\in\mathrm{Dom}\,\varphi_{1}(A)$ arbitrarily, we write the following: \begin{align*} (Ju,u)_{\varphi_{0}}&=(B_{0}^{-1}B_{1}u,u)_{\varphi_{0}}=(B_{1}u,B_{0}u)= (\varphi_{1}(A)u,\varphi_{0}(A)u)\\ &=\Bigl(\frac{\varphi_{1}}{\varphi_{0}}(A)\varphi_{0}(A)u, \varphi_{0}(A)u\Bigr)\geq\varkappa(\varphi_{0}(A)u,\varphi_{0}(A)u)= \varkappa(u,u)_{\varphi_{0}}, \end{align*} the inequality being due to~\eqref{f3.2}. Passing here to the limit and using \eqref{f3.5}, we conclude that \begin{equation}\label{f3.5bis} (Ju,u)_{\varphi_{0}}\geq\varkappa(u,u)_{\varphi_{0}} \quad\mbox{for every}\quad u\in H_{A}^{\varphi_{1}}. \end{equation} Thus, $J$ is a positive definite closed operator in $H_{A}^{\varphi_{0}}$. Moreover, since $0\notin\mathrm{Spec}\,J$ by \eqref{f3.5}, this operator is self-adjoint in $H_{A}^{\varphi_{0}}$. Regarding \eqref{f3.5} again, we conclude that $J$ is the generating operator for the pair $[H^{\varphi_{0}}_{A},H^{\varphi_{1}}_{A}]$.
Let us reduce the self-adjoint operator $J$ in $H_{A}^{\varphi_{0}}$ to an operator of multiplication by a function. Since the operator $A$ is self-adjoint in $H$ and since $A\geq1$, there exist a space $R$ with a finite measure $\mu$, a measurable function $\alpha:R\to[1,\infty)$, and an isometric isomorphism \begin{equation}\label{f3.6} \mathcal{I}:H\leftrightarrow L_{2}(R,d\mu) \end{equation} such that \begin{equation*} \mathrm{Dom}\,A=\{u\in H:\alpha\cdot\mathcal{I}u\in L_{2}(R,d\mu)\} \end{equation*} and that $\mathcal{I}Au=\alpha\cdot\mathcal{I}u$ for every $u\in\mathrm{Dom}\,A$; see, e.g., \cite[Theorem VIII.4]{ReedSimon72}. In other words, $\mathcal{I}$ reduces $A$ to the operator of multiplication by~$\alpha$.
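To illustrate this representation (the illustration is given only for orientation and is not used in the proof), suppose that $A$ has a purely discrete spectrum, i.e. $H$ possesses an orthonormal basis $(e_{k})_{k=1}^{\infty}$ of eigenvectors of $A$ with $Ae_{k}=a_{k}e_{k}$ and $a_{k}\geq1$. Then one may take $R:=\mathbb{N}$ endowed with the finite measure $\mu(\{k\}):=2^{-k}$, the function $\alpha(k):=a_{k}$, and
\begin{equation*}
(\mathcal{I}u)(k):=2^{k/2}(u,e_{k})\quad\mbox{whenever}\quad u\in H
\quad\mbox{and}\quad k\in\mathbb{N}.
\end{equation*}
Indeed, $\|\mathcal{I}u\|_{L_{2}(R,d\mu)}^{2}=\sum_{k=1}^{\infty}|(u,e_{k})|^{2}=\|u\|^{2}$, and $(\mathcal{I}Au)(k)=2^{k/2}(u,Ae_{k})=a_{k}\,(\mathcal{I}u)(k)$ whenever $u\in\mathrm{Dom}\,A$.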
Using \eqref{f3.4} and \eqref{f3.6}, we introduce the isometric isomorphism \begin{equation}\label{f3.9} \mathcal{I}_{0}:=\mathcal{I}B_{0}:H^{\varphi_{0}}_{A}\leftrightarrow L_{2}(R,d\mu). \end{equation} Let us show that $\mathcal{I}_{0}$ reduces $J$ to an operator of multiplication by a function. Given $u\in\mathrm{Dom}\,\varphi_{1}(A)$, we write the following: \begin{align*} \mathcal{I}_{0}Ju&=(\mathcal{I}B_{0})(B_{0}^{-1}B_{1})u= \mathcal{I}B_{1}u=\mathcal{I}\varphi_{1}(A)u\\ &=\mathcal{I}\Bigl(\frac{\varphi_{1}}{\varphi_{0}}\Bigr)(A)\varphi_{0}(A)u =\Bigl(\frac{\varphi_{1}}{\varphi_{0}}\circ\alpha\Bigr) \mathcal{I}\varphi_{0}(A)u= \Bigl(\frac{\varphi_{1}}{\varphi_{0}}\circ\alpha\Bigr)\mathcal{I}_{0}u. \end{align*} Thus, \begin{equation}\label{f3.10} \mathcal{I}_{0}Ju= \Bigl(\frac{\varphi_{1}}{\varphi_{0}}\circ\alpha\Bigr)\mathcal{I}_{0}u \quad\mbox{for every}\quad u\in\mathrm{Dom}\,\varphi_{1}(A). \end{equation} Let us prove that this equality holds true for every $u\in H^{\varphi_{1}}_{A}$.
Choose $u\in H^{\varphi_{1}}_{A}$ arbitrarily, and consider a sequence $(u_{k})_{k=1}^{\infty}\subset\mathrm{Dom}\,\varphi_{1}(A)$ such that $u_{k}\to u$ in $H^{\varphi_{1}}_{A}$ as $k\to\infty$. Owing to \eqref{f3.10}, we have the equality \begin{equation}\label{f3.11} \mathcal{I}_{0}Ju_{k}= \Bigl(\frac{\varphi_{1}}{\varphi_{0}}\circ\alpha\Bigr)\mathcal{I}_{0}u_{k} \quad\mbox{whenever}\quad 1\leq k\in\mathbb{Z}. \end{equation} Here, \begin{equation}\label{f3.12} \mathcal{I}_{0}Ju_{k}\to\mathcal{I}_{0}Ju\quad\mbox{and}\quad \mathcal{I}_{0}u_{k}\to\mathcal{I}_{0}u \quad\mbox{in}\quad L_{2}(R,d\mu)\quad\mbox{as}\quad k\to\infty \end{equation} due to the isometric isomorphisms \eqref{f3.5} and \eqref{f3.9}. Since convergence in $L_{2}(R,d\mu)$ implies convergence in the measure $\mu$, it follows from \eqref{f3.12} by the Riesz theorem that \begin{equation}\label{f3.13} \mathcal{I}_{0}Ju_{k_l}\to\mathcal{I}_{0}Ju \quad\mbox{and}\quad \mathcal{I}_{0}u_{k_l}\to\mathcal{I}_{0}u \quad\mu\mbox{-a.e. on}\;R\quad\mbox{as}\quad l\to\infty \end{equation} for a certain subsequence $(u_{k_l})_{l=1}^{\infty}$ of $(u_{k})_{k=1}^{\infty}$. The latter convergence implies that \begin{equation}\label{f3.14} \Bigl(\frac{\varphi_{1}}{\varphi_{0}}\circ\alpha\Bigr) \mathcal{I}_{0}u_{k_l}\to \Bigl(\frac{\varphi_{1}}{\varphi_{0}}\circ\alpha\Bigr)\mathcal{I}_{0}u \quad\mu\mbox{-a.e. on}\;R\quad\mbox{as}\quad l\to\infty. \end{equation} Now formulas \eqref{f3.11}, \eqref{f3.13}, and \eqref{f3.14} yield the required equality \begin{equation}\label{f3.15} \mathcal{I}_{0}Ju= \Bigl(\frac{\varphi_{1}}{\varphi_{0}}\circ\alpha\Bigr)\mathcal{I}_{0}u \quad\mbox{for every}\quad u\in H^{\varphi_{1}}_{A}. \end{equation}
It follows from \eqref{f3.9} and \eqref{f3.15} that \begin{equation}\label{f3.16} H^{\varphi_{1}}_{A}\subseteq\Bigl\{u\in H^{\varphi_{0}}_{A}: \Bigl(\frac{\varphi_{1}}{\varphi_{0}}\circ\alpha\Bigr)\mathcal{I}_{0}u\in L_{2}(R,d\mu)\Bigr\}=:Q, \end{equation} where we recall that $H^{\varphi_{1}}_{A}=\mathrm{Dom}\,J$. Let us prove that, in fact, $H^{\varphi_{1}}_{A}=Q$. We endow the linear space $Q$ with the norm \begin{equation*}
\|u\|_{Q}:=\Bigl\|\Bigl(\frac{\varphi_{1}}{\varphi_{0}}\circ\alpha\Bigr)
\mathcal{I}_{0}u\Bigr\|_{L_{2}(R,d\mu)}. \end{equation*} Owing to \eqref{f3.15} and \eqref{f3.16}, we have the normal embedding \begin{equation}\label{f3.17} H^{\varphi_{1}}_{A}\subseteq Q\quad\mbox{with}\quad
\|u\|_{Q}=\|u\|_{\varphi_{1}}\;\;\mbox{for every}\;\; u\in H^{\varphi_{1}}_{A}. \end{equation}
Consider the linear mapping \begin{equation}\label{f3.18} L:u\mapsto B_{1}^{-1}\mathcal{I}^{-1}\Bigl[\Bigl (\frac{\varphi_{1}}{\varphi_{0}}\circ\alpha\Bigr)\cdot \mathcal{I}_{0}u\Bigr]\quad\mbox{where}\quad u\in Q. \end{equation} According to the isometric isomorphisms \eqref{f3.4} and \eqref{f3.6}, we have \begin{equation}\label{f3.19} \mbox{the isometric operator}\quad L:Q\to H^{\varphi_{1}}_{A}. \end{equation} If $Lu=u$ for every $u\in H^{\varphi_{1}}_{A}$, the required equality $H^{\varphi_{1}}_{A}=Q$ will follow plainly from \eqref{f3.16} and the injectivity of \eqref{f3.19}. Let us prove that $Lu=u$ for every $u\in H^{\varphi_{1}}_{A}$.
Given $u\in\mathrm{Dom}\,\varphi_{1}(A)$, we write the following: \begin{align*} Lu&=B_{1}^{-1}\mathcal{I}^{-1}\Bigl[\Bigl (\frac{\varphi_{1}}{\varphi_{0}}\circ\alpha\Bigr) \mathcal{I}\varphi_{0}(A)u\Bigr]= B_{1}^{-1}\mathcal{I}^{-1}\Bigl[\Bigl (\frac{\varphi_{1}}{\varphi_{0}}\circ\alpha\Bigr) (\varphi_{0}\circ\alpha)\mathcal{I}u\Bigr]\\ &=B_{1}^{-1}\mathcal{I}^{-1} \bigl[(\varphi_{1}\circ\alpha)\mathcal{I}u\bigr]= B_{1}^{-1}\varphi_{1}(A)u=B_{1}^{-1}B_{1}u=u. \end{align*} Thus, \begin{equation}\label{f3.20} Lu=u\quad\mbox{for every}\quad u\in\mathrm{Dom}\,\varphi_{1}(A). \end{equation} Choose now $u\in H^{\varphi_{1}}_{A}$ arbitrarily, and let a sequence $(u_{k})_{k=1}^{\infty}\subset\mathrm{Dom}\,\varphi_{1}(A)$ converge to $u$ in $H^{\varphi_{1}}_{A}$. Since $u_{k}\to u$ in $Q$ by \eqref{f3.17}, we write \begin{equation*} Lu=\lim_{k\to\infty}Lu_{k}=\lim_{k\to\infty}u_{k}=u \quad\mbox{in}\quad H^{\varphi_{1}}_{A} \end{equation*} in view of \eqref{f3.19} and \eqref{f3.20}. Thus, $Lu=u$ for every $u\in H^{\varphi_{1}}_{A}$, and we have proved the required equality \begin{equation}\label{f3.21} \mathrm{Dom}\,J=\Bigl\{u\in H^{\varphi_{0}}_{A}: \Bigl(\frac{\varphi_{1}}{\varphi_{0}}\circ\alpha\Bigr)\mathcal{I}_{0}u\in L_{2}(R,d\mu)\Bigr\}. \end{equation}
Formulas \eqref{f3.15} and \eqref{f3.21} mean that the operator $J$ is reduced by the isometric isomorphism \eqref{f3.9} to the operator of multiplication by the function $(\varphi_{1}/\varphi_{0})\circ\alpha$. Using this fact, we will prove the required formula \eqref{f2.9}.
Since $\psi\in\mathcal{B}$, the function $1/\psi$ is bounded on $[\varkappa,\infty)$. Hence, the function \begin{equation*} \frac{\varphi_{0}(t)}{\varphi(t)}= \frac{1}{\psi}\Bigl(\frac{\varphi_{1}(t)}{\varphi_{0}(t)}\Bigr) \quad\mbox{of}\quad t\geq1 \end{equation*} is bounded due to \eqref{f3.2}. Therefore, $\mathrm{Dom}\,\varphi(A)\subseteq\mathrm{Dom}\,\varphi_{0}(A)$.
Choosing $u\in\mathrm{Dom}\,\varphi(A)$ arbitrarily and using the above-mentioned reductions of $A$ and $J$ to operators of multiplication by functions, we write the following: \begin{align*} L_2(R,d\mu)\ni\mathcal{I}\varphi(A)u&=(\varphi\circ\alpha)\mathcal{I}u= \Bigl(\psi\circ\frac{\varphi_1}{\varphi_0}\circ\alpha\Bigr) (\varphi_{0}\circ\alpha)\mathcal{I}u\\ &=\Bigl(\psi\circ\frac{\varphi_1}{\varphi_0}\circ\alpha\Bigr) \mathcal{I}\varphi_{0}(A)u= \Bigl(\psi\circ\frac{\varphi_1}{\varphi_0}\circ\alpha\Bigr) \mathcal{I}_{0}u=\mathcal{I}_{0}\psi(J)u. \end{align*} Hence, \begin{equation*}
\|u\|_{\varphi}=\|\varphi(A)u\|=\|\mathcal{I}\varphi(A)u\|_{L_2(R,d\mu)}=
\|\mathcal{I}_{0}\psi(J)u\|_{L_2(R,d\mu)}=\|\psi(J)u\|_{\varphi_0}. \end{equation*}
Therefore, $\mathrm{Dom}\,\varphi(A)\subseteq\mathrm{Dom}\,\psi(J)$, and $\|u\|_{\varphi}=\|u\|_{X}$ for every $u\in\mathrm{Dom}\,\varphi(A)$, where $X:=[H^{\varphi_{0}}_{A},H^{\varphi_{1}}_{A}]_{\psi}= \mathrm{Dom}\,\psi(J)$. Passing here to the limit, we infer the normal embedding \begin{equation}\label{f3.22} H^{\varphi}_{A}\subseteq X\quad\mbox{with}\quad
\|u\|_{\varphi}=\|u\|_{X}\;\;\mbox{for every}\;\; u\in H^{\varphi}_{A}. \end{equation} Besides, as we have just shown, \begin{equation}\label{f3.23} \mathcal{I}\varphi(A)u=\mathcal{I}_{0}\psi(J)u \quad\mbox{whenever}\quad u\in\mathrm{Dom}\,\varphi(A). \end{equation}
Let us deduce the equality $H^{\varphi}_{A}=X$ from \eqref{f3.22} and \eqref{f3.23}. Using the isometric isomorphisms \eqref{f2.2}, \eqref{f3.6}, and \eqref{f3.9}, we get \begin{equation}\label{f3.24} \mbox{the isometric operator}\quad M:=B^{-1}\mathcal{I}^{-1}\mathcal{I}_{0}\psi(J):X\to H^{\varphi}_{A}. \end{equation} Owing to \eqref{f3.23}, we write $Mu=B^{-1}\mathcal{I}^{-1}\mathcal{I}\varphi(A)u=u$ for every $u\in\mathrm{Dom}\,\varphi(A)$. Therefore, choosing $u\in H^{\varphi}_{A}$ arbitrarily and considering a sequence $(u_{k})_{k=1}^{\infty}\subset\mathrm{Dom}\,\varphi(A)$ such that $u_{k}\to u$ in $H^{\varphi}_{A}$, we get \begin{equation*} Mu=\lim_{k\to\infty}Mu_{k}=\lim_{k\to\infty}u_{k}=u \quad\mbox{in}\quad H^{\varphi}_{A} \end{equation*} due to \eqref{f3.22}. Now the required equality $H^{\varphi}_{A}=X$ follows from the property $Mu=u$ whenever $u\in H^{\varphi}_{A}$, the inclusion $H^{\varphi}_{A}\subseteq X$, and the injectivity of the operator \eqref{f3.24}. In view of \eqref{f3.22}, we have proved \eqref{f2.9} and, hence, Theorem~\ref{th2.6}. \end{proof}
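For orientation, let us indicate what the constructions used in this proof become in the simplest, power, case; this instance serves only as an illustration. Let $\varphi_{0}(t)\equiv t^{s_{0}}$ and $\varphi_{1}(t)\equiv t^{s_{1}}$ with some $s_{0}<s_{1}$, and let $\psi(\tau)\equiv\tau^{\theta}$ with $0<\theta<1$ (a classical interpolation parameter). Then formula \eqref{f2.8} gives
\begin{equation*}
\varphi(t)=\varphi_{0}(t)\,\psi\Bigl(\frac{\varphi_{1}(t)}{\varphi_{0}(t)}\Bigr)=
t^{s_{0}}\bigl(t^{s_{1}-s_{0}}\bigr)^{\theta}=t^{(1-\theta)s_{0}+\theta s_{1}},
\end{equation*}
the generating operator $J$ is reduced by $\mathcal{I}_{0}$ to the operator of multiplication by the function $\alpha^{s_{1}-s_{0}}$, and the equality \eqref{f2.9} becomes the classical formula $\bigl[H^{s_{0}}_{A},H^{s_{1}}_{A}\bigr]_{\psi}=H^{(1-\theta)s_{0}+\theta s_{1}}_{A}$.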
We will deduce Theorem~\ref{th2.5} from Theorem~\ref{th2.6} with the help of the following result \cite[Theorem~4.2]{MikhailetsMurach15ResMath1}:
\begin{proposition}\label{prop3.2} Let $s_{0},s_{1}\in\mathbb{R}$, $s_{0}<s_{1}$, and $\psi\in\mathcal{B}$. Put $\varphi(t):=t^{s_{0}}\psi(t^{s_{1}-s_{0}})$ for every $t\geq1$. Then the function $\psi$ is an interpolation parameter if and only if the function $\varphi$ satisfies \eqref{f2.3} with some positive numbers $c_{0}$ and $c_{1}$ that are independent of $t$ and $\lambda$. \end{proposition}
\begin{proof}[Proof of Theorem $\ref{th2.5}$] Let us show first that $\psi\in\mathcal{B}$. Evidently, the function $\psi$ is Borel measurable. Putting $t:=1$ in \eqref{f2.3}, we write $c_{0}\varphi(1)\lambda^{s_{0}}\leq\varphi(\lambda)\leq c_{1}\varphi(1)\lambda^{s_{1}}$ for arbitrary $\lambda\geq1$. Hence, the function $\varphi$ is bounded on every compact subset of $[1,\infty)$, which yields the boundedness of $\psi$ on every interval $(0,b]$ with $b>1$. Besides, \begin{equation*} \psi(\tau):=\tau^{-s_{0}/(s_{1}-s_{0})}\,\varphi(\tau^{1/(s_{1}-s_{0})}) \geq c_{0}\varphi(1)\quad\mbox{whenever}\quad\tau\geq1. \end{equation*} Therefore, the function $1/\psi$ is bounded on $(0,\infty)$. Thus, $\psi\in\mathcal{B}$ by the definition of~$\mathcal{B}$.
It follows from the definition of $\psi$ that $\varphi(t)=t^{s_{0}}\psi(t^{s_{1}-s_{0}})$ for every $t\geq1$. Hence, $\psi$ is an interpolation parameter according to Proposition~\ref{prop3.2}, whereas the interpolation property \eqref{f2.7} is due to Theorem~\ref{th2.6}, in which we put $\varphi_{0}(t)\equiv t^{s_0}$ and $\varphi_{1}(t)\equiv t^{s_1}$. \end{proof}
\begin{proof}[Proof of Theorem $\ref{th2.3}$.] \emph{Necessity.} Suppose that a Hilbert space $X$ is an interpolation space for the pair $[H^{s_{0}}_{A},H^{s_{1}}_{A}]$. Then, owing to Proposition~\ref{prop3.1}, there exists an interpolation parameter $\psi\in\mathcal{B}$ such that $X=[H^{s_{0}}_{A},H^{s_{1}}_{A}]_{\psi}$ up to equivalence of norms. According to Theorem~\ref{th2.6}, we get $[H^{s_{0}}_{A},H^{s_{1}}_{A}]_{\psi}=H^{\varphi}_{A}$ with equality of norms; here, $\varphi(t):=t^{s_{0}}\psi(t^{s_{1}-s_{0}})$ for every $t\geq1$. Thus, $X=H^{\varphi}_{A}$ up to equivalence of norms. Note that $\varphi\in\mathrm{OR}$ due to Proposition~\ref{prop2.7} considered in the case of $\varphi_{0}(t)\equiv t^{s_0}$ and $\varphi_{1}(t)\equiv t^{s_1}$. Moreover, according to Proposition~\ref{prop3.2}, the function $\varphi$ satisfies condition~\eqref{f2.3}. The necessity is proved.
\emph{Sufficiency.} Suppose now that a Hilbert space $X$ coincides with $H^{\varphi}_{A}$ up to equivalence of norms for a certain function parameter $\varphi\in\mathrm{OR}$ that satisfies condition~\eqref{f2.3}. Then, owing to Theorem~\ref{th2.5}, we get $H^{\varphi}_{A}=[H^{s_{0}}_{A},H^{s_{1}}_{A}]_{\psi}$ with equality of norms, where the interpolation parameter $\psi\in\mathcal{B}$ is defined by formula~\eqref{f2.6}. Thus, $X=[H^{s_{0}}_{A},H^{s_{1}}_{A}]_{\psi}$ up to equivalence of norms. This implies that $X$ is an interpolation space for the pair $[H^{s_{0}}_{A},H^{s_{1}}_{A}]$ in view of the definition of an interpolation parameter (or by Proposition~\ref{prop3.1}). The sufficiency is also proved. \end{proof}
\begin{proof}[Proof of Theorem $\ref{th2.1}$.] \emph{Necessity.} Suppose that a Hilbert space $X$ belongs to the extended $A$-scale. Then $X$ is an interpolation space for a certain pair $[H^{s_{0}}_{A},H^{s_{1}}_{A}]$ where $s_{0}<s_{1}$. Hence, we conclude by Theorem~\ref{th2.3} that $X=H^{\varphi}_{A}$ up to equivalence of norms for a certain function parameter $\varphi\in\mathrm{OR}$. The necessity is proved.
\emph{Sufficiency.} Suppose now that $X=H^{\varphi}_{A}$ up to equivalence of norms for certain $\varphi\in\mathrm{OR}$. The function $\varphi$ satisfies condition~\eqref{f2.3} for the numbers $s_{0}:=\sigma_{0}(\varphi)-1$ and $s_{1}:=\sigma_{1}(\varphi)+1$, for example. Therefore, $X$ is an interpolation space for the pair $[H^{s_{0}}_{A},H^{s_{1}}_{A}]$ due to Theorem~\ref{th2.3}; i.e., $X$ belongs to the extended $A$-scale. The sufficiency is also proved. \end{proof}
\section{Interpolational inequalities}\label{sec3b}
We assume in this section that functions $\varphi_{0},\varphi_{1}:[1,\infty)\to(0,\infty)$ and $\psi:(0,\infty)\to(0,\infty)$ satisfy the hypothesis of Theorem~\ref{th2.6}; i.e., $\varphi_{0}$ and $\varphi_{1}$ are Borel measurable, and $\varphi_{0}/\varphi_{1}$ is bounded on $[1,\infty)$, and $\psi$ belongs to $\mathcal{B}$. Moreover, suppose that $\psi$ is pseudoconcave in a neighbourhood of infinity; then $\psi$ is an interpolation parameter (see, e.g., \cite[Theorem~1.9]{MikhailetsMurach14}). Owing to Theorem~\ref{th2.6}, we have the dense continuous embedding $H^{\varphi_{1}}_{A}\hookrightarrow H^{\varphi_{0}}_{A}$ and the interpolation formula $[H^{\varphi_{0}}_{A},H^{\varphi_{1}}_{A}]_{\psi}=H^{\varphi}_{A}$ with equality of norms. Here, the Borel measurable function $\varphi:[1,\infty)\to(0,\infty)$ is defined by \eqref{f2.8}. Hence, $H^{\varphi}_{A}$ is an interpolation space between $H^{\varphi_{0}}_{A}$ and~$H^{\varphi_{1}}_{A}$.
We will obtain some inequalities that estimate (from above) the norm in the interpolation space $H^{\varphi}_{A}$ via the norms in the marginal spaces $H^{\varphi_{0}}_{A}$ and $H^{\varphi_{1}}_{A}$ with the help of the interpolation parameter $\psi$. Such inequalities are naturally called interpolational. Specifically, if $\varphi_{0},\varphi_{1}\in\mathrm{OR}$, then $\varphi\in\mathrm{OR}$ as well, due to Proposition~\ref{prop2.7}. In this case, these interpolational inequalities deal with norms in spaces belonging to the extended Hilbert scale.
We define the number $\varkappa>0$ by formula \eqref{f3.2}. Owing to \cite[Lemma 1.1]{MikhailetsMurach14}, the function $\psi$ is pseudoconcave on $(\varepsilon,\infty)$ whenever $\varepsilon>0$. Hence, according to \cite[Lemma 1.2]{MikhailetsMurach14}, there exists a number $c_{\psi,\varkappa}>0$ such that \begin{equation}\label{f3b.2} \frac{\psi(t)}{\psi(\tau)}\leq c_{\psi,\varkappa}\max\biggl\{1,\frac{t}{\tau}\biggr\}\quad\mbox{for all}\;\; t,\tau\in[\varkappa,\infty). \end{equation}
\begin{theorem}\label{th3b.1} Let $\tau\geq\varkappa$ and $u\in H^{\varphi_{1}}_{A}$; then \begin{equation}\label{f3b.3}
\|u\|_{\varphi}\leq c_{\psi,\varkappa}\,\psi(\tau)
\bigl(\|u\|_{\varphi_0}^{2}+\tau^{-2}\|u\|_{\varphi_1}^{2}\bigr)^{1/2}. \end{equation} \end{theorem}
Before we prove this theorem, let us comment on formula \eqref{f3b.3}. It follows from \eqref{f3b.2} that $\psi(t)\leq c_{\psi,\varkappa}\psi(\tau)$ whenever $\varkappa\leq t\leq\tau$ and that $\psi(t)/t\leq c_{\psi,\varkappa}\psi(\tau)/\tau$ whenever $\varkappa\leq\tau\leq t$. Hence, $\psi$ is equivalent (up to constant factors) to an increasing function on the set $[\varkappa,\infty)$, and the function $\psi(\tau)/\tau$ is equivalent to a decreasing function on the same set. Of main interest is the case where $\psi(\tau)\to\infty$ and $\psi(\tau)/\tau\to0$ as $\tau\to\infty$. In this case, it is useful to rewrite inequality \eqref{f3b.3} in the form \begin{equation}\label{f3b.4}
\|u\|_{\varphi}\leq c_{\psi,\varkappa}
\biggl(\frac{\psi^{2}(\tau)}{\tau^{2}}\|u\|_{\varphi_1}^{2}+
\psi^{2}(\tau)\|u\|_{\varphi_0}^{2}\biggr)^{1/2}. \end{equation} Restricting ourselves to the Hilbert scale $\{H^{s}_{A}:s\in\mathbb{R}\}$, we conclude that this inequality becomes \begin{equation}\label{f3b.5}
\|u\|_{s}\leq\bigl(\tau^{2(\theta-1)}\|u\|_{s_1}^{2}+
\tau^{2\theta}\|u\|_{s_0}^{2}\bigr)^{1/2} \quad\mbox{whenever}\;\;u\in H^{s_1}_{A}; \end{equation} here, the real numbers $s$, $s_0$, $s_1$, $\theta$, and $\tau$ satisfy the conditions \begin{equation}\label{f3b.6} s_0<s_1,\quad 0<\theta<1,\quad s=(1-\theta)s_0+\theta s_1, \end{equation} and $\tau\geq1$. Indeed, we only need to take \begin{equation}\label{f3b.7} \varphi_0(t)\equiv t^{s_0},\quad\varphi_1(t)\equiv t^{s_1},\quad \psi(\tau)\equiv\tau^{\theta},\quad\mbox{and}\quad\varphi(t)\equiv t^{s}\;\;\mbox{by \eqref{f2.8}} \end{equation} in \eqref{f3b.4} and observe that $c_{\psi,\varkappa}=1$ for this choice of $\psi$. Interpolational inequalities of type \eqref{f3b.5} for Sobolev scales are used in the theory of partial differential operators (see, e.g., \cite[Section~1, Subsection~6]{AgranovichVishik64}).
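For example (this is only a routine illustration of \eqref{f3b.5}), take $s_{0}=0$, $s_{1}=2$, and $\theta=1/2$, so that $s=1$. Squaring \eqref{f3b.5} and putting $\tau:=\varepsilon^{-1}$ with an arbitrary $\varepsilon\in(0,1]$, we obtain
\begin{equation*}
\|u\|_{1}^{2}\leq\varepsilon\,\|u\|_{2}^{2}+\varepsilon^{-1}\,\|u\|_{0}^{2}
\quad\mbox{whenever}\quad u\in H^{2}_{A},
\end{equation*}
i.e. the familiar estimate used to absorb intermediate norms in elliptic a priori estimates.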
\begin{proof}[Proof of Theorem $\ref{th3b.1}$.] Let $J$ denote the generating operator for the pair $[H^{\varphi_{0}}_{A},H^{\varphi_{1}}_{A}]$. Recall that $J$ is a positive definite self-adjoint operator in the Hilbert space $H^{\varphi_{0}}_{A}$ and that $\|Ju\|_{\varphi_{0}}=\|u\|_{\varphi_{1}}$ for every $u\in H^{\varphi_{1}}_{A}=\mathrm{Dom}\,J$. According to \eqref{f3.5bis}, we have $\mathrm{Spec}\,J\subseteq[\varkappa,\infty)$.
Let $E_{t}$, $t\geq\varkappa$, be the resolution of the identity associated with the self-adjoint operator $J$. Choosing $\tau\geq\varkappa$ and $u\in H^{\varphi_{1}}_{A}$ arbitrarily, we get \begin{align*}
\|u\|_{\varphi}^{2}&=\|\psi(J)u\|_{\varphi_{0}}^{2}= \int\limits_{\varkappa}^{\infty}\psi^{2}(t)\,d(E_{t}u,u)_{\varphi_{0}}\\ &\leq c_{\psi,\varkappa}^{2}\,\psi^{2}(\tau)\int\limits_{\varkappa}^{\infty} \max\biggl\{1,\frac{t^{2}}{\tau^{2}}\biggr\}d(E_{t}u,u)_{\varphi_{0}}\\ &\leq c_{\psi,\varkappa}^{2}\,\psi^{2}(\tau)\int\limits_{\varkappa}^{\infty} \biggl(1+\frac{t^{2}}{\tau^{2}}\biggr)d(E_{t}u,u)_{\varphi_{0}}\\
&=c_{\psi,\varkappa}^{2}\,\psi^{2}(\tau)\bigl(\|u\|_{\varphi_0}^{2}+
\tau^{-2}\,\|Ju\|_{\varphi_0}^{2}\bigr)\\
&=c_{\psi,\varkappa}^{2}\,\psi^{2}(\tau)\bigl(\|u\|_{\varphi_0}^{2}+
\tau^{-2}\,\|u\|_{\varphi_1}^{2}\bigr). \end{align*} Here, we use the interpolation formula $[H^{\varphi_{0}}_{A},H^{\varphi_{1}}_{A}]_{\psi}=H^{\varphi}_{A}$ and inequality \eqref{f3b.2}. Thus, the required inequality \eqref{f3b.3} is proved. \end{proof}
Let us consider an application of Theorem~\ref{th3b.1}. We arbitrarily choose $u\in H^{\varphi_{1}}_{A}$ such that $u\neq0$. Put $\tau:=\|u\|_{\varphi_1}/\|u\|_{\varphi_0}$ in \eqref{f3b.3} and note that $\tau\geq\varkappa$ in view of \eqref{f3.2}. We then obtain the interpolational inequality \begin{equation}\label{f3b.9new}
\|u\|_{\varphi}\leq c_{\psi,\varkappa}\sqrt{2}\,\|u\|_{\varphi_0}\,
\psi\biggl(\frac{\|u\|_{\varphi_1}}{\|u\|_{\varphi_0}}\biggr). \end{equation} If the function \begin{equation}\label{f3b.10} \chi(\tau):=\psi^{2}(\sqrt{\tau})\quad\mbox{is concave on}\quad [\varkappa^{2},\infty), \end{equation}
then \eqref{f3b.9new} holds true without the factor $c_{\psi,\varkappa}\sqrt{2}$. Indeed, choosing $v\in H^{\varphi_{1}}_{A}$ with $\|v\|_{\varphi_0}=1$ arbitrarily, we get \begin{align*}
\|v\|_{\varphi}^2&= \int\limits_{\varkappa}^{\infty}\psi^{2}(t)\,d(E_{t}v,v)_{\varphi_{0}}= \int\limits_{\varkappa}^{\infty}\chi(t^2)\,d(E_{t}v,v)_{\varphi_{0}}\\ &\leq\chi\Biggl(\,\int\limits_{\varkappa}^{\infty} t^2\,d(E_{t}v,v)_{\varphi_{0}}\Biggr)=
\chi\bigl(\|Jv\|_{\varphi_0}^2\bigr)=
\chi\bigl(\|v\|_{\varphi_1}^2\bigr)=
\psi^2\bigl(\|v\|_{\varphi_1}\bigr) \end{align*} due to the Jensen inequality applied to the concave function $\chi$. Here, $E_{t}$ is the same as that in the proof of Theorem~\ref{th3b.1}, and \begin{equation*} \int\limits_{\varkappa}^{\infty}d(E_{t}v,v)_{\varphi_{0}}=
\|v\|_{\varphi_0}^2=1. \end{equation*}
Putting $v:=u/\|u\|_{\varphi_{0}}$ in the inequality $\|v\|_{\varphi}\leq\psi(\|v\|_{\varphi_1})$ just obtained, we conclude that \begin{equation}\label{f3b.11}
\|u\|_{\varphi}\leq\|u\|_{\varphi_0}\,
\psi\biggl(\frac{\|u\|_{\varphi_1}}{\|u\|_{\varphi_0}}\biggr) \quad\mbox{under condition \eqref{f3b.10}}. \end{equation}
This interpolational inequality is equivalent to the Variable Hilbert Scale Inequality \cite[Theorem~1, formula (9)]{HeglandAnderssen11} under the supplementary assumption that both functions $\varphi_0$ and $\varphi_1$ are continuous. (Note that the norm $\|\cdot\|_{\varphi}$ used in the cited article \cite{HeglandAnderssen11} corresponds to the norm $\|\cdot\|_{\sqrt{\varphi}}$ used here. Besides, that article does not assume that the function $\varphi_0/\varphi_1$ is bounded.)
In the case of the Hilbert scale $\{H^{s}_{A}:s\in\mathbb{R}\}$, inequality \eqref{f3b.11} becomes \begin{equation}\label{f3b.12}
\|u\|_{s}\leq\|u\|_{s_0}^{1-\theta}\,\|u\|_{s_1}^{\theta} \end{equation} provided that the real numbers $s$, $s_0$, $s_1$, and $\theta$ satisfy \eqref{f3b.6} (we use the power functions \eqref{f3b.7}). The interpolational inequality \eqref{f3b.12} is well known \cite[Section~9, Subsection~1]{KreinPetunin66} and means that the Hilbert scale is a normal scale of spaces.
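For completeness, we record the elementary deduction of \eqref{f3b.12} from \eqref{f3b.11}. For the power functions \eqref{f3b.7} we have $\chi(\tau)=\psi^{2}(\sqrt{\tau})=\tau^{\theta}$, which is concave because $0<\theta<1$; hence condition \eqref{f3b.10} is fulfilled, and \eqref{f3b.11} gives
\begin{equation*}
\|u\|_{s}\leq\|u\|_{s_0}
\biggl(\frac{\|u\|_{s_1}}{\|u\|_{s_0}}\biggr)^{\theta}=
\|u\|_{s_0}^{1-\theta}\,\|u\|_{s_1}^{\theta}
\end{equation*}
for every nonzero $u\in H^{s_1}_{A}$, whereas for $u=0$ inequality \eqref{f3b.12} is trivial.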
The interpolational inequalities just considered deal with norms of vectors. Now we focus our attention on interpolational inequalities that involve norms of linear operators acting continuously between appropriate Hilbert spaces $H^{\varphi}_{A}$ and~$G^{\eta}_{Q}$. Here, $G$ (just as $H$) is a separable infinite-dimensional complex Hilbert space, and $Q$ is a counterpart of $A$ for $G$. Namely, $Q$ is a self-adjoint unbounded linear operator in $G$ such that $\mathrm{Spec}\,Q\subseteq[1,\infty)$. We suppose that functions $\eta_{0},\eta_{1},\eta:[1,\infty)\to(0,\infty)$ satisfy analogous conditions to those imposed on $\varphi_{0}$, $\varphi_{1}$, and $\varphi$ at the beginning of this section. Namely, these functions are Borel measurable, and the function $\eta_{0}/\eta_{1}$ is bounded, and \begin{equation}\label{f3b.13} \eta(t)=\eta_{0}(t)\,\psi\biggl(\frac{\eta_{1}(t)}{\eta_{0}(t)}\biggr) \quad\mbox{whenever}\quad t\geq1. \end{equation}
We suppose that a linear mapping $T$ is given on $H^{\varphi_0}_{A}$ and satisfies the following condition: the restriction of $T$ to the space $H^{\varphi_j}_{A}$ is a bounded operator \begin{equation}\label{f3b.14} T:H^{\varphi_j}_{A}\to G^{\eta_j}_{Q}\quad\mbox{for each}\quad j\in\{0,1\}. \end{equation} Then the restriction of $T$ to $H^{\varphi}_{A}$ is a bounded operator \begin{equation}\label{f3b.15} T:H^{\varphi}_{A}=\bigl[H^{\varphi_0}_{A},H^{\varphi_1}_{A}\bigr]_{\psi}\to \bigl[G^{\eta_0}_{Q},G^{\eta_1}_{Q}\bigr]_{\psi}=G^{\eta}_{Q} \end{equation}
according to Theorem~\ref{th2.6} and because $\psi$ is an interpolation parameter. Let $\|T\|_{j}$ and $\|T\|$ denote the norms of operators \eqref{f3b.14} and \eqref{f3b.15} respectively. Then \begin{equation}\label{f3b.16}
\|T\|\leq c\,\max\{\|T\|_0,\|T\|_1\} \end{equation} for some number $c>0$ that does not depend on $T$ but may depend on $\psi$ and the spaces $H^{\varphi_j}_{A}$ and $G^{\eta_j}_{Q}$ (see, e.g., \cite[Theorem 2.4.2]{BerghLefstrem76}). This is an interpolational inequality for operator norms, which means that the method of quadratic interpolation is uniform.
We will consider an interpolational inequality that is more precise than \eqref{f3b.16}; it involves the interpolation parameter $\psi$ explicitly. Put \begin{equation*} \nu:=\min\biggl\{\inf_{t\geq1}\frac{\varphi_{1}(t)}{\varphi_{0}(t)},\, \inf_{t\geq1}\frac{\eta_{1}(t)}{\eta_{0}(t)}\biggr\}>0, \end{equation*} and let $c_{\psi,\nu}$ denote a positive number such that inequality \eqref{f3b.2} holds true with $\nu$ taken instead of $\varkappa$. Without loss of generality we suppose that \begin{equation}\label{f3b.17} \frac{\psi(t)}{\psi(\tau)}\leq c_{\psi,\nu}\max\biggl\{1,\frac{t}{\tau}\biggr\}\quad\mbox{for all}\;\; t,\tau>0. \end{equation} (Hence, $\psi$ is pseudoconcave on $(0,\infty)$ according to \cite[Lemma 5.4.3]{BerghLefstrem76}.) We can achieve this by redefining $\psi$ properly on $(0,\nu)$, e.g., by the formula $\psi(t):=\psi(\nu)\nu^{-1}t$ whenever $0<t<\nu$. This does not change $\varphi$ and $\eta$ in view of \eqref{f2.8} and \eqref{f3b.13}. Let $\widetilde{\psi}$ denote the dilation function for $\psi$, i.e. \begin{equation}\label{f3b.18} \widetilde{\psi}(\lambda):=\sup_{t>0}\frac{\psi(\lambda t)}{\psi(t)} \leq c_{\psi,\nu}\max\{1,\lambda\} \quad\mbox{whenever}\;\;\lambda>0. \end{equation}
\begin{theorem}\label{th3b.2} The following interpolational inequality holds true: \begin{equation}\label{f3b.19}
\|T\|\leq c_{\psi,\nu}^{2}\sqrt{8}\,\|T\|_{0}\,
\widetilde{\psi}\biggl(\frac{\|T\|_{1}}{\|T\|_{0}}\biggr). \end{equation} \end{theorem}
\begin{proof} It follows directly from Theorem~\ref{th2.6} (namely, from the equalities in \eqref{f3b.15}) and the result by Fan \cite[formula (2.3)]{Fan11} that inequality \eqref{f3b.19} holds true with a certain number $c>0$ written instead of $c_{\psi,\nu}^{2}\sqrt{8}$. Note that Fan \cite{Fan11} denotes the interpolation space $[H_{0},H_{1}]_{\psi}$ by $\overline{\mathcal{H}}_{\chi}$ where $\chi(t)\equiv\psi^{2}(\sqrt{t})$ and that $\psi$ is pseudoconcave on $(0,\infty)$ if and only if so is $\chi$, with the dilation function $\widetilde{\chi}(t)\equiv\widetilde{\psi}^{2}(\sqrt{t})$. (As in Section~\ref{sec2}, $[H_{0},H_{1}]$ is a regular pair of separable complex Hilbert spaces.) Besides, if $\chi$ is concave on $(0,\infty)$, then $c=\sqrt{2}$. This is a direct consequence of Theorem~\ref{th2.6} and the inequality \cite[formula (2.2)]{Fan11}. Let us show that we may take $c=c_{\psi,\nu}^{2}\sqrt{8}$ in \eqref{f3b.19} in the general case where $\psi$ is pseudoconcave on $(0,\infty)$.
Considering the function $\chi(t):=\psi^2(\sqrt{t})$ of $t>0$ and using \eqref{f3b.17}, we have \begin{equation*} \frac{\chi(t)}{\chi(\tau)}\leq c_{\psi,\nu}^2\max\biggl\{1,\frac{t}{\tau}\biggr\} \quad\mbox{for all}\;\; t,\tau>0. \end{equation*} It follows from this that \begin{equation*} \frac{1}{2c_{\psi,\nu}^2}\,\chi_1(t)\leq \chi(t)\leq\chi_1(t)\quad\mbox{whenever}\quad t>0, \end{equation*} with $\chi_1:(0,\infty)\to(0,\infty)$ being the least concave majorant of $\chi$ (see \cite[p.~91]{Peetre68}). Hence, \begin{equation}\label{est-psi} \frac{1}{\sqrt{2}\,c_{\psi,\nu}}\,\psi_{\ast}(t)\leq \psi(t)\leq\psi_{\ast}(t)\quad\mbox{whenever}\quad t>0, \end{equation} where $\psi_{\ast}(t):=\sqrt{\chi_1(t^2)}$ of $t>0$. Since the function $\psi_{\ast}^{2}(\sqrt{t})\equiv\chi_{1}(t)$ is concave on $(0,\infty)$, we conclude by \cite[formula (2.2)]{Fan11} that \begin{equation}\label{est-T-ast}
\|T\|_{\ast}\leq \sqrt{2}\,\|T\|_{0}\,
\widetilde{\psi_{\ast}}\biggl(\frac{\|T\|_{1}}{\|T\|_{0}}\biggr). \end{equation}
Here, $\|T\|_{\ast}$ denotes the norm of the bounded operator \begin{equation*} T:\bigl[H^{\varphi_0}_{A},H^{\varphi_1}_{A}\bigr]_{\psi_{\ast}}\to \bigl[G^{\eta_0}_{Q},G^{\eta_1}_{Q}\bigr]_{\psi_{\ast}}, \end{equation*} and $\widetilde{\psi_{\ast}}$ is the dilation function for $\psi_{\ast}$. It follows from \eqref{f3b.15} and \eqref{est-psi} that \begin{equation}\label{f3b.20}
\|T\|\leq \sqrt{2}\,c_{\psi,\nu}\,\|T\|_{\ast}. \end{equation} Besides, \begin{equation}\label{f3b.21} \widetilde{\psi_{\ast}}(\lambda)= \sup_{t>0}\frac{\psi_{\ast}(\lambda t)}{\psi_{\ast}(t)}\leq \sqrt{2}\,c_{\psi,\nu}\widetilde{\psi}(\lambda) \quad\mbox{whenever}\;\;\lambda>0. \end{equation} Now \eqref{est-T-ast}, \eqref{f3b.20}, and \eqref{f3b.21} yield the required inequality \eqref{f3b.19}. \end{proof}
The inequality \eqref{f3b.19} is more precise than \eqref{f3b.16} in view of \eqref{f3b.18}.
\begin{remark} If the function $\psi$ is concave on $(0,\infty)$, then $c_{\psi,\nu}=1$ in inequality \eqref{f3b.19}. Besides, we may write $\sqrt{2}$ instead of $c_{\psi,\nu}^{2}\sqrt{8}$ in this inequality provided that the function $\psi^{2}(\sqrt{t})$ of $t>0$ is concave on $(0,\infty)$, as we have noted in the proof of Theorem~\ref{th3b.2}. \end{remark}
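For orientation, consider the power case as an illustration of Theorem~\ref{th3b.2}: if $\psi(t)\equiv t^{\theta}$ with $0<\theta<1$, then $\widetilde{\psi}(\lambda)=\lambda^{\theta}$ and the function $\psi^{2}(\sqrt{t})=t^{\theta}$ is concave on $(0,\infty)$, so inequality \eqref{f3b.19} takes the multiplicative form
\begin{equation*}
\|T\|\leq\sqrt{2}\,\|T\|_{0}^{1-\theta}\,\|T\|_{1}^{\theta},
\end{equation*}
an operator counterpart of the interpolational inequality \eqref{f3b.12}.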
\section{Applications to function spaces}\label{sec4}
In this section, we will show how the concept of the extended Hilbert scale allows us to introduce and investigate wide classes of Hilbert function (or distribution) spaces related to Sobolev spaces on manifolds.
Let $\Gamma$ be a separable paracompact infinitely smooth Riemannian manifold without boundary. Consider the separable complex Hilbert space $H:=L_{2}(\Gamma)$ of all functions $f:\Gamma\to\mathbb{C}$ which are square integrable over $\Gamma$ with respect to the Riemann measure. Let $\Delta_{\Gamma}$ be the Laplace\,--\,Beltrami operator on $\Gamma$; it is defined on the linear manifold $C^{\infty}_{0}(\Gamma)$ of all compactly supported functions $f\in C^{\infty}(\Gamma)$. Assume that the closure of this operator is self-adjoint in $H$, and denote this closure by $\Delta_{\Gamma}$ too. (Specifically, this self-adjointness follows from the completeness of $\Gamma$ under the Riemann metric \cite[p.~140]{Gaffney54}. For incomplete Riemannian manifolds, sufficient conditions for the self-adjointness are given, e.g., in \cite{BravermanMilatovicShubin02, MilatovicTruc16}). Then the operator $A:=(1-\Delta_{\Gamma})^{1/2}$ is self-adjoint and positive definite with the lower bound $r=1$. Therefore, the separable Hilbert space $H_{A}^{\varphi}$ is defined for every Borel measurable function $\varphi:\nobreak[1,\infty)\to(0,\infty)$; we denote this space by $H_{A}^{\varphi}(\Gamma)$. If $\varphi(t)\equiv t^{s}$ for certain $s\in\mathbb{R}$, then $H_{A}^{\varphi}(\Gamma)$ becomes the Sobolev space $H^{s}(\Gamma)$ of order~$s$. According to Theorem~\ref{th2.1}, the extended $A$-scale consists (up to equivalence of norms) of all Hilbert spaces $H_{A}^{\varphi}(\Gamma)$ where $\varphi\in\mathrm{OR}$. In other words, the class $\{H_{A}^{\varphi}(\Gamma):\varphi\in\mathrm{OR}\}$ consists of all interpolation Hilbert spaces between inner product Sobolev spaces over $\Gamma$. Therefore, it is natural to call this class the extended Sobolev scale over $\Gamma$.
Now we focus our attention on two important cases where $\Gamma$ is the Euclidean space $\mathbb{R}^{n}$ and where $\Gamma$ is a compact boundaryless manifold. Generalizing the above consideration, we use some elliptic operators as $A$. We will prove that the extended Hilbert scales generated by these operators consist of some distribution spaces, admit explicit description with the help of the Fourier transform and local charts on $\Gamma$, and do not depend on the choice of the elliptic operators and these charts. Since we consider complex linear spaces formed by functions or distributions, all functions and distributions are supposed to be complex-valued unless otherwise stated.
\subsection{}\label{sec4.1}
In this subsection, we consider the extended Hilbert scale generated by a uniformly elliptic operator on $\mathbb{R}^{n}$, with $n\geq1$. Here, we put $H:=L_{2}(\mathbb{R}^{n})$ and note that the Riemann measure on $\mathbb{R}^{n}$ is the Lebesgue measure. Let $(\cdot,\cdot)_{\mathbb{R}^{n}}$ and $\|\cdot\|_{\mathbb{R}^{n}}$ stand respectively for the inner product and norm in $L_{2}(\mathbb{R}^{n})$.
Following \cite[Section 1.1]{Agranovich94}, we let $\Psi^{m}(\mathbb{R}^{n})$, where $m\in\mathbb{R}$, denote the class of all pseudodifferential operators (PsDOs) $\mathcal{A}$ on $\mathbb{R}^{n}$ whose symbols $a$ belong to $C^{\infty}(\mathbb{R}^{2n})$ and satisfy the following condition: for all multi-indices $\alpha,\beta\in\mathbb{Z}_{+}^{n}$ there exists a number $c_{\alpha,\beta}>0$ such that \begin{equation*}
|\partial^{\alpha}_{x}\partial^{\beta}_{\xi}a(x,\xi)|\leq c_{\alpha,\beta}(1+|\xi|)^{m-|\beta|}\quad\mbox{for all}\quad x,\xi\in\mathbb{R}^{n}, \end{equation*}
with $x$ and $\xi$ being considered respectively as spatial and frequency variables. If $\mathcal{A}\in\Psi^{m}(\mathbb{R}^{n})$, we say that the (formal) order of $\mathcal{A}$ is $m$. A PsDO $\mathcal{A}\in\Psi^{m}(\mathbb{R}^{n})$ is called uniformly elliptic on $\mathbb{R}^{n}$ if there exist positive numbers $c_{1}$ and $c_{2}$ such that $|a(x,\xi)|\geq c_{1}|\xi|^{m}$ whenever $x,\xi\in\mathbb{R}^{n}$ and $|\xi|\geq c_{2}$ (see \cite[Section 3.1~b]{Agranovich94}).
We suppose henceforth in this subsection that \begin{itemize} \item[(a)] $\mathcal{A}$ is a PsDO of class $\Psi^{1}(\mathbb{R}^{n})$; \item[(b)] $\mathcal{A}$ is uniformly elliptic on $\mathbb{R}^{n}$;
\item[(c)] the inequality $(\mathcal{A}w,w)_{\mathbb{R}^{n}}\geq\|w\|_{\mathbb{R}^{n}}^{2}$ holds true for every $w\in C^{\infty}_{0}(\mathbb{R}^{n})$. \end{itemize} Here, as usual, $C^{\infty}_{0}(\mathbb{R}^{n})$ denotes the set of all compactly supported functions $u\in C^{\infty}(\mathbb{R}^{n})$.
Consider the mapping \begin{equation}\label{f4.1} w\mapsto\mathcal{A}w,\quad\mbox{with}\quad w\in C^{\infty}_{0}(\mathbb{R}^{n}), \end{equation} as an unbounded linear operator in the Hilbert space $H=L_{2}(\mathbb{R}^{n})$. This operator is closable because the PsDO $\mathcal{A}\in\Psi^{1}(\mathbb{R}^{n})$ acts continuously from $L_{2}(\mathbb{R}^{n})$ to $H^{-1}(\mathbb{R}^{n})$ (see \cite[Theorem 1.1.2]{Agranovich94}). Here and below, $H^{s}(\mathbb{R}^{n})$ denotes the inner product Sobolev space of order $s\in\mathbb{R}$ over~$\mathbb{R}^{n}$.
Let $A$ denote the closure of the operator \eqref{f4.1} in $L_{2}(\mathbb{R}^{n})$. It follows from (a) and (b) that $\mathrm{Dom}\,A=H^{1}(\mathbb{R}^{n})$ (see \cite[Sections 2.3~c and 3.1~b]{Agranovich94}). Owing to (c), the operator $A$ is positive definite with the lower bound $r=1$. This implies due to (b) that $A$ is self-adjoint \cite[Sections 2.3~d and 3.1~b]{Agranovich94}, with $\mathrm{Spec}\,A\subseteq[1,\infty)$.
Thus, $A$ is the operator considered in Section~\ref{sec2}. Therefore, the separable Hilbert space $H_{A}^{\varphi}$ is defined for every Borel measurable function $\varphi:\nobreak[1,\infty)\to(0,\infty)$. We denote this space by $H_{A}^{\varphi}(\mathbb{R}^{n})$. An important example of $A$ is the operator $(1-\Delta)^{1/2}$, with $\Delta$ denoting the Laplace operator in $\mathbb{R}^{n}$. In this case, the PsDO $\mathcal{A}$ has the symbol $a(x,\xi)\equiv(1+|\xi|^{2})^{1/2}$.
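Let us verify, for the reader's convenience, that this operator satisfies condition~(c); conditions (a) and (b) are checked directly from the form of the symbol. Normalizing the Fourier transform $w\mapsto\widehat{w}$ so that the Parseval identity $\|w\|_{\mathbb{R}^{n}}=\|\widehat{w}\|_{\mathbb{R}^{n}}$ holds, we get
\begin{equation*}
(\mathcal{A}w,w)_{\mathbb{R}^{n}}=
\int\limits_{\mathbb{R}^{n}}(1+|\xi|^{2})^{1/2}\,|\widehat{w}(\xi)|^{2}\,d\xi\geq
\int\limits_{\mathbb{R}^{n}}|\widehat{w}(\xi)|^{2}\,d\xi=\|w\|_{\mathbb{R}^{n}}^{2}
\quad\mbox{for every}\quad w\in C^{\infty}_{0}(\mathbb{R}^{n}).
\end{equation*}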
If $\varphi(t)\equiv t^{s}$ for certain $s\in\mathbb{R}$, then it is possible to show that the space $H_{A}^{s}(\mathbb{R}^{n}):=H_{A}^{\varphi}(\mathbb{R}^{n})$ coincides with the Sobolev space $H^{s}(\mathbb{R}^{n})$ up to equivalence of norms. Thus, the $A$-scale $\{H_{A}^{s}(\mathbb{R}^{n}):s\in\mathbb{R}\}$ is the Sobolev Hilbert scale. Let us show that the extended $A$-scale consists of some generalized Sobolev spaces, namely the spaces $H^{\varphi}(\mathbb{R}^{n})$ with $\varphi\in\mathrm{OR}$.
Let $\varphi\in\mathrm{OR}$. By definition, the linear space $H^{\varphi}(\mathbb{R}^{n})$ consists of all distributions $w\in\mathcal{S}'(\mathbb{R}^{n})$ whose Fourier transform $\widehat{w}$ is locally Lebesgue integrable over $\mathbb{R}^{n}$ and satisfies the condition $$ \int\limits_{\mathbb{R}^{n}}\varphi^{2}(\langle\xi\rangle)\,
|\widehat{w}(\xi)|^{2}\,d\xi<\infty. $$ Here, as usual, $\mathcal{S}'(\mathbb{R}^{n})$ is the linear topological space of all tempered distributions on $\mathbb{R}^{n}$, and
$\langle\xi\rangle:=(1+|\xi|^{2})^{1/2}$ is the smoothed modulus of $\xi\in\mathbb{R}^{n}$. The space $H^{\varphi}(\mathbb{R}^{n})$ is endowed with the inner product $$ (w_{1},w_{2})_{\varphi,\mathbb{R}^{n}}:= \int\limits_{\mathbb{R}^{n}}\varphi^{2}(\langle\xi\rangle)\, \widehat{w_{1}}(\xi)\,\overline{\widehat{w_{2}}(\xi)}\,d\xi $$
and the corresponding norm $\|w\|_{\varphi,\mathbb{R}^{n}}:= (w,w)_{\varphi,\mathbb{R}^{n}}^{1/2}$. This space is complete and separable with respect to this norm and is embedded continuously in $\mathcal{S}'(\mathbb{R}^{n})$; the set $C^{\infty}_{0}(\mathbb{R}^{n})$ is dense in $H^{\varphi}(\mathbb{R}^{n})$ \cite[Theorem 2.2.1]{Hermander63}.
If $\varphi(t)\equiv t^{s}$ for some $s\in\mathbb{R}$, then $H^{\varphi}(\mathbb{R}^{n})$ becomes the Sobolev space $H^{s}(\mathbb{R}^{n})$. The Hilbert space $H^{\varphi}(\mathbb{R}^{n})$ is an isotropic case of the spaces introduced and investigated by Malgrange \cite{Malgrange57}, H\"ormander \cite[Sec. 2.2]{Hermander63} (see also \cite[Section 10.1]{Hermander83}), and Volevich and Paneah \cite[Section~2]{VolevichPaneah65}. These spaces are generalizations of Sobolev spaces to the case where a general enough function of frequency variables serves as an order of distribution spaces.
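A typical example beyond the power scale (given only for illustration) is the function parameter
\begin{equation*}
\varphi(t):=t^{s}(1+\ln t)^{r},\quad t\geq1,
\end{equation*}
with arbitrary $s,r\in\mathbb{R}$. It belongs to $\mathrm{OR}$, and the corresponding space $H^{\varphi}(\mathbb{R}^{n})$ may be interpreted as the space of distributions whose Sobolev smoothness $s$ is refined by an additional logarithmic smoothness of order $r$.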
\begin{theorem}\label{th4.1} Let $\varphi\in\mathrm{OR}$. Then the spaces $H^{\varphi}_{A}(\mathbb{R}^{n})$ and $H^{\varphi}(\mathbb{R}^{n})$ coincide as completions of $C^{\infty}_{0}(\mathbb{R}^{n})$ with respect to equivalent norms. \end{theorem}
It is worthwhile to note that the norm of $w\in C^{\infty}_{0}(\mathbb{R}^{n})$ in $H^{\varphi}_{A}(\mathbb{R}^{n})$ is $\|\varphi(A)w\|_{\mathbb{R}^{n}}$ because \begin{equation*} \mathrm{Dom}\,\varphi(A)\supset\mathrm{Dom}\,A^{s_1}= H^{s_1}_{A}(\mathbb{R}^{n})\supset C^{\infty}_{0}(\mathbb{R}^{n}); \end{equation*} here $s_1$ is a positive number that satisfies \eqref{f2.3}.
According to Theorem~\ref{th4.1}, the space $H^{\varphi}_{A}(\mathbb{R}^{n})$, with $\varphi\in\mathrm{OR}$, does not depend on~$A$ up to equivalence of norms. Theorems \ref{th2.1} and \ref{th4.1} yield the following explicit description of the extended Hilbert scale generated by the considered operator $A$:
\begin{corollary}\label{cor4.2} The extended $A$-scale consists (up to equivalence of norms) of all the spaces $H^{\varphi}(\mathbb{R}^{n})$ with $\varphi\in\mathrm{OR}$. \end{corollary}
Thus, the class $\{H^{\varphi}(\mathbb{R}^{n}):\varphi\in\mathrm{OR}\}$ consists (up to equivalence of norms) of all Hilbert spaces each of which is an interpolation space between some Sobolev spaces $H^{s_0}(\mathbb{R}^{n})$ and $H^{s_1}(\mathbb{R}^{n})$ with $s_0<s_1$. As we have noted, this class is called the extended Sobolev scale over~$\mathbb{R}^{n}$. It was considered in \cite{MikhailetsMurach13UMJ3, MikhailetsMurach15ResMath1} and \cite[Section~2.4.2]{MikhailetsMurach14}.
\begin{proof}[Proof of Theorem $\ref{th4.1}$.] Choose an integer $k\gg1$ such that $-k<\sigma_{0}(\varphi)$ and $k>\sigma_{1}(\varphi)$, and define the interpolation parameter $\psi$ by formula \eqref{f2.6} in which $s_{0}:=-k$ and $s_{1}:=k$. According to Theorem~\ref{th2.5}, we have the equality \begin{equation}\label{f4.2} H^{\varphi}_{A}(\mathbb{R}^{n})= \bigl[H^{-k}_{A}(\mathbb{R}^{n}),H^{k}_{A}(\mathbb{R}^{n})\bigr]_{\psi}. \end{equation}
Note that each space $H^{\pm k}_{A}(\mathbb{R}^{n})$ coincides with $H^{\pm k}(\mathbb{R}^{n})$ up to equivalence of norms. Indeed, $A$ sets an isomorphism between $H^{1}(\mathbb{R}^{n})$ and $L_{2}(\mathbb{R}^{n})$ because $\mathrm{Dom}\,A=H^{1}(\mathbb{R}^{n})$ and $0\notin\mathrm{Spec}\,A$. Besides, since the PsDO $\mathcal{A}$ of the first order is uniformly elliptic on $\mathbb{R}^{n}$, the operator $A$ has the following lifting property: if $u\in H^{1}(\mathbb{R}^{n})$ and if $Au\in H^{s-1}(\mathbb{R}^{n})$ for some $s>1$, then $u\in H^{s}(\mathbb{R}^{n})$ (see \cite[Sections 1.8 and 3.1~b]{Agranovich94}). Hence, $A^{k}$ sets an isomorphism between $H^{k}(\mathbb{R}^{n})$ and $L_{2}(\mathbb{R}^{n})$. Thus, $H^{k}_{A}(\mathbb{R}^{n})=H^{k}(\mathbb{R}^{n})$ up to equivalence of norms. Passing here to dual spaces with respect to $L_{2}(\mathbb{R}^{n})$, we conclude that $H^{-k}_{A}(\mathbb{R}^{n})=H^{-k}(\mathbb{R}^{n})$ up to equivalence of norms (see \cite[Section~9, Subsection~1]{KreinPetunin66}).
Thus, it follows from \eqref{f4.2} that \begin{equation}\label{f4.3} H^{\varphi}_{A}(\mathbb{R}^{n})= \bigl[H^{-k}(\mathbb{R}^{n}),H^{k}(\mathbb{R}^{n})\bigr]_{\psi} \end{equation} up to equivalence of norms. Indeed, since the identity mapping sets the isomorphisms \begin{equation*} I:H^{\mp k}_{A}(\mathbb{R}^{n})\leftrightarrow H^{\mp k}(\mathbb{R}^{n}) \end{equation*} and since $\psi$ is an interpolation parameter, the identity mapping realizes the isomorphism \begin{equation*} I:\bigl[H^{-k}_{A}(\mathbb{R}^{n}),H^{k}_{A}(\mathbb{R}^{n})\bigr]_{\psi} \leftrightarrow \bigl[H^{-k}(\mathbb{R}^{n}),H^{k}(\mathbb{R}^{n})\bigr]_{\psi}. \end{equation*} This yields \eqref{f4.3} due to \eqref{f4.2}.
Thus, Theorem~\ref{th4.1} follows directly from \eqref{f4.3} and \begin{equation}\label{f4.4} H^{\varphi}(\mathbb{R}^{n})= \bigl[H^{-k}(\mathbb{R}^{n}),H^{k}(\mathbb{R}^{n})\bigr]_{\psi}. \end{equation} The latter equality is proved in \cite[Theorem~2.19]{MikhailetsMurach14}. Besides, \eqref{f4.4} is a special case of \eqref{f4.3} because $H^{\varphi}(\mathbb{R}^{n})=H_{A}^{\varphi}(\mathbb{R}^{n})$ if $A=(1-\Delta)^{1/2}$. Indeed, since the Fourier transform reduces
$A=(1-\Delta)^{1/2}$ to the operator of multiplication by $\langle\xi\rangle$, we conclude that $\mathrm{Dom}\,\varphi(A)\subseteq H^{\varphi}(\mathbb{R}^{n})$ and $\|\varphi(A)u\|_{\mathbb{R}^{n}}=\|u\|_{\varphi,\mathbb{R}^{n}}$ for every $u\in\mathrm{Dom}\,\varphi(A)$. Hence, $H_{A}^{\varphi}(\mathbb{R}^{n})$ is a subspace of $H^{\varphi}(\mathbb{R}^{n})$. But $C^{\infty}_{0}(\mathbb{R}^{n})$ and then $H_{A}^{\varphi}(\mathbb{R}^{n})$ are dense in $H^{\varphi}(\mathbb{R}^{n})$. Thus, $H_{A}^{\varphi}(\mathbb{R}^{n})$ coincides with $H^{\varphi}(\mathbb{R}^{n})$ if $A=(1-\Delta)^{1/2}$. \end{proof}
Ending this subsection, we note the following: in contrast to the spaces $H^{s}(\mathbb{R}^{n})$, the inner product Sobolev spaces \begin{equation*} H^{s}(\Omega):=\bigl\{w\!\upharpoonright\!\Omega:w\in H^{s}(\mathbb{R}^{n})\bigr\},\quad\mbox{with}\quad s\in\mathbb{R}, \end{equation*} over a domain $\Omega\subset\mathbb{R}^{n}$ do not form a Hilbert scale even if $\Omega$ is a bounded domain with infinitely smooth boundary and if we restrict ourselves to the spaces of order $s\geq0$ \cite[Corollary~2.3]{Neubauer88}. An explicit description of all Hilbert spaces that are interpolation ones for an arbitrarily chosen pair of Sobolev spaces $H^{s_{0}}(\Omega)$ and $H^{s_{1}}(\Omega)$, where $-\infty<s_{0}<s_{1}<\infty$, is given in \cite[Theorem~2.4]{MikhailetsMurach15ResMath1} provided that $\Omega$ is a bounded domain with Lipschitz boundary. These interpolation spaces are (up to equivalence of norms) the generalized Sobolev spaces \begin{gather*} H^{\varphi}(\Omega):=\bigl\{v:=w\!\upharpoonright\!\Omega:w\in H^{\varphi}(\mathbb{R}^{n})\bigr\} \end{gather*} such that $\varphi$ belongs to $\mathrm{OR}$ and satisfies~\eqref{f2.3}, the Hilbert norm in $H^{\varphi}(\Omega)$ being naturally defined by the formula \begin{equation*}
\|v\|_{\varphi,\Omega}:=\inf\bigl\{\|w\|_{\varphi,\mathbb{R}^{n}}: w\in H^{\varphi}(\mathbb{R}^{n}),v=w\!\upharpoonright\!\Omega\bigr\}. \end{equation*}
\subsection{}\label{sec4.2}
Here, we consider the extended Hilbert scale generated by an elliptic operator given on a closed manifold. Let $\Gamma$ be an arbitrary closed (i.e. compact and boundaryless) infinitely smooth manifold of dimension $n\geq1$. We suppose that a certain positive $C^{\infty}$-density $dx$ is given on~$\Gamma$. We put $H:=L_{2}(\Gamma)$, where $L_{2}(\Gamma)$ is the complex Hilbert space of all square integrable functions over $\Gamma$ with respect to the measure induced by this density. Let $(\cdot,\cdot)_{\Gamma}$ and $\|\cdot\|_{\Gamma}$ stand respectively for the inner product and norm in $L_{2}(\Gamma)$.
Following \cite[Section 2.1]{Agranovich94}, we let $\Psi^{m}(\Gamma)$, where $m\in\mathbb{R}$, denote the class of all PsDOs on $\Gamma$ whose representations in every local chart on $\Gamma$ belong to $\Psi^{m}(\mathbb{R}^{n})$. If $\mathcal{A}\in\Psi^{m}(\Gamma)$, we say that the (formal) order of $\mathcal{A}$ is $m$. A~PsDO $\mathcal{A}\in\Psi^{m}(\Gamma)$ is called elliptic on $\Gamma$ if for every point $x_{0}\in\Gamma$ there exist positive numbers $c_{1}$ and $c_{2}$ such that $|a_{x_{0}}(x,\xi)|\geq c_{1}|\xi|^{m}$ whenever $x\in U(x_{0})$ and $\xi\in\mathbb{R}^{n}$ and $|\xi|\geq\nobreak c_{2}$, with $a_{x_{0}}(x,\xi)$ being the local symbol of $\mathcal{A}$ corresponding to a certain coordinate neighbourhood $U(x_{0})$ of $x_{0}$ (see \cite[Section 3.1 b]{Agranovich94}).
We suppose in this subsection that \begin{itemize} \item[(a)] $\mathcal{A}$ is a PsDO of class $\Psi^{1}(\Gamma)$; \item[(b)] $\mathcal{A}$ is elliptic on $\Gamma$;
\item[(c)] the inequality $(\mathcal{A}f,f)_{\Gamma}\geq\|f\|_{\Gamma}^{2}$ holds true for every $f\in C^{\infty}(\Gamma)$. \end{itemize}
Let $A$ denote the closure, in $H=L_{2}(\Gamma)$, of the linear operator $f\mapsto\mathcal{A}f$, with $f\in\nobreak C^{\infty}(\Gamma)$. Note that this operator is closable in $L_{2}(\Gamma)$ because the PsDO $\mathcal{A}\in\nobreak\Psi^{1}(\Gamma)$ acts continuously from $L_{2}(\Gamma)$ to $H^{-1}(\Gamma)$ \cite[Theorem 2.1.2]{Agranovich94}. Here and below, $H^{s}(\Gamma)$ stands for the inner product Sobolev space of order $s\in\mathbb{R}$ over $\Gamma$. It follows from (a)--(c) that the operator $A$ is positive definite and self-adjoint in $L_{2}(\Gamma)$, with $\mathrm{Dom}\,A=H^{1}(\Gamma)$ and $\mathrm{Spec}\,A\subseteq[1,\infty)$ (see \cite[Sections 2.3 c, d and 3.1~b]{Agranovich94}).
Thus, $A$ is the operator considered in Section~\ref{sec2}, and the separable Hilbert space $H_{A}^{\varphi}$ is defined for every Borel measurable function $\varphi:\nobreak[1,\infty)\to(0,\infty)$. We denote this space by $H_{A}^{\varphi}(\Gamma)$. An important example of $A$ is the operator $(1-\Delta_{\Gamma})^{1/2}$, where $\Gamma$ is endowed with a Riemann metric (then the density $dx$ is induced by this metric).
If $\varphi(t)\equiv t^{s}$ for some $s\in\mathbb{R}$, then the space $H_{A}^{s}(\Gamma):=H_{A}^{\varphi}(\Gamma)$ coincides with the Sobolev space $H^{s}(\Gamma)$ up to equivalence of norms \cite[Corollary 5.3.2]{Agranovich94}. Thus, the $A$-scale $\{H_{A}^{s}(\Gamma):s\in\nobreak\mathbb{R}\}$ is the Sobolev Hilbert scale over~$\Gamma$. We will prove that the extended $A$-scale consists of the generalized Sobolev spaces $H^{\varphi}(\Gamma)$ with $\varphi\in\mathrm{OR}$. Let us give their definition with the help of local charts on~$\Gamma$.
We arbitrarily choose a finite atlas from the $C^{\infty}$-structure on $\Gamma$; let this atlas be formed by $\varkappa$ local charts $\pi_j: \mathbb{R}^{n}\leftrightarrow \Gamma_{j}$, with $j=1,\ldots,\varkappa$. Here, the open sets $\Gamma_{1},\ldots,\Gamma_{\varkappa}$ form a covering of $\Gamma$. We also arbitrarily choose functions $\chi_j\in C^{\infty}(\Gamma)$, with $j=1,\ldots,\varkappa$, that form a partition of unity on $\Gamma$ such that $\mathrm{supp}\,\chi_j\subset \Gamma_j$.
Let $\varphi\in\mathrm{OR}$. By definition, the linear space $H^{\varphi}(\Gamma)$ is the completion of the linear manifold $C^{\infty}(\Gamma)$ with respect to the inner product \begin{equation}\label{f4.5} (f_{1},f_{2})_{\varphi,\Gamma}:= \sum_{j=1}^{\varkappa}\,((\chi_{j}f_{1})\circ\pi_{j}, (\chi_{j}f_{2})\circ\pi_{j})_{\varphi,\mathbb{R}^{n}} \end{equation}
of functions $f_{1},f_{2}\in C^{\infty}(\Gamma)$. Thus, $H^{\varphi}(\Gamma)$ is a Hilbert space. Let $\|\cdot\|_{\varphi,\Gamma}$ denote the norm induced by the inner product \eqref{f4.5}. If $\varphi(t)\equiv t^{s}$ for certain $s\in\mathbb{R}$, then $H^{\varphi}(\Gamma)$ becomes the Sobolev space $H^{s}(\Gamma)$.
\begin{theorem}\label{th4.3} Let $\varphi\in\mathrm{OR}$. Then the spaces $H^{\varphi}_{A}(\Gamma)$ and $H^{\varphi}(\Gamma)$ coincide as completions of $C^{\infty}(\Gamma)$ with respect to equivalent norms. \end{theorem}
Note that the norm of $f\in C^{\infty}(\Gamma)$ in $H^{\varphi}_{A}(\Gamma)$ is $\|\varphi(A)f\|_{\Gamma}$ because \begin{equation*} \mathrm{Dom}\,\varphi(A)\supset\mathrm{Dom}\,A^{s_1} \supset C^{\infty}(\Gamma); \end{equation*} here $s_1$ is a positive integer that satisfies \eqref{f2.3}.
Theorem \ref{th4.3} specifically entails
\begin{corollary}\label{cor4.4} Let $\varphi\in\mathrm{OR}$. Then the space $H^{\varphi}_{A}(\Gamma)$ does not depend on~$A$ up to equivalence of norms. Besides, the space $H^{\varphi}(\Gamma)$ does not depend (up to equivalence of norms) on our choice of the atlas and partition of unity on $\Gamma$. \end{corollary}
Owing to Theorems \ref{th2.1} and \ref{th4.3}, we obtain the following explicit description of the extended Hilbert scale generated by the considered PsDO $A$ on $\Gamma$:
\begin{corollary}\label{cor4.5} The extended $A$-scale consists (up to equivalence of norms) of all the spaces $H^{\varphi}(\Gamma)$ with $\varphi\in\mathrm{OR}$. \end{corollary}
Thus, the class $\{H^{\varphi}(\Gamma):\varphi\in\mathrm{OR}\}$ consists (up to equivalence of norms) of all Hilbert spaces each of which is an interpolation space between some Sobolev inner-product spaces $H^{s_0}(\Gamma)$ and $H^{s_1}(\Gamma)$ with $s_0<s_1$. This class is called the extended Sobolev scale over~$\Gamma$.
\begin{theorem}\label{th4.6} Suppose that the manifold $\Gamma$ is endowed with a Riemann metric, and let $\varphi\in\mathrm{OR}$. Then the space $H^{\varphi}(\Gamma)$ admits the following three equivalent definitions in the sense that they introduce the same Hilbert space up to equivalence of norms: \begin{itemize}
\item[(i)] \textbf{Operational definition.} The Hilbert space $H^{\varphi}(\Gamma)$ is the completion of $C^{\infty}(\Gamma)$ with respect to the norm $\|\varphi((1-\Delta_{\Gamma})^{1/2})f\|_{\Gamma}$ of $f\in C^{\infty}(\Gamma)$. \item[(ii)] \textbf{Local definition.} The Hilbert space $H^{\varphi}(\Gamma)$ consists of all distributions $f\in\mathcal{D}'(\Gamma)$ such that $(\chi_{j}f)\circ\pi_{j}\in H^{\varphi}(\mathbb{R}^{n})$ for every $j\in\{1,\ldots,\varkappa\}$ and is endowed with the inner product \eqref{f4.5} of distributions $f_{1},f_{2}\in H^{\varphi}(\Gamma)$. \item[(iii)] \textbf{Interpolational definition.} Let integers $s_{0}$ and $s_{1}$ satisfy the conditions $s_{0}<\sigma_{0}(\varphi)$ and $s_{1}>\sigma_{1}(\varphi)$, and let $\psi$ be the interpolation parameter defined by \eqref{f2.6}. Then \begin{equation*} H^{\varphi}(\Gamma):= \bigl[H^{s_{0}}(\Gamma),H^{s_{1}}(\Gamma)\bigr]_{\psi}. \end{equation*} \end{itemize} \end{theorem}
Here, as usual, $\mathcal{D}'(\Gamma)$ is the linear topological space of all distributions on $\Gamma$, and $(\chi_{j}f)\circ\pi_{j}$ stands for the representation of the distribution $\chi_{j}f\in\mathcal{D}'(\Gamma)$ in the local chart $\pi_{j}$. We naturally interpret $\mathcal{D}'(\Gamma)$ as the dual of $C^{\infty}(\Gamma)$ with respect to the extension of the inner product in $L_{2}(\Gamma)$. This extension is denoted by $(\cdot,\cdot)_{\Gamma}$ as well. Of course, the $C^{\infty}$-density $dx$ is now induced by the Riemann metric.
\begin{remark}\label{rem4.7} It follows directly from Theorem~\ref{th4.3} that in the operational definition we may replace $(1-\Delta_{\Gamma})^{1/2}$ with the more general PsDO $A$ considered in this subsection. \end{remark}
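To illustrate the operational definition (the example is not used in the proofs below), let $\Gamma$ be the unit circle with its standard Riemann metric, and expand $f\in C^{\infty}(\Gamma)$ in the Fourier series $f(\theta)=\sum_{k\in\mathbb{Z}}\widehat{f}(k)e^{ik\theta}$. Since $(1-\Delta_{\Gamma})^{1/2}e^{ik\theta}=(1+k^{2})^{1/2}e^{ik\theta}$, the norm from the operational definition satisfies
\begin{equation*}
\bigl\|\varphi\bigl((1-\Delta_{\Gamma})^{1/2}\bigr)f\bigr\|_{\Gamma}^{2}\asymp
\sum_{k\in\mathbb{Z}}\varphi^{2}\bigl((1+k^{2})^{1/2}\bigr)\,|\widehat{f}(k)|^{2}
\end{equation*}
up to a constant factor depending on the normalization of the Fourier coefficients; thus $\varphi$ plays the role of a generalized smoothness weight in the frequency variable $k$.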
Let us prove Theorems \ref{th4.3} and \ref{th4.6}.
\begin{proof}[Proof of Theorem $\ref{th4.3}$.] Let the integer $k\gg1$ and the interpolation parameter $\psi$ be the same as those in the proof of Theorem~$\ref{th4.1}$. Then \begin{equation}\label{f4.7} H^{\varphi}_{A}(\Gamma)= \bigl[H^{-k}_{A}(\Gamma),H^{k}_{A}(\Gamma)\bigr]_{\psi} \end{equation} due to Theorem~\ref{th2.5}. Here, $H^{\pm k}_{A}(\Gamma)=H^{\pm k}(\Gamma)$ up to equivalence of norms, which is demonstrated in the same way as that in the proof of Theorem~$\ref{th4.1}$. Therefore, \eqref{f4.7} implies that \begin{equation}\label{f4.8} H^{\varphi}_{A}(\Gamma)= \bigl[H^{-k}(\Gamma),H^{k}(\Gamma)\bigr]_{\psi} \end{equation} up to equivalence of norms. Hence, we have the dense continuous embedding $H^{k}(\Gamma)\hookrightarrow H^{\varphi}_{A}(\Gamma)$, which entails the density of $C^{\infty}(\Gamma)$ in $H^{\varphi}_{A}(\Gamma)$.
Owing to \eqref{f4.8}, it remains to show that \begin{equation}\label{f4.9} \bigl[H^{-k}(\Gamma),H^{k}(\Gamma)\bigr]_{\psi}=H^{\varphi}(\Gamma) \end{equation} up to equivalence of norms. We will deduce this formula from \eqref{f4.4} with the help of certain operators of flattening and sewing of the manifold $\Gamma$.
Let us define the flattening operator by the formula \begin{equation}\label{f4.10} T:f\mapsto ((\chi_1 f)\circ\pi_1,\ldots, (\chi_{\varkappa}f)\circ\pi_{\varkappa}) \;\;\mbox{for every}\;\;f\in C^{\infty}(\Gamma). \end{equation} The mapping \eqref{f4.10} extends by continuity to isometric linear operators \begin{equation}\label{f4.11} T:H^{\varphi}(\Gamma)\rightarrow (H^{\varphi}(\mathbb{R}^n))^{\varkappa} \end{equation} and \begin{equation}\label{f4.12} T:H^{\mp k}(\Gamma)\rightarrow (H^{\mp k}(\mathbb{R}^n))^{\varkappa}. \end{equation} Since $\psi$ is an interpolation parameter, it follows from the boundedness of the operators \eqref{f4.12} that a restriction of the first operator acts continuously \begin{equation}\label{f4.13} T:\bigl[H^{-k}(\Gamma),H^{k}(\Gamma)\bigr]_{\psi}\to \bigl[(H^{-k}(\mathbb{R}^n))^{\varkappa}, (H^{k}(\mathbb{R}^n))^{\varkappa}\bigr]_{\psi}. \end{equation} Here, the target space equals $(H^{\varphi}(\mathbb{R}^n))^{\varkappa}$ due to \eqref{f4.4} and the definition of the interpolation with the parameter $\psi$. Thus, the operator \eqref{f4.13} acts continuously \begin{equation}\label{f4.14} T:\bigl[H^{-k}(\Gamma),H^{k}(\Gamma)\bigr]_{\psi}\to \bigl(H^{\varphi}(\mathbb{R}^n)\bigr)^{\varkappa}. \end{equation}
We define the sewing operator by the formula \begin{equation}\label{f4.15} \begin{gathered} K:\mathbf{w}\mapsto\sum_{j=1}^{\varkappa} \Theta_j\bigl((\eta_j w_j)\circ\pi_j^{-1}\bigr)\\ \mbox{for every}\quad\mathbf{w}:=(w_{1},\ldots, w_{\varkappa})\in \bigl(C^{\infty}_{0}(\mathbb{R}^n)\bigr)^{\varkappa}. \end{gathered} \end{equation} Here, for each $j\in\{1,\ldots,\varkappa\}$, the function $\eta_j \in C^\infty_0(\mathbb{R}^n)$ is chosen such that $\eta_j=\nobreak1$ in a neighbourhood of $\pi^{-1}_j(\mathrm{supp}\,\chi_j)$. Besides, for every function $\omega:\Gamma_{j}\to\mathbb{C}$, we put $(\Theta_j\omega)(x):=\omega(x)$ whenever $x\in\Gamma_{j}$ and put $(\Theta_j\omega)(x):=0$ whenever $x\in\Gamma\setminus\Gamma_{j}$. Thus, $K\mathbf{w}\in C^{\infty}(\Gamma)$ for every $\mathbf{w}\in (C^{\infty}_{0}(\mathbb{R}^n))^{\varkappa}$.
The mapping $K$ is left inverse to the flattening operator \eqref{f4.10}. Indeed, given $f\in C^{\infty}(\Gamma)$, we have the following equalities: \begin{align*} KTf=&\sum_{j=1}^\varkappa\Theta_j\Bigl(\bigl(\eta_j ((\chi_j f)\circ\pi_j)\bigr)\circ\pi_j^{-1}\Bigr)\\ =&\sum_{j=1}^\varkappa \Theta_j\bigl((\eta_j\circ\pi_j^{-1})(\chi_j f)\bigr) =\sum_{j=1}^\varkappa \Theta_j (\chi_j f) = \sum_{j=1}^\varkappa \chi_j f = f. \end{align*} Thus, \begin{equation}\label{f4.16} KTf=f\quad\mbox{for every}\quad f\in C^{\infty}(\Gamma). \end{equation}
There exists a number $c>0$ such that \begin{equation}\label{f4.17}
\|K\mathbf{w}\|_{\varphi,\Gamma}^{2}\leq c \sum_{l=1}^{\varkappa}\|w_l\|_{\varphi,\mathbb{R}^n}^{2} \quad\mbox{whenever}\quad\mathbf{w}\in \bigl(C^{\infty}_{0}(\mathbb{R}^n)\bigr)^{\varkappa}. \end{equation} Indeed, \begin{align*}
\| K\mathbf{w} \|_{\varphi,\Gamma}^{2}&=
\sum_{j=1}^{\varkappa}\|(\chi_jK\mathbf{w})\circ\pi_j\|_ {\varphi,\mathbb{R}^n}^2 \\
&= \sum_{j=1}^{\varkappa}\,\Bigl\|\sum_{l=1}^{\varkappa} \bigl(\chi_j\Theta_l((\eta_lw_l)\circ\pi_l^{-1})\bigr)
\circ\pi_j\Bigr\|_{\varphi,\mathbb{R}^n}^2\\
&= \sum_{j=1}^{\varkappa}\,\Bigl\|\sum_{l=1}^{\varkappa} (\eta_{l,j} w_l)\circ\beta_{l,j} \Bigr\|_{\varphi,\mathbb{R}^n}^2\leq c \sum_{l=1}^{\varkappa}\|w_l\|_{\varphi,\mathbb{R}^n}^2. \end{align*}
Here, $\eta_{l,j}:=(\chi_j\circ\pi_l)\eta_l\in C^{\infty}_{0}(\mathbb{R}^n)$, and $\beta_{l,j}:\mathbb{R}^n\rightarrow\mathbb{R}^n$ is a $C^\infty$-diffeomorphism such that $\beta_{l,j}=\pi_l^{-1}\circ\pi_j$ in a neighbourhood of $\mathrm{supp}\,\eta_{l,j}$ and that $\beta_{l,j}(t)= t$ whenever $|t|$ is sufficiently large. The last inequality is a consequence of the fact that the operator of multiplication by a function from $C^{\infty}_{0}(\mathbb{R}^n)$ and the operator $v\mapsto v\circ\beta_{l,j}$ of change of variables are bounded on the space $H^\varphi(\mathbb{R}^n)$. These properties of $H^\varphi(\mathbb{R}^n)$ follow by \eqref{f4.4} from their known analogs for the Sobolev spaces $H^{\mp k}(\mathbb{R}^n)$.
According to \eqref{f4.17}, the mapping \eqref{f4.15} extends by continuity to a bounded linear operator \begin{equation}\label{f4.18} K:(H^{\varphi}(\mathbb{R}^n))^{\varkappa}\rightarrow H^{\varphi}(\Gamma) \end{equation} and, specifically, to bounded linear operators \begin{equation}\label{f4.19} K:(H^{\mp k}(\mathbb{R}^n))^{\varkappa}\rightarrow H^{\mp k}(\Gamma). \end{equation} Hence, a restriction of the first operator \eqref{f4.19} acts continuously \begin{equation}\label{f4.19b} K:\bigl(H^{\varphi}(\mathbb{R}^n)\bigr)^{\varkappa}= \bigl[(H^{-k}(\mathbb{R}^n))^{\varkappa}, (H^{k}(\mathbb{R}^n))^{\varkappa}\bigr]_{\psi}\to \bigl[H^{-k}(\Gamma),H^{k}(\Gamma)\bigr]_{\psi} \end{equation} in view of \eqref{f4.4}.
According to \eqref{f4.11} and \eqref{f4.19b}, we have the bounded operator \begin{equation*} KT:H^{\varphi}(\Gamma)\to[H^{-k}(\Gamma),H^{k}(\Gamma)]_{\psi}. \end{equation*} Besides, owing to \eqref{f4.14} and \eqref{f4.18}, we get the bounded operator \begin{equation*} KT:[H^{-k}(\Gamma),H^{k}(\Gamma)]_{\psi}\to H^{\varphi}(\Gamma). \end{equation*} These operators are identical mappings in view of \eqref{f4.16} and the density of $C^{\infty}(\Gamma)$ in their domains. Thus, the required equality \eqref{f4.9} holds true up to equivalence of norms. \end{proof}
\begin{proof}[Proof of Theorem $\ref{th4.6}$.] Let us prove that the initial definition of $H^{\varphi}(\Gamma)$ as the completion of $C^{\infty}(\Gamma)$ with respect to the inner product \eqref{f4.5} is equivalent to each of definitions (i)\,--\,(iii). The initial definition is tantamount to (i) due to Theorem~\ref{th4.3} in the $A=(1-\Delta_{\Gamma})^{1/2}$ case. Hence, the initial definition is equivalent to (iii) in view of Theorem~\ref{th2.5}.
To prove the equivalence of this definition and (ii), it suffices to show that $C^{\infty}(\Gamma)$ is dense in the space defined by (ii). We arbitrarily choose a distribution $f\in\mathcal{D}'(\Gamma)$ such that $(\chi_{j}f)\circ\pi_{j}\in H^{\varphi}(\mathbb{R}^{n})$ for every $j\in\{1,\ldots,\varkappa\}$. Given such $j$, we take a sequence $(w^{(r)}_{j})_{r=1}^{\infty}\subset C^{\infty}_{0}(\mathbb{R}^{n})$ such that $w^{(r)}_{j}\to(\chi_{j}f)\circ\pi_{j}$ in $H^{\varphi}(\mathbb{R}^{n})$ as $r\to\infty$. Let $T$ and $K$ be the flattening and sewing mappings used in the proof of Theorem~\ref{th4.3}. These mappings are well defined respectively on $\mathcal{D}'(\Gamma)$ and $(\mathcal{S}'(\mathbb{R}^{n}))^{\varkappa}$, with the formulas $KTf=f$ and \eqref{f4.17} being valid whenever $f\in\mathcal{D}'(\Gamma)$ and $\mathbf{w}\in(H^{\varphi}(\mathbb{R}^{n}))^{\varkappa}$. Therefore, putting $\mathbf{w}^{(r)}:=(w^{(r)}_{1},\ldots,w^{(r)}_{\varkappa})$, we conclude that $K\mathbf{w}^{(r)}\in C^{\infty}(\Gamma)$ and that \begin{align*}
\|K\mathbf{w}^{(r)}-f\|_{\varphi,\Gamma}^{2}&=
\|K(\mathbf{w}^{(r)}-Tf)\|_{\varphi,\Gamma}^{2}\\ &\leq c\sum_{l=1}^{\varkappa}
\|w^{(r)}_{l}-(\chi_{l}f)\circ\pi_{l}\|_{\varphi,\mathbb{R}^n}^{2} \to0\quad\mbox{as}\quad r\to\infty. \end{align*} Thus, $C^{\infty}(\Gamma)$ is dense in the space defined by (ii), and the initial definition is then equivalent to (ii). \end{proof}
At the end of this subsection, we give a description of the space $H^{\varphi}(\Gamma)$ in terms of sequences induced by the spectral decomposition of the self-adjoint operator $A$. Since this operator is positive definite and since $\mathrm{Dom}\,A=H^{1}(\Gamma)$, its inverse $A^{-1}$ is a compact self-adjoint operator on $L_{2}(\Gamma)$ (recall that $H^{1}(\Gamma)$ is compactly embedded in $L_{2}(\Gamma)$). Hence, the Hilbert space $L_{2}(\Gamma)$ has an orthonormal basis $\mathcal{E}:=(e_{j})_{j=1}^{\infty}$ formed by eigenvectors of $A$. Let $\lambda_{j}\geq1$ be the corresponding eigenvalue of $A$, i.e. $Ae_j=\lambda_{j}e_j$. We may and will enumerate the eigenvectors $e_{j}$ so that $\lambda_{j}\leq\lambda_{j+1}$ whenever $j\geq1$, with $\lambda_{j}\to\infty$ as $j\to\infty$. Since $\mathcal{A}$ is elliptic on $\Gamma$, each $e_{j}\in C^{\infty}(\Gamma)$. We suppose that the PsDO $\mathcal{A}$ is classical (i.e. polyhomogeneous); see, e.g., \cite[Definitions 1.5.1 and 2.1.3]{Agranovich94}. Then \begin{equation}\label{f5.20} \lambda_{j}\sim\widetilde{c}\,j^{1/n}\quad\mbox{as}\quad j\to\infty, \end{equation} where $\widetilde{c}$ is a positive number that does not depend on $j$ \cite[Section 6.1~b]{Agranovich94}. Every distribution $f\in\mathcal{D}'(\Gamma)$ expands into the series \begin{equation}\label{f5.21} f=\sum_{j=1}^{\infty}\varkappa_{j}(f)e_j\quad\mbox{in}\;\;\mathcal{D}'(\Gamma); \end{equation} here, $\varkappa_{j}(f):=(f,e_j)_{\Gamma}$ is the value of the distribution $f$ at the test function $e_{j}$ \cite[Section 6.1~a]{Agranovich94}.
\begin{theorem}\label{th5.8} Let $\varphi\in\mathrm{OR}$. Then the space $H^{\varphi}(\Gamma)$ consists of all distributions $f\in\mathcal{D}'(\Gamma)$ such that \begin{equation}\label{f5.22}
\|f\|_{\varphi,\Gamma,\mathcal{E}}^{2}:=
\sum_{j=1}^{\infty}\varphi^{2}(j^{1/n})|\varkappa_{j}(f)|^{2}<\infty, \end{equation}
and the norm in $H^{\varphi}(\Gamma)$ is equivalent to the (Hilbert) norm $\|\cdot\|_{\varphi,\Gamma,\mathcal{E}}$. If $f\in H^{\varphi}(\Gamma)$, then the series \eqref{f5.21} converges in $H^{\varphi}(\Gamma)$. \end{theorem}
\begin{proof} It follows from \eqref{f5.20} and $\varphi\in\mathrm{OR}$ that there exists a number $c\geq1$ such that \begin{equation}\label{f5.23} c^{-1}\varphi(\lambda_{j})\leq\varphi(j^{1/n})\leq c\,\varphi(\lambda_{j})\quad\mbox{whenever}\quad 1\leq j\in\mathbb{Z}. \end{equation} Since $\mathrm{Spec}\,A=\{\lambda_{j}:j\geq1\}$, we have \begin{equation*}
\|\varphi(A)f\|_{\Gamma}^{2}=
\sum_{j=1}^{\infty}\varphi^{2}(\lambda_{j})|\varkappa_{j}(f)|^{2}<\infty \end{equation*}
for every $f\in\mathrm{Dom}\,\varphi(A)$. Hence, the norm $\|\cdot\|_{\varphi,\Gamma,\mathcal{E}}$ is equivalent to the norm in $H^{\varphi}(\Gamma)$ on $\mathrm{Dom}\,\varphi(A)\supset C^{\infty}(\Gamma)$ (see Theorem~\ref{th4.3}).
If $f\in H^{\varphi}(\Gamma)$, we consider a sequence $(f_k)_{k=1}^{\infty}\subset C^{\infty}(\Gamma)$ such that $f_{k}\rightarrow f$ in $H^{\varphi}(\Gamma)$ as $k\rightarrow\infty$. There exist positive numbers $c_1$ and $c_2$ such that \begin{equation*}
\sum_{j=1}^{\infty}\varphi^{2}(j^{1/n})|\varkappa_{j}(f_k)|^{2}=
\|f_k\|_{\varphi,\Gamma,\mathcal{E}}^{2}\leq c_1\|f_k\|_{\varphi,\Gamma}^{2}\leq c_2<\infty \end{equation*} for every integer $k\geq1$. Passing here to the limit as $k\to\infty$ and taking $\varkappa_{j}(f_k)\to\varkappa_{j}(f)$ into account, we conclude by Fatou's lemma that every distribution $f\in H^{\varphi}(\Gamma)$ satisfies \eqref{f5.22}.
Assume now that a distribution $f\in\mathcal{D}'(\Gamma)$ satisfies \eqref{f5.22}, and prove that $f\in H^{\varphi}(\Gamma)$. Owing to our assumption and \eqref{f5.23}, we have the convergent orthogonal series \begin{equation}\label{f5.26} \sum_{j=1}^{\infty}\varphi(\lambda_{j})\varkappa_{j}(f)e_{j}=:h \quad\mbox{in}\quad L_{2}(\Gamma). \end{equation} Consider its partial sum \begin{equation*} h_k:=\sum_{j=1}^{k}\varphi(\lambda_{j})\varkappa_{j}(f)e_{j} \end{equation*} for each $k$, and note that \begin{equation*} \varphi^{-1}(A)h_k=\sum_{j=1}^{k}\varkappa_{j}(f)e_{j}\in C^{\infty}(\Gamma). \end{equation*} Since $h_k\to h$ in $L_{2}(\Gamma)$ as $k\to\infty$, the sequence $(\varphi^{-1}(A)h_k)_{k=1}^{\infty}$ is Cauchy in $H^{\varphi}_{A}(\Gamma)$. Denoting its limit by $g$, we get \begin{equation}\label{f5.27} g=\lim_{k\to\infty}\varphi^{-1}(A)h_k= \sum_{j=1}^{\infty}\varkappa_{j}(f)e_{j}\quad\mbox{in}\quad H^{\varphi}(\Gamma). \end{equation} Hence, $f=g\in H^{\varphi}(\Gamma)$ in view of \eqref{f5.21}.
Thus, a distribution $f\in\mathcal{D}'(\Gamma)$ belongs to $H^{\varphi}(\Gamma)$ if and only if \eqref{f5.22} is satisfied. Besides, given $f\in H^{\varphi}(\Gamma)$, we have \begin{align*}
\|f\|_{\varphi,\Gamma}^{2}&=
\lim_{k\to\infty}\|\varphi^{-1}(A)h_k\|_{\varphi,\Gamma}^{2}\asymp
\lim_{k\to\infty}\|h_k\|_{\Gamma}^{2}=\|h\|_{\Gamma}^{2}\\
&=\sum_{j=1}^{\infty}\varphi^{2}(\lambda_{j})|\varkappa_{j}(f)|^{2}\asymp
\|f\|_{\varphi,\Gamma,\mathcal{E}}^{2} \end{align*} by \eqref{f5.23}, \eqref{f5.26}, and \eqref{f5.27} where $g=f$ (as usual, the symbol $\asymp$ means equivalence of norms). The last assertion of the theorem is due to~\eqref{f5.27}. \end{proof}
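For orientation, let us note the form Theorem~\ref{th5.8} takes in the purely Sobolev situation; this is merely the specialization of \eqref{f5.22} to the power function $\varphi(t):=t^{s}$, with $s\in\mathbb{R}$, for which $H^{\varphi}(\Gamma)=H^{s}(\Gamma)$. Namely, a distribution $f\in\mathcal{D}'(\Gamma)$ belongs to $H^{s}(\Gamma)$ if and only if \begin{equation*} \sum_{j=1}^{\infty}j^{2s/n}\,|\varkappa_{j}(f)|^{2}<\infty, \end{equation*} and the square root of this sum is a (Hilbert) norm equivalent to the norm in $H^{s}(\Gamma)$.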
\begin{remark}\label{rem4.8} Let $0<m\in\mathbb{R}$. Analogs of Theorems \ref{th4.1} and \ref{th4.3} hold true for PsDOs of order~$m$. Namely, suppose that a PsDO $\mathcal{A}$ belongs to $\Psi^{m}(\mathbb{R}^{n})$ or $\Psi^{m}(\Gamma)$ and satisfies conditions (b) and (c). Let $\varphi\in\mathrm{OR}$, and put $\varphi_{m}(t):=\varphi(t^{m})$ whenever $t\geq1$ (evidently, $\varphi_{m}\in\mathrm{OR}$). Then the equality of spaces \begin{equation}\label{f4.20} H^{\varphi}_{A}(\mathbb{R}^{n}\;\mbox{or}\;\Gamma)= H^{\varphi_{m}}(\mathbb{R}^{n}\;\mbox{or}\;\Gamma) \end{equation} holds in the sense that these spaces coincide as completions of $C^{\infty}_{0}(\mathbb{R}^{n})$ or $C^{\infty}(\Gamma)$ with respect to equivalent norms. This implies that Corollaries \ref{cor4.2}, \ref{cor4.4}, and \ref{cor4.5} remain true in this (more general) case. The proof of \eqref{f4.20} is very similar to the proofs of Theorems \ref{th4.1} and \ref{th4.3}. We only observe that $H^{k}_{A}(V)=\nobreak H^{km}(V)$ for every $k\in\mathbb{Z}$ whenever $V=\mathbb{R}^{n}$ or $V=\Gamma$ because $\mathrm{ord}\,\mathcal{A}=m$, which gives \begin{equation}\label{f4.21} H^{\varphi}_{A}(V)= \bigl[H^{-k}_{A}(V),H^{k}_{A}(V)\bigr]_{\psi}= \bigl[H^{-km}(V),H^{km}(V)\bigr]_{\psi}= H^{\varphi_{m}}(V) \end{equation} with equivalence of norms; here the integer $k>0$ and the interpolation parameter $\psi$ are the same as those in the proof of Theorem~\ref{th4.1}. The first equality in \eqref{f4.21} is due to Theorem~\ref{th2.5}, whereas the third is a direct consequence of this theorem and Theorems \ref{th4.1} and \ref{th4.3}. Note that if the PsDO $\mathcal{A}\in\Psi^{m}(\Gamma)$ is classical, then $A^{1/m}$ is the closure (in $L_{2}(\Gamma)$) of some classical PsDO $\mathcal{A}_{1}\in\Psi^{1}(\Gamma)$, as was established by Seeley \cite{Seeley67}. Hence, $$ H^{\varphi}_{A}(\Gamma)=H^{\varphi_{m}}_{A^{1/m}}(\Gamma)= H^{\varphi_{m}}(\Gamma) $$ immediately due to Theorem~\ref{th4.3}. Ending this remark, note that Theorem~\ref{th5.8} remains true if the order of the classical PsDO $\mathcal{A}$ is~$m$. This follows from the fact that every eigenvector of $A$ is also an eigenvector of $A^{1/m}$. \end{remark}
\section{Spectral expansions in spaces with two norms}\label{sec6a}
We will obtain some abstract results on the convergence of spectral expansions in a Hilbert space endowed with a second norm. In the next section, we will apply these results (together with results of Section~\ref{sec4}) to the investigation of the convergence of spectral expansions induced by normal elliptic operators.
\subsection{}\label{sec6.1a} As in Section~\ref{sec2}, $H$ is a separable infinite-dimensional complex Hilbert space. Let $L$ be a normal (in particular, possibly self-adjoint) unbounded linear operator in $H$. Let $E$ be the resolution of the identity (i.e., the spectral measure) generated by $L$; we consider $E$ as an operator-valued function $E=E({\delta})$ of $\delta\in\mathcal{B}(\mathbb{C})$. Here, as usual, $\mathcal{B}(\mathbb{C})$ denotes the class of all Borel subsets of the complex plane~$\mathbb{C}$. Then \begin{equation}\label{f6.1} f=\int\limits_{\mathbb{C}}dEf \end{equation}
for every $f\in H$. Besides, let $N$ be a normed space. (We use the standard notation $\|\cdot\|_{N}$ for the norm in $N$. As above, $\|\cdot\|$ and $(\cdot,\cdot)$ denote the norm and inner product in~$H$.) Suppose that $N$ and $H$ are embedded algebraically in a certain linear space. We find sufficient conditions for the convergence of the spectral expansion \eqref{f6.1} in the space~$N$. Put $\widetilde{B}_{\lambda}:=\{z\in\mathbb{C}:|z|\leq\lambda\}$ for every number $\lambda>0$.
\begin{definition}\label{def6.1} Let $f\in H$. We say that the spectral expansion \eqref{f6.1} converges unconditionally in the space $N$ at the vector $f$ if $E(\delta)f\in N$ whenever $\delta\in\mathcal{B}(\mathbb{C})$ and if for an arbitrary number $\varepsilon>0$ there exists a bounded set $\gamma=\gamma(\varepsilon)\in\mathcal{B}(\mathbb{C})$ such that \begin{equation}\label{f6.2}
\|f-E(\delta)f\|_{N}<\varepsilon\quad\mbox{whenever}\quad \gamma\subseteq\delta\in\mathcal{B}(\mathbb{C}). \end{equation} \end{definition}
Note that \begin{equation*} E(\delta)f=\int\limits_{\delta}dEf \end{equation*} for all $f\in H$ and $\delta\in\mathcal{B}(\mathbb{C})$. If the spectrum of $L$ is countable, say $\mathrm{Spec}\,L=\{z_{j}:1\leq j\in\mathbb{Z}\}$ where $j\neq k\Rightarrow z_{j}\neq z_{k}$, then \eqref{f6.1} becomes \begin{equation}\label{f6.3} f=\sum_{j=1}^{\infty}E(\{z_{j}\})f. \end{equation}
If moreover $|z_{j}|\to\infty$ as $j\to\infty$, Definition~\ref{def6.1} will mean that the series \eqref{f6.3} converges to $f$ in $N$ under an arbitrary permutation of its terms.
Let $I$ stand for the identity operator on $H$, and let $\|\cdot\|_{H\to N}$ and $\|\cdot\|_{H\to H}$ denote the norms of bounded linear operators on the pair of spaces $H$ and $N$ and on the space $H$, respectively.
\begin{theorem}\label{th6.2} Let $R$ and $S$ be bounded linear operators on (whole) $H$ such that they are commutative with $L$ and that \begin{equation}\label{f6.4} \mbox{$R$ is a bounded operator from $H$ to $N$.} \end{equation} Then the spectral expansion \eqref{f6.1} converges unconditionally in the space $N$ at every vector $f\in RS(H)$. Moreover, the degree of this convergence admits the estimate \begin{equation}\label{f6.5}
\|f-E(\delta)f\|_{N}\leq\|R\|_{H\to N}\cdot\|g\|\cdot \|S(I-E(\delta))\|_{H\to H}\cdot r_{g}(\delta) \end{equation} for every $\delta\in\mathcal{B}(\mathbb{C})$ and with some decreasing function $r_{g}(\delta)\in[0,1]$ of $\delta\in\mathcal{B}(\mathbb{C})$ such that $r_{g}(\widetilde{B}_{\lambda})\to0$ as $\lambda\to\infty$. Here, $g\in H$ is an arbitrary vector satisfying $f=RSg$, and the function $r_{g}(\delta)$ does not depend on $S$ and~$R$. \end{theorem}
Note that if $T$ is a bounded linear operator on $H$ and if $M$ is an unbounded linear operator in $H$, then the phrase ``$T$ is commutative with $M$'' means that $TMf=MTf$ for every vector $f\in(\mathrm{Dom}\,M)\cap\mathrm{Dom}(MT)$ (see, e.g., \cite[Chapter~IV, \S~3, Section~4]{FunctionalAnalysis72}).
\begin{proof}[Proof of Theorem $\ref{th6.2}$] Choose a vector $f\in RS(H)\subseteq N\cap H$ arbitrarily. If $f=0$, the conclusion of this theorem will be trivial; we thus suppose that $f\neq0$. Consider a nonzero vector $g\in H$ such that $f=RSg$. Choose a set $\delta\in\mathcal{B}(\mathbb{C})$ arbitrarily. Since the operators $R$ and $S$ are bounded on $H$ and commutative with $L$, they are also commutative with $E(\delta)$. Therefore, \begin{equation*} E(\delta)f=E(\delta)(RS)g=(RS)E(\delta)g\in N \end{equation*} due to \eqref{f6.4}. Hence, \begin{equation}\label{f6.8} \begin{aligned}
\|f-E(\delta)f\|_{N}&
=\|RS(I-E(\delta))g\|_{N}=\|RS(I-E(\delta))^{2}g\|_{N}\\
&\leq\|R\|_{H\to N}\cdot\|S(I-E(\delta))\|_{H\to H}\cdot \|(I-E(\delta))g\|. \end{aligned} \end{equation} Put \begin{equation}\label{f6.10}
r_{g}(\delta):=\|(I-E(\delta))g\|\cdot\|g\|^{-1}; \end{equation} then \eqref{f6.8} yields the required estimate \eqref{f6.5}. It follows plainly from \eqref{f6.10} that $r_{g}(\delta)$ viewed as a function of $\delta\in\mathcal{B}(\mathbb{C})$ has the required properties. \end{proof}
\begin{remark}\label{rem6.3} Let $R$ be a bounded operator on $H$. If the norms in $N$ and $H$ are compatible, condition \eqref{f6.4} is equivalent to the inclusion $R(H)\subseteq N$. Indeed, assume that these norms are compatible and that $R(H)\subseteq N$, and show that $R$ satisfies~\eqref{f6.4}. According to the closed graph theorem, the operator $R:H\to\widetilde{N}$ is bounded if and only if it is closed; here, $\widetilde{N}$ is the completion of the normed space~$N$. Therefore, it is enough to prove that this operator is closable. Assume that a sequence $(f_{k})_{k=1}^{\infty}\subset H$ satisfies the following two conditions: $f_{k}\to0$ in $H$ and $Rf_{k}\to h$ in $\widetilde{N}$ for certain $h\in\widetilde{N}$, as $k\to\infty$. Then $Rf_{k}\to0$ in $H$ because $R$ is bounded on $H$. Hence, $h=0$ as the norms in $N$ and $H$ are compatible. Thus, the operator $R:H\to\widetilde{N}$ is closable. \end{remark}
\begin{remark}\label{rem6.4} Borel measurable bounded functions of $L$ are important examples of the bounded operators on $H$ commuting with~$L$. If $S=\eta(L)$ for a bounded Borel measurable function $\eta:\mathrm{Spec}\,L\to\mathbb{C}$, the third factor on the right of \eqref{f6.5} will admit the estimate \begin{equation} \begin{aligned}
\|S(I-E(\delta))\|_{H\to H}&\leq
\sup\bigl\{|\eta(z)|(1-\chi_{\delta}(z)):z\in\mathrm{Spec}\,L\bigr\}\\
&\leq\sup\bigl\{|\eta(z)|:z\in(\mathrm{Spec}\,L)\setminus\delta\bigr\}. \end{aligned} \end{equation}
(As usual, $\chi_{\delta}$ stands for the characteristic function of the set $\delta$.) Hence, if $\eta(z)\to0$ as $|z|\to\infty$, then \begin{equation*}
\lim_{\lambda\to\infty}\|S(I-E(\widetilde{B}_{\lambda}))\|_{H\to H}=0 \end{equation*} (as well as the fourth factor $r_{g}(\delta)$ if $\delta=\widetilde{B}_{\lambda}$). \end{remark}
\subsection{}\label{sec6.1b} Assume now that the normal operator $L$ has pure point spectrum, i.e. the Hilbert space $H$ has an orthonormal basis $(e_{j})_{j=1}^{\infty}$ formed by some eigenvectors $e_{j}$ of $L$. Unlike Subsection~\ref{sec6.1a}, we now allow $L$ to be either unbounded in $H$ or bounded on $H$. Thus, \begin{equation}\label{f6.11} f=\sum_{j=1}^{\infty}(f,e_j)e_j \end{equation} in $H$ for every $f\in H$. Let $\lambda_{j}$ denote the eigenvalue of $L$ such that $Le_j=\lambda_{j}e_j$. Note that the expansions \eqref{f6.1} and \eqref{f6.3} become \eqref{f6.11} provided that all the eigenspaces of $L$ are one-dimensional. Let $P_k$ denote the orthoprojector onto the linear span of the eigenvectors $e_1,\ldots,e_k$.
\begin{theorem}\label{th6.5} Let $\omega,\eta:\mathrm{Spec}\,L\to\mathbb{C}\setminus\{0\}$ be Borel measurable bounded functions, and consider the bounded linear operators $R:=\omega(L)$ and $S:=\eta(L)$ on $H$. Assume that $R$ satisfies \eqref{f6.4}. Then the series \eqref{f6.11} converges unconditionally (i.e. under each permutation of its terms) in the space $N$ at every vector $f\in RS(H)$. Moreover, the degree of this convergence admits the estimate \begin{equation}\label{f6.12}
\biggl\|f-\sum_{j=1}^{k}(f,e_j)e_j\biggr\|_{N}\leq
\|R\|_{H\to N}\cdot\|g\|\cdot \|S(I-P_k)\|_{H\to H}\cdot r_{g,k} \end{equation} for every integer $k\geq1$ and with some decreasing sequence $(r_{g,k})_{k=1}^{\infty}\subset[0,1]$ that tends to zero and does not depend on $S$ and~$R$. Here, $g:=(RS)^{-1}f\in H$. \end{theorem}
\begin{proof} Since $RSe_j=(\omega\eta)(\lambda_j)e_j$ for every integer $j\geq1$ and since $(\omega\eta)(t)\neq0$ for every $t\in\mathrm{Spec}\,L$, we conclude that each $e_j\in N$ in view of hypothesis \eqref{f6.4}. Thus, the left-hand side of \eqref{f6.12} makes sense. Besides, the operator $RS=(\omega\eta)(L)$ is algebraically invertible; hence, the vector $g:=(RS)^{-1}f\in H$ is well defined for every $f\in RS(H)$. We suppose that $f\neq0$ because the conclusion of this theorem is trivial in the $f=0$ case. Choosing an integer $k\geq1$ arbitrarily, we get \begin{align*} (RS)P_{k}g&=RS\sum_{j=1}^{k}(g,e_j)e_j=\sum_{j=1}^{k}(g,e_j)RSe_j= \sum_{j=1}^{k}(g,e_j)(\omega\eta)(\lambda_j)e_j\\ &=P_{k}(RS)\sum_{j=1}^{\infty}(g,e_j)e_j=P_{k}(RS)g. \end{align*} Hence, \begin{equation}\label{f6.13} \begin{aligned}
\biggl\|f-\sum_{j=1}^{k}(f,e_j)e_j\biggr\|_{N}&=\|f-P_{k}f\|_{N}=
\|RSg-P_{k}(RS)g\|_{N}\\
&=\|RS(I-P_{k})g\|_{N}=\|RS(I-P_{k})^{2}g\|_{N}\\
&\leq\|R\|_{H\to N}\cdot\|S(I-P_{k})\|_{H\to H}\cdot \|(I-P_{k})g\|. \end{aligned} \end{equation} Putting \begin{equation}\label{f6.14}
r_{g,k}:=\|(I-P_k)g\|\cdot\|g\|^{-1}, \end{equation} we see that \eqref{f6.13} yields the required estimate \eqref{f6.12}. It follows plainly from \eqref{f6.14} that the sequence $(r_{g,k})_{k=1}^{\infty}$ has the required properties. Hence, the series \eqref{f6.11} converges to $f$ in $N$. This convergence is unconditional because the hypotheses of the theorem are invariant with respect to permutations of terms of this series. \end{proof}
\begin{remark}\label{rem6.6} The third factor on the right of \eqref{f6.12} admits the estimate \begin{equation}\label{f6.15}
\|S(I-P_k)\|_{H\to H}\leq\sup_{j\geq k+1}|\eta(\lambda_j)| \end{equation} for each integer $k\geq1$. Indeed, since \begin{equation*} S(I-P_k)f=\eta(L)\sum_{j=k+1}^{\infty}(f,e_{j})e_{j}= \sum_{j=k+1}^{\infty}(f,e_{j})\eta(\lambda_j)e_{j} \end{equation*} for every $f\in H$ (the convergence holds in $H$), we have \begin{equation*}
\|S(I-P_k)f\|^{2}=\sum_{j=k+1}^{\infty}|(f,e_{j})\eta(\lambda_j)|^{2}\leq
\bigl(\sup_{j\geq k+1}|\eta(\lambda_j)|\bigr)^{2}\cdot\|f\|^{2}, \end{equation*}
which gives \eqref{f6.15}. Specifically, if $\eta(t)\to0$ as $|t|\to\infty$ and if $|\lambda_j|\to\infty$ as $j\to\infty$, then \begin{equation*}
\lim_{k\to\infty}\|S(I-P_k)\|_{H\to H}=0 \end{equation*} (as well as the fourth factor $r_{g,k}$). \end{remark}
It is worthwhile to note that the hypotheses of Theorem~\ref{th6.5} do not depend on the choice of a basis of~$H$. They hence imply the unconditional convergence of the series \eqref{f6.11} in $N$ at every vector $f\in RS(H)$ for \emph{any} orthonormal basis of $H$ formed by eigenvectors of~$L$. Remark also that Theorem~\ref{th6.5} reinforces the conclusion of Theorem~\ref{th6.2} under the hypotheses of Theorem~\ref{th6.5}. Indeed, owing to Theorem~\ref{th6.2}, the series \eqref{f6.11} converges in $N$ at every $f\in RS(H)$ if its terms corresponding to equal eigenvalues are grouped together and if $|\lambda_{j}|\to\infty$ as $j\to\infty$.
Theorem~\ref{th6.5} contains M.~G.~Krein's theorem \cite{Krein47} according to which the series \eqref{f6.11} converges in $N$ at every $f\in L(H)$ if $L$ is a self-adjoint compact operator in $H$ obeying \eqref{f6.4}. The latter theorem generalizes (to abstract operators) the Hilbert\,--\,Schmidt theorem about the uniform decomposability of sourcewise representable functions with respect to eigenfunctions of a symmetric integral operator. If $L$ is a positive definite self-adjoint operator with discrete spectrum and if $R=L^{-\sigma}$ and $S=L^{-\tau}$ for certain $\sigma,\tau\geq0$ and if $R$ satisfies \eqref{f6.4}, Krasnosel'ski\u{\i} and Pustyl'nik \cite[Theorem~22.1]{KrasnoselskiiZabreikoPustylnikSobolevskii76} proved that the left-hand side of \eqref{f6.12} is $o(\lambda_{k}^{-\tau})$ as $k\to\infty$. This result follows from \eqref{f6.12} in view of \eqref{f6.15}.
\section{Applications to spectral expansions induced by elliptic operators}\label{sec6}
This section is devoted to applications of results of Sections \ref{sec4} and \ref{sec6a} to the investigation of the convergence (in the uniform metric) of spectral expansions induced by uniformly elliptic operators on $\mathbb{R}^{n}$ and by elliptic operators on a closed manifold $\Gamma\in C^{\infty}$. We find explicit criteria of the convergence of these expansions in the normed space $C^{q}$, with $q\geq0$, on the function class $H^{\varphi}$, with $\varphi\in\mathrm{OR}$, and evaluate the degree of this convergence. Besides, we consider applications of the spaces $H^{\varphi}(\Gamma)$ to the investigation of the almost everywhere convergence of the spectral expansions.
\subsection{}\label{sec6.2} Let $1\leq n\in\mathbb{Z}$ and $0<m\in\mathbb{R}$. We suppose in this subsection that $L$ is a PsDO of class $\Psi^{m}(\mathbb{R}^{n})$ and that $L$ is uniformly elliptic on $\mathbb{R}^{n}$. We may and will consider $L$ as a closed unbounded operator in the Hilbert space $H:=L_{2}(\mathbb{R}^{n})$ with $\mathrm{Dom}\,L=H^{m}(\mathbb{R}^{n})$ (see \cite[Sections 2.3~d and 3.1~b]{Agranovich94}). We also suppose that $L$ is a normal operator in $L_{2}(\mathbb{R}^{n})$. Then $L$ generates a resolution of the identity $E=E({\delta})$, and the spectral expansion \eqref{f6.1} holds for every function $f\in L_{2}(\mathbb{R}^{n})$. Note that the spectrum of $L$ may be uncountable, and $L$ may have no eigenfunctions at all. Hence, the expansion \eqref{f6.1} may not be represented in the form of the series \eqref{f6.3} or \eqref{f6.11}. For example, if $L=-\Delta$, then the spectrum of $L$ coincides with $[0,\infty)$ and is continuous.
\begin{definition}\label{def6.7} Let a normed function space $N$ lie in $\mathcal{S}'(\mathbb{R}^{n})$. We say that the expansion \eqref{f6.1} (where $H=L_{2}(\mathbb{R}^{n})$) converges unconditionally in $N$ on a function class $\Upsilon$ if $\Upsilon\subset L_{2}(\mathbb{R}^{n})$ and if this expansion satisfies Definition~\ref{def6.1} for every $f\in\Upsilon$. \end{definition}
We consider the important case where $N=C^{q}_{\mathrm{b}}(\mathbb{R}^{n})$ for an integer $q\geq0$ and use generalized Sobolev spaces $H^{\varphi}(\mathbb{R}^{n})$ as $\Upsilon$. Here, $C^{q}_{\mathrm{b}}(\mathbb{R}^{n})$ denotes the Banach space of
$q$ times continuously differentiable functions $\nobreak{f:\mathbb{R}^{n}\to\mathbb{C}}$ whose partial derivatives $\partial^{\alpha}f$ are bounded on $\mathbb{R}^{n}$ whenever $|\alpha|\leq q$. As usual, $\alpha=(\alpha_{1},\ldots,\alpha_{n})\in\mathbb{Z}_{+}^{n}$ and $|\alpha|=\alpha_{1}+\cdots+\alpha_{n}$. This space is endowed with the norm \begin{equation*}
\|f\|_{C,q,\mathbb{R}^{n}}:=\sum_{|\alpha|\leq q}\,
\sup\bigl\{|\partial^{\alpha}f(x)|:x\in\mathbb{R}^{n}\bigr\}. \end{equation*}
\begin{theorem}\label{th6.8} Let $0\leq q\in\mathbb{Z}$ and $\varphi\in\mathrm{OR}$. The spectral expansion \eqref{f6.1} converges unconditionally in the normed space $C^{q}_{\mathrm{b}}(\mathbb{R}^{n})$ on the function class $H^{\varphi}(\mathbb{R}^{n})$ if and only if \begin{equation}\label{f6.16} \int\limits_{1}^{\infty}\frac{t^{2q+n-1}}{\varphi^2(t)}\,dt<\infty. \end{equation} \end{theorem}
\begin{remark}\label{rem6.9} If we replace the lower limit $1$ in \eqref{f6.16} with an arbitrary number $k>1$, we will obtain an equivalent condition on the function $\varphi\in\mathrm{OR}$. This is due to the fact that every function $\varphi\in\mathrm{OR}$ is bounded together with $1/\varphi$ on each compact interval $[1,k]$ where $k>1$. This follows from property \eqref{f2.3}, in which we put $t=1$. \end{remark}
The next result allows us to estimate the degree of the convergence stipulated by Theorem~\ref{th6.8}.
\begin{theorem}\label{th6.10} Let $0\leq q\in\mathbb{Z}$ and $\phi_{1},\phi_{2}\in\mathrm{OR}$. Suppose that $\phi_{1}(t)\to\infty$ as $t\to\infty$ and that \begin{equation}\label{f6.17} \int\limits_{1}^{\infty}\frac{t^{2q+n-1}}{\phi_{2}^{2}(t)}\,dt<\infty. \end{equation} Consider the function $\varphi:=\phi_{1}\phi_{2}$, which evidently belongs to $\mathrm{OR}$ and satisfies \eqref{f6.16}. Then the degree of the convergence of the spectral expansion \eqref{f6.1} in the normed space $C^{q}_{\mathrm{b}}(\mathbb{R}^{n})$ on the class $H^{\varphi}(\mathbb{R}^{n})$ admits the estimate \begin{equation}\label{f6.18}
\|f-E(\widetilde{B}_{\lambda})f\|_{C,q,\mathbb{R}^{n}}\leq c\cdot\|f\|_{\varphi,\mathbb{R}^{n}}\cdot \sup\bigl\{(\phi_{1}(t))^{-1}:t\geq\langle\lambda\rangle^{1/m}\bigr\} \cdot\theta_{f}(\lambda) \end{equation} for every function $f\in H^{\varphi}(\mathbb{R}^{n})$ and each number $\lambda>0$. Here, $c$ is a certain positive number that does not depend on $f$ and $\lambda$, and $\theta_{f}(\lambda)$ is a decreasing function of $\lambda$ such that $\nobreak{0\leq\theta_{f}(\lambda)\leq1}$ whenever $\lambda>0$ and that $\theta_{f}(\lambda)\to0$ as $\lambda\to\infty$. \end{theorem}
As to \eqref{f6.18}, recall that $\langle\lambda\rangle:=(1+|\lambda|^{2})^{1/2}$.
\begin{remark}\label{rem6.11} Suppose that a function $\varphi\in\mathrm{OR}$ satisfies \eqref{f6.16}; then it may be represented in the form $\varphi=\phi_{1}\phi_{2}$ for some functions $\phi_{1},\phi_{2}\in\mathrm{OR}$ subject to the hypotheses of Theorem~\ref{th6.10}. Indeed, considering the function \begin{equation*} \eta(t):= \int\limits_t^\infty\frac{\tau^{2q+n-1}}{\varphi^2(\tau)}\,d\tau<\infty \quad\mbox{of}\quad t\geq1 \end{equation*} and choosing a number $\varepsilon\in(0,1/2)$, we put $\phi_{1}(t):=\eta^{-\varepsilon}(t)$ and $\phi_{2}(t):=\varphi(t)\eta^{\varepsilon}(t)$ whenever $t\geq1$. Then $\phi_{1}(t)\to\infty$ as $t\to\infty$, and \begin{equation*} \int\limits_1^\infty\frac{t^{2q+n-1}}{\phi_2^2(t)}\,dt= \int\limits_1^\infty\frac{t^{2q+n-1}}{\varphi^2(t)\eta^{2\varepsilon}(t)} \,dt=-\int\limits_1^\infty\frac{d\eta(t)}{\eta^{2\varepsilon}(t)}= \int\limits^{\eta(1)}_0\frac{d\eta}{\eta^{2\varepsilon}}<\infty. \end{equation*} To show that $\phi_{1},\phi_{2}\in\mathrm{OR}$, it suffices to prove the inclusion $\eta\in\mathrm{OR}$. Since $\varphi\in\mathrm{OR}$, there exist numbers $a>1$ and $c\geq1$ such that $c^{-1}\leq\varphi(\lambda\zeta)/\varphi(\zeta)\leq c$ for all $\zeta\geq1$ and $\lambda\in[1,a]$. Assuming $t\geq1$ and $1\leq\lambda\leq a$, we therefore get \begin{equation*} \eta(\lambda t)=\int\limits_{\lambda t}^\infty \frac{\tau^{2q+n-1}}{\varphi^2(\tau)}\,d\tau= \lambda^{2q+n}\int\limits_{t}^\infty \frac{\zeta^{2q+n-1}}{\varphi^2(\lambda\zeta)}\,d\zeta\leq c^{2}\lambda^{2q+n}\int\limits_{t}^\infty \frac{\zeta^{2q+n-1}}{\varphi^2(\zeta)}\,d\zeta \leq c^2a^{2q+n}\eta(t) \end{equation*} and \begin{equation*} \eta(\lambda t)=\lambda^{2q+n}\int\limits_{t}^\infty \frac{\zeta^{2q+n-1}}{\varphi^2(\lambda\zeta)}\,d\zeta\geq c^{-2}\lambda^{2q+n}\int\limits_{t}^\infty \frac{\zeta^{2q+n-1}}{\varphi^2(\zeta)}\,d\zeta\geq c^{-2}\eta(t); \end{equation*} i.e. $\eta\in\mathrm{OR}$. \end{remark}
Before we prove Theorems \ref{th6.8} and \ref{th6.10}, we will illustrate them with three examples. As above, $0\leq q\in\mathbb{Z}$. As in Theorem~\ref{th6.10}, we let $c$ denote a positive number that does not depend on $f$ and $\lambda$.
\begin{example}\label{ex6.2.1} Let us restrict ourselves to the Sobolev spaces $H^{s}(\mathbb{R}^{n})$, with $s\in\mathbb{R}$. Owing to Theorem~\ref{th6.8}, the spectral expansion \eqref{f6.1} converges unconditionally in $C^{q}_{\mathrm{b}}(\mathbb{R}^{n})$ on the class $H^{s}(\mathbb{R}^{n})$ if and only if $s>q+n/2$. Let $s>q+n/2$, and put $r:=s-q-n/2>0$. If $0<\varepsilon<r/m$, then the degree of this convergence admits the following estimate: \begin{equation*}
\|f-E(\widetilde{B}_{\lambda})f\|_{C,q,\mathbb{R}^{n}}\leq c\,\|f\|_{s,\mathbb{R}^{n}}\langle\lambda\rangle^{\varepsilon-r/m} \end{equation*}
for all $f\in H^{s}(\mathbb{R}^{n})$ and $\lambda>0$. Here, $\|\cdot\|_{s,\mathbb{R}^{n}}$ is the norm in $H^{s}(\mathbb{R}^{n})$. This estimate follows from Theorem~\ref{th6.10}, in which we put $\phi_{1}(t):=t^{r-m\varepsilon}$ and $\phi_{2}(t):=t^{s-r+m\varepsilon}$ for every $t\geq1$. Choosing a number $\varepsilon>0$ arbitrarily and putting \begin{equation}\label{f6.18b} \phi_{1}(t):=t^{r}\log^{-\varepsilon-1/2}(1+t)\quad\mbox{and}\quad \phi_{2}(t):=t^{s-r}\log^{\varepsilon+1/2}(1+t) \end{equation} for every $t\geq1$ in this theorem, we obtain the sharper estimate \begin{equation*}
\|f-E(\widetilde{B}_{\lambda})f\|_{C,q,\mathbb{R}^{n}}\leq c\,\|f\|_{s,\mathbb{R}^{n}}\langle\lambda\rangle^{-r/m} \log^{\varepsilon+1/2}(1+\langle\lambda\rangle) \end{equation*} for the same $f$ and $\lambda$. \end{example}
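For the reader's convenience, we record the elementary computations behind Example~\ref{ex6.2.1}; they are direct substitutions into conditions \eqref{f6.16} and \eqref{f6.17} and into the estimate \eqref{f6.18}. For $\varphi(t)=t^{s}$, condition \eqref{f6.16} becomes \begin{equation*} \int\limits_{1}^{\infty}t^{2q+n-1-2s}\,dt<\infty, \end{equation*} which holds if and only if $2q+n-1-2s<-1$, i.e. $s>q+n/2$. Further, for $\phi_{2}(t)=t^{s-r+m\varepsilon}$ we have $2q+n-1-2(s-r+m\varepsilon)=-1-2m\varepsilon<-1$, so that \eqref{f6.17} is satisfied, whereas the function $\phi_{1}(t)=t^{r-m\varepsilon}$ is increasing because $0<\varepsilon<r/m$; hence \begin{equation*} \sup\bigl\{(\phi_{1}(t))^{-1}:t\geq\langle\lambda\rangle^{1/m}\bigr\}= \langle\lambda\rangle^{-(r-m\varepsilon)/m}= \langle\lambda\rangle^{\varepsilon-r/m}. \end{equation*} Together with the bound $\theta_{f}(\lambda)\leq1$, this turns \eqref{f6.18} into the first estimate of the example.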
Using the generalized Sobolev spaces $H^{\varphi}(\mathbb{R}^{n})$, with $\varphi\in\mathrm{OR}$, we may establish the unconditional convergence of \eqref{f6.1} in $C^{q}_{\mathrm{b}}(\mathbb{R}^{n})$ at some functions \begin{equation*} f\notin H^{q+n/2+}(\mathbb{R}^{n}):= \bigcup_{s>q+n/2}H^{s}(\mathbb{R}^{n}) \end{equation*} and evaluate its degree. (Note that this union is narrower than $H^{q+n/2}(\mathbb{R}^{n})$.)
\begin{example}\label{ex6.2.2} Choosing a number $\varrho>0$ arbitrarily and putting \begin{equation}\label{f6.19} \varphi(t):=t^{q+n/2}\log^{\varrho+1/2}(1+t) \quad\mbox{for every}\quad t\geq1, \end{equation} we conclude by Theorem~\ref{th6.8} that the spectral expansion \eqref{f6.1} converges unconditionally in $C^{q}_{\mathrm{b}}(\mathbb{R}^{n})$ on the class $H^{\varphi}(\mathbb{R}^{n})$. This class is evidently broader than $H^{q+n/2+}(\mathbb{R}^{n})$. If $0<\varepsilon<\varrho$, then the degree of this convergence admits the estimate \begin{equation*}
\|f-E(\widetilde{B}_{\lambda})f\|_{C,q,\mathbb{R}^{n}}\leq c\,\|f\|_{\varphi,\mathbb{R}^{n}} \log^{\varepsilon-\varrho}(1+\langle\lambda\rangle) \end{equation*} for all $f\in H^{\varphi}(\mathbb{R}^{n})$ and $\lambda>0$. This estimate follows from Theorem~\ref{th6.10}, in which we represent $\varphi$ as the product of the functions \begin{equation*} \phi_{1}(t):=\log^{\varrho-\varepsilon}(1+t)\quad\mbox{and}\quad \phi_{2}(t):=t^{q+n/2}\log^{\varepsilon+1/2}(1+t). \end{equation*} \end{example}
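Let us briefly verify the hypotheses of Theorem~\ref{th6.10} for this factorization of $\varphi$. Condition \eqref{f6.17} takes the form \begin{equation*} \int\limits_{1}^{\infty}\frac{dt}{t\,\log^{1+2\varepsilon}(1+t)}<\infty, \end{equation*} which is true because $1+2\varepsilon>1$. Moreover, the function $\phi_{1}(t)=\log^{\varrho-\varepsilon}(1+t)$ is increasing and tends to infinity, so that the supremum in \eqref{f6.18} equals $\log^{\varepsilon-\varrho}(1+\langle\lambda\rangle^{1/m})$. The latter does not exceed a constant (depending only on $m$, $\varrho$, and $\varepsilon$) multiple of $\log^{\varepsilon-\varrho}(1+\langle\lambda\rangle)$ because $\log(1+t^{1/m})\geq(2m)^{-1}\log(1+t)$ whenever $t\geq2$, whereas both logarithms are bounded away from $0$ and $\infty$ for $1\leq t\leq2$.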
Using iterated logarithms, we may obtain weaker sufficient conditions for the unconditional convergence of \eqref{f6.1} in $C^{q}_{\mathrm{b}}(\mathbb{R}^{n})$. The next example involves the double logarithm.
\begin{example}\label{ex6.2.3} Choose a number $\varrho>0$ arbitrarily, and consider the function \begin{equation}\label{f6.20} \varphi(t):=t^{q+n/2}\,(\log(1+t))^{1/2}\,(\log\log(2+t))^{\varrho+1/2} \quad\mbox{of}\quad t\geq1. \end{equation} According to Theorem~\ref{th6.8}, the spectral expansion \eqref{f6.1} converges unconditionally in $C^{q}_{\mathrm{b}}(\mathbb{R}^{n})$ on the class $H^{\varphi}(\mathbb{R}^{n})$. If $0<\varepsilon<\varrho$, then the degree of this convergence admits the estimate \begin{equation*}
\|f-E(\widetilde{B}_{\lambda})f\|_{C,q,\mathbb{R}^{n}}\leq c\,\|f\|_{\varphi,\mathbb{R}^{n}} \bigl(\log\log(2+\langle\lambda\rangle)\bigr)^{\varepsilon-\varrho} \end{equation*} for all $f\in H^{\varphi}(\mathbb{R}^{n})$ and $\lambda>0$. The estimate follows from Theorem~\ref{th6.10} provided that we represent $\varphi$ as the product of the functions $\phi_{1}(t):=(\log\log(2+t))^{\varrho-\varepsilon}$ and \begin{equation*} \phi_{2}(t):=t^{q+n/2}(\log(1+t))^{1/2} (\log\log(2+t))^{\varepsilon+1/2}. \end{equation*} \end{example}
Let us turn to the proofs of Theorems \ref{th6.8} and \ref{th6.10}. The proofs are based on the following version of H\"ormander's embedding theorem \cite[Theorem 2.2.7]{Hermander63}:
\begin{proposition}\label{prop6.12} Let $0\leq q\in\mathbb{Z}$ and $\varphi\in\mathrm{OR}$. Then condition \eqref{f6.16} implies the continuous embedding $H^{\varphi}(\mathbb{R}^n)\hookrightarrow C^{q}_\mathrm{b}(\mathbb{R}^n)$. Conversely, if \begin{equation}\label{f6.21} \{w\in H^{\varphi}(\mathbb{R}^n): \mathrm{supp}\,w\subset G\}\subseteq C^q(\mathbb{R}^n) \end{equation} for an open nonempty set $G\subset\mathbb{R}^n$, then condition \eqref{f6.16} is satisfied. \end{proposition}
\begin{proof} We first recall the definition of the H\"ormander space $\mathcal{B}_{p,k}$, which the embedding theorem deals with. Let $1\leq p\leq\infty$, and let a function $k:\mathbb{R}^{n}\to(0,\infty)$ satisfy the following condition: there exist positive numbers $c$ and $\ell$ such that \begin{equation}\label{f6.22}
k(\xi+\zeta)\leq(1+c|\xi|)^{\ell}\,k(\zeta)\quad\mbox{for all}\quad \xi,\zeta\in\mathbb{R}^{n} \end{equation}
(the class of all such functions $k$ is denoted by $\mathcal{K}$). According to \cite[Definition 2.2.1]{Hermander63}, the complex linear space $\mathcal{B}_{p,k}$ consists of all distributions $w\in\mathcal{S}'(\mathbb{R}^{n})$ whose Fourier transform $\widehat{w}$ is locally Lebesgue integrable over $\mathbb{R}^{n}$ and such that the product $k\widehat{w}$ belongs to the Lebesgue space $L_{p}(\mathbb{R}^{n})$. The space $\mathcal{B}_{p,k}$ is endowed with the norm $\|k\widehat{w}\|_{L_{p}(\mathbb{R}^{n})}$ and is complete with respect to it.
According to \cite[Theorem 2.2.7]{Hermander63} and its proof, the condition \begin{equation}\label{f6.23}
\frac{(1+|\xi|)^{q}}{k(\xi)}\in L_{p'}(\mathbb{R}^{n}) \end{equation} implies the inclusion $\mathcal{B}_{p,k}\subset C^{q}_\mathrm{b}(\mathbb{R}^n)$; here, as usual, the conjugate parameter $p'\in[1,\infty]$ is defined by $1/p+1/p'=1$. Moreover, if the set $\{w\in \mathcal{B}_{p,k}:\mathrm{supp}\,w\subset G\}$ lies in $C^{q}(\mathbb{R}^n)$ for an open nonempty set $G\subset\mathbb{R}^n$, then condition \eqref{f6.23} is satisfied. Note that the inclusion $\mathcal{B}_{p,k}\subset C^{q}_\mathrm{b}(\mathbb{R}^n)$ is continuous because both spaces are complete and continuously embedded in a common Hausdorff space, e.g. in $\mathcal{S}'(\mathbb{R}^{n})$.
The Hilbert space $H^{\varphi}(\mathbb{R}^n)$ is the H\"ormander space $\mathcal{B}_{2,k}$ provided that $k(\xi)=\varphi(\langle\xi\rangle)$ for every $\xi\in\mathbb{R}^n$ and that $k$ satisfies \eqref{f6.22}. Owing to \cite[Lemma~2.7]{MikhailetsMurach14}, the function $k(\xi):=\varphi(\langle\xi\rangle)$ of $\xi\in\mathbb{R}^n$ satisfies a weaker condition than \eqref{f6.22}; namely, there exist positive numbers $c_{0}$ and $\ell_0$ such that \begin{equation*}
k(\xi+\zeta)\leq c_{0}(1+|\xi|)^{\ell_0}k(\zeta) \quad\mbox{for all}\quad \xi,\zeta\in\mathbb{R}^{n}. \end{equation*}
However, there exists a function $k_{1}\in\mathcal{K}$ such that both functions $k/k_{1}$ and $k_{1}/k$ are bounded on $\mathbb{R}^n$ (see \cite[the remark at the end of Section~2.1]{Hermander63}). Hence, the spaces $H^{\varphi}(\mathbb{R}^n)$ and $\mathcal{B}_{2,k_1}$ are equal with equivalence of norms. Thus, Proposition~\ref{prop6.12} holds true if we replace \eqref{f6.16} with the condition $(1+|\xi|)^{q}/k_{1}(\xi)\in L_{2}(\mathbb{R}^{n})$. The latter is equivalent to \begin{equation}\label{f6.24} \int\limits_{\mathbb{R}^{n}}\, \frac{\langle\xi\rangle^{2q}d\xi}{\varphi^{2}(\langle\xi\rangle)}<\infty. \end{equation} It remains to show that $\eqref{f6.16}\Leftrightarrow\eqref{f6.24}$.
Passing to spherical coordinates with $r:=|\xi|$ and changing variables $t=\sqrt{1+r^{2}}$, we obtain \begin{align*} \int\limits_{\mathbb{R}^{n}}\, \frac{\langle\xi\rangle^{2q}d\xi}{\varphi^{2}(\langle\xi\rangle)}&= c_1\int\limits_{0}^{\infty}\, \frac{(1+r^{2})^{q}\,r^{n-1}dr}{\varphi^{2}(\sqrt{1+r^{2}}\,)}= c_1\int\limits_{1}^{\infty}\, \frac{t^{2q+1}(t^{2}-1)^{n/2-1}dt}{\varphi^{2}(t)}\\ &=c_2+c_1\int\limits_{2}^{\infty} \frac{t^{2q+1}(t^{2}-1)^{n/2-1}dt}{\varphi^{2}(t)}. \end{align*} Here, $c_{1}$ is the area of the unit sphere in $\mathbb{R}^{n}$, i.e. $n$ times the volume of the unit ball, and $$ c_2:=c_1\int\limits_{1}^{2} \frac{t^{2q+1}(t^{2}-1)^{n/2-1}dt}{\varphi^{2}(t)}<\infty $$ because the function $1/\varphi$ is bounded on $[1,2]$ and because $n/2-1>-1$. Hence, \begin{align*} \eqref{f6.24}\,\Longleftrightarrow \int\limits_{2}^{\infty} \frac{t^{2q+1}(t^{2}-1)^{n/2-1}dt}{\varphi^{2}(t)}<\infty\, \Longleftrightarrow \int\limits_{2}^{\infty}\frac{t^{2q+n-1}dt}{\varphi^{2}(t)}<\infty \,\Longleftrightarrow\,\eqref{f6.16}. \end{align*} \end{proof}
We systematically use the following auxiliary result:
\begin{lemma}\label{lem6.13} Suppose that a function $\chi\in\mathrm{OR}$ is integrable over $[1,\infty)$. Then $\chi$ is bounded on $[1,\infty)$, and $t\chi(t)\to0$ as $t\to\infty$. \end{lemma}
\begin{proof} Let us prove by contradiction that $t\chi(t)\to0$ as $t\to\infty$. Assume the contrary; i.e., there exists a number $\varepsilon>0$ and a sequence $(t_{j})_{j=1}^{\infty}\subset[1,\infty)$ such that $t_{j}\to\infty$ as $j\to\infty$ and that $t_{j}\,\chi(t_{j})\geq\varepsilon$ for each $j\geq1$. Since $\chi\in\mathrm{OR}$, there are numbers $a>1$ and $c\geq1$ such that $c^{-1}\leq\chi(\lambda\tau)/\chi(\tau)\leq c$ for all $\tau\geq1$ and $\lambda\in[1,a]$. Since $\chi$ is integrable over $[1,\infty)$, we have \begin{equation}\label{f6.25} \sum_{k=0}^{\infty}\int\limits_{a^k}^{a^{k+1}}\chi(t)dt<\infty. \end{equation} Choosing an integer $j\geq1$ arbitrarily, we find an integer $k(j)\geq0$ such that $a^{k(j)}\leq t_{j}<a^{k(j)+1}$ and observe that $c^{-1}\leq\chi(t)/\chi(t_{j})\leq c$ whenever $t\in[a^{k(j)},a^{k(j)+1}]$. Hence, \begin{equation*} \int\limits_{a^{k(j)}}^{a^{k(j)+1}}\chi(t)dt\geq \int\limits_{a^{k(j)}}^{a^{k(j)+1}}c^{-1}\chi(t_{j})dt\geq c^{-1}\varepsilon\,t_{j}^{-1}(a^{k(j)+1}-a^{k(j)})> c^{-1}\varepsilon(1-a^{-1}) \end{equation*} for each integer $j\geq1$, which contradicts \eqref{f6.25} because $c^{-1}\varepsilon(1-a^{-1})>0$ and $k(j)\to\infty$ as $j\to\infty$. Thus, our assumption is wrong; i.e., $t\chi(t)\to0$ as $t\to\infty$. It follows from this that the function $\chi\in\mathrm{OR}$ is bounded on $[1,\infty)$ because it is bounded on each compact subinterval of $[1,\infty)$. \end{proof}
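Let us record a typical application of Lemma~\ref{lem6.13}, which is used in the proofs of Theorems \ref{th6.8} and \ref{th6.10} below. If a function $\varphi\in\mathrm{OR}$ satisfies \eqref{f6.16}, then the function $\chi(t):=t^{2q+n-1}\varphi^{-2}(t)$ of $t\geq1$ belongs to $\mathrm{OR}$ (as a product of the power function $t^{2q+n-1}$ and the function $1/\varphi^{2}$, both of class $\mathrm{OR}$) and is integrable over $[1,\infty)$. Hence, by Lemma~\ref{lem6.13}, \begin{equation*} \frac{t^{2q+n}}{\varphi^{2}(t)}\to0\quad\mbox{as}\quad t\to\infty; \end{equation*} in particular, $\varphi(t)\to\infty$ as $t\to\infty$ and, since $1/\varphi$ is bounded on every compact subinterval of $[1,\infty)$ (see Remark~\ref{rem6.9}), the function $1/\varphi$ is bounded on the whole half-line $[1,\infty)$.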
\begin{proof}[Proof of Theorem $\ref{th6.8}$] \emph{Sufficiency.} Assume that $\varphi$ satisfies \eqref{f6.16} and prove that the spectral expansion \eqref{f6.1} converges unconditionally in the normed space $C^{q}_{\mathrm{b}}(\mathbb{R}^{n})$ on the class $H^{\varphi}(\mathbb{R}^{n})$. Note first that $H^{\varphi}(\mathbb{R}^{n})\subset L_{2}(\mathbb{R}^{n})$ because the function $1/\varphi$ is bounded on $[1,\infty)$; the latter property follows from \eqref{f6.16} due to Lemma~\ref{lem6.13}. We put $A:=I+L^{\ast}L$ and observe that $A$ is a positive definite self-adjoint unbounded linear operator in the Hilbert space $H=L_{2}(\mathbb{R}^{n})$ and that $\mathrm{Spec}\,A\subseteq[1,\infty)$. Here, $I$ is the identity operator in $L_{2}(\mathbb{R}^{n})$. It follows from the theorem on composition of PsDOs \cite[Theorem~1.2.4]{Agranovich94} that $A\in\Psi^{2m}(\mathbb{R}^{n})$ is uniformly elliptic on $\mathbb{R}^{n}$. Consider the functions $\chi(t):=\varphi(t^{1/(2m)})$ of $t\geq1$ and $\omega(z):=(\chi(1+|z|^{2}))^{-1}$ of $z\in\mathbb{C}$, and put $R:=\omega(L)=(1/\chi)(A)$ and $S:=I$ in Theorem~\ref{th6.2}. Since the function $1/\chi$ is bounded on $[1,\infty)$, the operator $R$ is bounded on $L_{2}(\mathbb{R}^{n})$, and $0\not\in\mathrm{Spec}\,\chi(A)$. It follows from the latter property that $H^{\chi}_{A}=\mathrm{Dom}\,\chi(A)$; hence, the operator $\chi(A)$ sets an isometric isomorphism between $H^{\chi}_{A}$ and $L_{2}(\mathbb{R}^{n})$. Thus, \begin{equation*} R(L_{2}(\mathbb{R}^{n}))=H^{\chi}_{A}=H^{\varphi}(\mathbb{R}^{n})\subset C^{q}_{\mathrm{b}}(\mathbb{R}^{n}) \end{equation*} due to \eqref{f4.20}, Proposition~\ref{prop6.12}, and our assumption~\eqref{f6.16}. Since the norms in the spaces $L_{2}(\mathbb{R}^{n})$ and $C^{q}_{\mathrm{b}}(\mathbb{R}^{n})$ are compatible, the operator $R$ acts continuously from $L_{2}(\mathbb{R}^{n})$ to $N=C^{q}_{\mathrm{b}}(\mathbb{R}^{n})$, as was shown in Remark~\ref{rem6.3}. Thus, the operators $R$ and $S$ satisfy all the hypotheses of Theorem~\ref{th6.2}. According to this theorem, the spectral expansion \eqref{f6.1} converges unconditionally in the space $C^{q}_{\mathrm{b}}(\mathbb{R}^{n})$ at every vector $f\in (RS)(L_{2}(\mathbb{R}^{n}))=H^{\varphi}(\mathbb{R}^{n})$. The sufficiency is proved.
\emph{Necessity.} Assume now that the spectral expansion \eqref{f6.1} converges unconditionally in $C^{q}_{\mathrm{b}}(\mathbb{R}^{n})$ on the class $H^{\varphi}(\mathbb{R}^{n})$. Then $f\in H^{\varphi}(\mathbb{R}^{n})$ implies $f=E(\mathbb{C})f\in C^{q}_{\mathrm{b}}(\mathbb{R}^{n})$ by Definition~\ref{def6.1}. Hence, $\varphi$ satisfies \eqref{f6.16} due to Proposition~\ref{prop6.12}. The necessity is also proved. \end{proof}
\begin{proof}[Proof of Theorem $\ref{th6.10}$]
Consider the function $\chi_{j}(t):=\phi_{j}(t^{1/(2m)})$ of $t\geq1$ for each $j\in\{1,2\}$ and the functions $\eta(z):=(\chi_{1}(1+|z|^{2}))^{-1}$ and $\omega(z):=(\chi_{2}(1+|z|^{2}))^{-1}$ of $z\in\mathbb{C}$. Setting $A:=I+L^{\ast}L$, we put $S:=\eta(L)=(1/\chi_{1})(A)$ and $R:=\omega(L)=(1/\chi_{2})(A)$ in Theorem~\ref{th6.2}. The functions $\eta$ and $\omega$ are bounded on $\mathbb{C}$ by the hypotheses of Theorem~\ref{th6.10} (note that the boundedness of $\omega$ follows from \eqref{f6.17} in view of Lemma~\ref{lem6.13}). Hence, the operators $R$ and $S$ are bounded on the Hilbert space $H=L_{2}(\mathbb{R}^{n})$. It follows from \eqref{f6.17} that $R$ acts continuously from $L_{2}(\mathbb{R}^{n})$ to $N=C^{q}_{\mathrm{b}}(\mathbb{R}^{n})$, as was shown in the proof of Theorem $\ref{th6.8}$ (the sufficiency). According to Theorem~\ref{th6.2} and Remark~\ref{rem6.4}, we have the estimate \begin{equation}\label{f6.26} \begin{aligned}
&\|f-E(\widetilde{B}_{\lambda})f\|_{C,q,\mathbb{R}^{n}}\\
&\leq c'\cdot\|g\|_{\mathbb{R}^{n}}\cdot
\sup\bigl\{(\phi_{1}(\langle z\rangle^{1/m}))^{-1}:z\in\mathbb{C},\;|z|\geq\lambda\bigr\} \cdot r_{g}(\widetilde{B}_{\lambda}) \end{aligned} \end{equation}
for all $f\in RS(L_{2}(\mathbb{R}^{n}))$ and $\lambda>0$. Here, $c'$ denotes the norm of the bounded operator $R:L_{2}(\mathbb{R}^{n})\to C^{q}_{\mathrm{b}}(\mathbb{R}^{n})$, whereas $\|\cdot\|_{\mathbb{R}^{n}}$ stands for the norm in $L_{2}(\mathbb{R}^{n})$, and $g\in L_{2}(\mathbb{R}^{n})$ satisfies $f=RSg$. Note that $RS=(1/\chi)(A)$ where $\chi(t):=\chi_{1}(t)\chi_{2}(t)=\varphi(t^{1/(2m)})$ for every $t\geq1$. Since $0\not\in\mathrm{Spec}\,\chi(A)$, the operator $\chi(A)$ sets an isometric isomorphism between $H^{\chi}_{A}$ and $L_{2}(\mathbb{R}^{n})$. The inverse operator $RS$ sets an isomorphism between $L_{2}(\mathbb{R}^{n})$ and $H^{\varphi}(\mathbb{R}^{n})$ because the spaces $H^{\chi}_{A}$ and $H^{\varphi}(\mathbb{R}^{n})$ coincide up to equivalence of norms by \eqref{f4.20}. Hence, $c'\|g\|_{\mathbb{R}^{n}}\leq c\,\|f\|_{\varphi,\mathbb{R}^{n}}$ for some number $c>0$ that does not depend on $f$ and $\lambda$. Thus, formula \eqref{f6.26} yields the required estimate \eqref{f6.18} if we put $\theta_{f}(\lambda):=r_{g}(\widetilde{B}_{\lambda})$. \end{proof}
\subsection{}\label{sec6.3} As in Subsection~\ref{sec4.2}, let $\Gamma$ be a compact boundaryless $C^{\infty}$-manifold of dimension $n\geq1$ endowed with a positive $C^{\infty}$-density $dx$. We suppose here that $L$ is a PsDO of class $\Psi^{m}(\Gamma)$ for some $m>0$ and that $L$ is elliptic on $\Gamma$. We may and will consider $L$ as a closed unbounded operator in the Hilbert space $H:=L_{2}(\Gamma)$ with $\mathrm{Dom}\,L=H^{m}(\Gamma)$ (see \cite[Sections 2.3~d and 3.1~b]{Agranovich94}). We also suppose that $L$ is a normal operator in $L_{2}(\Gamma)$. Then the Hilbert space $L_{2}(\Gamma)$ has an orthonormal basis $\mathcal{E}:=(e_{j})_{j=1}^{\infty}$ formed by some eigenvectors $e_{j}\in C^{\infty}(\Gamma)$ of $L$ (see, e.g., \cite[Section~15.2]{Shubin01}). Thus, the spectral expansion \begin{equation}\label{f6.27} f=\sum_{j=1}^{\infty}\varkappa_{j}(f)e_j,\quad\mbox{with}\quad \varkappa_{j}(f):=(f,e_j)_{\Gamma}, \end{equation}
holds in $L_{2}(\Gamma)$ for every $f\in L_{2}(\Gamma)$. (Recall that $(\cdot,\cdot)_{\Gamma}$ and $\|\cdot\|_{\Gamma}$ respectively stand for the inner product and norm in $L_{2}(\Gamma)$.) These eigenvectors are enumerated so that $|\lambda_{j}|\leq|\lambda_{j+1}|$ whenever $j\geq1$, with $\lambda_{j}$ denoting the eigenvalue of $L$ such that $Le_j=\lambda_{j}e_j$. Note that $|\lambda_{j}|\to\infty$ as $j\to\infty$. Moreover, if $L$ is a classical PsDO, then \begin{equation}\label{f6.28}
|\lambda_{j}|\sim\widetilde{c}\,j^{m/n}\quad\mbox{as}\quad j\to\infty, \end{equation} where $\widetilde{c}$ is a certain positive number that does not depend on~$j$.
As usual, $C^{q}(\Gamma)$ denotes the Banach space of all functions $u:\Gamma\to\mathbb{C}$ that are $q$ times continuously differentiable on $\Gamma$. The norm in this space is denoted by $\|\cdot\|_{C,q,\Gamma}$.
For the spectral expansion \eqref{f6.27}, the following versions of Theorems \ref{th6.8} and \ref{th6.10} hold true:
\begin{theorem}\label{th6.14} Let $0\leq q\in\mathbb{Z}$ and $\varphi\in\mathrm{OR}$. The series \eqref{f6.27} converges unconditionally in the normed space $C^{q}(\Gamma)$ on the function class $H^{\varphi}(\Gamma)$ if and only if $\varphi$ satisfies \eqref{f6.16}. \end{theorem}
\begin{theorem}\label{th6.15} Let $0\leq q\in\mathbb{Z}$, and assume that the PsDO $L$ is classical. Suppose that certain functions $\phi_{1},\phi_{2}\in\mathrm{OR}$ satisfy the hypotheses of Theorem~$\ref{th6.10}$, and consider the function $\varphi:=\phi_{1}\phi_{2}\in\mathrm{OR}$ subject to \eqref{f6.16}. Then the degree of the convergence of \eqref{f6.27} in the normed space $C^{q}(\Gamma)$ on the class $H^{\varphi}(\Gamma)$ admits the estimate \begin{equation}\label{f6.29}
\biggl\|f-\sum_{j=1}^{k}\varkappa_j(f)e_j\biggr\|_{C,q,\Gamma}
\leq c\cdot\|f\|_{\varphi,\Gamma}\cdot \sup\bigl\{(\phi_{1}(j^{1/n}))^{-1}:k+1\leq j\in\mathbb{Z}\bigr\} \cdot\theta_{f,k} \end{equation} for every function $f\in H^{\varphi}(\Gamma)$ and each integer $k\geq1$. Here, $c$ is a certain positive number that does not depend on $f$ and $k$, and $(\theta_{f,k})_{k=1}^{\infty}$ is a decreasing sequence that lies in $[0,1]$ and tends to zero. \end{theorem}
We illustrate these theorems with examples analogous to those given in the previous subsection. Let $0\leq q\in\mathbb{Z}$, and let $c$ denote a positive number that does not depend on the function $f$ and the integer $k$ from Theorem~\ref{th6.15}. Dealing with estimates of the form \eqref{f6.29}, we suppose that the PsDO $L$ is classical.
\begin{example}\label{ex6.3.1} Owing to Theorem~\ref{th6.14}, the series \eqref{f6.27} converges unconditionally in $C^{q}(\Gamma)$ on the Sobolev class $H^{s}(\Gamma)$ if and only if $s>q+n/2$. This fact is known (see, e.g., \cite[Chapter~XII, Exercise~4.5]{Taylor81} in the $q=0$ case). Let $s>q+n/2$, and put $r:=s-q-n/2>0$. If $0<\varepsilon<r/n$, then \begin{equation*}
\biggl\|f-\sum_{j=1}^{k}\varkappa_j(f)e_j\biggr\|_{C,q,\Gamma}\leq c\,\|f\|_{s,\Gamma}(k+1)^{\varepsilon-r/n} \end{equation*}
for all $f\in H^{s}(\Gamma)$ and $k\geq1$, with $\|\cdot\|_{s,\Gamma}$ being the norm in $H^{s}(\Gamma)$. This estimate follows from Theorem~\ref{th6.15}, in which we put $\phi_{1}(t):=t^{r-n\varepsilon}$ and $\phi_{2}(t):=t^{s-r+n\varepsilon}$ for every $t\geq1$. The estimate admits the following refinement: \begin{equation*}
\biggl\|f-\sum_{j=1}^{k}\varkappa_j(f)e_j\biggr\|_{C,q,\Gamma}\leq c\,\|f\|_{s,\Gamma}(k+1)^{-r/n}\log^{\varepsilon+1/2}(k+1) \end{equation*} for the same $f$ and $k$, where the real number $\varepsilon>0$ is chosen arbitrarily. This estimate follows from Theorem~\ref{th6.15} applied to the functions \eqref{f6.18b}.
\begin{example}\label{ex6.3.2} We choose a number $\varrho>0$ arbitrarily and define a function $\varphi$ by formula \eqref{f6.19}. According to Theorem~\ref{th6.14}, the series \eqref{f6.27} converges unconditionally in $C^{q}(\Gamma)$ on the class $H^{\varphi}(\Gamma)$. This fact is known at least in the $q=0$ case (see \cite[Chapter~XII, Exercise~4.8]{Taylor81}). If $0<\varepsilon<\varrho$, then \begin{equation*}
\biggl\|f-\sum_{j=1}^{k}\varkappa_j(f)e_j\biggr\|_{C,q,\Gamma}\leq c\,\|f\|_{\varphi,\Gamma}\log^{\varepsilon-\varrho}(k+1) \end{equation*} for all $f\in H^{\varphi}(\Gamma)$ and $k\geq1$. This estimate follows from Theorem~\ref{th6.15} if we represent $\varphi$ in the form used in Example~\ref{ex6.2.2}. Comparing this result with the previous example, we see that $H^{\varphi}(\Gamma)$ is broader than the union \begin{equation*} H^{q+n/2+}(\Gamma):=\bigcup_{s>q+n/2}H^{s}(\Gamma). \end{equation*} \end{example}
\begin{example}\label{ex6.3.3} We choose a number $\varrho>0$ arbitrarily and define a function $\varphi$ by formula \eqref{f6.20}. Owing to Theorem~\ref{th6.14}, the series \eqref{f6.27} converges unconditionally in $C^{q}(\Gamma)$ on the class $H^{\varphi}(\Gamma)$. This class is broader than that used in Example~\ref{ex6.3.2}. If $0<\varepsilon<\varrho$, then \begin{equation*}
\biggl\|f-\sum_{j=1}^{k}\varkappa_j(f)e_j\biggr\|_{C,q,\Gamma}\leq c\cdot\|f\|_{\varphi,\Gamma}\cdot(\log\log(k+2))^{\varepsilon-\varrho} \end{equation*} for all $f\in H^{\varphi}(\Gamma)$ and $k\geq1$. This bound follows from Theorem~\ref{th6.15} if we represent $\varphi$ in the form given in Example~\ref{ex6.2.3}. \end{example}
These results are applicable to multiple trigonometric series. Indeed, if $\Gamma=\mathbb{T}^{n}$ and $L=\Delta_{\Gamma}$, then \eqref{f6.27} becomes the expansion of $f$ into the $n$-multiple trigonometric series (as usual, $\mathbb{T}:=\{e^{i\tau}:0\leq\tau\leq 2\pi\}$). It is known \cite[Section~6]{Golubov84} that this series is unconditionally uniformly convergent (on $\Gamma$) on every H\"older class $C^{s}(\Gamma)$ of order $s>n/2$. The exponent $n/2$ is critical here; namely, there exists a function $f\in C^{n/2}(\Gamma)$ whose trigonometric series diverges at some point of $\mathbb{T}^{n}$. These results constitute a multi-dimensional generalization of Bernstein's theorem for trigonometric series. Since $C^{s}(\Gamma)\subset H^{s}(\Gamma)$, Example \ref{ex6.3.1} gives a weaker sufficient condition for this convergence. Examples \ref{ex6.3.2} and \ref{ex6.3.3} above treat the case of the critical exponent with the help of generalized Sobolev spaces.
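In the case $\Gamma=\mathbb{T}^{n}$ just discussed, the classes involved admit a transparent description in terms of Fourier coefficients. Namely, indexing the orthonormal exponentials by $k\in\mathbb{Z}^{n}$ and applying Theorem~\ref{th5.8} together with \eqref{f5.23} to the operator $A=(1-\Delta_{\Gamma})^{1/2}$, whose eigenvalues are $\langle k\rangle=(1+|k|^{2})^{1/2}$ (this operator is admissible here in view of Remark~\ref{rem4.8}), we see that the class $H^{\varphi}(\mathbb{T}^{n})$ consists of all $f\in\mathcal{D}'(\mathbb{T}^{n})$ such that \begin{equation*} \sum_{k\in\mathbb{Z}^{n}}\varphi^{2}(\langle k\rangle)\, |\widehat{f}(k)|^{2}<\infty, \end{equation*} where $\widehat{f}(k)$ are the Fourier coefficients of $f$ with respect to these exponentials. Thus, the sufficient conditions of Examples \ref{ex6.3.2} and \ref{ex6.3.3} are explicit conditions on the Fourier coefficients of~$f$.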
The proofs of Theorems \ref{th6.14} and \ref{th6.15} are similar to those of Theorems \ref{th6.8} and \ref{th6.10}; we use Theorem~\ref{th6.5} (instead of Theorem~\ref{th6.2}) and the following analog of Proposition~\ref{prop6.12}:
\begin{proposition}\label{prop6.16} Let $0\leq q\in\mathbb{Z}$ and $\varphi\in\mathrm{OR}$. Then condition \eqref{f6.16} is equivalent to the embedding $H^{\varphi}(\Gamma)\subseteq C^{q}(\Gamma)$. Moreover, this embedding is compact under condition \eqref{f6.16}. \end{proposition}
\begin{proof} Suppose first that $\varphi$ satisfies condition~\eqref{f6.16}. Then the continuous embedding $H^\varphi(\mathbb{R}^n)\hookrightarrow C^q_\mathrm{b}(\mathbb{R}^n)$ holds true by Proposition~\ref{prop6.12}. Let $\varkappa$, $\chi_j$, and $\pi_j$ be the same as those in the definition of $H^{\varphi}(\Gamma)$. Choosing $f\in H^\varphi(\Gamma)$ arbitrarily, we get the inclusion \begin{equation*} (\chi_jf)\circ\pi_j\in H^\varphi(\mathbb{R}^n)\hookrightarrow C^q_\mathrm{b}(\mathbb{R}^n) \end{equation*} for each $j\in\{1,\ldots,\varkappa\}$. Hence, each $\chi_jf\in C^q(\Gamma)$, which implies that \begin{equation*} f=\sum_{j=1}^\varkappa\chi_j f\in C^q(\Gamma). \end{equation*} Thus, $H^\varphi(\Gamma)\subseteq C^q(\Gamma)$; this embedding is continuous because both the spaces are complete and continuously embedded in $\mathcal{D}'(\Gamma)$. Let us prove that it is compact.
We showed in Remark~\ref{rem6.11} that $\varphi=\phi_{1}\phi_{2}$ for some functions $\phi_{1}$ and $\phi_{2}$ satisfying the hypotheses of Theorem~\ref{th6.10}. Since $\phi_{2}(t)/\varphi(t)=1/\phi_{1}(t)\to0$ as $t\to\infty$, the compact embedding $H^\varphi(\Gamma)\hookrightarrow H^{\phi_{2}}(\Gamma)$ holds true. Indeed, let $T$ and $K$ be the bounded operators \eqref{f4.11} and \eqref{f4.18}. If a sequence $(f_{k})$ is bounded in $H^\varphi(\Gamma)$, then the sequence $(Tf_{k})$ is bounded in $(H^{\varphi}(\mathbb{R}^n))^{\varkappa}$. It follows from this by \cite[Theorem~2.2.3]{Hermander63} that the latter sequence contains a convergent subsequence $(Tf_{k_\ell})$ in $(H^{\phi_{2}}(\mathbb{R}^n))^{\varkappa}$. Hence, the subsequence of vectors $f_{k_\ell}=KTf_{k_\ell}$ is convergent in $H^{\phi_{2}}(\Gamma)$. Thus, the embedding $H^\varphi(\Gamma)\hookrightarrow H^{\phi_{2}}(\Gamma)$ is compact. As we showed in the previous paragraph, the continuous embedding $H^{\phi_{2}}(\Gamma)\hookrightarrow C^q(\Gamma)$ holds true because $\phi_{2}$ satisfies \eqref{f6.17}. Therefore, the embedding $H^\varphi(\Gamma)\hookrightarrow C^q(\Gamma)$ is compact.
Assume now that the embedding $H^\varphi(\Gamma)\subseteq C^q(\Gamma)$ holds true, and let us prove that $\varphi$ satisfies \eqref{f6.16}. We suppose without loss of generality that $\Gamma_1$ is not contained in $\Gamma_2\cup\cdots\cup\Gamma_\varkappa$, choose an open nonempty set $U\subset\Gamma_1$ which satisfies $U\cap\Gamma_j=\emptyset$ whenever $j\neq1$, and put $G:=\pi^{-1}_1(U)$. Consider an arbitrary distribution $w\in H^{\varphi}(\mathbb{R}^n)$ subject to $\mathrm{supp}\,w\subset G$. Owing to \eqref{f4.18} and our assumption, we have the inclusion \begin{equation*} u:=K(w,\underbrace{0,\ldots,0}_{\varkappa-1}\,)\in H^{\varphi}(\Gamma) \subseteq C^q(\Gamma). \end{equation*} Hence, $w=(\chi_1 u)\circ\pi_1\in C^q(\mathbb{R}^{n})$; note that the latter equality is true because $\chi_1=1$ on $U$. Thus, \eqref{f6.21} holds true, which implies \eqref{f6.16} due to Proposition~\ref{prop6.12}. \end{proof}
\begin{proof}[Proof of Theorem $\ref{th6.14}$] \emph{Sufficiency} is proved in the same manner as the proof of the sufficiency in Theorem~\ref{th6.8}. We only replace $\mathbb{R}^{n}$ with $\Gamma$ and use Theorem~\ref{th6.5} instead of Theorem~\ref{th6.2} and Proposition~\ref{prop6.16} instead of Proposition~\ref{prop6.12}.
\emph{Necessity.} Assume that the series \eqref{f6.27} converges in $C^{q}(\Gamma)$ on the class $H^{\varphi}(\Gamma)$. Then $H^{\varphi}(\Gamma)\subseteq C^{q}(\Gamma)$, which implies \eqref{f6.16} by Proposition~\ref{prop6.16}. \end{proof}
\begin{proof}[Proof of Theorem $\ref{th6.15}$.] It is very similar to the proof of Theorem $\ref{th6.10}$. Replacing $\mathbb{R}^{n}$ with $\Gamma$ in this proof and using Theorem~\ref{th6.5} and Remark~\ref{rem6.6} instead of Theorem~\ref{th6.2} and Remark~\ref{rem6.4}, we obtain the following analog of the estimate \eqref{f6.12}: \begin{equation}\label{f6.30}
\biggl\|f-\sum_{j=1}^{k}\varkappa_j(f)e_j\biggr\|_{C,q,\Gamma}\leq c'\cdot\|g\|_{\Gamma}\cdot\sup_{j\geq k+1} \bigl\{(\phi_{1}(\langle\lambda_j\rangle^{1/m}))^{-1}\bigr\} \cdot r_{g,k} \end{equation}
for every function $f\in H^{\varphi}(\Gamma)$ and each integer $k\geq1$. Here, $c'$ denotes the norm of the bounded operator $R:L_{2}(\Gamma)\to C^{q}(\Gamma)$, and $g:=(RS)^{-1}f\in L_{2}(\Gamma)$. Reasoning in the same way as after formula \eqref{f6.26}, we arrive at the inequality $c'\|g\|_{\Gamma}\leq c''\|f\|_{\varphi,\Gamma}$ where the number $c''>0$ does not depend on $f$ and $k$. Besides, owing to the inclusion $\phi_{1}\in\mathrm{OR}$ and asymptotic formula \eqref{f6.28}, there exist two positive numbers $c_{1}$ and $c_{2}$ such that \begin{equation*} c_{1}\phi_{1}(j^{1/n})\leq \phi_{1}(\langle\lambda_j\rangle^{1/m})\leq c_{2}\,\phi_{1}(j^{1/n}) \end{equation*} for every integer $j\geq1$. Thus, formula \eqref{f6.30} yields the required estimate \eqref{f6.29} if we put $\theta_{f,k}:=r_{g,k}$ and $c:=c''/c_{1}$. \end{proof}
\subsection{}\label{sec6.last} We end Section~\ref{sec6} with two sufficient conditions under which the spectral expansion \eqref{f6.27} converges a.e. (almost everywhere) on the manifold $\Gamma$ with respect to the measure induced by the $C^{\infty}$-density $dx$. These conditions are formulated in terms of $f$ belonging to certain generalized Sobolev spaces on $\Gamma$. Put \begin{equation*} S^{\ast}(f,x):=\sup_{1\leq k<\infty}\,
\biggl|\,\sum_{j=1}^{k}\;\varkappa_{j}(f)e_{j}(x)\,\biggr| \end{equation*} for all $f\in L_{2}(\Gamma)$ and $x\in\Gamma$; thus, $S^{\ast}(f,x)$ is the majorant of partial sums of \eqref{f6.27}. Consider the function $\log^{\ast}t:=\max\{1,\log t\}$ of $t\geq1$; it pertains to $\mathrm{OR}$. We suppose that the PsDO $L$ is classical.
\begin{theorem}\label{th6.17} The series \eqref{f6.27} converges a.e. on $\Gamma$ on the function class $H^{\log^{\ast}}(\Gamma)$. Besides, there exists a number $c>0$ such that \begin{equation*}
\|S^{\ast}(f,\cdot)\|_{\Gamma}\leq c\,\|f\|_{\log^{\ast},\Gamma} \quad\mbox{for every}\quad f\in H^{\log^{\ast}}(\Gamma). \end{equation*} \end{theorem}
If $f\in H^{\log^{\ast}}(\Gamma)$, then the convergence of the series \eqref{f6.27} may be violated under a permutation of its terms. To ensure that the convergence does not depend on their order, we should subject $f$ to a stronger condition.
\begin{theorem}\label{th6.18} Assume that a function $\varphi\in\mathrm{OR}$ (nonstrictly) increases and satisfies \begin{equation}\label{f6.32} \int\limits_{2}^{\infty}\frac{dt}{t\,(\log t)\,\varphi^{2}(t)}<\infty. \end{equation} Then the series \eqref{f6.27} converges unconditionally a.e. on $\Gamma$ on the function class $H^{\varphi\log^{\ast}}(\Gamma)$. \end{theorem}
These theorems are proved in \cite[Section~2.3.2]{MikhailetsMurach14}, the second being demonstrated in the case where $\varphi$ varies slowly at infinity in the sense of Karamata. The proofs rely on Theorem~\ref{th5.8} and general forms of the classical Menshov--Rademacher \cite{Menschoff23, Rademacher22} and Orlicz \cite{Orlicz27} theorems about a.e. convergence of orthogonal series. We give these brief proofs for the sake of completeness.
\begin{proof}[Proof of Theorem~$\ref{th6.17}$.] Note that the orthonormal basis $\mathcal{E}$ of $L_{2}(\Gamma)$ consists of eigenvectors of the operator $A:=(I+L^{\ast}L)^{1/m}$, to which Theorem~\ref{th5.8} is applicable. Owing to this theorem, we have \begin{equation*}
\sum_{j=1}^{\infty}(\log^{2}(j+1))|\varkappa_{j}(f)|^{2}\asymp
\sum_{j=1}^{\infty}(\log^{\ast}(j^{1/n}))^{2}\,|\varkappa_{j}(f)|^{2}
\asymp\|f\|_{\log^{\ast},\Gamma}^{2}<\infty \end{equation*} whenever $f\in H^{\log^{\ast}}(\Gamma)$, with $\asymp$ meaning equivalence of norms. Now Theorem~\ref{th6.17} follows from the Menshov--Rademacher theorem, which remains true for general complex orthogonal series formed by square integrable functions (see, e.g., \cite{Meaney07, MikhailetsMurach11MFAT4, MoriczTandori96}). \end{proof}
\begin{proof}[Proof of Theorem~$\ref{th6.18}$.] Let $f\in H^{\varphi\log^{\ast}}(\Gamma)$, and put $\omega_{j}:=\varphi^{2}(j^{1/n})$ for every integer $j\geq1$. Owing to Theorem~\ref{th5.8} applied to $A:=(I+L^{\ast}L)^{1/m}$, we have \begin{equation}\label{f6.33}
\sum_{j=2}^{\infty}(\log^{2}j)\,\omega_{j}\,|\varkappa_{j}(f)|^{2}\asymp
\|f\|_{\varphi\log^{\ast},\Gamma}^{2}<\infty. \end{equation} Besides, condition \eqref{f6.32} implies that \begin{equation}\label{f6.34} \sum_{j=3}^{\infty}\frac{1}{j\,(\log j)\,\omega_{j}}\leq \int\limits_{2}^{\infty} \frac{d\tau}{\tau\,(\log\tau)\,\varphi^{2}(\tau^{1/n})}= \int\limits_{2^{1/n}}^{\infty} \frac{n\,t^{n-1}\,dt}{t^{n}\,n\,(\log t)\,\varphi^{2}(t)}<\infty. \end{equation} The conclusion of Theorem \ref{th6.18} follows from \eqref{f6.33} and \eqref{f6.34} due to the Orlicz theorem (in Ul'janov's equivalent statement \cite[Section~9, Subsection~1]{Uljanov64}), which remains true for general complex orthogonal series \cite[Theorem~2]{MikhailetsMurach12UMJ10} (see also \cite[Theorem~3]{MikhailetsMurach11MFAT4}). \end{proof}
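For illustration, a routine verification (ours, not taken from the works cited above) shows that condition \eqref{f6.32} is satisfied, e.g., by the nondecreasing function $\varphi$ defined by $\varphi(t):=1$ for $1\leq t<e^{e}$ and $\varphi(t):=(\log\log t)^{1/2+\delta}$ for $t\geq e^{e}$, where $\delta>0$; this function is slowly varying and hence belongs to $\mathrm{OR}$. Indeed, the substitution $u=\log\log t$ gives
\begin{equation*}
\int\limits_{e^{e}}^{\infty}\frac{dt}{t\,(\log t)\,(\log\log t)^{1+2\delta}}=
\int\limits_{1}^{\infty}\frac{du}{u^{1+2\delta}}<\infty,
\end{equation*}
whereas the integral over $[2,e^{e}]$ is finite because the integrand is bounded there. Thus Theorem~\ref{th6.18} applies to the class $H^{\varphi\log^{\ast}}(\Gamma)$ with this choice of $\varphi$.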
As to Theorems \ref{th6.17} and \ref{th6.18}, note the following: if we restrict ourselves to Sobolev spaces, we can only assert that the series \eqref{f6.27} converges unconditionally a.e. on $\Gamma$ on the function class $H^{0+}(\Gamma):=\bigcup_{s>0}H^{s}(\Gamma)$ (cf. \cite{Meaney82}). This class is significantly narrower than the spaces used in these theorems. Using the extended Sobolev scale, we can express the hypotheses of the Menshov--Rademacher and Orlicz theorems in an adequate form.
\end{document} |
\begin{document}
\title[Subgeometric Adaptive MCMC]{Limit theorems for some adaptive MCMC
algorithms with subgeometric kernels }
\author[Y. Atchad\'e]{Yves Atchad\'e}
\thanks{ Y. Atchad\'e: University of Michigan, 1085 South University, Ann Arbor,
48109, MI, United States. {\em E-mail address} [email protected]}
\author[G. Fort]{ Gersende Fort} \thanks{G. Fort: LTCI, CNRS-TELECOM ParisTech,
46 rue Barrault, 75634 Paris Cedex 13, France. {\em E-mail address}
[email protected]}
\thanks{This work is partly supported by the French National Research Agency
(ANR) under the program ANR-05-BLAN-0299.}
\subjclass[2000]{60J10, 65C05}
\keywords{Adaptive Markov chain Monte Carlo, Markov chain, Subgeometric ergodicity.}
\maketitle
\begin{abstract} This paper deals with the ergodicity (convergence of the marginals) and the law of large numbers for adaptive MCMC algorithms built from transition kernels that are not necessarily geometrically ergodic.
We develop a number of results that broaden significantly the class of adaptive MCMC algorithms for which rigorous analysis is now possible. As an example, we give a detailed analysis of the Adaptive Metropolis Algorithm of \cite{haarioetal00} when the target distribution is sub-exponential in the tails. \end{abstract}
\setcounter{secnumdepth}{3}
\section{Introduction}
This paper deals with the convergence of Adaptive Markov Chain Monte Carlo (AMCMC).
Markov Chain Monte Carlo (MCMC) is a well-known, widely used method to sample from arbitrary probability distributions. One of the major limitations of the method is the difficulty in finding sensible values for the parameters of the Markov kernels. Adaptive MCMC provides a general framework to tackle this problem, in which the parameters are adaptively tuned, often using previously generated samples. This approach generates a class of stochastic processes that is the object of this paper.
Denote by $\pi$ the probability measure of interest on some measure space $(\mathsf{X},\mathcal{X})$. Let $\{P_\theta,\theta\in\Theta\}$ be a family of $\phi$-irreducible and aperiodic Markov kernels each with invariant distribution $\pi$. We are interested in the class of stochastic processes based on non-homogeneous Markov chains $\{(X_n,\theta_n),\;n\geq 0\}$ with transition kernels $\{\bar P\left(n; (x,\theta); (dx',d\theta')\right), n\geq 0 \}$ satisfying $\int_{\Theta} \bar P\left(n; (x,\theta); (\cdot,d\theta') \right) = P_\theta(x,\cdot)$. Often, these transition kernels are of the form $\{P_\theta(x,dy)\delta_{H_{n}(\theta,y)}(d\theta'), n\geq 0\}$ where $\{H_l,\;l\geq 0\}$ is a family of measurable functions, $H_l:\; \Theta\times \mathsf{X}\to \Theta$. The stochastic approximation dynamic corresponds to the case $H_l(\theta,x)=\theta+\gamma_l \; H(\theta,x)$. In this latter case, it is assumed that the best values for $\theta$ are the solutions of the equation $\int H(\theta,x)\pi(dx)=0$. Since the pioneering work of \cite{gilksetal98,
holden98, haarioetal00, andrieuetrobert02}, the number of AMCMC algorithms in the literature has significantly increased in recent years. But despite many recent works on the topic, the asymptotic behavior of these algorithms is still not completely understood. Almost all previous works on the convergence of AMCMC are limited to the case when each kernel $P_\theta$ is geometrically ergodic (see e.g. \cite{rosenthaletroberts05,andrieuetal06}). In this paper, we weaken this condition and consider the case when each transition kernel is sub-geometrically ergodic.
More specifically, we study the ergodicity of the marginal $\{X_n, n\geq 0 \}$ i.e. the convergence to $\pi$ of the distribution of $X_n$ irrespective of the initial distribution, and the existence of a strong law of large numbers for AMCMC.
We first show that a diminishing adaptation assumption of the form
$|\theta_n-\theta_{n-1}|\to 0$ in a sense to be made precise (assumption B\ref{B1}) together with a uniform-in-$\theta$ positive recurrence towards a small set $C$ (assumptions A\ref{A-VCset}(\ref{Anew}) and A\ref{A-VCset}(\ref{A3rev})) and a uniform-in-$\theta$ ergodicity condition of the kernels $\{P_\theta, \theta \in \Theta\}$ (assumption A\ref{A-VCset}(\ref{A4rev})) are enough to imply the ergodicity of AMCMC.
We believe that this result is close to optimal. Indeed, it is well documented in the literature that AMCMC can fail to be ergodic if the diminishing adaptation assumption does not hold (see e.g. \cite{rosenthaletroberts05} for examples). Furthermore, the additional assumptions are also fairly weak since, in the case where $\Theta$ is reduced to the single point $\{\theta_\star\}$ so that $\{X_n, n\geq 0\}$ is a Markov chain with transition kernel $P_{\theta_\star}$, these conditions hold if $P_{\theta_\star}$ is an aperiodic positive Harris recurrent kernel that is polynomially ergodic.
We then prove a strong law of large numbers for AMCMC. We show that the diminishing adaptation assumption and a uniform-in-$\theta$ polynomial drift condition towards a small set $\mathcal{C}$ of the form $P_\theta V\leq V-c V^{1-\alpha}+b\ensuremath{\mathbbm{1}}_{\mathcal{C}}(x)$, $\alpha\in (0,1)$, imply a strong law of large numbers for all real-valued measurable functions $f$ for which
$\sup_{\mathsf{X}}(|f|/V^{\beta})<\infty$, $\beta\in[0,1-\alpha)$. This result is close to what can be achieved with Markov chains (with fixed transition kernel) under similar conditions (\cite{meynettweedie93}).
On a more technical note, this paper makes two key contributions to the analysis of AMCMC. Firstly, to study the ergodicity, we use a more careful coupling technique which extends the coupling approach of \cite{rosenthaletroberts05}. Secondly, we tackle the law of large numbers using a resolvent kernel approach together with martingale theory. This approach has a decisive advantage over the more classical Poisson equation approach (\cite{andrieuetal06}) in that no continuity property of the resolvent kernels is required. It is also worth noting that the results developed in this paper can be applied to adaptive Markov chains beyond Markov Chain Monte Carlo simulation provided all the transition kernels have the same invariant distribution.
The remainder of the paper is organized as follows. In Section \ref{sec:ResultsUnif} we state our assumptions followed by a statement of our main results. Detailed discussion of the assumptions and some comparison with the literature are provided in Section \ref{sec:discussionUnif}. We apply our results to the analysis of the Adaptive Random Walk Metropolis algorithm of \cite{haarioetal00} when the target distribution is sub-exponential in the tails. This is covered in Section \ref{sec:Example} together with a toy example taken from \cite{atchadeetrosenthal03}. All the proofs are postponed to Section~\ref{sec:Proofs}.
\section{Statement of the results and discussion}\label{sec:ResultsUnif} \subsection{Notations}\label{sec:notations} For a transition kernel $P$ on a measurable general state space $(\mathbb{T},\mathcal{B}(\mathbb{T}))$, denote by $P^n$, $n\geq 0$, its $n$-th iterate defined as \[ P^0(x,A) \eqdef \delta_x(A) \;, \qquad \qquad P^{n+1}(x,A) \eqdef \int P(x,dy ) P^n(y,A) \;, \quad n \geq 0 \;; \] $\delta_x(dt)$ stands for the Dirac mass at $\{x\}$. $P^n$ is a transition kernel on $(\mathbb{T},\mathcal{B}(\mathbb{T}))$ that acts both on bounded measurable functions $f$ on $\mathbb{T}$ and on $\sigma$-finite measures $\mu$ on $(\mathbb{T},\mathcal{B}(\mathbb{T}))$ via $P^nf(\cdot) \eqdef \int P^n(\cdot,dy) f(y)$ and $\mu P^n(\cdot) \eqdef \int \mu(dx) P^n(x, \cdot)$.
If $V: \mathbb{T}\to [1, +\infty)$ is a function, the $V$-norm of a function
$f: \mathbb{T}\to \mathbb R$ is defined as $|f|_V \eqdef \sup_{\mathbb{T}} |f| /V$. When $V=1$, this is the supremum norm. The set of functions with finite $V$-norm is denoted by $\mathcal{L}_V$.
If $\mu$ is a signed measure on a measurable space
$(\mathbb{T},\mathcal{B}(\mathbb{T}))$, the total variation norm $\| \mu
\|_{\mathrm{TV}}$ is defined as \[
\| \mu \|_{\mathrm{TV}} \eqdef \sup_{\{f, |f|_1 \leq 1 \}} | \mu(f)| = 2 \; \sup_{A \in
\mathcal{B}(\mathbb{T})}|\mu(A)|= \sup_{A \in \mathcal{B}(\mathbb{T})} \mu(A) - \inf_{A \in \mathcal{B}(\mathbb{T})} \mu(A) \;; \]
and the $V$-norm, for some function $V : \mathbb{T} \to [1, +\infty)$, is defined as $\| \mu \|_{V} \eqdef \sup_{\{g, |g|_V \leq 1 \}} |\mu(g)|$.
Let $\mathsf{X}, \Theta$ be two general state spaces endowed with countably generated $\sigma$-fields $\mathcal{X}$ and $\mathcal{B}(\Theta)$, respectively. Let $\{P_\theta, \theta \in \Theta \}$ be a family of Markov transition kernels on $(\mathsf{X},\mathcal{X})$ such that for any $(x,A) \in \mathsf{X} \times \mathcal{X}$, $\theta \mapsto P_\theta(x,A)$ is measurable. Let $\{\bar P(n;\cdot,\cdot), n \geq 0 \}$ be a family of transition kernels on $(\mathsf{X} \times \Theta, \mathcal{X} \otimes \mathcal{B}(\Theta))$, satisfying for any $A \in \mathcal{X}$, \begin{equation}\label{eq:tk1} \int_{A \times \Theta} \bar P\left(n; (x,\theta); (dx',d\theta')\right) = P_{\theta}(x, A) \;. \end{equation} An adaptive Markov chain is a non-homogeneous Markov chain $\{ Z_n = (X_n,\theta_n), n\geq 0 \}$ on $\mathsf{X}\times\Theta$ with transition kernels $\{\bar P(n; \cdot; \cdot), n \geq 0\}$.
Among examples of such transition kernels, consider the case when $\{(X_n,\theta_n), n\geq 0\}$ is obtained through the algorithm: given $(X_n,\theta_n)$, sample $X_{n+1} \sim P_{\theta_n}(X_n, \cdot)$ and set $\theta_{n+1} = \theta_n$ with probability $1-p_{n+1}$ or set $\theta_{n+1}= \tilde \Xi_{n+1}(X_n,\theta_n,X_{n+1})$ with probability $p_{n+1}$. Then \begin{multline*}
\bar P\left(n; (x,\theta); (dx',d\theta')\right) = P_\theta(x,dx') \ \left\{
\left(1-p_{n+1} \right) \ \delta_\theta(d\theta') + p_{n+1} \ \delta_{\tilde
\Xi_{n+1}(x,\theta,x')}(d\theta') \right\} \;. \end{multline*} A special case is the case when $p_{n+1}=1$ and $\theta_{n+1} = H_{n+1}(\theta_n,X_{n+1})$, where $\{H_l, l\geq 0 \}$ is a family of measurable functions $H_l: \Theta \times \mathsf{X} \to \Theta$. Then, \[ \bar P\left(n; (x,\theta); (dx',d\theta')\right) \eqdef P_{\theta}(x, dx') \ \ \delta_{H_{n+1}(\theta,x')}(d \theta') \;. \] Such a situation occurs for example if $\theta_{n+1}$ is updated following a stochastic approximation dynamic: $\theta_{n+1} = \theta_n + \gamma_{n+1} H(\theta_n,X_{n+1})$.
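As an illustration only (this sketch is not part of the formal framework above), the stochastic approximation dynamic can be written in a few lines of Python; the callables \texttt{sample\_P} (drawing from $P_\theta(x,\cdot)$), \texttt{H} (the mean field) and the step-size sequence \texttt{gamma} are placeholders to be supplied by the user.
\begin{verbatim}
import numpy as np

def adaptive_mcmc(x0, theta0, sample_P, H, gamma, n_iter, rng=None):
    # sample_P(theta, x, rng): draws X_{n+1} ~ P_theta(x, .)
    # H(theta, x): mean field of the adaptation
    # gamma(n): decreasing step sizes, so that the diminishing
    #           adaptation condition is satisfied
    rng = np.random.default_rng() if rng is None else rng
    x, theta = x0, theta0
    xs = [x0]
    for n in range(1, n_iter + 1):
        x = sample_P(theta, x, rng)             # X_n ~ P_{theta_{n-1}}(X_{n-1}, .)
        theta = theta + gamma(n) * H(theta, x)  # theta_n = theta_{n-1} + gamma_n H(theta_{n-1}, X_n)
        xs.append(x)
    return np.asarray(xs), theta
\end{verbatim}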
From $\{\bar P\left(n;\cdot,\cdot\right),\;n\geq 0\}$ and for any integer $l\geq 0$, we introduce a family - indexed by $l$ - of sequences of transition kernels $\{\bar P_l(n;\cdot,\cdot), n \geq 0 \}$, where $\bar P_l\left(n;\cdot,\cdot\right) \eqdef \bar P\left(l+n;\cdot,\cdot\right)$ and we denote by $\mathbb{P}_{x,\theta}^{(l)}$ and $\mathbb{E}_{x,\theta}^{(l)}$ the probability and expectation on the canonical space $(\Omega, \mathcal{F})$ of the canonical non-homogeneous Markov chain $\{ Z_n = (X_n,\theta_n), n\geq 0 \}$ with transition kernels $\{\bar P_l(n; \cdot; \cdot), n \geq 0\}$ and initial distribution $\delta_{(x,\theta)}$. We denote by $\underline{\theta}$ the shift operator on $\Omega$ and by $\{\mathcal{F}_k, k \geq 0 \}$ the natural filtration of the process $\{Z_k, k\geq 0\}$. We use $\mathbb{P}_{x,\theta}$ and $\mathbb{E}_{x,\theta}$ as shorthand notations for $\mathbb{P}_{x,\theta}^{(0)}$ and $\mathbb{E}_{x,\theta}^{(0)}$.
Set \[
D(\theta,\theta') \eqdef \sup_{x \in \mathsf{X}} \| P_{\theta}(x,\cdot) -
P_{\theta'}(x,\cdot) \|_{\mathrm{TV}} \;. \]
\subsection{Convergence of the marginals} We assume that minorization, drift conditions and ergodicity are available for $P_\theta$ uniformly in $\theta$. For a set $\mathcal{C}$, denote by $\tau_\mathcal{C}$ the return-time to $\mathcal{C} \times \Theta$ : $\tau_\mathcal{C} \eqdef \inf\{n \geq 1, X_n \in \mathcal{C} \}$.
\debutA \item \label{A-VCset} There exist a measurable function $V: \mathsf{X} \to
[1,+\infty)$ and a measurable set $\mathcal{C}$ such that
\begin{enumerate}[(i)]
\item \label{Anew} $\sup_l \sup_{\mathcal{C} \times \Theta}
\mathbb{E}_{x,\theta}^{(l)}\left[\mathbf{r}(\tau_\mathcal{C}) \right] < +\infty$ for some
non-decreasing function $\mathbf{r} : {\mathbb{N}} \to (0, +\infty)$ such that $\sum_n
1/\mathbf{r}(n) < +\infty$.
\item \label{A4rev} there exists a probability measure $\pi$ such that \[ \lim_{n \to +\infty} \ \sup_{x \in \mathsf{X}} V^{-1}(x) \ \sup_{\theta \in \Theta}
\| P^n_\theta(x, \cdot) -\pi \|_{\mathrm{TV}} = 0 \;. \] \item \label{A3rev} $ \sup_\theta P_\theta V \leq V$ on $\mathcal{C}^c$ and $
\sup_{\mathcal{C} \times \Theta} \{P_\theta V(x) + V(x) \} < +\infty$. \end{enumerate} \finA \debutB \item \label{B1} There exist probability distributions $\xi_1, \xi_2$ resp. on
$\mathsf{X}, \Theta$ such that for any $\epsilon>0$, $ \lim_n
\mathbb{P}_{\xi_1,\xi_2}\left( D(\theta_n, \theta_{n-1}) \geq \epsilon \right)=0$.
\finB \begin{theo} \label{theo:MarginalUnifCase} Assume A\ref{A-VCset} and B\ref{B1}. Then \[
\lim_{n \to +\infty} \sup_{\{f, |f|_1 \leq 1 \}} \left|
\mathbb{E}_{\xi_1, \xi_2}\left[f(X_n) - \pi (f)\right] \right| = 0 \;. \]
\end{theo}
Sufficient conditions for A\ref{A-VCset} to hold are the following
uniform-in-$\theta$ conditions \debutA \item \label{Adrift}
\begin{enumerate}[(i)]
\item The transition kernels $P_\theta$ are $\phi$-irreducible, aperiodic.
\item There exist a function $V : \mathsf{X} \to [1, +\infty)$, $\alpha \in
(0,1)$ and constants $b,c$ such that for any $\theta \in \Theta$ \[ P_\theta V(x) \leq V(x) - c\ V^{1-\alpha}(x) + b\ensuremath{\mathbbm{1}}_\mathcal{C}(x) \;. \] \item For any level set $\mathcal{D}$ of $V$, there exist $\epsilon_\mathcal{D}>0$ and a
probability $\nu_\mathcal{D}$ such that for any $\theta$, $P_\theta(x, \cdot) \geq
\epsilon_\mathcal{D} \ensuremath{\mathbbm{1}}_\mathcal{D}(x) \ \nu_\mathcal{D}(\cdot)$.
\end{enumerate}
\finA We thus have the corollary
\begin{coro}{(of Theorem~\ref{theo:MarginalUnifCase})} \label{coro:MarginalUnifCase}
Assume A\ref{Adrift} and B\ref{B1}. Then \[
\lim_{n \to +\infty} \sup_{\{f, |f|_1 \leq 1 \}} \left|
\mathbb{E}_{\xi_1, \xi_2}\left[f(X_n) - \pi (f)\right] \right| = 0 \;. \] \end{coro}
Assumption A\ref{A-VCset}(\ref{Anew}) and A\ref{A-VCset}(\ref{A3rev}) are
designed to control the behavior of the chain ``far from the center''. When
the state space $\mathsf{X}$ is ``bounded'' so that for example, $V=1$ in
A\ref{A-VCset}(\ref{A4rev}), then we have the following result
\begin{lemma}
\label{lemma:MarginalUnifCaseBounded}
If there exists a probability measure $\pi$ such that $\lim_{n \to +\infty} \
\sup_{ \mathsf{X} \times \Theta} \| P^n_\theta(x, \cdot) -\pi(\cdot) \|_{\mathrm{TV}} = 0 $, then
A\ref{A-VCset}(\ref{Anew}) and A\ref{A-VCset}(\ref{A3rev}) hold with a bounded
function $V$ and $\mathcal{C} = \mathsf{X}$. \end{lemma} Combining the assumptions of Lemma~\ref{lemma:MarginalUnifCaseBounded} and B\ref{B1}, we deduce from Theorem~\ref{theo:MarginalUnifCase} the convergence of the marginals. This result coincides with \cite[Theorem 5]{rosenthaletroberts05}. As observed by \cite{Bai:2008} (personal communication), assumption A\ref{Adrift} also implies the ``containment condition'' as defined in \cite{rosenthaletroberts05}. Consequently, Corollary~\ref{coro:MarginalUnifCase} could also be established by applying \cite[Theorem 13]{rosenthaletroberts05}: this would yield the following statement, which is adapted from \cite{Bai:2008}. Define $M_\epsilon(x,\theta)
\eqdef \inf \{n \geq 1, \|P_\theta^n(x,\cdot) - \pi(\cdot) \|_\mathrm{TV} \leq \epsilon \}$. \begin{prop} \label{prop:YanBai} Assume A\ref{Adrift} and B\ref{B1}. Then for any $\epsilon>0$, the sequence $\{M_\epsilon(X_n,\theta_n), n\geq 0 \}$ is bounded in probability for the probability $\mathbb{P}_{\xi_1,\xi_2}$ and \[
\lim_{n \to +\infty} \sup_{\{f, |f|_1 \leq 1 \}} \left| \mathbb{E}_{\xi_1,
\xi_2}\left[f(X_n) - \pi (f)\right] \right| = 0 \;. \] \end{prop}
\subsection{Strong law of large numbers} Assumptions A\ref{A-VCset} and B\ref{B1} are strengthened as follows \debutA \item \label{A2} There exist a probability measure $\nu$ on $\mathsf{X}$, a positive
constant $\varepsilon$ and a set $\mathcal{C} \in \mathcal{X}$ such that for any
$\theta \in \Theta$, $P_\theta(x,\cdot) \geq \ensuremath{\mathbbm{1}}_\mathcal{C}(x) \ \varepsilon
\nu(\cdot)$. \item \label{A5} There exist a measurable function $V: \mathsf{X} \to [1,+\infty)$,
$0 < \alpha < 1$ and positive constants $b,c$ such that for any $\theta \in
\Theta$, $P_\theta V \leq V - c \ V^{1-\alpha} + b \ensuremath{\mathbbm{1}}_\mathcal{C}$. \item \label{A6} There exist a probability measure $\pi$ and some $0 \leq
\beta < 1-\alpha$ such that for any level set $\mathcal{D} \eqdef \{x \in \mathsf{X}, V(x)
\leq d \}$ of $V$, \[
\lim_{n \to +\infty} \ \sup_{\mathcal{D} \times \Theta} \| P^n_\theta(x, \cdot) -\pi
\|_{V^\beta} = 0 \;. \] \finA \debutB \item \label{B2} For any level set $\mathcal{D}$ of $V$ and any $\epsilon>0$, \[ \lim_n \sup_{l \geq 0} \sup_{ \mathcal{D} \times \Theta} \mathbb{P}_{x,\theta}^{(l)}\left(
D(\theta_n, \theta_{n-1}) \geq \epsilon \right)=0 \;. \] \finB
\begin{theo}
\label{theo:SLLNUnboundedUnifCase}
Assume A\ref{A2}-\ref{A6} and B\ref{B2}. Then for any
measurable function $f: \mathsf{X} \to \mathbb R$ in $\mathcal{L}_{V^\beta}$ and any initial
distribution $\xi_1,\xi_2$ resp. on $\mathsf{X}, \Theta$ such that $\xi_1(V) <
+\infty$, \[ \lim_{n \to +\infty} n^{-1} \sum_{k=1}^n f(X_k) = \pi(f) \;, \qquad \qquad \mathbb{P}_{\xi_1,\xi_2}-\text{a.s.} \] \end{theo}
As in the case of the convergence of the marginals, when A\ref{A6} and
B\ref{B2} hold with $\mathcal{D} = \mathsf{X}$ and $\beta = 0$, A\ref{A2} and A\ref{A5}
can be omitted. We thus have \begin{prop}
\label{prop:SLLNUnboundedUnifCaseBounded}
Assume that A\ref{A6} and B\ref{B2} hold with $\mathcal{D} = \mathsf{X}$ and
$\beta=0$. Then for any measurable bounded function $f: \mathsf{X} \to \mathbb R$
and any initial distribution $\xi_1,\xi_2$ resp. on $\mathsf{X}, \Theta$ \[ \lim_{n \to +\infty} n^{-1} \sum_{k=1}^n f(X_k) = \pi(f) \;, \qquad \qquad \mathbb{P}_{\xi_1,\xi_2}-\text{a.s.} \] \end{prop}
\subsection{Discussion}\label{sec:discussionUnif} \subsubsection{Non-adaptive case} We start by comparing our assumptions to assumptions in Markov chain theory under which the law of large numbers holds. In the setup above, taking $\Theta=\{\theta_\star\}$ and $H(\theta_\star,x) = \theta_\star$ reduces $\{X_n, n\geq 0\}$ to a Markov chain with transition kernel $P_{\theta_\star}$. Assume that $P_{\theta_\star}$ is Harris-recurrent.
In that case, a condition which is known to be minimal and to imply ergodicity in total variation norm is that $P_{\theta_\star}$ is an aperiodic positive Harris recurrent transition kernel \cite[Theorems 11.0.1 and 13.0.1]{meynettweedie93}. Condition A\ref{A-VCset}(\ref{Anew}) is stronger than positive Harris recurrence since it requires $\sup_\mathcal{C} \mathbb{E}_x [\mathbf{r}(\tau_\mathcal{C})]<+\infty$ for some rate $\mathbf{r}$, $\mathbf{r}(n)\gg n$. Nevertheless, as discussed in the proof (see Remark~\ref{rem:YanBai}, Section~\ref{sec:Proofs}), the condition $\sum_n \{1/\mathbf{r}(n) \} <+\infty$ is really designed for the adaptive case. A\ref{A-VCset}(\ref{A4rev}) is stronger than what we want to prove (since A\ref{A-VCset}(\ref{A4rev}) implies the conclusion of Theorem~\ref{theo:MarginalUnifCase} in the non-adaptive case); this is indeed due to our technique of proof which is based on the comparison of the adaptive process to a process - namely, a Markov chain with transition kernel $P_\theta$ - whose stationary distribution is $\pi$. Our proof is thus designed to address the adaptive case. Finally, B\ref{B1} is trivially true.
For the strong law of large numbers (Theorem \ref{theo:SLLNUnboundedUnifCase}), B\ref{B2} is still trivially true in the Markovian case and A\ref{A6} is implied by A\ref{A2} and A\ref{A5} combined with the assumption that $P_{\theta_\star}$ is $\phi$-irreducible and aperiodic (see Appendix~\ref{app:UniformControl} and references therein). In the Markovian case, whenever $P_{\theta_\star}$ is $\phi$-irreducible and aperiodic, A\ref{A2} and A\ref{A5} are known sufficient conditions for a strong law of large numbers for $f \in \mathcal{L}_{V^{1-\alpha}}$, which is a bit stronger than the conclusions of Theorem~\ref{theo:SLLNUnboundedUnifCase}. This slight loss of efficiency is due to the technique of proof based on martingale theory (see the comments in Section~\ref{subsec:MethodsProof}). Observe that in the geometric case, there is the same loss of generality in \cite[Theorem 8]{andrieuetal06}. More generally, any proof of the law of large numbers based on martingale theory (through for example the use of the Poisson equation or of the resolvent kernel) will incur the same loss of efficiency since limit theorems exist only for $L^p$-martingales with $p>1$.
\subsubsection{Checking assumptions A\ref{A-VCset}(\ref{A4rev}) and A\ref{A6}} \label{subsec:CheckCond} A\ref{A-VCset}(\ref{A4rev}) and A\ref{A6} are the most technical of our assumptions. Contrary to the case of a single kernel, the relations between A\ref{A-VCset}(\ref{A4rev}) (resp. A\ref{A6}) and A\ref{A-VCset}(\ref{Anew})-A\ref{A2} (resp. A\ref{A2}, A\ref{A5}) are not completely well understood. Nevertheless these assumptions can be checked under conditions which are essentially of the form A\ref{A2}, A\ref{A5} plus the assumptions that each transition kernel $P_\theta$ is $\phi$-irreducible and aperiodic, as discussed in Appendix~\ref{app:UniformControl}.
\subsubsection{On the uniformity in $\theta$ in assumptions A\ref{A-VCset}(\ref{Anew}), A\ref{A-VCset}(\ref{A4rev}), A\ref{A2} and A\ref{A5}} We have formulated A\ref{A-VCset}(\ref{Anew}), A\ref{A-VCset}(\ref{A4rev}), A\ref{A2} and A\ref{A5} such that all the constants involved are independent of $\theta$, for $\theta \in\Theta$. Intuitively, this corresponds to AMCMC algorithms based on kernels with overall similar ergodicity properties. This uniformity assumption might seem unrealistically strong at first. But the next example shows that when these conditions do not hold uniformly in $\theta$ for $\theta \in\Theta$, pathologies can occur if the adaptation parameter can wander to the boundary of $\Theta$.
\begin{example}
The example is adapted from \cite{winkler03}. Let $\mathsf{X}=\{0,1\}$ and
$\{P_\theta,\;\theta\in(0,1)\}$ be a family of transition matrices with
$P_\theta(0,0)=P_\theta(1,1)=1-\theta$. Let $\{\theta_n, n\geq 0\}$,
$\theta_n \in (0,1)$, be a deterministic sequence of real numbers decreasing
to $0$ and $\{X_n, n\geq 0\}$ be a non-homogeneous Markov chain on $\{0,1\}$
with transition matrices $\{P_{\theta_n}, n\geq 0\}$. One can check that
$D(\theta_n,\theta_{n-1})\leq \theta_{n-1}-\theta_n$ for all $n\geq 1$ so
that B\ref{B1} and B\ref{B2} hold.
For any compact subset $\mathsf{K}$ of $(0,1)$, it can be checked that
A\ref{A-VCset}(\ref{Anew}), A\ref{A-VCset}(\ref{A4rev}), A\ref{A2} and
A\ref{A5} hold uniformly for all $\theta\in\mathsf{K}$. But these assumptions
do not hold uniformly for all $\theta\in (0,1)$. Therefore Theorems
\ref{theo:MarginalUnifCase} and \ref{theo:SLLNUnboundedUnifCase} do not
apply. Actually one can easily check that $\mathbb{P}_{x,\theta_0}\left(X_n\in
\cdot\right) \to \pi(\cdot)$ as $n\to\infty$, but that
$\mathbb{E}_{x,\theta_0}\left[\left(n^{-1}\sum_{k=1}^n
f(X_k)-\pi(f)\right)^2\right]$ does not converge to $0$ for bounded
functions $f$. That is, the marginal distribution of $X_n$ converges to $\pi$
but a weak law of large numbers fails to hold. \end{example}
This raises the question of how to construct AMCMC when A\ref{A-VCset}(\ref{Anew}), A\ref{A-VCset}(\ref{A4rev}), A\ref{A2} and A\ref{A5} do not hold uniformly for all $\theta\in\Theta$. When these assumptions hold uniformly on any compact subset of $\Theta$ and the adaptation is based on stochastic approximation, one approach is to stop the adaptation or to reproject $\theta_n$ back on $\mathcal{K}$ whenever $\theta_n\notin\mathcal{K}$ for some fixed compact subset $\mathcal{K}$ of $\Theta$. A more elaborate strategy is Chen's truncation method which - roughly speaking - reinitializes the algorithm with a larger compact set whenever $\theta_n\notin\mathcal{K}$ (\cite{chen:zhu:1986,chen:gua:gao:1988}). A third strategy consists in proving a drift condition on the bivariate process $\{(X_n,\theta_n), n \geq 0\}$ in order to ensure the stability of the process (\cite{andrieu:vlad:2008}, see also \cite{benveniste:metivier:priouret:1987}). This question is however beyond the scope of this paper; the use of Chen's truncation method to weaken our assumptions is addressed in \cite{atchade:fort:2008b}.
\subsubsection{Comparison with the literature} \label{subsec:CompLite} The convergence of AMCMC has been considered in a number of early works, most under a geometric ergodicity assumption. \cite{haarioetal00} proved the convergence of the adaptive Random Walk Metropolis (ARWM) when the state space is bounded. Their results were generalized to unbounded spaces in \cite{atchadeetrosenthal03} assuming the diminishing adaptation assumption and a geometric drift condition of the form \begin{equation}\label{GeoDrift}P_\theta V(x)\leq \lambda V(x)+b\textbf{1}_C(x),\end{equation} for $\lambda\in (0,1)$, $b<\infty$ and $\theta\in\Theta$.
\cite{andrieuetal06} undertook a thorough analysis of adaptive chains under the geometric drift condition (\ref{GeoDrift}) and proved a strong law of large numbers and a central limit theorem. \cite{andrieuetatchade05} gives a theoretical discussion on the efficiency of AMCMC under (\ref{GeoDrift}).
\cite{rosenthaletroberts05} improves on the literature by relaxing the convergence rate assumption on the kernels. They prove the convergence of the marginals and a weak law of large numbers for bounded functions. But their analysis requires a uniform control on certain moments of the drift function, a condition which is easily checked in the geometric case (i.e. when A\ref{Adrift} or A\ref{A5} is replaced with (\ref{GeoDrift})). Until recently, this was an open question in the polynomial case, but it has recently been solved by \cite{Bai:2008} - contemporaneously with our work - who proves that such a control holds under conditions which are essentially of the form A\ref{Adrift}.
\cite{yang:2007} tackles some open questions mentioned in \cite{rosenthaletroberts05}, by providing sufficient conditions - close to the conditions we give in Theorems~\ref{theo:MarginalUnifCase} and \ref{theo:SLLNUnboundedUnifCase} - to ensure convergence of the marginals and a weak law of large numbers for bounded functions. The conditions in \cite[Theorems 3.1 and 3.2]{yang:2007} are stronger than our conditions. But we have noted some gaps and mistakes in the proofs of these theorems.
\subsubsection{Comments on the methods of proof} \label{subsec:MethodsProof} The proof of Theorem \ref{theo:MarginalUnifCase} is based on an argument extended from \cite{rosenthaletroberts05} which can be sketched heuristically as follows. For $N$ large enough, we can expect $P^N_{\theta_n}(X_n,\cdot)$ to be within $\epsilon$ of $\pi$ (by ergodicity). On the other hand, since the adaptation is diminishing, by waiting long enough, we can find $n$ such that the distribution of $X_{n+N}$ given $(X_n,\theta_n)$ is within $\epsilon$ of $P^N_{\theta_n}(X_n,\cdot)$. Combining these two arguments, we can then conclude that the distribution of $X_{n+N}$ is within $2\epsilon$ of $\pi$. This is essentially the argument of \cite{rosenthaletroberts05}. The difficulty with this argument is that the distance between $P_{\theta_n}^N(x,\cdot)$ and $\pi$ depends in general on $x$ and can rarely be bounded uniformly in $x$. We solve this problem here by introducing some level set $\mathcal{C}$ of $V$ and by using two basic facts: \textit{(i)} under A\ref{A-VCset}(\ref{Anew}), the process cannot wait too long before coming back to $\mathcal{C}$; \textit{(ii)} under A\ref{A-VCset}(\ref{A4rev}-\ref{A3rev}), a bound on the distance between $P_{\theta_n}^N(x,\cdot)$ and $\pi$ uniformly in $x$, for $x \in \mathcal{C}$, is possible.
The proof of Theorem \ref{theo:SLLNUnboundedUnifCase} is based on a resolvent kernel approach that we adapted from \cite{merlevedeetal06} (see also \cite{mw00}), combined with martingale theory. Another possible route to the SLLN is the Poisson equation technique which has been used to study adaptive MCMC in \cite{andrieuetal06}. Under A\ref{A2} and A\ref{A5}, a solution $g_\theta$ to the Poisson equation with transition kernel $P_\theta$ exists for any $f\in\mathcal{L}_{V^\beta}$, $0\leq \beta\leq 1-\alpha$ and $g_\theta\in\mathcal{L}_{V^{\beta+\alpha}}$. But in order to use
$\{g_\theta,\;\theta\in\Theta\}$ to obtain a SLLN for $f$, we typically need to control $|g_\theta-g_{\theta'}|$ which overall can be expensive. Here we avoid these pitfalls by introducing the resolvent $\hat g_a(x,\theta)$ of the process $\{X_n\}$, defined by \[\hat g_a^{(l)}(x,\theta) \eqdef \sum_{j\geq 0}(1-a)^{j+1}\mathbb{E}_{x,\theta}^{(l)}\left[f(X_j)\right] \;, \;\;x\in\mathsf{X},\theta\in\Theta,a\in(0,1), l \geq 0 \;. \]
\section{Examples} \label{sec:Example}
\subsection{A toy example} We first consider an example discussed in \cite{atchadeetrosenthal03} (see also \cite{rosenthaletroberts05}). Let $\pi$ be a target density on the integers $\{1, \cdots, K \}$, $K \geq 4$. Let $\{P_\theta, \theta \in \{1, \cdots, M\} \}$ be a family of Random Walk Metropolis kernels with proposal distribution $q_\theta$, the uniform distribution on $\{x-\theta, \cdots, x-1, x+1, \cdots, x+\theta \}$.
Consider the sequence $\{(X_n,\theta_{n}), n\geq 0 \}$ defined as follows: given $X_n,\theta_n$, \begin{itemize} \item the conditional distribution of $X_{n+1}$ is $P_{\theta_n}(X_n, \cdot)$. \item if $X_{n+1} = X_n$, set $\theta_{n+1} = \max(1, \theta_n -1)$ with
probability $p_{n+1}$ and $\theta_{n+1} = \theta_n$ otherwise; if $X_{n+1}
\neq X_n$, set $\theta_{n+1} = \min(M, \theta_n +1)$ with probability
$p_{n+1}$ and $\theta_{n+1} = \theta_n$ otherwise. \end{itemize} This algorithm defines a non-homogeneous Markov chain - still denoted $\{(X_n,\theta_{n}), n\geq 0 \}$ - on a canonical probability space endowed with a probability $\mathbb{P}$. The transitions of this Markov process are given by the family of transition kernels $\{\bar P(n; (x,\theta), (dx', d\theta')), n\geq 0 \}$ where \begin{multline*}
\bar P(n; (x,\theta), (dx', d\theta')) = P_\theta(x,dx') \; \left( \ensuremath{\mathbbm{1}}_{x=x'} \left\{ p_{n+1} \ \delta_{1 \vee (\theta-1)}(d\theta') + (1-p_{n+1}) \ \delta_{\theta}(d\theta') \right\} \right. \\
\left. + \ensuremath{\mathbbm{1}}_{x\neq x'} \left\{ p_{n+1} \ \delta_{M \wedge
(\theta+1)}(d\theta') + (1-p_{n+1}) \ \delta_{\theta}(d\theta')
\right\} \right) \;. \end{multline*}
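For illustration, a minimal Python sketch of this toy adaptive algorithm is given below; the treatment of proposals falling outside $\{1,\dots,K\}$ (they are rejected, since the target vanishes there) is our convention and is not specified in the description above.
\begin{verbatim}
import numpy as np

def toy_adaptive_rwm(pi, K, M, p, n_iter, x0=1, theta0=1, rng=None):
    # pi: array of K (possibly unnormalized) target probabilities
    # theta in {1,...,M}: half-width of the uniform proposal
    # p(n): adaptation probability p_n, with p_n -> 0
    rng = np.random.default_rng() if rng is None else rng
    x, theta = x0, theta0
    xs = [x]
    for n in range(1, n_iter + 1):
        # propose Y uniformly on {x-theta,...,x-1, x+1,...,x+theta}
        y = x + int(rng.integers(1, theta + 1)) * int(rng.choice([-1, 1]))
        # Metropolis acceptance; proposals outside {1,...,K} are rejected
        if 1 <= y <= K and rng.random() < min(1.0, pi[y - 1] / pi[x - 1]):
            x_new = y
        else:
            x_new = x
        # diminishing adaptation of the proposal width
        if rng.random() < p(n):
            theta = max(1, theta - 1) if x_new == x else min(M, theta + 1)
        x = x_new
        xs.append(x)
    return np.asarray(xs), theta
\end{verbatim}
For instance, \texttt{toy\_adaptive\_rwm(np.ones(10), K=10, M=5, p=lambda n: 1.0/n, n\_iter=10**4)} targets the uniform distribution on $\{1,\dots,10\}$ with $p_n=1/n$.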
In this example, each kernel $P_\theta$ is uniformly ergodic~: $P_\theta$ is $\phi$-irreducible, aperiodic, possesses an invariant probability measure $\pi$ and \[
\lim_n \sup_{x \in \mathsf{X}} \|P_\theta^n(x,\cdot) - \pi(\cdot) \|_{\mathrm{TV}} = 0 \;. \] Since $\Theta$ is finite, this implies that A\ref{A-VCset}(\ref{A4rev}) (resp. A\ref{A6}) hold with $V=1$ (resp. $\mathcal{D} = \mathsf{X}$ and $\beta =0$). Furthermore, $\mathbb{E}_{x,\theta}^{(l)}\left[D(\theta_n, \theta_{n+1})\right] \leq 2 p_{n+1}$ so that B\ref{B1} (resp. B\ref{B2}) hold with any probability measures $\xi_1, \xi_2$ (resp. with $\mathcal{D} = \mathsf{X}$) provided $p_n \to 0$. By Lemma~\ref{lemma:MarginalUnifCaseBounded} combined with Theorem~\ref{theo:MarginalUnifCase}, and by Proposition~\ref{prop:SLLNUnboundedUnifCaseBounded}, we have \begin{prop}
Assume $\lim_n p_n =0$. For any probability distributions $\xi_1, \xi_2$ on
$\mathsf{X}, \Theta$,
\begin{enumerate}[(i)]
\item $\sup_{\{f, |f|_1 \leq 1 \}} |\mathbb{E}_{\xi_1,\xi_2}[f(X_n)] - \pi(f)| \to
0$
\item For any bounded function $f$ \[ n^{-1} \sum_{k=1}^n f(X_k) \to \pi(f) \;, \qquad \qquad \mathbb{P}_{\xi_1,\xi_2}-\text{a.s.} \]
\end{enumerate} \end{prop}
\subsection{The adaptive Random Walk Metropolis of \cite{haarioetal00}} \label{sec:ex2} We illustrate our results with the adaptive Random Walk Metropolis of \cite{haarioetal00}. The Random Walk Metropolis (RWM) algorithm is a popular MCMC algorithm~\cite{hastings:1970,metropolis:1953}. Let $\pi$ be a target distribution, absolutely continuous w.r.t. the Lebesgue measure $\mu_{Leb}$, with density still denoted by $\pi$. Choose a proposal distribution with density w.r.t. $\mu_{Leb}$ denoted $q$, and assume that $q$ is a positive symmetric density on $\mathbb R^p$. The algorithm generates a Markov chain $\{X_n, n\geq 0\}$ with invariant distribution $\pi$ as follows. Given $X_n=x$, a new value $Y=x+Z$ is proposed where $Z$ is generated from $q(\cdot)$. Then we either 'accept' $Y$ and set $X_{n+1}=Y$ with probability $\alpha(x,Y)\eqdef\min\left(1,\pi(Y)/\pi(x)\right)$ or we 'reject' $Y$ and set $X_{n+1}=x$.
For definiteness, we will assume that $q$ is a zero-mean multivariate Gaussian distribution (this assumption can be replaced by regularity conditions and moment conditions on the proposal distribution). Given a proposal distribution with finite second moments, the convergence rate of the RWM kernel depends mainly on the tail behavior of the target distribution $\pi$. If $\pi$ is super-exponential in the tails with regular contours, then the RWM kernel is typically geometrically ergodic (\cite{jarnerethansen98}). Otherwise, it is typically sub-geometric (\cite{gersendeetmoulines00,gersendeetmoulines03,doucetal04}).
Define \[ \mu_\star\eqdef\int_\mathsf{X} x \; \pi(x) \; \mu_{Leb}(dx) \;, \qquad \Sigma_\star\eqdef \int_\mathsf{X} xx^T \; \pi(x)\mu_{Leb}(dx) -\mu_\star \; \mu_\star^{T} \;, \] resp. the expectation and the covariance matrix of $\pi$ ($\cdot^T$ denotes the transpose operation). Theoretical results suggest setting the variance-covariance matrix of the proposal distribution to $\Sigma=c_\star\Sigma_\star$, where $c_\star$ is set so as to reach the optimal acceptance rate $\bar\alpha$ in stationarity (typically $\bar\alpha$ is set to values around $0.3-0.4$). See e.g. \cite{robertsetrosenthal01} for more details. \cite{haarioetal00} have proposed an adaptive algorithm to learn $\Sigma_\star$ during the simulation. This algorithm has been studied in detail in \cite{andrieuetal06} under the assumption that $\pi$ is super-exponential in the tails. An adaptive algorithm to find the optimal value $c_\star$ has been proposed in \cite{atchadeetrosenthal03} (see also \cite{atchade05}) and studied under the assumption that $\pi$ is super-exponential in the tails. We extend these results to cases where $\pi$ is sub-exponential in the tails.
Let $\Theta_+$ be a convex compact subset of the cone of $p\times p$ symmetric positive definite matrices endowed with the Schur norm $|\cdot|_\mathrm{s}$,
$|A|_\mathrm{s}\eqdef \sqrt{\mathrm{Tr}(A^T \, A)}$. For example, for $\mathsf{a}, M > 0$, $\Theta_+ = \{ \text{$A+\mathsf{a} \, \mathrm{Id}$: $A$ is symmetric
positive semidefinite and } |A|_s \leq M \}$. Next, for $-\infty<\kappa_l<\kappa_u<\infty$ and $\Theta_\mu$ a compact subset of $\mathsf{X}$, we introduce the space $\Theta \eqdef \Theta_\mu \times \Theta_+\times [\kappa_l,\kappa_u]$. For $\theta =(\mu,\Sigma,c)\in \Theta$, denote by $P_\theta$ the transition kernel of the RWM algorithm with proposal $q_{\theta}$ where $q_\theta$ stands for the multivariate Gaussian distribution with variance-covariance matrix $e^c \Sigma$.
Consider the adaptive RWM defined as follows
\begin{algo}\label{arwm1} \begin{description} \item [Initialization] Let $\bar\alpha$ be the target acceptance probability.
Choose $X_0\in\mathsf{X}$, $(\mu_0,\Sigma_0,c_0)\in\Theta$. \item [Iteration] Given $(X_n,\mu_n,\Sigma_n,c_n)$: \begin{description} \item [1] Generate $Z_{n+1}\sim q_{\theta_n} d\mu_{Leb}$ and set $Y_{n+1} = X_n
+Z_{n+1}$. With probability $\alpha(X_n,Y_{n+1})$ set $X_{n+1}=Y_{n+1}$ and
with probability $1-\alpha(X_n,Y_{n+1})$, set $X_{n+1}=X_n$.
\item [2] Set
\begin{align}
\mu & = \mu_n+(n+1)^{-1}\left(X_{n+1}-\mu_n\right) \;, \label{ex2:defiMu} \\
\Sigma & = \Sigma_n+(n+1)^{-1}\left[\left(X_{n+1}-\mu_n\right)\left(X_{n+1}-\mu_n\right)^T-\Sigma_n\right] \;, \label{ex2:defiSigma} \\
c & = c_n+\frac{1}{n+1}\left(\alpha(X_n,Y_{n+1})-\bar\alpha\right) \;.
\label{ex2:defic}
\end{align} \item [3] If $(\mu, \Sigma,c)\in\Theta$, set $\mu_{n+1} = \mu$,
$\Sigma_{n+1}=\Sigma$ and $c_{n+1}=c$. Otherwise, set $\mu_{n+1} = \mu_n$,
$\Sigma_{n+1}=\Sigma_n$ and $c_{n+1}=c_n$. \end{description} \end{description} \end{algo}
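A minimal Python sketch of this algorithm may help fix ideas; it is an illustration only, and the log-density \texttt{log\_pi} and the membership test \texttt{in\_Theta} (implementing the projection on $\Theta$ in the last step of the algorithm) are placeholders to be supplied by the user.
\begin{verbatim}
import numpy as np

def adaptive_rwm(log_pi, x0, mu0, Sigma0, c0, n_iter, alpha_bar, in_Theta, rng=None):
    # log_pi: log of the target density on R^p
    # (mu, Sigma, c): adapted mean, covariance and log-scale, kept in Theta
    # alpha_bar: target acceptance rate; in_Theta: membership test for Theta
    rng = np.random.default_rng() if rng is None else rng
    x = np.asarray(x0, dtype=float)
    mu = np.asarray(mu0, dtype=float)
    Sigma = np.asarray(Sigma0, dtype=float)
    c = float(c0)
    xs = [x.copy()]
    for n in range(n_iter):
        # Step 1: RWM move with Gaussian proposal of covariance exp(c) * Sigma
        y = x + rng.multivariate_normal(np.zeros(x.size), np.exp(c) * Sigma)
        acc = min(1.0, float(np.exp(log_pi(y) - log_pi(x))))
        if rng.random() < acc:
            x = y
        # Step 2: Robbins-Monro updates of (mu, Sigma, c) with step 1/(n+1)
        gam = 1.0 / (n + 1)
        mu_new = mu + gam * (x - mu)
        Sigma_new = Sigma + gam * (np.outer(x - mu, x - mu) - Sigma)
        c_new = c + gam * (acc - alpha_bar)
        # Step 3: keep the new parameter only if it remains in Theta
        if in_Theta(mu_new, Sigma_new, c_new):
            mu, Sigma, c = mu_new, Sigma_new, c_new
        xs.append(x.copy())
    return np.asarray(xs), (mu, Sigma, c)
\end{verbatim}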
Algorithm~\ref{arwm1} is an algorithmic description of a random process $\{(X_n, \theta_n), n\geq 0\}$ which is a non-homogeneous Markov chain with successive transition kernels $\{\bar P(n; (x,\theta), (dx',d \theta')), n\geq 0 \}$ given by \begin{multline*}
\bar P(n; (x,\theta), (dx',d \theta')) = \int q_\theta(z) \ \left\{ \alpha(x,x+z) \delta_{x+z}(dx') + (1-\alpha(x,x+z)) \delta_x(dx') \right\} \cdots \\
\left(\ensuremath{\mathbbm{1}}_{\{\phi(\theta,x+z,x') \in
\Theta\}}\delta_{\phi(\theta,x+z,x')}(d\theta') + \ensuremath{\mathbbm{1}}_{\{\phi(\theta,x+z,x')
\notin \Theta\}}\delta_{\theta}(d\theta') \right) \ d\mu_{Leb}(dz) \end{multline*} where $\phi$ is the function defined from the rhs expressions of (\ref{ex2:defiMu}) to (\ref{ex2:defic}). Integrating over $\theta'$, we see that for any $A \in \mathcal{X}$, \[ \int_{A \times \Theta} \bar P(n;(x,\theta),(dx',d\theta')) = P_\theta(x,A) \;. \] \begin{lemma} \label{lem:example:smallset}
Assume that $\pi$ is bounded from below and from above on compact sets. Then
any compact subset $\mathcal{C}$ of $\mathsf{X}$ with $\mu_{Leb}(\mathcal{C})>0$ satisfies
A\ref{A2}. \end{lemma} \begin{proof}
See \cite[Theorem 2.2]{robertsettweedie96}. \end{proof}
Following (\cite{gersendeetmoulines00}), we assume that $\pi$ is sub-exponential in the tails: \debutD \item \label{D1} $\pi$ is positive and continuous on $\mathbb R^p$, and twice
continuously differentiable in the tails. \item \label{D2} there exist $m\in (0,1)$, positive constants $d_i<D_i$, $i=0,1,2$ and
$r,R>0$ such that for $|x|\geq R$: \begin{enumerate}[(i)]
\item \label{D2z} $\pscal{\frac{\nabla \pi(x)}{|\nabla \pi(x)|}}{\frac{x}{|x|}}
\leq -r $.
\item \label{D2a} $d_0|x|^m\leq -\log\pi(x)\leq D_0|x|^m$,
\item \label{D2b} $d_1|x|^{m-1}\leq |\nabla\log\pi(x)|\leq D_1|x|^{m-1}$,
\item \label{D2c} $d_2|x|^{m-2}\leq |\nabla^2\log\pi(x)|\leq D_2|x|^{m-2}$. \end{enumerate} \finD
Examples of target densities that satisfy D\ref{D1}-D\ref{D2} are the Weibull distributions on $\mathbb R$ with density $\pi(x) \propto |x|^{m-1} \exp(-\beta
|x|^m)$ (for large $|x|$), $\beta>0$, $m \in (0,1)$. Multidimensional examples are provided in \cite{gersendeetmoulines00}.
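As an illustration (a direct computation on our part, not reproduced from \cite{gersendeetmoulines00}), for this Weibull density one has, for large $x>0$,
\begin{equation*}
-\log\pi(x)=\beta x^{m}-(m-1)\log x+\mathrm{const}\;, \qquad
(\log\pi)'(x)=\frac{m-1}{x}-\beta m\,x^{m-1}\;,
\end{equation*}
so that $-\log\pi(x)\asymp x^{m}$ and $|(\log\pi)'(x)|\asymp x^{m-1}$ as $x\to+\infty$. This gives D\ref{D2}(\ref{D2a}) and D\ref{D2}(\ref{D2b}); D\ref{D2}(\ref{D2z}) holds since $\pi$ is eventually decreasing, and D\ref{D2}(\ref{D2c}) follows by differentiating once more.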
\subsubsection{Law of large numbers for exponential functions} In this subsection, we assume that \debutD \item \label{D3} there exist $s_\star>0$, $ 0<\upsilon<1-m$ and $0<\eta<1$ such
that as $|x| \to+\infty$,
\[
\sup_{\theta \in \Theta} \ \int_{\{z, |z| \geq \eta |x|^\upsilon \}} \left(1
\vee \frac{\pi(x)}{\pi(x+z)} \right)^{s_\star} \; \; q_\theta(z) \
\mu_{Leb}(dz) =o\left( |x|^{2(m-1)} \right) \;. \] \finD A sufficient condition for D\ref{D3} is that $\pi(x+z) \geq \pi(x)
\pi(z)$ for any $x$ large enough and $|z| \geq \eta |x|^\upsilon$ (which holds true for Weibull distributions with $0<m<1$). Indeed, we then have \begin{multline*}
\int_{\{z, |z| \geq \eta |x|^\upsilon \}} \left(1 \vee
\frac{\pi(x)}{\pi(x+z)} \right)^{s_\star} \; q_\theta(z) \mu_{Leb}(dz) \\
\leq C\; \exp(-\lambda_\star \eta^2 |x|^{2 \upsilon}) \sup_{\theta \in
\Theta} \ \int \exp(s_\star D_0 |z|^m) \; \exp(\lambda_\star |z|^2) \
q_\theta(z) \mu_{Leb}(dz) \end{multline*} for some constant $C< +\infty$, and $\lambda_\star >0$ such that the rhs is finite.
\begin{lemma}\label{driftRWM}
Assume D\ref{D1}-\ref{D3}. For $0<s \leq s_\star$, define $V_s(x)\eqdef 1 +
\pi^{-s}(x)$. There exist $0< s \leq s_\star$ and for any $\alpha \in (0,1)$,
there exist positive constants $b,c$ and a compact set $\mathcal{C}$ such that \begin{equation*}
\sup_{\theta \in \Theta} P_\theta V_s(x)\leq
V_s(x)-c V^{1-\alpha}_s(x)+b\ensuremath{\mathbbm{1}}_\mathcal{C}(x). \end{equation*} Hence A\ref{Adrift}-\ref{A6} hold. \end{lemma} \begin{lemma} \label{ex:lem:HypB}
Assume D\ref{D1}-\ref{D3}. B\ref{B2} holds and B\ref{B1} holds for any
probability measures $\xi_1$,$\xi_2$ such that $\int |\ln \pi|^{2/m} d \xi_1
< +\infty$. \end{lemma} The proofs of Lemmas~\ref{driftRWM} and \ref{ex:lem:HypB} are in Appendix C.
\begin{prop} \label{prop:ex2:CasRapide}
Assume D\ref{D1}-\ref{D3}. Consider the sequence $\{X_n, n \geq 0 \}$ given by the
algorithm \ref{arwm1}.
\begin{enumerate}[(i)]
\item For any probability measures $\xi_1,\xi_2$ such that $\int |\ln
\pi|^{2/m} d \xi_1 < +\infty$, \[
\sup_{\{f, |f|_1 \leq 1 \}} |\mathbb{E}_{\xi_1,\xi_2}[f(X_n)] - \pi(f)| \to 0 \;. \] \item \label{item2} There exists $0 < s \leq s_\star$ such that for any
probability measures $\xi_1,\xi_2$ such that $\int |\pi|^{-s} d \xi_1 <
+\infty$, and any function $ f \in \mathcal{L}_{1+\pi^{-r}}$, $0 \leq r<s$, \[ n^{-1} \sum_{k=1}^n f(X_k ) \to \pi(f) \;, \qquad \mathbb{P}_{\xi_1,\xi_2}-\text{a.s.} \]
\end{enumerate} \end{prop}
The drift function $V_s$ exhibited in Lemma~\ref{driftRWM} is designed for limit theorems relative to functions $f$ increasing as $\exp(\beta |x|^m)$. This implies a condition on the initial distribution $\xi_1$ which has to possess sub-exponential moments (see Proposition~\ref{prop:ex2:CasRapide}(\ref{item2})), which always holds with $\xi_1 = \delta_x$, $ x \in \mathsf{X}$.
\subsubsection{Law of large numbers for polynomially increasing functions}
Proposition~\ref{prop:ex2:CasRapide} also addresses the case when $f$ is of the form $1+|x|^r$, $r>0$. Nevertheless, the conditions on $\xi_1$ and the assumptions D\ref{D3} can be weakened in that case.
We have to find a drift function $V$ such that $V^{1-\alpha}(x) \sim 1+|x|^{r+\iota}$ for some $\alpha \in (0,1)$, $\iota>0$. Under D\ref{D3}, this can be obtained from the proof of Lemma~\ref{driftRWM}, and this yields $V(x) \sim 1 + |x|^{r+\iota+2-m}$ (apply Jensen's inequality to the drift inequality (\ref{eq:drift:sous-geom}) with the concave function $\phi(t) \sim [\ln t]^{(r+\iota+2)/m-1}$; see \cite[Lemma 3.5]{jarneretroberts02} for similar calculations). Hence, the condition on $\xi_1$ becomes
$\xi_1(|x|^{r+\iota+2-m})< +\infty$ for some $\iota>0$.
Drift inequalities with $V \sim (-\ln \pi)^{s}$ for some $s>2/m-1$, can also be derived by direct computations: in that case, D\ref{D3} can be removed. Details are omitted and left to the interested reader.
To conclude, observe that these discussions relative to polynomially increasing functions can be extended to any function $f$ which is a concave transformation of $\pi^{-s}$.
\section{Proofs of the results of Section~\ref{sec:ResultsUnif}} \label{sec:Proofs}
For a set $\mathcal{C} \in \mathcal{X}$, define the hitting-time on $\mathcal{C} \times \Theta$ of $\{Z_n, n\geq 0 \}$ by $\sigma_\mathcal{C} \eqdef \inf\{n \geq 0, Z_n \in \mathcal{C}
\times \Theta \}$. If $\pi(|f|) < +\infty$, we set $\bar f \eqdef f - \pi(f)$. \subsection{Preliminary results} We gather some useful preliminary results in this section. Section \ref{sec:OptCouplingUnif} gives an approximation of the marginal distribution of the adaptive chain by the distribution of a related Markov chain. In Section \ref{sec:ModMomentsUnif}, we develop various bounds for modulated moments of the adaptive chain as consequences of the drift conditions. In Section \ref{sec:ReturnTimesUnif} we bound the expected return times of the adaptive chain to level sets of the drift function $V$. The culminating result of this subsection is Theorem~\ref{theo:controleG} which gives an explicit bound on the resolvent function $g^{(l)}_a(x,\theta)$.
\subsubsection{Optimal coupling}\label{sec:OptCouplingUnif} \begin{lemma}
\label{lem:coupling}
For any integers $l \geq 0, N \geq 2$, any measurable bounded function $f$ on
$\mathsf{X}^N$ and any $(x,\theta) \in \mathsf{X} \times \Theta$,
\begin{multline*}
\Delta \eqdef \left| \mathbb{E}_{x, \theta}^{(l)}\left[ f(X_1, \cdots, X_N)
\right] - \int_{\mathsf{X}^N} P_{\theta}(x, dx_1) \; \prod_{k=2}^N
P_{\theta}(x_{k-1}, dx_k) f(x_1, \cdots, x_N)\right| \\ \leq |f|_1 \;
\sum_{j=1}^{N-1} \sum_{i=1}^j \mathbb{E}_{x,\theta}^{(l)} \left[D(\theta_i,
\theta_{i-1}) \right] \;.
\end{multline*} \end{lemma} \begin{proof}
We can assume w.l.o.g. that $|f|_{1} \leq 1$. Set $z_k = (x_k,t_k)$. With the
convention that $\prod_{k=a}^b a_k=1$ for $a>b$ and upon noting that $
\int_\mathsf{X} P_{\theta}(x, dx') h(x') = \int_{\mathsf{X} \times \Theta} \bar
P_{l}(0; (x,\theta), (dx', d \theta'))h(x')$ for any bounded measurable function $h:\;\mathsf{X}\to\mathbb R$, \begin{multline*}
\Delta = \left| \int_{(\mathsf{X} \times \Theta)^N} \sum_{j=1}^{N-1} \bar P_{l}(0;
(x,\theta), dz_1) \;
\prod_{k=2}^{j} \bar P_{l}(k-1; z_{k-1}, d z_{k}) \cdots \right. \\
\left. \left\{ \bar P_{l}(j; z_j, dz_{j+1}) - \bar P_{l}(0; (x_j,\theta),
dz_{j+1}) \right\}
\prod_{k=j+2}^N \bar P_{l}(0; (x_{k-1},\theta), dz_{k}) f(x_1, \cdots, x_N) \right| \\
\leq \sum_{j=1}^{N-1} \int_{\mathsf{X}^j} \bar P_{l}(0; (x,\theta), dz_1) \;
\prod_{k=2}^{j} \bar P_{l}(k-1; z_{k-1}, d z_{k}) \sup_{x\in \mathsf{X}} \|
P_{t_j}( x, \cdot) - P_{\theta}(x, \cdot) \|_{\mathrm{TV}} \end{multline*} where we used that \[ \int_{(\mathsf{X} \times \Theta)^{N-j-1}} \prod_{k=j+2}^N \bar P_{l}(0; (x_{k-1},\theta), dz_{k}) f(x_1, \cdots, x_N) \] is bounded by a function $\Xi(x_{1}, \cdots, x_{j+1})$ that does not depend upon $t_k, k\leq N$ and for any bounded function $\Xi$ on $\mathsf{X}^{j+1}$ \begin{multline*}
\int_{\mathsf{X} \times \Theta} \left\{ \bar P_{l}(j; z_j, dz_{j+1}) - \bar
P_{l}(0;
(x_j,\theta), dz_{j+1}) \right\} \Xi(x_1, \cdots, x_{j+1}) \\
= \int_{\mathsf{X}} \left\{ P_{t_j}( x_j, dx_{j+1}) - P_{\theta}( x_j, dx_{j+1})
\right\} \Xi(x_1, \cdots, x_{j+1}) \leq \sup_{x\in \mathsf{X}} \| P_{t_j}( x,
\cdot) - P_{\theta}(x, \cdot) \|_{\mathrm{TV}} \ |\Xi|_1\;. \end{multline*} Hence \begin{multline*}
\Delta \leq \sum_{j=1}^{N-1} \mathbb{E}_{x,\theta}^{(l)} \left[ \sup_{x\in \mathsf{X}} \|
P_{\theta_j}(x, \cdot) - P_{\theta_0}(x, \cdot)
\|_{\mathrm{TV}} \right] \\
\leq \sum_{j=1}^{N-1} \mathbb{E}_{x,\theta}^{(l)} \left[ \sum_{i=1}^j \sup_{x\in
\mathsf{X}} \| P_{\theta_i}(x, \cdot) - P_{\theta_{i-1}}(x, \cdot) \|_{\mathrm{TV}}
\right] = \sum_{j=1}^{N-1} \sum_{i=1}^j \mathbb{E}_{x,\theta}^{(l)}
\left[D(\theta_i, \theta_{i-1}) \right] \;. \end{multline*} \end{proof}
\begin{lemma} \label{lem:couplingoptimal}
Let $\mu, \nu$ be two probability distributions. There exist a probability space $(\Omega, \mathcal{F}, \mathbb{P})$ and random variables $X,Y$ on $(\Omega, \mathcal{F})$ such that $X \sim \mu$, $Y \sim \nu$ and $\mathbb{P}(X = Y) = 1 - \| \mu - \nu \|_\mathrm{TV}$. \end{lemma} The proof can be found e.g. in \cite[Proposition 3]{roberts:rosenthal:2004}. As a consequence of Lemmas~\ref{lem:coupling} and \ref{lem:couplingoptimal}, we have \begin{prop}
\label{prop:ContructionCouplingOpt}
Let $l \geq 0, N \geq 2$ and set $z = (x,\theta)$. There exists a process
$\{(X_k, \tilde X_k), 0 \leq k \leq N\}$ defined on a probability space
endowed with the probability $\overline{\mathbb{P}}_{z, z}^{(l)}$ such that \[ \overline{\mathbb{P}}_{z, z}^{(l)} \left( X_k = \tilde X_k, 0 \leq k \leq N \right) \geq 1 - \sum_{j=1}^{N-1} \sum_{i=1}^j\mathbb{E}_{z}^{(l)} \left[ D(\theta_i,\theta_{i-1}) \right] \;, \] $(X_0, \cdots, X_{N})$ has the $X$-marginal distribution of $\mathbb{P}^{(l)}_{z}$ restricted to the time-interval $\{0, \cdots, N\}$, and $(\tilde X_0, \cdots, \tilde X_{N})$ has the same distribution as a homogeneous Markov chain with transition kernel $P_{\theta}$ and initial distribution $\delta_x$. \end{prop}
\subsubsection{Modulated moments for the adaptive chain}\label{sec:ModMomentsUnif}
Let $V: \mathsf{X} \to [1, +\infty)$ be a measurable function and assume that there exist $\mathcal{C} \in \mathcal{X}$, positive constants $b,c$ and $0 < \alpha \leq 1$ such that for any $\theta \in \Theta$, \begin{equation}
\label{eq:A2-A5}
P_\theta V \leq V - c V^{1-\alpha} +b \ensuremath{\mathbbm{1}}_\mathcal{C} \;. \end{equation}
\begin{lemma} \label{lem:JarnerRoberts} Assume (\ref{eq:A2-A5}). There exists $\bar b$ such that for any $0 \leq \beta \leq 1$, $\theta \in \Theta$: $P_\theta V^\beta \leq V^\beta - \beta c V^{\beta-\alpha} + \bar b \ensuremath{\mathbbm{1}}_\mathcal{C}$. \end{lemma} \begin{proof}
See \cite[Lemma 3.5]{jarneretroberts02}. \end{proof} \begin{prop}
Assume (\ref{eq:A2-A5}). For any $l\geq 0$, $(x,\theta) \in \mathsf{X} \times
\Theta$, and any stopping-time $\tau$, \[ c \ \mathbb{E}_{x,\theta}^{(l)} \left[ \sum_{k=0}^{\tau-1} \left(k \alpha c + 1
\right)^{\alpha^{-1}-1} \right] \leq V(x) + b \ \mathbb{E}_{x,\theta}^{(l)} \left[
\sum_{k=0}^{\tau-1} \left((k+1) \alpha c + 1 \right)^{\alpha^{-1}-1}
\ensuremath{\mathbbm{1}}_\mathcal{C}(X_k)\right] \;. \] \end{prop} \begin{proof}
The proof can be adapted from \cite[Proposition 2.1]{doucetal04} and
\cite[Proposition 11.3.2]{meynettweedie93} and is omitted. \end{proof}
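For illustration only (a direct specialisation of the above proposition, not a new statement), when $\alpha = 1$ all the weights $\left(k \alpha c + 1 \right)^{\alpha^{-1}-1}$ and $\left((k+1) \alpha c + 1 \right)^{\alpha^{-1}-1}$ equal one, and the bound reduces to
\[
c \ \mathbb{E}_{x,\theta}^{(l)} \left[ \tau \right] \leq V(x) + b \ \mathbb{E}_{x,\theta}^{(l)} \left[ \sum_{k=0}^{\tau-1} \ensuremath{\mathbbm{1}}_\mathcal{C}(X_k)\right] \;.
\]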
\begin{prop} \label{prop:ComparaisonGal} Assume (\ref{eq:A2-A5}).
\begin{enumerate}[(i)]
\item \label{prop:CpG1} There exists $\bar b$ such that for any $j \geq 0$,
$0 \leq \beta \leq 1$, $l\geq 0$ and $(x,\theta) \in \mathsf{X} \times \Theta$ \[ \mathbb{E}_{x,\theta}^{(l)} \left[ V^\beta(X_j)\right] \leq V^\beta(x) + \bar b j^\beta \;. \] \item \label{prop:CpG2} Let $0 \leq \beta \leq 1$ and $0 \leq a \leq 1$. For
any stopping-time $\tau$,
\begin{multline*}
\mathbb{E}_{x,\theta}^{(l)} \left[(1-a)^\tau V^\beta(X_{\tau}) \ensuremath{\mathbbm{1}}_{\tau <
+\infty}\right] + \mathbb{E}_{x,\theta}^{(l)} \left[ \sum_{j=0}^{\tau-1}
(1-a)^{j} \; \{ a \; V^\beta(X_{j}) + \beta c (1-a)
V^{\beta-\alpha}(X_{j}) \} \right] \\
\leq V^\beta(x) + \bar b (1-a) \mathbb{E}_{x,\theta}^{(l)} \left[
\sum_{j=0}^{\tau-1} (1-a)^{j} \; \ensuremath{\mathbbm{1}}_\mathcal{C}(X_{j}) \right] \;.
\end{multline*}
\item \label{prop:CpG3} Let $0 \leq \beta \leq 1-\alpha$ and $0<a<1$. For any
stopping-time $\tau$ and any $q \in [1, +\infty]$,
\begin{multline*}
\mathbb{E}_{x,\theta}^{(l)} \left[ \sum_{j=0}^{\tau-1} (1-a)^{j} V^\beta(X_{j}) \right] \\
\leq a^{1/q-1} (1-a)^{-1/q} \; V^{\beta+\alpha/q}(x) \; \left(1 + \bar b
\; \mathbb{E}_{x,\theta}^{(l)}\left[\sum_{j=0}^{\tau-1} (1-a)^j \ensuremath{\mathbbm{1}}_\mathcal{C}(X_j)
\right] \right) \left( \alpha c \right)^{-1/q} \;,
\end{multline*}
(with the convention that $1/q = 0$ when $q = +\infty$).
\end{enumerate} \end{prop}
\begin{proof} We give the proof in the case $l=0$; the general case is similar and omitted.
(\ref{prop:CpG1}) is a trivial consequence of Lemma~\ref{lem:JarnerRoberts}.
(\ref{prop:CpG2}) Let $\beta \leq 1$. Set $\tau_N = \tau \wedge N$ and $Y_n
= (1-a)^n V^\beta(X_n)$. Then
\begin{multline*}
Y_{\tau_N} = Y_0 + \sum_{j=1}^{\tau_N} \left(Y_j - Y_{j-1} \right) = Y_0 +
\sum_{j=1}^{\tau_N} (1-a)^{j-1} \; \left((1-a) V^\beta(X_j) -
V^\beta(X_{j-1}) \right) \\
= Y_0 + \sum_{j=1}^{\tau_N} (1-a)^{j} \; \left(V^\beta(X_j) -
V^\beta(X_{j-1}) \right) - a \sum_{j=1}^{\tau_N} (1-a)^{j-1} \;
V^\beta(X_{j-1}) \;.
\end{multline*} Hence, \begin{multline*}
\mathbb{E}_{x,\theta} \left[Y_{\tau_N} \right] + a \; \mathbb{E}_{x,\theta} \left[
\sum_{j=0}^{\tau_N-1} (1-a)^{j} \; V^\beta(X_{j}) \right] \\
= V^\beta(x) + \sum_{j\geq 1} (1-a)^{j} \; \mathbb{E}_{x,\theta} \left[
\left(V^\beta(X_j) - V^\beta(X_{j-1})
\right) \ensuremath{\mathbbm{1}}_{j \leq \tau_N} \right] \\
\leq V^\beta(x) + \sum_{j\geq 1} (1-a)^{j} \; \mathbb{E}_{x,\theta} \left[\left( -
\beta c \; V^{\beta-\alpha}(X_{j-1}) + \bar b \ensuremath{\mathbbm{1}}_\mathcal{C}(X_{j-1}) \right)
\ensuremath{\mathbbm{1}}_{j \leq \tau_N} \right], \end{multline*} where we used Lemma~\ref{lem:JarnerRoberts} in the last inequality. This implies \begin{multline*}
\mathbb{E}_{x,\theta} \left[Y_{\tau_N} \right] + a \; \mathbb{E}_{x,\theta} \left[
\sum_{j=0}^{\tau_N-1} (1-a)^{j} \; V^\beta(X_{j}) \right] + (1-a) \beta c
\; \mathbb{E}_{x,\theta} \left[ \sum_{j=0}^{\tau_N-1} (1-a)^{j} \;
V^{\beta-\alpha}(X_{j}) \right] \\
\leq V^\beta(x) + \bar b (1-a) \mathbb{E}_{x,\theta} \left[ \sum_{j=0}^{\tau_N-1}
(1-a)^{j} \; \ensuremath{\mathbbm{1}}_\mathcal{C}(X_{j}) \right]. \end{multline*} The result follows by letting $N \to +\infty$. \\ (\ref{prop:CpG3}) The previous case provides two upper bounds, namely for $0 < \beta \leq 1-\alpha$, \[ a \; \mathbb{E}_{x,\theta} \left[ \sum_{j=0}^{\tau-1} (1-a)^{j} V^\beta(X_{j}) \right] \leq V^\beta(x) + \bar b \; (1-a) \mathbb{E}_{x,\theta} \left[ \sum_{j=0}^{\tau-1}
(1-a)^{j} \; \ensuremath{\mathbbm{1}}_\mathcal{C}(X_{j}) \right], \] and \[ (1-a) \; \left( (\beta +\alpha) c \right) \ \mathbb{E}_{x,\theta} \left[
\sum_{j=0}^{\tau-1} (1-a)^{j} V^{\beta}(X_{j}) \right] \leq V^{\beta+\alpha}(x) + \bar b \mathbb{E}_{x,\theta} \left[ \sum_{j=0}^{\tau-1}
(1-a)^{j} \; \ensuremath{\mathbbm{1}}_\mathcal{C}(X_{j}) \right]. \] We then use the property $\left[c \leq c_1 \wedge c_2 \right]\Longrightarrow c \leq c_1^{1/q} c_2^{1-1/q}$ for any $ q \in [1, +\infty]$, which follows from writing $c = c^{1/q} \, c^{1-1/q} \leq c_1^{1/q} c_2^{1-1/q}$. \end{proof}
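As an immediate specialisation recorded here for later convenience, taking $a=0$ in Proposition~\ref{prop:ComparaisonGal}(\ref{prop:CpG2}) yields, for any stopping-time $\tau$ and any $0 \leq \beta \leq 1$,
\[
\mathbb{E}_{x,\theta}^{(l)} \left[ V^\beta(X_{\tau}) \ensuremath{\mathbbm{1}}_{\tau < +\infty} \right] + \beta c \; \mathbb{E}_{x,\theta}^{(l)} \left[ \sum_{j=0}^{\tau-1} V^{\beta-\alpha}(X_{j}) \right] \leq V^\beta(x) + \bar b \ \mathbb{E}_{x,\theta}^{(l)} \left[ \sum_{j=0}^{\tau-1} \ensuremath{\mathbbm{1}}_\mathcal{C}(X_{j}) \right] \;,
\]
which is the form used e.g.\ in the proof of Proposition~\ref{prop:TimeFiniteAS} below.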
\begin{prop}
\label{prop:ComparaisonGal2} Assume (\ref{eq:A2-A5}). Let $\{r_n, n\geq 0\}$ be a non-increasing positive sequence. There exists $\bar b$ such that for any $l\geq0$, $(x,\theta) \in \mathsf{X} \times \Theta$, $ 0 \leq \beta \leq 1$ and $n \geq 0$, \[ \beta c \ \mathbb{E}_{x,\theta}^{(l)} \left[ \sum_{k \geq n} r_{k+1}
V^{\beta-\alpha}(X_k) \right] \leq r_n \mathbb{E}_{x,\theta}^{(l)} \left[
V^\beta(X_n)\right] + \bar b \ \mathbb{E}_{x,\theta}^{(l)} \left[ \sum_{k \geq n}
r_{k+1} \ensuremath{\mathbbm{1}}_\mathcal{C}(X_k) \right] \;. \] \end{prop} The proof follows the same lines as the proof of Proposition~\ref{prop:ComparaisonGal}(\ref{prop:CpG2}) and is omitted.
\subsubsection{Delayed successive visits to an accessible level set of $V$}\label{sec:ReturnTimesUnif} \label{sec:DelayedSuccVisit} Let $\mathcal{D} \in \mathcal{X}$ and let $n_\star, N$ be two positive integers. Define on $(\Omega, \mathcal{F}, \mathbb{P}_{x,\theta}^{(l)})$ the sequence of ${\mathbb{N}}$-valued random variables $\{\tau^n, n\geq 0 \}$ as \[ \tau^0 \eqdef \tau_\mathcal{D} \;, \qquad \tau^1 \eqdef \tau^0 + n_\star + \tau_\mathcal{D} \circ \underline{\theta}^{\tau^0 + n_\star} \;, \qquad \tau^{k+1} \eqdef \tau^k + N + \tau_\mathcal{D} \circ \underline{\theta}^{\tau^k+N} \;, \ \ k\geq 1 \;. \] In words, $\tau^0$ is the hitting time of $\mathcal{D}$ and, for $k \geq 1$, $\tau^{k}$ is obtained by letting the chain evolve for $N$ steps (resp.\ $n_\star$ steps when $k=1$) after $\tau^{k-1}$ and then waiting for the next visit to $\mathcal{D}$.
\begin{prop} \label{prop:TimeFiniteAS} Assume A\ref{A2} and that there exist $V : \mathsf{X} \to [1,+\infty)$ and a constant $b < +\infty$ such that for any $\theta \in \Theta$, $P_\theta V \leq V - 1 + b \ensuremath{\mathbbm{1}}_\mathcal{C}$. Let $\mathcal{D} \in \mathcal{X}$. Let $n_\star, N$ be two non-negative integers. Then \[ \varepsilon \ \nu(\mathcal{D}) \ \mathbb{E}_{x,\theta}^{(l)} \left[
\sum_{k=0}^{\tau_\mathcal{D}-1} \ensuremath{\mathbbm{1}}_\mathcal{C}(X_k)\right] \leq 1 \;, \] and if $\sup_\mathcal{D} V < +\infty$ and $\nu(\mathcal{D})>0$, there exists a (finite) constant $C$ depending upon $\varepsilon, \nu(\mathcal{D}), \sup_\mathcal{D} V, b, n_\star, N$ such that for any $l \geq 0$, $(x,\theta) \in \mathsf{X} \times \Theta$ and $k\geq 0$, \[ \mathbb{E}_{x,\theta}^{(l)} \left[ \tau^k \right] \leq k \ C + V(x)\;. \] \end{prop} \begin{proof}
Since $V \geq 1$, Proposition~\ref{prop:ComparaisonGal}(\ref{prop:CpG2})
applied with $a=0$, $\beta=\alpha=1$, $c=1$ and $\tau = \tau_\mathcal{D}$ implies \[ \mathbb{E}_{x,\theta}^{(l)}\left[ \tau_\mathcal{D} \right] \leq V(x) + \bar b \ \mathbb{E}_{x,\theta}^{(l)} \left[ \sum_{k=0}^{\tau_\mathcal{D}-1} \ensuremath{\mathbbm{1}}_\mathcal{C}(X_k)\right] \;. \] By A\ref{A2}, we have $P_\theta(x,\mathcal{D}) \geq [\varepsilon \nu(\mathcal{D})] \ \ensuremath{\mathbbm{1}}_\mathcal{C}(x)$ for any $(x,\theta)$ so that \[ \varepsilon \nu(\mathcal{D}) \ \mathbb{E}_{x,\theta}^{(l)} \left[ \sum_{k=0}^{\tau_\mathcal{D}-1}
\ensuremath{\mathbbm{1}}_\mathcal{C}(X_k)\right] \leq \mathbb{E}_{x,\theta}^{(l)} \left[
\sum_{k=0}^{\tau_\mathcal{D}-1} P_{\theta_k}(X_k,\mathcal{D})\right] = \mathbb{E}_{x,\theta}^{(l)} \left[ \sum_{k=0}^{\tau_\mathcal{D}-1} \ensuremath{\mathbbm{1}}_{\mathcal{D}}(X_{k+1}) \right] \leq 1 \;. \] Hence $\mathbb{E}_{x,\theta}^{(l)}\left[ \tau_\mathcal{D} \right] \leq V(x) + \bar b[\varepsilon \nu(\mathcal{D})]^{-1}$. By the Markov property and Proposition~\ref{prop:ComparaisonGal}(\ref{prop:CpG1}) \begin{multline*}
\mathbb{E}_{x,\theta}^{(l)}\left[ \tau^1 \right] \leq n_\star + V(x) + \bar b
[\varepsilon \nu(\mathcal{D})]^{-1} + \mathbb{E}_{x,\theta}^{(l)}\left[ \mathbb{E}_{Z_{n_\star +
\tau_\mathcal{D}}}^{(n_\star+l+\tau_\mathcal{D})}\left[ \sigma_\mathcal{D}
\right] \right] \\
\leq n_\star + 2 \; \bar b[\varepsilon \nu(\mathcal{D})]^{-1} + V(x) + \sup_\mathcal{D} V
+ n_\star \bar b\;. \end{multline*} The proof is by induction on $k$. Assume that $\mathbb{E}_{x,\theta}^{(l)}\left[
\tau^k \right] \leq k C + V(x)$ with $C \geq2 \bar b[\varepsilon \nu(\mathcal{D})]^{-1}+ \sup_\mathcal{D} V + (N \vee n_\star)(1+\bar b)$. Then using again the Markov property and Proposition~\ref{prop:ComparaisonGal}(\ref{prop:CpG1}), and upon noting that $\mathbb{P}_{x,\theta}^{(l)}(Z_{\tau^k} \in \mathcal{D}) =1$, \begin{multline*}
\mathbb{E}_{x,\theta}^{(l)}\left[ \tau^{k+1} \right] \leq N +
\mathbb{E}_{x,\theta}^{(l)}\left[ \tau^{k} \right]+ \mathbb{E}_{x,\theta}^{(l)}\left[
\mathbb{E}_{Z_{\tau^k+N}}^{(\tau^k+N+l)}\left[ \tau_\mathcal{D} \right] \right] \\
\leq N + \bar b[\varepsilon \nu(\mathcal{D})]^{-1} + \mathbb{E}_{x,\theta}^{(l)}\left[
\tau^{k}
\right]+\mathbb{E}_{x,\theta}^{(l)}\left[ V(X_{\tau^k+N}) \right] \\
\leq N + \bar b[\varepsilon \nu(\mathcal{D})]^{-1} + \mathbb{E}_{x,\theta}^{(l)}\left[
\tau^{k} \right]+\mathbb{E}_{x,\theta}^{(l)}\left[
\mathbb{E}_{Z_{\tau^k}}^{(\tau^k+l)}\left[
V(X_{N}) \right]\right] \\
\leq N + \bar b [\varepsilon \nu(\mathcal{D})]^{-1} + \mathbb{E}_{x,\theta}^{(l)}\left[
\tau^{k} \right]+ \left( \sup_\mathcal{D} V + N \bar b \right) \;. \end{multline*} \end{proof}
\subsubsection{Generalized Poisson equation} \label{sec:GeneralPoisson}
Assume (\ref{eq:A2-A5}). Let $0 < a <1$, $l\geq 0$ and $0\leq \beta \leq 1-\alpha$. For $f \in \mathcal{L}_{V^\beta}$ such that $\pi(|f|) < +\infty$, let us define the function \[ \hat g_{a}^{(l)}(x,\theta) \eqdef \sum_{j \geq 0} (1-a)^{j+1} \; \mathbb{E}_{x,\theta}^{(l)}[\bar f(X_j)] \;. \]
\begin{prop} \label{prop:QuasiPoissonEq} Assume (\ref{eq:A2-A5}). Let $0 \leq \beta \leq 1-\alpha$ and $f \in \mathcal{L}_{V^\beta}$. For any $(x,\theta) \in \mathsf{X} \times \Theta$, $l \geq 0$ and $0<a<1$, $\hat g_a^{(l)}$ exists, and \[ \bar f(x) = \frac{1}{1-a} \hat g_a^{(l)}(x,\theta) -\mathbb{E}_{x,\theta}^{(l)} \left[
\hat g_a^{(l+1)}\left(X_1, \theta_1 \right) \right] \;. \] \end{prop} \begin{proof}
By Proposition~\ref{prop:ComparaisonGal}(\ref{prop:CpG1}), $\left|
\mathbb{E}_{x,\theta}^{(l)} \left[ \bar f(X_j) \right]\right| \leq |\bar
f|_{V^\beta} \; \left( V^\beta(x) + \bar b j^\beta \right) $. Hence, $ \hat
g_a^{(l)}(x,\theta)$ exists for any $x,\theta,l$. Furthermore, $\hat
g_a^{(l+1)}\left(X_1, \theta_1 \right)$ is $\mathbb{P}_{x,\theta}^{(l)}$-integrable.
By definition of $\hat g_a^{(l)}$ and by the Markov property, \begin{multline*}
\mathbb{E}_{x,\theta}^{(l)} \left[ \hat g_a^{(l+1)}\left(X_1, \theta_1 \right)
\right] = \sum_{j \geq 0} (1-a)^{j+1} \mathbb{E}_{x,\theta}^{(l)} \left[ \bar
f(X_{j+1}) \right] = (1-a)^{-1}\; \sum_{j
\geq 1} (1-a)^{j+1} \mathbb{E}_{x,\theta}^{(l)} \left[ \bar f(X_{j}) \right] \\
= (1-a)^{-1}\; \left( \hat g_a^{(l)}(x,\theta) - (1-a) \bar f(x) \right). \end{multline*} \end{proof}
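As an illustrative remark (a sanity check rather than a new statement), in the non-adaptive case where $\theta_k = \theta$ for all $k$, the function $\hat g_a^{(l)}$ does not depend on $l$ and reduces to the resolvent $\hat g_a(x,\theta) = \sum_{j \geq 0} (1-a)^{j+1} P_\theta^j \bar f(x)$; Proposition~\ref{prop:QuasiPoissonEq} then reads
\[
\bar f = \frac{1}{1-a} \hat g_a(\cdot,\theta) - P_\theta \hat g_a(\cdot,\theta) \;,
\]
a perturbed version of the Poisson equation $\bar f = \hat g - P_\theta \hat g$, which is recovered formally in the limit $a \to 0$.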
\begin{theo} \label{theo:controleG} Assume A\ref{A2}-\ref{A6} and B\ref{B2}. Let $0 \leq \beta <1-\alpha$. For any $\epsilon>0$, there exists an integer $n \geq 2$ such that for any $0<a<1$, $f \in \mathcal{L}_{V^\beta}$, $l\geq 0$, $(x,\theta) \in \mathsf{X} \times \Theta$ and $q \in[1, +\infty]$, \begin{multline*}
\left( |\bar f|_{V^\beta} \right)^{-1} \; \left| \hat g_{a}^{(l)}(x,\theta)
\right| \leq 4 \; \epsilon \; \left(1-(1-a)^n \right)^{-1} \; n \\
+ \frac{V^{\beta+\alpha/q}(x)}{a^{1-1/q}(1-a)^{1/q}} (\alpha c)^{-1/q}\;
\left( 1+ \bar b [\varepsilon \nu(\mathcal{D})]^{-1} + 2 \; (1+\bar b n_\star)
(1+\bar b) \ \sup_\mathcal{D} V^{\beta +\alpha/q} \right) \;. \end{multline*} By convention, $1/q =0$ when $q = +\infty$. In particular, $\lim_{a \to 0}
\left( |\bar f|_{V^\beta} \right)^{-1} \; \left| a\hat g_{a}^{(l)}(x,\theta)
\right| = 0$. \end{theo}
\begin{rem}\label{remtheoG} Before delving into the proof of the theorem, we first make two important remarks. Firstly, a simplified restatement of Theorem \ref{theo:controleG} is the following. There exists a finite constant $c_0$ such that for any $0<a\leq 1/2$, $f \in \mathcal{L}_{V^\beta}$, $l\geq 0$, $(x,\theta) \in \mathsf{X} \times \Theta$ and $q \in[1, +\infty]$, \begin{equation} \label{eq:MajoRem}
\left| \hat g_{a}^{(l)}(x,\theta)\right|\leq c_0 |\bar f|_{V^\beta} \ a^{-1} \left(1+a^{1/q} V^{\beta+\alpha/q}(x)\right).\end{equation} This follows by taking $\epsilon=1$, say, and upon noting that $n\left(1-(1-a)^n\right)^{-1}\leq 2^{n-1}/a$ (indeed $1-(1-a)^n = a \sum_{k=0}^{n-1}(1-a)^k \geq n a (1-a)^{n-1} \geq n a \, 2^{1-n}$ for $a \leq 1/2$). The second point is that if we take $a_1,a_2\in (0,1)$ we can write
\begin{multline*}
\hat g_{a_1}^{(l)}(x,\theta)-\hat g_{a_2}^{(l)}(x,\theta)=\frac{a_2-a_1}{(1-a_1)(1-a_2)} \\
\times \sum_{k\geq 0}(1-a_1)^{k+1}\mathbb{E}_{x,\theta}^{(l)}\left[ \hat g_{a_2}^{(l+k)}(X_k, \theta_k)\right] \;.
\end{multline*}
By (\ref{eq:MajoRem}) and Proposition \ref{prop:ComparaisonGal}(\ref{prop:CpG3}), it holds that
\begin{equation}\label{bounddiffGa}
\left|\hat g_{a_1}^{(l)}(x,\theta)-\hat g_{a_2}^{(l)}(x,\theta)\right|\leq c_1 \ |\bar f|_{V^\beta} \ |a_2-a_1|a_2^{-1}a_1^{-2+1/q}V^{\beta+\alpha/q}(x),\end{equation}
for some finite constant $c_1$, for all $0<a_1,a_2\leq 1/2$, $f \in \mathcal{L}_{V^\beta}$, $l\geq 0$, $(x,\theta) \in \mathsf{X} \times \Theta$ and $q \in[1, +\infty]$. \end{rem}
\begin{proof}
Let $\epsilon>0$. Let us consider the sequence of stopping times $\{\tau^k,
k \geq 0\}$ defined in Section~\ref{sec:DelayedSuccVisit} where $(\mathcal{D}, N,
n_\star)$ are defined below.
\paragraph{\tt Choice of $\mathcal{D}, N, n_\star$.} Choose a level set $\mathcal{D}$ of $V$ large enough so that $\nu(\mathcal{D})>0$. Choose $N$ such that \begin{equation}
\label{eq:Controle3}
\frac{1}{N} \; \sum_{j=0}^{N-1} \sup_{\mathcal{D} \times \Theta} \; \| P_\theta^j(x,\cdot) -
\pi(\cdot) \|_{V^\beta} \leq \epsilon \;, \end{equation} the existence of which is given by A\ref{A6}; and, since $\alpha + \beta <1$, such that
\begin{equation}
\label{eq:Controle4} (\alpha c)^{-1} \ N^{-1} \left( \sup_\mathcal{D} V^{\beta +\alpha} + \bar b N^{\beta +\alpha} + \bar b [\varepsilon \nu(\mathcal{D})]^{-1} \right) \leq \epsilon \;.
\end{equation}
Set $\epsilon_N \eqdef N^{-2} \{ \epsilon \; \left( \sup_\mathcal{D} V^\beta + \bar
b N^{-1} \sum_{j=1}^{N-1} j^\beta \right)^{-1} \}^{1/(1-\beta)}$ (which can
be assumed to be strictly lower than $N^{-2}$ since $\beta>0$). By
B\ref{B2}, choose $n_\star$ such that for any $q \geq n_\star$, $l\geq 0$, $
\sup_{\mathcal{D} \times \Theta} \mathbb{P}_{x,\theta}^{(l)}(D(\theta_q,\theta_{q-1}) \geq
\epsilon_N/2) \leq \epsilon_N/4$.
By Proposition~\ref{prop:TimeFiniteAS}, $\mathbb{P}_{x,\theta}^{(l)}(\tau^k <
+\infty) =1$ for any $(x,\theta) \in \mathsf{X} \times \Theta$, $l \geq 0$, $k\geq
0$.
\paragraph{\tt Optimal coupling.} With these definitions, $ \sup_{i \geq 1} \sup_{k \geq 1} \mathbb{E}_{x,\theta}^{(l)} \left[ \mathbb{E}_{Z_{\tau^k}}^{(\tau^k+l)} \left[ D(\theta_i,\theta_{i-1}) \right] \right]\leq \epsilon_N$, upon noting that $\mathbb{P}_{x,\theta}^{(l)}( n_\star \leq \tau^k) =1$ and $D(\theta,\theta') \leq 2$. We apply Proposition~\ref{prop:ContructionCouplingOpt} and set $\mathcal{E}_N \eqdef \{X_k = \tilde X_k, 0 \leq k<N \}$. We have for any $l \geq 0$, $k \geq 1$, $(x,\theta) \in \mathsf{X} \times \Theta$,
\begin{equation}
\label{eq:CouplingProbability2}
\mathbb{E}_{x,\theta}^{(l)}\left[ \overline{\mathbb{P}}_{Z_{\tau^k}, Z_{\tau^k}}^{(\tau^k+l)} \left( \mathcal{E}^c_N \right) \right] \leq \sum_{j=1}^{N-1} \sum_{i=1}^j \mathbb{E}_{x,\theta}^{(l)}\left[ \mathbb{E}_{Z_{\tau^k}}^{(\tau^k+l)} \left[ D(\theta_i,\theta_{i-1}) \right] \right] \leq N^2 \epsilon_N < 1 \;.
\end{equation}
Observe that $\mathcal{D},N$ and $n_\star$ do not depend upon $a,l,x,\theta$ and
$f$.
\paragraph{\tt Proof of Theorem~\ref{theo:controleG}.} Assume that for any $0<a<1$, $l\geq 0$, $(x,\theta) \in \mathsf{X} \times \Theta$ and $k \geq 1$, \begin{equation}
\label{eq:ResultPropAdaptive2}
\left| \mathbb{E}_{x,\theta}^{(l)} \left[ \sum_{j=0}^{N-1} (1-a)^{\tau^k+ j+1} \; \bar f
\left( X_{\tau^{k}+j} \right)\right] \right|\leq |\bar f|_{V^\beta} \; 3 N \epsilon \; (1-a)^{n_\star+(k-1)N} \;. \end{equation} We have \[ \hat g_{a}^{(l)}(x,\theta) = \sum_{j \geq 0} (1-a)^{j+1} \left\{
\mathbb{E}_{x,\theta}^{(l)}\left[\bar f(X_j) \ensuremath{\mathbbm{1}}_{j < \tau^1}\right] + \sum_{k \geq
1} \mathbb{E}_{x,\theta}^{(l)}\left[\bar f(X_j) \ensuremath{\mathbbm{1}}_{\tau^k \leq j <
\tau^{k+1}}\right] \right\} \;. \] On one hand, by Proposition~\ref{prop:ComparaisonGal}(\ref{prop:CpG3}) applied with $\tau = \tau_\mathcal{D}$ and Proposition~\ref{prop:TimeFiniteAS}, \begin{multline*}
\left| \sum_{j \geq 0} (1-a)^{j+1} \mathbb{E}_{x,\theta}^{(l)}\left[\bar f(X_j)
\ensuremath{\mathbbm{1}}_{j < \tau^0}\right] \right| = \left|
\mathbb{E}_{x,\theta}^{(l)}\left[\sum_{j = 0}^{\tau_\mathcal{D}-1} (1-a)^{j+1}
\bar f(X_j) \right] \right| \\
\leq |\bar f|_{V^\beta} \; \mathbb{E}_{x,\theta}^{(l)}\left[\sum_{j =
0}^{\tau_\mathcal{D}-1} (1-a)^{j+1} V^\beta(X_j) \right] \leq |\bar
f|_{V^\beta} \; \frac{V^{\beta+\alpha/q}(x)}{a^{1-1/q}} \frac{\left( 1 + \bar
b [\varepsilon \nu(\mathcal{D})]^{-1}\right)}{(1-a)^{1/q}} (\alpha
c)^{-1/q}\;. \end{multline*} Applied with $\tau = \tau_\mathcal{D}$, Propositions~\ref{prop:ComparaisonGal}(\ref{prop:CpG1}), \ref{prop:ComparaisonGal}(\ref{prop:CpG3}) and \ref{prop:TimeFiniteAS} yield \begin{multline*}
|\bar f|_{V^\beta}^{-1} \; \left| \sum_{j \geq 0} (1-a)^{j+1}
\mathbb{E}_{x,\theta}^{(l)}\left[\bar f(X_j) \ensuremath{\mathbbm{1}}_{ \tau^0 \leq j < \tau^1}\right]
\right| = |\bar f|_{V^\beta}^{-1} \; \left| \mathbb{E}_{x,\theta}^{(l)}\left[
\sum_{j = \tau_\mathcal{D} }^{\tau_\mathcal{D} +n_\star +\tau_\mathcal{D} \circ
\underline{\theta}^{n_\star + \tau_\mathcal{D}} -1} (1-a)^{j+1} \bar f(X_j)
\right]\right|
\\
\leq \mathbb{E}_{x,\theta}^{(l)}\left[ \mathbb{E}_{Z_{\tau_\mathcal{D}}}^{(\tau_\mathcal{D}+l)}\left[
\sum_{j
=0}^{n_\star+\tau_\mathcal{D} \circ \underline{\theta}^{n_\star}-1} (1-a)^{j+1} V^\beta(X_j) \right]\right] \\
\leq \mathbb{E}_{x,\theta}^{(l)}\left[
\mathbb{E}_{Z_{\tau_\mathcal{D}}}^{(\tau_\mathcal{D}+l)}\left[\sum_{j =0}^{n_\star-1}
(1-a)^{j+1} V^\beta(X_j) \right]\right] + \mathbb{E}_{x,\theta}^{(l)}\left[
\mathbb{E}_{Z_{\tau_\mathcal{D}+n_\star}}^{(\tau_\mathcal{D}+n_\star+l)}\left[\sum_{j
=0}^{\tau_\mathcal{D}-1} (1-a)^{j+1} V^\beta(X_j) \right]\right] \\
\leq 2 \; \frac{(1+\bar b n_\star) (1+\bar b)}{a^{1-1/q} (1-a)^{1/q}} (\alpha
c)^{-1/q} \ \sup_\mathcal{D} V^{\beta +\alpha/q} \;. \end{multline*} For $k \geq 1$, \begin{multline*}
\left|\sum_{j \geq 0} (1-a)^{j+1} \; \mathbb{E}_{x,\theta}^{(l)}\left[\bar f(X_j)
\ensuremath{\mathbbm{1}}_{\tau^k \leq j < \tau^{k+1}}\right] \right| \leq \left|
\mathbb{E}_{x,\theta}^{(l)}\left[\sum_{j = \tau^k}^{\tau^k + N-1} (1-a)^{j+1} \;
\bar
f(X_j)\right] \right| \\
+ \mathbb{E}_{x,\theta}^{(l)}\left[ (1-a)^{\tau^k+N} \;
\mathbb{E}_{Z_{\tau^k+N}}^{(\tau^k+N+l)}\left[ \sum_{j=0}^{\tau_\mathcal{D} -1}
(1-a)^{j+1} \; \left|\bar f\right|(X_j) \right]\right]\;. \end{multline*} By Proposition~\ref{prop:ComparaisonGal}(\ref{prop:CpG1}) and (\ref{prop:CpG2}) applied with $\tau = \tau_\mathcal{D}$, Proposition~\ref{prop:TimeFiniteAS} and Eq. (\ref{eq:ResultPropAdaptive2}), and upon noting that $\tau^k \geq n_\star + (k-1) N$ $\mathbb{P}_{(x,\theta)}^{(l)}$-\text{a.s.} , \begin{multline*}
\left| \sum_{j \geq 0} (1-a)^{j+1} \; \mathbb{E}_{x,\theta}^{(l)}\left[\bar f(X_j)
\ensuremath{\mathbbm{1}}_{\tau^k
\leq j < \tau^{k+1}}\right]\right| \\
\leq |\bar f|_{V^\beta} \; \mathbb{E}_{x,\theta}^{(l)}\left[ (1-a)^{n_\star+(k-1)N}
\; \left(
3 N \epsilon + (1-a)^{N} \{ V^{\beta +\alpha}(X_{\tau^k+N}) + \bar b [\varepsilon \nu(\mathcal{D})]^{-1} \} (\alpha c)^{-1}\right) \right] \\
\leq |\bar f|_{V^\beta} \; (1-a)^{n_\star+(k-1)N} \; \left( 3 N \epsilon +
(\alpha c)^{-1} \ \sup_{r,\mathcal{D} \times \Theta} \mathbb{E}_{x,\theta}^{(r)}\left[
V^{\beta
+\alpha}(X_{N}) + \bar b [\varepsilon \nu(\mathcal{D})]^{-1} \right]\right) \\
\leq |\bar f|_{V^\beta} \; (1-a)^{n_\star+(k-1)N} \; \left( 3 N \epsilon +
(\alpha c)^{-1} \left( \sup_{\mathcal{D}} \ V^{\beta +\alpha} + \bar b N^{\beta +\alpha}+ \bar b [\varepsilon \nu(\mathcal{D})]^{-1} \right)\right) \\
\leq 4 \; \epsilon \; |\bar f|_{V^\beta} \; (1-a)^{(k-1)N} \; N \;, \end{multline*} where we used the definition of $N$ (see Eq.~(\ref{eq:Controle4})) and Proposition~\ref{prop:ComparaisonGal}(\ref{prop:CpG1}). This yields the desired result.
\paragraph{\tt Proof of Eq.(\ref{eq:ResultPropAdaptive2})} By the strong Markov property and since $\tau^k \geq n_\star + N(k-1)$ $\mathbb{P}_{x,\theta}^{(l)}$-\text{a.s.} \begin{multline*}
\left| \mathbb{E}_{x,\theta}^{(l)} \left[ \sum_{j=0}^{N-1} (1-a)^{\tau^k+ j+1} \;
\bar f \left( X_{\tau^{k}+j} \right)\right] \right| \leq (1-a)^{n_\star+
N(k-1)} \mathbb{E}_{x,\theta}^{(l)} \left[ \left|\mathbb{E}_{Z_{\tau^{k}}}^{(\tau^{k}+l)}
\left[ \sum_{j=0}^{N-1} (1-a)^{j+1} \; \bar f(X_j) \right]\right|\right]. \end{multline*} Furthermore, by Proposition~\ref{prop:ContructionCouplingOpt}, \begin{multline*}
\mathbb{E}_{Z_{\tau^{k}}}^{(\tau^{k}+l)} \left[ \sum_{j=0}^{N-1} (1-a)^{j+1} \; \bar
f(X_j) \right] = \overline{\mathbb{E}}_{Z_{\tau^k}, Z_{\tau^{k}}}^{(\tau^{k}+l)} \left[
\sum_{j=0}^{N-1} (1-a)^{j+1} \; \bar f(X_j) \right]
\\
= \overline{\mathbb{E}}_{Z_{\tau^{k}}, Z_{\tau^{k}}}^{(\tau^{k}+l)} \left[ \sum_{j=0}^{N-1}
(1-a)^{j+1} \; \bar f(\tilde X_j) \right] \ + \overline{\mathbb{E}}_{Z_{\tau^{k}},
Z_{\tau^{k}}}^{(\tau^{k}+l)} \left[ \sum_{j=0}^{N-1} (1-a)^{j+1} \; \{ \bar
f( X_j) - \bar f(\tilde X_j) \} \ensuremath{\mathbbm{1}}_{\mathcal{E}^c_N}\right]. \end{multline*} On one hand, we have $ \mathbb{P}_{x,\theta}^{(l)}-\text{a.s.} $, \begin{multline*}
\left|\overline{\mathbb{E}}_{Z_{\tau^{k}}, Z_{\tau^{k}}}^{(\tau^{k}+l)} \left[
\sum_{j=0}^{N-1} (1-a)^{j+1} \; \bar f(\tilde X_j) \right] \right| \leq
|\bar f|_{V^\beta} \; \sum_{j=0}^{N-1} (1-a)^{j+1} \; \sup_{\mathcal{D} \times \Theta}
\; \| P_{\theta}^j(x,\cdot) -\pi(\cdot) \|_{V^\beta} \leq |\bar f|_{V^\beta}
\; N \epsilon \end{multline*} by (\ref{eq:Controle3}). On the other hand, $ \mathbb{P}_{x,\theta}^{(l)}-\text{a.s.} $, \begin{multline*}
\left| \overline{\mathbb{E}}_{Z_{\tau^{k}}, Z_{\tau^{k}}}^{(\tau^{k}+l)} \left[
\sum_{j=0}^{N-1} (1-a)^{j+1} \; \{ \bar f( X_j) - \bar f(\tilde X_j) \}
\ensuremath{\mathbbm{1}}_{\mathcal{E}^c_N}\right] \right| \\
\leq |\bar f|_{V^\beta} \; \overline{\mathbb{E}}_{Z_{\tau^{k}},Z_{\tau^{k}}}^{(\tau^{k}+l)}
\left[ \sum_{j=0}^{N-1} (1-a)^{j+1} \; \{ V^\beta( X_j) + V^\beta(\tilde X_j)
\}
\ensuremath{\mathbbm{1}}_{\mathcal{E}^c_N}\right] \\
\leq |\bar f|_{V^\beta} \; \overline{\mathbb{E}}_{Z_{\tau^{k}}, Z_{\tau^{k}}}^{(\tau^{k}+l)}
\left[ \left( \sum_{j=0}^{N-1} (1-a)^{j+1} \; \left\{ V^\beta ( X_j) +
V^\beta(\tilde X_j) \right\} \right)^{\beta^{-1}}\right]^{\beta} \left(
\overline{\mathbb{P}}_{Z_{\tau^{k}}, Z_{\tau^{k}}}^{(\tau^{k}+l)} \left(\mathcal{E}^c_N \right)
\right)^{1-\beta} \end{multline*} by Jensen's inequality ($\beta <1$). By the Minkowski inequality, by Proposition~\ref{prop:ComparaisonGal}(\ref{prop:CpG1}), and by iterating the drift inequality A\ref{A5}, \begin{multline*}
\overline{\mathbb{E}}_{Z_{\tau^{k}}, Z_{\tau^{k}}}^{(\tau^{k}+l)} \left[ \left(
\sum_{j=0}^{N-1} (1-a)^{j+1} \; \left\{ V^\beta ( X_j) + V^\beta(\tilde
X_j) \right\}
\right)^{\beta^{-1}}\right]^{\beta} \\
\leq \sum_{j=0}^{N-1} (1-a)^{j+1} \; \left\{ \overline{\mathbb{E}}_{Z_{\tau^{k}},
Z_{\tau^{k}}}^{(\tau^{k}+l)} \left[ V ( X_j) \right]^\beta +
\overline{\mathbb{E}}_{Z_{\tau^{k}}, Z_{\tau^{k}}}^{(\tau^{k}+l)} \left[ V ( \tilde X_j)
\right]^\beta \right\}
\\
\leq \sum_{j=0}^{N-1} (1-a)^{j+1} \; \left\{ \sup_{l, \mathcal{D} \times \Theta}
\left(\mathbb{E}_{x,\theta}^{(l)} \left[ V(X_j)\right]\right)^\beta + \left(
\sup_{\mathcal{D} \times \Theta} P^j_{\theta} V(x)\right)^\beta \right\}
\\
\leq 2 \; \sum_{j=0}^{N-1} (1-a)^{j+1} \left( \sup_\mathcal{D} V +j \bar b
\right)^\beta \leq 2 N \left( \sup_\mathcal{D} V^\beta + \bar b N^{-1}
\sum_{j=1}^{N-1} j^\beta\right) \;. \end{multline*} Finally, \[ \mathbb{E}_{x,\theta}^{(l)} \left[ \left( \overline{\mathbb{P}}_{Z_{\tau^k},
Z_{\tau^k}}^{(\tau^k+l)}(\mathcal{E}^c_N) \right)^{1-\beta} \right] \leq \left(\mathbb{E}_{x,\theta}^{(l)} \left[ \overline{\mathbb{P}}_{Z_{\tau^k},
Z_{\tau^k}}^{({\tau^k}+l)}(\mathcal{E}^c_N) \right]\right)^{1-\beta} \leq \left( N^2 \epsilon_N \right)^{1-\beta} \] where we used (\ref{eq:CouplingProbability2}) in the last inequality. To conclude the proof, use the definition of $\epsilon_N$.
\end{proof}
\subsection{Proof of Theorem~\ref{theo:MarginalUnifCase}} Let $\epsilon >0$. We prove that there exists $n_\epsilon$ such that for any
$n\geq n_\epsilon$, $\sup_{\{f, |f|_1 \leq 1\}} \left|\mathbb{E}_{\xi_1,\xi_2}\left[
\bar f(X_n) \right] \right| \leq \epsilon$.
\subsubsection{Definition of $\mathcal{D}$, $N$, $Q$ and $n_\star$} By A\ref{A-VCset}(\ref{Anew}), choose $Q$ such that \begin{equation}
\label{eq:DefinitionL} \sup_l \sup_{(x,\theta) \in \mathcal{C} \times \Theta} \mathbb{E}_{x,\theta}^{(l)} \left[ \mathbf{r}(\tau_\mathcal{C}) \right] \ \sum_{k \geq Q} \frac{1}{\mathbf{r}(k)} \leq \epsilon \;. \end{equation} By A\ref{A-VCset}(\ref{A4rev}), choose $N$ such that \begin{equation}
\label{eq:DefinitionN}
\sup_{(x, \theta) \in \mathcal{C} \times \Theta} V^{-1}(x) \ \| P_\theta^N(x,\cdot) - \pi(\cdot) \|_{\mathrm{TV}} \leq \frac{\epsilon}{Q} \;. \end{equation} By B\ref{B1}, choose $n_\star$ such that for any $n \geq n_\star$, \begin{equation}
\label{eq:DefiNstar}
\mathbb{P}_{\xi_1,\xi_2} \left( D(\theta_n, \theta_{n-1}) \geq \epsilon /(2 (N+Q-1)^2 Q)\right) \leq \frac{\epsilon}{4(N+Q-1)^2 Q } \;. \end{equation}
\subsubsection{Optimal coupling} We apply Proposition~\ref{prop:ContructionCouplingOpt} with $l =0$ and $N \leftarrow N+Q$. Set $\mathcal{E}_{N +Q} \eqdef \{X_k = \tilde X_k, 0 \leq k \leq N +Q \}$. It holds for any $ r \geq n_\star$, \begin{multline} \label{eq:CouplingProb} \mathbb{E}_{\xi_1,\xi_2} \left[ \ensuremath{\mathbbm{1}}_{X_r \in \mathcal{C}} \; \overline{\mathbb{P}}_{Z_{r}, Z_r}^{(r)} \left(
\mathcal{E}_{N+Q}^c \right) \right] \leq \sum_{j=1}^{N+Q-1} \sum_{i=1}^j \mathbb{E}_{\xi_1,\xi_2}\left[\ensuremath{\mathbbm{1}}_{X_r \in \mathcal{C}} \; \mathbb{E}_{Z_r}^{(r)}\left[D(\theta_i,
\theta_{i-1}) \right] \right] \\ \leq \sum_{j=1}^{N+Q-1} \sum_{i=1}^j \mathbb{E}_{\xi_1,\xi_2}\left[ D(\theta_{i+r},
\theta_{i+r-1}) \right] \leq \epsilon Q^{-1} \;, \end{multline} where in the last inequality, we use that $D(\theta,\theta') \leq 2$ and the definition of $n_\star$ (see Eq.~(\ref{eq:DefiNstar})).
\subsubsection{Proof} Let $n \geq N+Q+n_\star$. We consider the partition given by the last exit from the set $\mathcal{C}$ before time $n-N$. We use $\{X_{n:m} \notin \mathcal{C} \}$ as shorthand for $\bigcap_{k=n}^m \{X_k \notin \mathcal{C} \}$, with the convention that $\{X_{m+1:m} \notin \mathcal{C} \} = \Omega$. We write \begin{multline*}
\mathbb{E}_{\xi_1,\xi_2} \left[ \bar f(X_n) \right] = \mathbb{E}_{\xi_1,\xi_2} \left[ \bar
f(X_n) \ensuremath{\mathbbm{1}}_{X_{0:n-N} \notin \mathcal{C}} \right] + \sum_{k=0}^{n-N}
\mathbb{E}_{\xi_1,\xi_2} \left[ \bar f(X_n) \ensuremath{\mathbbm{1}}_{X_k \in \mathcal{C} } \ \ensuremath{\mathbbm{1}}_{X_{k+1:n-N}
\notin \mathcal{C}} \right] \;. \end{multline*}
Since $\bar f$ is bounded on $\mathsf{X}$ by $|\bar f|_1$, we have \[ \mathbb{E}_{\xi_1,\xi_2} \left[ \bar f(X_n) \ensuremath{\mathbbm{1}}_{X_{0:n-N} \notin \mathcal{C}} \right] \leq
|\bar f|_1 \ \mathbb{P}_{\xi_1, \xi_2} \left( \tau_\mathcal{C} \geq n-N\right) \leq |\bar f|_1 \ \mathbb{E}_{\xi_1, \xi_2} \left[ \frac{\tau_\mathcal{C} }{n-N} \wedge 1 \right] \;. \]
The rhs is upper bounded by $|\bar f|_1 \ \epsilon$ for $n$ large enough. By definition of $Q$ in (\ref{eq:DefinitionL}), \begin{multline} \label{eq:weaken}
\sum_{k=0}^{n-(N+Q)} \mathbb{E}_{\xi_1,\xi_2} \left[ \bar f(X_n) \ensuremath{\mathbbm{1}}_{X_k \in \mathcal{C}
} \ \ensuremath{\mathbbm{1}}_{X_{k+1:n-N} \notin \mathcal{C}} \right] \leq |\bar f|_1 \
\sum_{k=0}^{n-(N+Q)} \mathbb{E}_{\xi_1,\xi_2} \left[ \ensuremath{\mathbbm{1}}_{X_k \in \mathcal{C} }
\mathbb{P}_{X_k,\theta_k}^{(k)} \left( \tau_\mathcal{C} \geq
n-N-k\right) \ \right] \\
\leq |\bar f|_1 \ \sup_l \sup_{\mathcal{C} \times \Theta} \mathbb{E}_{x,\theta}^{(l)} \left[
\mathbf{r}(\tau_\mathcal{C}) \right] \sum_{k \geq Q} \frac{1}{\mathbf{r}(k)} \leq |\bar
f|_1 \ \epsilon \;. \end{multline} Let $k \in \{n-(N+Q)+1, \cdots, n-N \}$. By definition of $N$ and $n_\star$ (see Eqs.~(\ref{eq:DefinitionN}) and (\ref{eq:DefiNstar})), upon noting that $k \geq n-(N+Q) \geq n_\star$, \begin{multline*}
\mathbb{E}_{\xi_1,\xi_2} \left[ \bar f(X_n) \ensuremath{\mathbbm{1}}_{X_k \in \mathcal{C} } \ \ensuremath{\mathbbm{1}}_{X_{k+1:n-N}
\notin \mathcal{C}} \right] - |\bar f|_1 \ \mathbb{E}_{\xi_1,\xi_2} \left[ \ensuremath{\mathbbm{1}}_{X_k
\in \mathcal{C}} \ \overline{\mathbb{P}}_{Z_k,Z_k}^{(k)} \left(
\mathcal{E}_{N+Q}^c\right) \right] \\
\leq \mathbb{E}_{\xi_1,\xi_2} \left[ \ensuremath{\mathbbm{1}}_{X_k \in \mathcal{C}} \ \overline{\mathbb{E}}_{Z_k,Z_k}^{(k)}
\left[ \bar f(X_{n-k}) \ensuremath{\mathbbm{1}}_{X_{1:n-N-k} \notin \mathcal{C}}
\ensuremath{\mathbbm{1}}_{\mathcal{E}_{N+Q}} \right] \right] \\
\leq \mathbb{E}_{\xi_1,\xi_2} \left[ \ensuremath{\mathbbm{1}}_{X_k \in \mathcal{C}} \ \overline{\mathbb{E}}_{Z_k,Z_k}^{(k)}
\left[ \bar f(\tilde X_{n-k}) \ensuremath{\mathbbm{1}}_{\tilde X_{1:n-N-k} \notin \mathcal{C}}
\ensuremath{\mathbbm{1}}_{\mathcal{E}_{N+Q}} \right] \right] \\
\leq \mathbb{E}_{\xi_1,\xi_2} \left[ \ensuremath{\mathbbm{1}}_{X_k \in \mathcal{C}} \ \overline{\mathbb{E}}_{Z_k,Z_k}^{(k)}
\left[ \bar f(\tilde X_{n-k}) \ensuremath{\mathbbm{1}}_{\tilde X_{1:n-N-k} \notin \mathcal{C}} \right]
\right] + |\bar f|_1 \ \mathbb{E}_{\xi_1,\xi_2} \left[ \ensuremath{\mathbbm{1}}_{X_k \in \mathcal{C}} \
\overline{\mathbb{P}}_{Z_k,Z_k}^{(k)} \left(\mathcal{E}_{N+Q}^c \right) \right] \\
\leq \mathbb{E}_{\xi_1,\xi_2} \left[ \ensuremath{\mathbbm{1}}_{X_k \in \mathcal{C}} \ \overline{\mathbb{E}}_{Z_k,Z_k}^{(k)}
\left[ \ensuremath{\mathbbm{1}}_{\tilde X_{1:n-N-k} \notin \mathcal{C}} P_{\theta_k}^{N}\bar f(\tilde
X_{n-N-k}) \right] \right] + |\bar f|_1 \ \epsilon Q^{-1} \\
\leq |\bar f|_1 \ \epsilon Q^{-1} \mathbb{E}_{\xi_1,\xi_2} \left[ \ensuremath{\mathbbm{1}}_{X_k \in
\mathcal{C}} \ \overline{\mathbb{E}}_{Z_k,Z_k}^{(k)}\left[ \ensuremath{\mathbbm{1}}_{\tilde X_{1:n-N-k} \notin \mathcal{C}}
V(\tilde X_{n-N-k}) \right] \right] + |\bar f|_1 \ \epsilon Q^{-1}
\\
\leq |\bar f|_1 \ \epsilon Q^{-1} \left\{ \sup_{(x,\theta) \in \mathcal{C} \times
\Theta} P_\theta V(x) + \sup_\mathcal{C} V \right\} + |\bar f|_1 \ \epsilon
Q^{-1} \;, \end{multline*} where we used A\ref{A-VCset}(\ref{A3rev}) in the last inequality. Hence, \[ \sum_{k=n-(N+Q)+1}^{n-N} \mathbb{E}_{\xi_1,\xi_2} \left[ \bar f(X_n) \ensuremath{\mathbbm{1}}_{X_k \in
\mathcal{C} } \ \ensuremath{\mathbbm{1}}_{X_{k+1:n-N} \notin \mathcal{C}} \right] \leq \left(1 +
\sup_{(x,\theta) \in \mathcal{C} \times \Theta} P_\theta V(x) + \sup_\mathcal{C} V \right) \epsilon \
|\bar f|_1 \;. \] This concludes the proof.
\begin{rem}
\label{rem:YanBai}
In the case where the process is non-adaptive, we can assume w.l.o.g. that it possesses an atom $\alpha$; in that case, the lines (\ref{eq:weaken}) can be modified so that the assumption $\sum_n \{1/\mathbf{r}(n) \}<+\infty$ can be removed. In the case of an atomic chain, we can indeed apply the above computations with $\mathcal{C}$ replaced by $\alpha$ and write:
\begin{multline*}
\sum_{k=0}^{n-(N+Q)} \mathbb{E}_{\xi_1} \left[ \bar f(X_n) \ensuremath{\mathbbm{1}}_{X_k \in \alpha } \
\ensuremath{\mathbbm{1}}_{X_{k+1:n-N} \notin \alpha} \right] \leq |\bar f|_1 \
\sum_{k=0}^{n-(N+Q)} \mathbb{P}_{\alpha} \left( \tau_\alpha \geq n-N-k\right) \\
\leq |\bar f|_1 \ \sum_{k \geq Q} \mathbb{P}_{\alpha} \left( \tau_\alpha \geq
k\right) \;. \end{multline*} The rhs is small for a suitable choice of $Q$, provided $\mathbb{E}_\alpha[\mathbf{r}(\tau_\alpha)]<+\infty$ with $\mathbf{r}(n) =n$. Unfortunately, the adaptive chain $\{(X_n, \theta_n), n\geq 0\}$ does not possess an atom, which explains the condition on $\mathbf{r}$. \end{rem}
\subsection{Proof of Corollary~\ref{coro:MarginalUnifCase}} The condition A\ref{A-VCset}(\ref{A4rev}) is established in Appendix~\ref{app:UniformControl}. Let $\mathcal{D}$ be a level set of $V$ large enough that $\nu(\mathcal{D}) >0$; then Proposition~\ref{prop:TimeFiniteAS} implies that there exists a constant $c < \infty$ such that for any $l \geq 0$, $\mathbb{E}_{x,\theta}^{(l)}\left[ \tau_\mathcal{D} \right] \leq c V(x)$. This implies that for $0< \eta \leq 1- \alpha$, \begin{multline*}
\mathbb{E}_{x,\theta}^{(l)}\left[ \sum_{k=0}^{\tau_\mathcal{D}} (k+1)^{\eta} \right] \leq
\mathbb{E}_{x,\theta}^{(l)}\left[ \sum_{k=0}^{\tau_\mathcal{D}} \left(\mathbb{E}_{X_k, \theta_k}^{(k+l)}
\left[ \tau_\mathcal{D} \right] \right)^{\eta} \right] \leq c^\eta \
\mathbb{E}_{x,\theta}^{(l)}\left[ \sum_{k=0}^{\tau_\mathcal{D}} V^{1-\alpha}(X_k) \right] \\
\leq C \ \left( V(x) + b \ \mathbb{E}_{x,\theta}^{(l)}\left[ \tau_\mathcal{D} \right] \right)
\leq C' \ V(x) \;, \end{multline*} for some finite constants $C,C'$ independent of $\theta$. Hence A\ref{A-VCset}(\ref{Anew}) holds with $\mathbf{r}(n) \sim n^{1+\eta}$. Finally, $P_\theta V \leq V - c V^{1-\alpha} +b \ensuremath{\mathbbm{1}}_\mathcal{C}$ implies $P_\theta V \leq V - c \gamma V^{1-\alpha} + b \ensuremath{\mathbbm{1}}_\mathcal{D}$ for any $\gamma \in (0,1)$ and the level set $\mathcal{D} \eqdef \{x, V^{1-\alpha} \leq b [c(1-\gamma)]^{-1} \}$, since $b \ensuremath{\mathbbm{1}}_\mathcal{C} \leq c(1-\gamma) V^{1-\alpha} + b \ensuremath{\mathbbm{1}}_\mathcal{D}$. This yields A\ref{A-VCset}(\ref{A3rev}).
\subsection{Proof of Proposition~\ref{prop:YanBai}} Under A\ref{Adrift}, there exists a constant $C$, which does not depend upon $\theta$, such that for any $(x,\theta) \in \mathsf{X} \times \Theta$, $n\geq 0$ and $\kappa \in [1, \alpha^{-1}]$, \[
\ \| P^n_\theta(x,\cdot) - \pi(\theta) \|_\mathrm{TV} \leq C \frac{
V^{\kappa \alpha}(x)}{(n+1)^{\kappa-1}} \;; \] (see Appendix~\ref{app:UniformControl}). To apply \cite[Theorem 13]{rosenthaletroberts05}, we only have to prove that there exists $\kappa \in [1, \alpha^{-1}]$ such that the sequence $\{V^{\kappa \alpha}(X_n); n\geq 0\}$ is bounded in probability, which is equivalent to proving that $\{V^\beta(X_n); n\geq 0\}$ is bounded in probability for some (and thus any) $\beta \in (0,1]$. This is a consequence of Lemma~\ref{lem:YanBai} applied with $W = V^\beta$ for some $\beta \in (0,1]$ and $\mathbf{r}(n) = (n+1)^{1+\eta}$ for some $\eta>0$ (see the proof of Corollary~\ref{coro:MarginalUnifCase} for similar computations).
\begin{lemma} \label{lem:YanBai} Assume that there exist a set $\mathcal{C}$ and functions $W: \mathsf{X} \to (0, +\infty)$ and $\mathbf{r} : {\mathbb{N}} \to (0, +\infty)$ such that $\mathbf{r}$ is non-decreasing, $P_\theta W \leq W $ on $\mathcal{C}^c$ and \[ \sup_{\mathcal{C} \times \Theta} P_\theta W< +\infty \;, \qquad \qquad \sup_l \sup_{\mathcal{C}\times \Theta } \mathbb{E}_{x,\theta}^{(l)}\left[ \mathbf{r}(\tau_\mathcal{C}) \right] < +\infty \;, \qquad \qquad \sum_k \{1/\mathbf{r}(k) \} < +\infty \;. \] Then, for any probability distributions $\xi_1, \xi_2$ on $\mathsf{X}$ and $\Theta$ respectively, $\{W(X_n), n\geq 0 \}$ is bounded in probability under $\mathbb{P}_{\xi_1,\xi_2}$. \end{lemma} \begin{proof}
Let $\epsilon >0$. We prove that there exist $M_\epsilon, N_\epsilon$ such that for any $M \geq M_\epsilon$ and $n \geq N_\epsilon$, $\mathbb{P}_{\xi_1,\xi_2}\left(
W(X_n) \geq M \right) \leq \epsilon$. Choose $N_\epsilon$ such that for any $n \geq N_\epsilon$ \[ \mathbb{E}_{\xi_1,\xi_2}\left[\frac{ \tau_{\mathcal{C}}}{n} \wedge 1 \right] \leq \epsilon/3\;, \qquad \qquad \sup_l \sup_{\mathcal{C} \times \Theta} \mathbb{E}_{x,\theta}^{(l)}\left[ \mathbf{r}(\tau_\mathcal{C}) \right]\ \sum_{k \geq n} \{1/\mathbf{r}(k) \} \leq \epsilon/3 \;, \] and choose $M_\epsilon $ such that for any $M \geq M_\epsilon$, $ N_\epsilon \ \sup_{\mathcal{C} \times \Theta} P_\theta W \leq \epsilon M /3$. We write \[ \mathbb{P}_{\xi_1,\xi_2}\left( W(X_n) \geq M \right) = \sum_{k=0}^{n-1} \mathbb{P}_{\xi_1,\xi_2} \left( W(X_n) \geq M,X_k \in \mathcal{C}, X_{k+1:n} \notin \mathcal{C} \right) + \mathbb{P}_{\xi_1,\xi_2} \left( W(X_n) \geq M, X_{0:n} \notin \mathcal{C} \right) \;. \] By the Markov inequality, for $n \geq N_\epsilon$, \[ \mathbb{P}_{\xi_1,\xi_2}\left( W(X_n) \geq M , X_{0:n} \notin \mathcal{C} \right) \leq \mathbb{P}_{\xi_1,\xi_2}\left( X_{0:n} \notin \mathcal{C} \right) \leq \mathbb{P}_{\xi_1,\xi_2}\left( \tau_\mathcal{C} > n \right) \leq \mathbb{E}_{\xi_1,\xi_2}\left[\frac{ \tau_{\mathcal{C}}}{n} \wedge 1 \right] \leq \epsilon/3 \;. \] Furthermore, for $n \geq N_\epsilon$, \begin{multline*}
\sum_{k=0}^{n-N_\epsilon} \mathbb{P}_{\xi_1,\xi_2} \left( W(X_n) \geq M,X_k \in
\mathcal{C}, X_{k+1:n} \notin \mathcal{C} \right) \leq \sum_{k=0}^{n-N_\epsilon}
\mathbb{P}_{\xi_1,\xi_2} \left(X_k \in \mathcal{C}, X_{k+1:n} \notin \mathcal{C} \right) \\
\leq \sum_{k=0}^{n-N_\epsilon} \mathbb{E}_{\xi_1,\xi_2} \left[ \ensuremath{\mathbbm{1}}_\mathcal{C}(X_k) \
\sup_{l} \sup_{\mathcal{C} \times \Theta} \mathbb{P}_{x,\theta}^{(l)}\left( X_{1:n-k}
\notin \mathcal{C} \right)\right] \leq \sum_{k=0}^{n-N_\epsilon} \sup_l
\sup_{\mathcal{C} \times \Theta} \mathbb{P}_{x,\theta}^{(l)} \left( \tau_{\mathcal{C}} \geq n-k
\right) \\
\leq \sum_{k=N_\epsilon}^{n} \frac{1}{\mathbf{r}(k)} \sup_l \sup_{\mathcal{C} \times
\Theta} \mathbb{E}_{x,\theta}^{(l)}\left[ \mathbf{r}(\tau_\mathcal{C}) \right] \leq \epsilon/3
\;. \end{multline*} Finally, for $n \geq N_\epsilon$ we write \begin{multline*}
\sum_{k=n-N_\epsilon+1}^n \mathbb{P}_{\xi_1,\xi_2} \left( W(X_n) \geq M,X_k \in \mathcal{C},
X_{k+1:n} \notin \mathcal{C} \right) \\
\leq \sum_{k=n-N_\epsilon+1}^n \mathbb{E}_{\xi_1,\xi_2} \left[ \ensuremath{\mathbbm{1}}_\mathcal{C}(X_k) \
\mathbb{P}_{X_k,\theta_k}^{(k)}\left(W(X_{n-k}) \geq M, X_{1:n-k} \notin \mathcal{C} \right)
\right] \end{multline*} We have, for any $k \in \{n-N_\epsilon+1, \cdots, n \}$ and $(x,\theta) \in \mathcal{C} \times \Theta$ \begin{multline*}
\mathbb{P}_{x,\theta}^{(k)}\left(W(X_{n-k}) \geq M, X_{1:n-k} \notin \mathcal{C} \right)
\leq \frac{1}{M} \mathbb{E}_{x,\theta}^{(k)}\left[ W(X_{n-k}) \ensuremath{\mathbbm{1}}_{\mathcal{C}^c}(
X_{1:n-k-1}) \right] \leq \frac{1}{M} \mathbb{E}_{x,\theta}^{(k)}\left[ W(X_1)
\right] \end{multline*} where, in the last inequality, we used the drift inequality on $W$ outside $\mathcal{C}$. Hence, \[ \sum_{k=n-N_\epsilon+1}^n \mathbb{P}_{\xi_1,\xi_2} \left( W(X_n) \geq M,X_k \in \mathcal{C}, X_{k+1:n} \notin \mathcal{C} \right) \leq \frac{N_\epsilon}{M} \sup_{\mathcal{C} \times \Theta} P_\theta W(x) \leq \epsilon /3 \;. \] This concludes the proof. \end{proof}
\subsection{Proof of Theorem~\ref{theo:SLLNUnboundedUnifCase}}
By using the function $\hat{g}_a^{(l)}$ introduced in Section~\ref{sec:GeneralPoisson} and by Proposition~\ref{prop:QuasiPoissonEq}, we write $\mathbb{P}_{x,\theta}-\text{a.s.} $ \begin{multline*}
n^{-1} \sum_{k=1}^n \bar f(X_k) = n^{-1} \sum_{k=1}^n \left( (1-a)^{-1}\hat
g_a^{(k)}(X_k,\theta_k) -\mathbb{E}_{X_k,\theta_k}^{(k)} \left[ \hat
g_a^{(k+1)}\left(X_1, \theta_1 \right) \right] \right) \\
= n^{-1} (1-a)^{-1} \; \sum_{k=1}^n \left\{ \hat g_a^{(k)}(X_k,\theta_k) -
\mathbb{E}_{x,\theta}\left[\hat g_a^{(k)}(X_k,\theta_k) \vert \mathcal{F}_{k-1}
\right]
\right\} \\
+ n^{-1} (1-a)^{-1} \sum_{k=1}^n \left\{ \mathbb{E}_{x,\theta}\left[\hat
g_a^{(k)}(X_k,\theta_k) \vert \mathcal{F}_{k-1} \right] - (1-a)
\mathbb{E}_{x,\theta} \left[ \hat g_a^{(k+1)}\left(X_{k+1}, \theta_{k+1} \right)
\vert \mathcal{F}_{k} \right]
\right\} \\
= n^{-1} (1-a)^{-1} \; \sum_{k=1}^n \left\{ \hat g_a^{(k)}(X_k,\theta_k) -
\mathbb{E}_{x,\theta}\left[\hat g_a^{(k)}(X_k,\theta_k) \vert \mathcal{F}_{k-1}
\right]
\right\} \\
+ n^{-1} (1-a)^{-1} \left\{ \mathbb{E}_{x,\theta}\left[\hat g_a^{(1)}(X_1,\theta_1)
\vert \mathcal{F}_{0} \right] - \mathbb{E}_{x,\theta}\left[\hat
g_a^{(n+1)}(X_{n+1},\theta_{n+1}) \vert
\mathcal{F}_{n} \right] \right\} \\
+ a \; n^{-1} (1-a)^{-1} \; \sum_{k=1}^n \mathbb{E}_{x,\theta} \left[ \hat
g_a^{(k+1)}\left(X_{k+1}, \theta_{k+1} \right) \vert \mathcal{F}_{k}
\right]. \end{multline*} We apply the above inequalities with $a = a_n$ and consider the different terms in turn. We show that they tend $\mathbb{P}_{x,\theta}-\text{a.s.} $ to zero when the deterministic sequence $\{a_n, n \geq 1 \}$ satisfies conditions which are verified e.g. with $a_n = (n+1)^{-\zeta}$ for some $\zeta$ such that \[ \zeta >0 \;, \qquad 2\zeta < 1 - \left(0.5 \vee \beta(1-\alpha)^{-1} \right) \;, \qquad \zeta < 1 - \beta (1-\alpha)^{-1} \;. \] To prove that each term converges a.s. to zero, we use the following characterization \[ \left[ \forall \epsilon>0 \;, \quad \lim_{n \to +\infty} \mathbb{P}\left(\sup_{m
\geq n} |X_m| \geq \epsilon \right) \right] \Longleftrightarrow \left[
\{X_n, n \geq 0 \} \to 0 \qquad \mathbb{P}-\text{a.s.} \right] \;. \]
Hereafter, we assume that $|f|_{V^\beta} =1$. In the following, $c$ (and below, $c_1,c_2$) denote constants whose values may vary from one appearance to the next.
\paragraph{\tt Convergence of Term 1.} Set $p \eqdef (1-\alpha) /\beta$. We prove that \[ n^{-1} (1-a_n)^{-1} \sum_{k=1}^n \left\{ \hat g_{a_n}^{(k)}(X_k,\theta_k) -
\mathbb{E}_{\xi_1,\xi_2}\left[\hat g_{a_n}^{(k)}(X_k,\theta_k) \vert
\mathcal{F}_{k-1} \right] \right\} \longrightarrow 0 \;, \mathbb{P}_{\xi_1,\xi_2}-\text{a.s.} \] provided the sequence $\{a_n, n\geq 0\}$ is non increasing, $ \lim_{n\to\infty} \ n^{\max(1/p,1/2)-1} /a_n = 0$, $\sum_n n^{-1} [n^{\max(1/p,1/2)-1} /a_n]^p <
+\infty$ and $\sum_n |a_n -a_{n-1}| a_{n-1}^{-2} \ [n^{\max(1/p,1/2)-1} /a_n] < +\infty$. \begin{proof}
Define $D_{n,k} \eqdef \hat g_{a_n}^{(k)}(X_k,\theta_k) -
\mathbb{E}_{\xi_1,\xi_2}\left[\hat g_{a_n}^{(k)}(X_k,\theta_k) \vert
\mathcal{F}_{k-1} \right]$; $S_{n,k} \eqdef \sum_{j=1}^kD_{n,j}$, if $k\leq
n$ and $S_{n,k} \eqdef \sum_{j=1}^nD_{n,j}+\sum_{j=n+1}^kD_{j,j}$ if $k>n$;
and $R_{n} \eqdef \sum_{j=1}^{n-1}D_{n,j}-D_{n-1,j}$. Then for each $n$,
$\{(S_{n,k},\mathcal{F}_k),\;k\geq 1\}$ is a martingale. For $k>n$ and by Lemma
\ref{lem1martingales}, there exists a universal constant $C$ such that
\begin{multline}\mathbb{E}_{\xi_1,\xi_2}\left[|S_{n,k}|^p\right]\leq Ck^{\max(p/2,1)-1}\left(\sum_{j=1}^n\mathbb{E}_{\xi_1,\xi_2}\left[|D_{n,j}|^p\right]+\sum_{j=n+1}^k\mathbb{E}_{\xi_1,\xi_2}\left[|D_{j,j}|^p\right]\right)\\
\leq c_1 \ |\bar f|_{V^\beta} \
k^{\max(p/2,1)-1}a_k^{-p}\sum_{j=1}^k\mathbb{E}_{\xi_1,\xi_2}\left[V(X_j)\right]\leq
c_1 \ |\bar f|_{V^\beta} \ k^{\max(p/2,1)}a_k^{-p}\xi_1(V),
\label{eq:BoundOnSnk} \end{multline} where we used (\ref{eq:MajoRem}) and Proposition~\ref{prop:ComparaisonGal}(\ref{prop:CpG2}). It follows that for any $n\geq 1$, $\lim_{N\to\infty}
N^{-p}\mathbb{E}_{\xi_1,\xi_2}\left(|S_{n,N}|^p\right)\leq c_1\lim_{N\to\infty}\left(N^{\max(1/p,1/2)-1} /a_N\right)^p=0$. Then by the martingale array extension of the Chow-Birnbaum-Marshall's inequality (Lemma~\ref{lem:Birnbaum}), \begin{multline*}
2^{-p}\delta^p\mathbb{P}_{\xi_1,\xi_2}\left(\sup_{m \geq n} m^{-1} (1-a_m)^{-1} \left|\sum_{j=1}^mD_{m,j}\right|>\delta\right)\\
\leq
\sum_{k=n}^\infty\left(k^{-p}-(k+1)^{-p}\right)\mathbb{E}_{\xi_1,\xi_2}\left[|S_{n,k}|^p\right]+
\left(\sum_{k=n+1}^\infty
k^{-1}\mathbb{E}_{\xi_1,\xi_2}^{1/p}\left[|R_k|^p\right]\right)^p
\;. \end{multline*} Under the assumptions on the sequence $\{a_n, n\geq 0\}$ and given the bound (\ref{eq:BoundOnSnk}), the first term in the rhs tends to zero as $n \to +\infty$. To bound the second term, we first note that $\{(\sum_{j=1}^kD_{n,j}-D_{n-1,j},\mathcal{F}_k),\;k\geq 1\}$ is a martingale for each $n$. Therefore, by Lemma \ref{lem1martingales} and the definition of $D_{n,j}$ \begin{multline*}
\mathbb{E}_{\xi_1,\xi_2}\left[|R_n|^p\right]\leq
C \ n^{\max(p/2,1)-1}\sum_{j=1}^{n-1}\mathbb{E}_{\xi_1,\xi_2}\left[|D_{n,j}-D_{n-1,j}|^p\right] \\
\leq 2 C \ n^{\max(p/2,1)-1}\sum_{j=1}^{n-1}\mathbb{E}_{\xi_1,\xi_2}\left[|\hat
g_{a_n}^{(j)}(X_j,\theta_j) -\hat g_{a_{n-1}}^{(j)}(X_j,\theta_j)|^p\right]
\;.\end{multline*} Then, using (\ref{bounddiffGa}) (with $q=\infty$) and the usual argument of bounding moments of $V^\beta(X_j)$, we get
\[\mathbb{E}_{\xi_1,\xi_2}^{1/p}\left[|R_n|^p\right]\leq c_1 \ |\bar f|_{V^\beta} \ n^{\max(1/2,1/p)} \ |a_n - a_{n-1}| \ a_n^{-1} a_{n-1}^{- 2}\xi_1(V).\] Under the assumptions, $\sum_n n^{-1}
\mathbb{E}_{\xi_1,\xi_2}^{1/p}\left[|R_n|^p\right] < +\infty$ and this concludes the proof.
\end{proof}
\paragraph{\tt Convergence of Term 2.} We prove that \[ n^{-1} (1-a_n)^{-1} \mathbb{E}_{\xi_1,\xi_2}\left[\hat g_{a_n}^{(1)}(X_1,\theta_1)
\vert \mathcal{F}_{0} \right] \longrightarrow 0 \;, \] provided $\lim_n n a_n = +\infty$ and $\lim_n a_n =0$. \begin{proof}
By Theorem~\ref{theo:controleG} applied with $q= +\infty$, it may be
proved that there exist constants $c,N$ such that
\[ \left|
\mathbb{E}_{\xi_1,\xi_2}\left[\hat g_{a_n}^{(1)}(X_1,\theta_1) \vert \mathcal{F}_{0}
\right] \right| \leq c a_n^{-1} \xi_1(V) + c \left(1-(1-a_n)^N \right)^{-1} N \;. \] Multiplied by $n^{-1} (1-a_n)^{-1}$, the rhs tends to zero as $n \to +\infty$, since $\lim_n n a_n = +\infty$ and $1-(1-a_n)^N \geq N a_n (1-a_n)^{N-1}$. \end{proof}
\paragraph{\tt Convergence of Term 3.} We prove that \[ n^{-1} (1-a_n)^{-1} \mathbb{E}_{\xi_1,\xi_2}\left[\hat
g_{a_n}^{(n+1)}(X_{n+1},\theta_{n+1}) \vert \mathcal{F}_{n} \right] \longrightarrow 0 \;, \mathbb{P}_{\xi_1,\xi_2}-\text{a.s.}
\] provided the sequence $\{n^{-1} a_n^{-1}, n\geq 1 \}$ is non-increasing, $\lim_n n^{1-\beta(1-\alpha)^{-1}} a_n = +\infty$, $\sum_n (n a_n)^{-(1-\alpha)\beta^{-1}} < +\infty$ and $\lim_n a_n =0$. \begin{proof}
There exist constants $c_1,c_2,N$ such that for any $n$ large enough (i.e.
such that $1-a_n \geq 1/2$) and $p \eqdef (1-\alpha) \beta^{-1} >1$
\begin{multline*}
\mathbb{P}_{\xi_1,\xi_2} \left( \sup_{m \geq n} m^{-1} (1-a_m)^{-1} \; \left|
\mathbb{E}_{\xi_1,\xi_2}\left[ \hat g_{a_m}^{(m+1)}(X_{m+1},\theta_{m+1}) \vert
\mathcal{F}_{m} \right] \right| \geq \delta \right) \\
\leq 2^p \delta^{-p} \; \mathbb{E}_{\xi_1,\xi_2}\left[ \sup_{m \geq n} m^{-p}
\left| \mathbb{E}_{\xi_1,\xi_2}\left[ \hat g_{a_m}^{(m+1)}(X_{m+1},\theta_{m+1})
\vert
\mathcal{F}_{m} \right] \right|^p \right] \\
\leq 2^p \delta^{-p} \; \sum_{m \geq n} m^{-p} \; \mathbb{E}_{\xi_1,\xi_2}\left[
\left| \mathbb{E}_{\xi_1,\xi_2}\left[ \hat g_{a_m}^{(m+1)}(X_{m+1},\theta_{m+1})
\vert \mathcal{F}_{m} \right]
\right|^p \right] \\
\leq 2^p \delta^{-p} \; \sum_{m \geq n} m^{-p} \; \mathbb{E}_{\xi_1,\xi_2}\left[
\left| \hat g_{a_m}^{(m+1)}(X_{m+1},\theta_{m+1}) \right|^p \right] \\
\leq 2^{2p-1} \; \delta^{-p} \; \sum_{m \geq n} m^{-p} \; \left\{
\frac{c_1}{a_m^p} \; \mathbb{E}_{\xi_1,\xi_2}\left[ V^{\beta p}(X_{m+1}) \right]
+ c_2 \left(\frac{N}{(1-(1-a_m)^{N})}\right)^p \right\}
\end{multline*}
where we used Theorem~\ref{theo:controleG} with $q = +\infty$. Furthermore
by Propositions~\ref{prop:ComparaisonGal}(\ref{prop:CpG1}) and
\ref{prop:ComparaisonGal2} and the drift inequality,
\begin{multline*}
\mathbb{P}_{\xi_1,\xi_2} \left( \sup_{m \geq n} m^{-1} (1-a_m)^{-1} \; \left|
\mathbb{E}_{\xi_1,\xi_2}\left[ \hat g_{a_m}^{(m+1)}(X_{m+1},\theta_{m+1})
\vert
\mathcal{F}_{m} \right] \right| \geq \delta \right) \\
\leq \frac{2^p c_3}{\delta^{p}} \; \left\{ n^{-p} a_n^{-p}
\mathbb{E}_{\xi_1,\xi_2}[V(X_n)] + \sum_{m \geq n} m^{-p} a_m^{-p} + \sum_{m
\geq n} m^{-p} \;
\left(\frac{N}{(1-(1-a_m)^{N})}\right)^p \right\} \\
\leq \frac{2^pc_3}{\delta^{p}} \; \left\{ n^{-p} a_n^{-p} \left(\xi_1(V) + n
\bar b \right) + \bar b \sum_{m \geq n} m^{-p} a_m^{-p} + \sum_{m \geq
n} m^{-p} \; \left(\frac{N}{(1-(1-a_m)^{N})}\right)^p \right\} \;.
\end{multline*}
Under the stated conditions on $\{a_n, n\geq 1
\}$, the rhs tends to zero as $n \to +\infty$. \end{proof}
\paragraph{\tt Convergence of Term 4.} We prove that \[ a_n n^{-1} (1-a_n)^{-1} \sum_{k=1}^n \mathbb{E}_{\xi_1,\xi_2}\left[\hat
g_{a_n}^{(k+1)}(X_{k+1},\theta_{k+1}) \vert \mathcal{F}_{k} \right] \longrightarrow 0 \;, \mathbb{P}_{\xi_1,\xi_2}-\text{a.s.} \] provided $\{a_n^{1 \wedge [(1-\alpha-\beta)/\alpha]} \; n^{-1}, n\geq 1\}$ is non-increasing, $\sum_n a_n^{1 \wedge [(1-\alpha-\beta)/\alpha]} \; n^{-1} < +\infty$, and $\lim_n a_n =0$.
\begin{proof} Choose $q \geq 1$ such that $\beta + \alpha/q \leq 1-\alpha$. Fix $\epsilon >0$. From Theorem~\ref{theo:controleG}, there exist constants $C,N$ such that for any $n\geq1$, $l\geq 0$, $(x,\theta) \in \mathsf{X} \times \Theta$,
\[
\left| \hat g_{a_n}^{(l)}(x,\theta) \right| \leq C \ a_n^{1/q-1}\ V^{\beta +
\alpha /q}(x) + 4 \epsilon N (1-(1-a_n)^N)^{-1} \;. \] Hence for $n$ large enough such that $(1-a_n) \geq 1/2$ \begin{multline*}
\left| a_n n^{-1} (1-a_n)^{-1} \sum_{k=1}^n \mathbb{E}_{\xi_1,\xi_2}\left[\hat
g_{a_n}^{(k+1)}(X_{k+1},\theta_{k+1}) \vert \mathcal{F}_{k} \right] \right| \\
\leq 8 a_n \epsilon N (1-(1-a_n)^N)^{-1} + 2 C \ a_n^{1/q } n^{-1} \;
\sum_{k=1}^n \mathbb{E}_{\xi_1,\xi_2} \left[ V^{\beta +
\alpha /q}(X_{k+1}) \vert \mathcal{F}_k\right] \\
\leq 8 a_n \epsilon N (1-(1-a_n)^N)^{-1} + 2 C \ a_n^{1/q} n^{-1} \;
\sum_{k=1}^n V^{1-\alpha}(X_k) + 2 C\; \ a_n^{1/q} \bar b \;, \end{multline*} where we used $\beta + \alpha/q \leq 1-\alpha$ and Proposition~\ref{prop:ComparaisonGal}(\ref{prop:CpG1}) in the last inequality. Since $\lim_n a_n =0$ and $\lim_n a_n \epsilon N (1-(1-a_n)^N)^{-1} = \epsilon$, we only have to prove that $a_n^{1/q} \; n^{-1} \sum_{k=1}^n V^{1-\alpha}(X_k)$ converges to zero $\mathbb{P}_{\xi_1,\xi_2}$-\text{a.s.} By the Kronecker Lemma (see e.g.\ \cite[Section 2.6]{halletheyde80}), this amounts to proving that $\sum_{k \geq 1} a_k^{1/q} k^{-1} \; V^{1-\alpha}(X_k)$ is finite \text{a.s.} This property holds upon noting that by Proposition~\ref{prop:ComparaisonGal2} and Proposition~\ref{prop:ComparaisonGal}(\ref{prop:CpG1}) \begin{multline*}
\mathbb{E}_{\xi_1,\xi_2} \left[ \sum_{k \geq n} a_k^{1/q} k^{-1} \;
V^{1-\alpha}(X_k) \right] \leq a_n^{1/q} n^{-1} \; \mathbb{E}_{\xi_1,\xi_2} \left[
V(X_n)\right] +
\sum_{k \geq n} a_k^{1/q} k^{-1} \\
\leq a_n^{1/q} n^{-1} \; \left( \xi_1(V) + \bar b n \right)+ \sum_{k \geq n}
a_k^{1/q} k^{-1}, \end{multline*} and the rhs tends to zero under the stated assumptions. \end{proof}
\subsection{Proof of Proposition~\ref{prop:SLLNUnboundedUnifCaseBounded}} We only sketch the proof since it is very similar to that of Theorem~\ref{theo:SLLNUnboundedUnifCase}. We start by
proving a result similar to Theorem~\ref{theo:controleG}. Since $\mathcal{D} =
\mathsf{X}$, the sequence $\{\tau^k, k\geq 0\}$ is deterministic and $\tau^{k+1} =
\tau^k + N +1$. By adapting the proof of Theorem~\ref{theo:controleG} ($f$ is
bounded and $\mathcal{D} = \mathsf{X}$), we establish that for any $\epsilon>0$, there
exists an integer $n \geq 2$ such that for any $0<a<1$, any bounded function
$f$, $l\geq 0$, $(x,\theta) \in \mathsf{X} \times \Theta$ \[
\left( |\bar f|_{1} \right)^{-1} \; \left| \hat g_{a}^{(l)}(x,\theta) \right|
\leq n+ \epsilon \; \left(1-(1-a)^n \right)^{-1} \; n \;. \] We then introduce the martingale decomposition as in the proof of Theorem~\ref{theo:SLLNUnboundedUnifCase} and follow the same lines (with any $p>1$).
\appendix
\section{Explicit control of convergence} \label{app:UniformControl} We provide sufficient conditions for the assumptions A\ref{A-VCset}(\ref{A4rev}) and A\ref{A6}. The technique relies on the explicit control of convergence of a transition kernel $P$ on a general state space $(\mathbb{T}, \mathcal{B}(\mathbb{T}))$ to its stationary distribution $\pi$. \begin{prop} \label{prop:ExplicitControlCvg} Let $P$ be a $\phi$-irreducible and aperiodic transition kernel on $(\mathbb{T}, \mathcal{B}(\mathbb{T}))$. \begin{enumerate}[(i)] \item \label{block1} Assume that there exist a probability measure $\nu$ on
$\mathbb{T}$, positive constants $\varepsilon, b,c$, a measurable set
$\mathcal{C}$, a measurable function $V: \mathbb{T} \to [1, +\infty)$ and $0<
\alpha \leq 1$ such that \begin{equation}
\label{eq:HypPropExplicitControlCvg}
P(x,\cdot) \geq \ensuremath{\mathbbm{1}}_\mathcal{C}(x) \; \varepsilon \ \nu(\cdot) \;, \qquad \qquad PV \leq V - c \ V^{1-\alpha} +b \ \ensuremath{\mathbbm{1}}_\mathcal{C} \;. \end{equation} Then $P$ possesses an invariant probability measure $\pi$ and $\pi(V^{1-\alpha})< + \infty$. \item \label{block2} Assume in addition that $ c \ \inf_{\mathcal{C}^c} V^{1-\alpha}
\geq b$, $\sup_\mathcal{C} V < + \infty$ and $\nu(\mathcal{C})>0$. Then there exists a
constant $C$ depending upon $\sup_\mathcal{C} V$, $\nu(\mathcal{C})$ and $\varepsilon,
\alpha,b,c$ such that for any $0 \leq \beta \leq 1-\alpha$ and $1 \leq \kappa
\leq\alpha^{-1}(1-\beta)$, \begin{equation}
\label{eq:ConcPropExplicitControlCvg}
(n+1)^{\kappa-1} \; \| P^n(x,\cdot) - \pi(\cdot) \|_{V^\beta} \leq C \ V^{\beta +
\alpha \kappa}(x). \end{equation} \end{enumerate} \end{prop} \begin{proof}
The conditions (\ref{eq:HypPropExplicitControlCvg}) imply that $V$ is unbounded off petite sets and that $P$ is recurrent. They also imply that $\{V< +\infty \}$ is full and absorbing: hence there exists a level set $\mathcal{D}$ of $V$ large enough such that $\nu(\mathcal{D}) >0$. Following the same lines as in the proof of Proposition~\ref{prop:TimeFiniteAS}, we prove that $\sup_\mathcal{D} \mathbb{E}_x[\tau_\mathcal{D}] < +\infty$. The proof of (\ref{block1}) is concluded by \cite[Theorems 8.4.3 and 10.0.1]{meynettweedie93}. The proof of (\ref{block2})
is given in e.g. \cite{gersendeetmoulines03} (see also
\cite{andrieu:fort:2005,douc:moulines:soulier:2007}).
\end{proof}
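For example (a specialisation recorded only for orientation), taking $\beta = 0$ in (\ref{eq:ConcPropExplicitControlCvg}) yields, for any $1 \leq \kappa \leq \alpha^{-1}$,
\[
\| P^n(x,\cdot) - \pi(\cdot) \|_{\mathrm{TV}} \leq C \ \frac{V^{\alpha \kappa}(x)}{(n+1)^{\kappa-1}} \;,
\]
which is the form of the bound invoked in the proof of Proposition~\ref{prop:YanBai}.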
When $b \leq c$, the condition $ c \ \inf_{\mathcal{C}^c} V^{1-\alpha} \geq b$ holds automatically since $V \geq 1$. Otherwise, it is easy to deduce the conditions of (\ref{block2}) from conditions of the form (\ref{block1}).
\begin{coro}
Let $P$ be a $\phi$-irreducible and aperiodic transition kernel on $(\mathbb{T},
\mathcal{B}(\mathbb{T}))$. Assume that there exist positive constants $ b,c$,
a measurable set $\mathcal{C}$, an unbounded measurable function $V: \mathbb{T} \to
[1, +\infty)$ and $0< \alpha \leq 1$ such that $P V \leq V - c V^{1-\alpha}
+b \ensuremath{\mathbbm{1}}_\mathcal{C}$. Assume in addition that the level sets of $V$ are $1$-small.
Then there exist a level set $\mathcal{D}$ of $V$, positive constants
$\varepsilon_\mathcal{D}$, $c_\mathcal{D}$ and a probability measure $\nu_\mathcal{D}$ such
that \[ P(x,\cdot) \geq \ensuremath{\mathbbm{1}}_\mathcal{D}(x) \; \varepsilon_\mathcal{D} \ \nu_\mathcal{D}(\cdot) \;, \qquad \qquad PV \leq V - c_\mathcal{D} \ V^{1-\alpha} +b \ \ensuremath{\mathbbm{1}}_\mathcal{D} \;, \] and $\sup_\mathcal{D} V < +\infty$, $\nu_\mathcal{D}(\mathcal{D}) >0$, and $c_\mathcal{D} \ \inf_{\mathcal{D}^c} V^{1-\alpha} \geq b$. \end{coro} \begin{proof}
For any $0 < \gamma <1$, $PV \leq V - \gamma \; c \ V^{1-\alpha} +b \
\ensuremath{\mathbbm{1}}_{\mathcal{D}_\gamma}$ with ${\mathcal{D}_\gamma} \eqdef \{ V^{1-\alpha} \leq b [c
(1-\gamma)]^{-1} \}$. Hence, $\sup_{\mathcal{D}_\gamma} V < +\infty$; and for
$\gamma$ close to $1$, we have $\gamma c \; \inf_{\mathcal{D}^c_\gamma}
V^{1-\alpha} \geq b$. Finally, the drift condition
(\ref{eq:HypPropExplicitControlCvg}) implies that the set $\{V < +\infty \}$
is full and absorbing and thus the level sets $\{V \leq d \}$ are accessible
for any $d$ large enough. \end{proof}
The $1$-smallness assumption is usually made for convenience and is not restrictive. In the case where the level sets are petite (and thus $m$-small for some $m \geq 1$), the explicit upper bounds get intricate and are never detailed in the literature (at least in the polynomial case). Nevertheless, it is a recognized fact that the bounds derived in the case $m=1$ can be extended to the case $m>1$.
\section{$L^p$-martingales and the Chow-Birnbaum-Marshall's inequality} We deal with martingales and martingale arrays in the paper using the following two results. \begin{lemma}\label{lem1martingales} Let $\{(D_k,\mathcal{F}_k),\;k\geq 1\}$ be a martingale difference sequence and $M_n=\sum_{k=1}^nD_k$. For any $p>1$, \begin{equation}
\mathbb{E}\left[\left|M_n\right|^p\right]\leq Cn^{\max(p/2,1)-1}\sum_{k=1}^n \mathbb{E}\left(\left|D_k\right|^p\right),\end{equation} where $C=\left(18pq^{1/2}\right)^p$, $p^{-1}+q^{-1}=1$. \end{lemma} \begin{proof} By Burkholder's inequality (\cite{halletheyde80}, Theorem 2.10) applied to the martingale $\{(M_n,\mathcal{F}_n),\;n\geq 1\}$, we get \[
\mathbb{E}\left(\left|M_n\right|^p\right)\leq C\mathbb{E}\left[\left(\sum_{k=1}^n|D_k|^2\right)^{p/2}\right],\] where $C=\left(18pq^{1/2}\right)^p$, $p^{-1}+q^{-1}=1$. The proof follows by noting that
\begin{equation}\label{eq:prop1}\left(\sum_{k=1}^n|D_k|^2\right)^{p/2}\leq n^{\max(p/2,1)-1}\sum_{k=1}^n\left|D_k\right|^p.\end{equation}
To prove (\ref{eq:prop1}), note that if $1<p\leq 2$, the subadditivity inequality $(a+b)^\alpha\leq a^\alpha+b^\alpha$, which holds for all $a,b\geq 0$ and $0\leq \alpha\leq 1$, implies that $\left(\sum_{k=1}^n|D_k|^2\right)^{p/2}\leq \sum_{k=1}^n |D_k|^p$. If $p>2$, H\"older's inequality gives $\left(\sum_{k=1}^n|D_k|^2\right)^{p/2}\leq n^{p/2-1}\left(\sum_{k=1}^n|D_k|^p\right)$. \end{proof}
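Inequality \eqref{eq:prop1} can also be checked numerically. The following Python sketch (an illustrative sanity check on randomly generated sequences, not part of the proof; the sample size, number of trials and tolerance are ad hoc choices) compares both sides of \eqref{eq:prop1} for several values of $p$.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)

def check_prop1(n, p, trials=1000):
    # Check (sum_k |D_k|^2)^(p/2) <= n^(max(p/2,1)-1) * sum_k |D_k|^p
    # for random sequences D of length n.
    ok = True
    for _ in range(trials):
        D = rng.normal(size=n)
        lhs = np.sum(D ** 2) ** (p / 2)
        rhs = n ** (max(p / 2, 1) - 1) * np.sum(np.abs(D) ** p)
        ok &= lhs <= rhs * (1 + 1e-12)  # tiny slack for floating-point rounding
    return bool(ok)

for p in (1.5, 2.0, 3.0, 4.5):
    print(p, check_prop1(n=50, p=p))   # expected: True for every p > 1
\end{verbatim}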
Lemma~\ref{lem:Birnbaum} can be found in \cite{Atchade:2009} and provides a generalization of the classical Chow--Birnbaum--Marshall inequality. \begin{lemma} \label{lem:Birnbaum} Let $\{D_{n,i},\mathcal{F}_{n,i},\;1\leq i\leq n\}$, $n\geq 1$ be a martingale-difference array and $\{c_n,\;n\geq 1\}$ a non-increasing sequence of positive numbers. Assume that $\mathcal{F}_{n,i}=\mathcal{F}_i$ for all $i,n$. Define \[S_{n,k} \eqdef \sum_{i=1}^k D_{n,i},\;\; \mbox{ if }1\; \leq k\leq n \;\; \mbox{ and }\;\;\; S_{n,k} \eqdef \sum_{i=1}^n D_{n,i}+\sum_{j=n+1}^kD_{j,j},\;\;\;k>n;\] \[R_n \eqdef \sum_{j=1}^{n-1}\left(D_{n,j}-D_{n-1,j}\right).\] For $n\leq m\leq N$, $p\geq 1$ and $\lambda>0$,
\begin{multline}2^{-p}\lambda^p\mathbb{P}\left(\max_{n\leq m\leq N}c_m|S_{m,m}|>\lambda\right)\leq c_N^p\mathbb{E}\left(|S_{n,N}|^p\right)+\sum_{j=n}^{N-1}\left(c_j^p-c_{j+1}^p\right)\mathbb{E}\left(|S_{n,j}|^p\right) \\
+ \mathbb{E}\left[\left(\sum_{j=n+1}^N c_j|R_j|\right)^p\right].\end{multline} \end{lemma}
\section{Proofs of Section~\ref{sec:ex2}}
In the proofs, $C$ will denote a generic finite constant whose actual value might change from one appearance to the next. The proofs below differ from earlier works (see e.g. \cite{gersendeetmoulines00,doucetal04}) since $q$ is not assumed to be compactly supported.
\subsection{Proof of Lemma~\ref{driftRWM}} \begin{lemma} \label{lem:tool1:proof:driftRWM}
Assume D\ref{D1}-\ref{D2}. For all $x$ large enough and $|z| \leq \eta
|x|^\upsilon$, $t \mapsto V_s(x+tz)$ is twice continuously differentiable on $[0,1]$. There exist a constant $C < +\infty$ and a positive function
$\varepsilon$ with $\lim_{|x| \to\infty} \varepsilon(x) = 0$ such that for all $x$ large enough, $|z| \leq \eta |x|^\upsilon$ and $s \leq s_\star$, \[
\sup_{t \in [0,1]} |\nabla^2 V_s(x+tz)| \leq C \; s V_s(x) |x|^{2(m-1)} \left
(s + \varepsilon(x) \right) \;. \] \end{lemma} \begin{proof}
$|x +z | \geq |x| -\eta |x|^\upsilon \geq (1-\eta) |x|^\upsilon$ so that $t
\mapsto V_s(x+tz)$ is twice continuously differentiable on $[0,1]$ for $|x|$
large enough. We have
\begin{multline*}
|\nabla^2 V_s(x+tz)| \leq s V_s(x) \ \ \frac{V_s(x+tz)}{V_s(x)} | \nabla
\ln \pi(x+tz) \nabla \ln \pi(x+tz)^T | \\ \times
\left( s + \frac{|\nabla^2 \ln \pi(x+tz) |}{| \nabla \ln \pi(x+tz) \nabla
\ln \pi(x+tz)^T |}\right)
\end{multline*}
Under the stated assumptions, there exists a constant $C$ such that for any
$x$ large enough and $|z| \leq \eta |x|^\upsilon$ \[
\sup_{t \in [0,1] } \left( s + \frac{|\nabla^2 \ln \pi(x+tz) |}{| \nabla \ln
\pi(x+tz) \nabla \ln \pi(x+tz)^T |}\right) \leq s +
\frac{D_2}{d_1^2(1-\eta)} |x|^{-m \upsilon} \;, \] and \[
\sup_{t \in [0,1] } | \nabla \ln \pi(x+tz) \nabla \ln \pi(x+tz)^T | \leq
|x|^{2(m-1)} D_1^2 \left( 1 -\eta |x|^{\upsilon-1} \right)^{2(m-1)} \;. \] Finally, \[ \sup_{t \in [0,1] ,s \leq s_\star}\left(\frac{\pi(x+tz)}{\pi(x)} \right)^{-s}
\leq 1 + s_\star D_1 \ |z| \sup_{t \in [0,1] } |x+tz|^{m-1} \sup_{t \in [0,1],s
\leq s_\star }\left(\frac{\pi(x+tz)}{\pi(x)} \right)^{-s} \]
which yields the desired result upon noting that $|z| |x+tz|^{m-1} \leq \eta
|x|^{\upsilon+m-1} (1-\eta |x|^{\upsilon -1})$ is arbitrarily small for $x$ large enough. \end{proof}
We now turn to the proof of Lemma~\ref{driftRWM}. For $x\in\mathsf{X}$, define $R(x):=\{y\in\mathsf{X}:\; \pi(y)<\pi(x)\}$ and $R(x)-x \eqdef \{y-x:\; y\in R(x)\}$. We have: \begin{eqnarray*}
P_\theta V_s(x)-V_s(x)&=&\int\left(V_s(x+z)-V_s(x)\right)q_\theta(z) \ \mu_{Leb}(dz) \\ &&+ \int_{R(x)-x}\left(V_s(x+z)-V_s(x)\right)\left(\frac{\pi(x+z)}{\pi(x)}-1\right) q_\theta(z) \ \mu_{Leb}(dz) \;. \end{eqnarray*}
If $x$ remains in a compact set $\mathcal{C}$, using D\ref{D2}(\ref{D2a}) and the continuity of $x \mapsto V_s(x)$, we have $V_s(x+z)\leq C(1+ \exp(s D_0
|z|^m))$. It follows that \[
\sup_{\theta \in \Theta} \sup_{x \in \mathcal{C}} \{ P_\theta V_s(x) - V_s(x) \} \leq C \ \sup_{\theta \in \Theta} \int_{R(x)-x} (1+ \exp(s D_0 |z|^m)) \ q_\theta(z) \ \mu_{Leb}(dz) < +\infty \;. \]
More generally, let $x$ be large enough. Define $l(x) \eqdef \log\pi(x)$, $R_V(x,z)\eqdef V_s(x+z)-V_s(x)+ s V_s(x) \pscal{z}{\nabla l(x)}$, $R_\pi(x,z)\eqdef \pi(x+z)(\pi(x))^{-1}-1-\pscal{z}{\nabla l(x)}$. Using the fact that the mean of $q_\theta$ is zero, we can write: $P_\theta V_s(x) - V_s(x)=I_1(x,\theta,s)+I_2(x,\theta,s)+I_3(x,\theta,s) $ where \[I_1(x,\theta,s)\eqdef - s V_s(x) \int_{R(x)-x} \pscal{z}{\nabla l(x)}^2 \ q_\theta(z) \ \mu_{Leb}(dz) \;,\] \[I_2(x,\theta,s)\eqdef \int R_V(x,z) \; q_\theta(z) \ \mu_{Leb}(dz)+\int_{R(x)-x}R_V(x,z)\left(\frac{\pi(x+z)}{\pi(x)}-1\right) \ q_\theta(z) \ \mu_{Leb}(dz) \;,\] and \[I_3(x,\theta,s) \eqdef - s V_s(x) \ \int_{R(x)-x} R_\pi(x,z)\pscal{z}{\nabla l(x)} \ q_\theta(z) \ \mu_{Leb}(dz) \;.\] \subsubsection{First term} It follows from \cite[Lemma B.3. and proof of Proposition 3]{gersendeetmoulines00} that, under D\ref{D2}(\ref{D2z}), there exists $b>0$ such that for all $\theta \in \Theta$, \[ \int_{R(x)-x} \pscal{z}{\nabla l(x)}^2 \ q_\theta(z) \ \mu_{Leb}(dz) \geq b \;
|\nabla l(x) |^2 \;. \] Hence, $\sup_{\theta \in \Theta} I_1(x,\theta,s) \leq -s \; V_s(x) \ b \; d_1^2
|x|^{2(m-1)}$.
\subsubsection{Second term}
For $z\in R(x)-x$, $\pi(x+z)<\pi(x)$. Therefore $|I_2(x,\theta,s)|\leq 2\int
|R_V(x,z)|q_\theta(z) \ \mu_{Leb}(dz)$. By Lemma~\ref{lem:tool1:proof:driftRWM}, there exists $C< + \infty$ (independent of $s$ for $s \leq s_\star$) such that for any $|z| \leq \eta |x|^\upsilon$, \[
|R_V(x,z) |\leq C \; s \; V_s(x) \ |x|^{2(m-1)} \ |z|^2 \ \left(s +
\varepsilon(x) \right)\;. \] This implies that there exists a constant $C< +\infty$ (independent of $s$ for $s \leq s_\star$) such that \begin{multline*}
\int |R_V(x,z)|q_\theta(z) \ \mu_{Leb}(dz) \leq C \; s \; V_s(x) \
|x|^{2(m-1)} \ \left(s +
\varepsilon(x) \right) \ \int |z|^2 q_\theta(z) \mu_{Leb}(dz) \\
+ V_s(x) \; \int_{\{z, |z| \geq \eta |x|^\upsilon \}}
\frac{V_s(x+z)}{V_s(x)} \ q_\theta(z) \mu_{Leb}(dz) \\
+ C \; V_s(x) \; |x|^{m-1} \int_{\{z, |z| \geq \eta |x|^\upsilon \}} |z| \;
q_\theta(z) \mu_{Leb}(dz) \;. \end{multline*} There exists a constant $C$ such that for $\theta \in \Theta$ and $s \leq s_\star$, the first term in the rhs is upper bounded by $C \; s \; V_s(x) \
|x|^{2(m-1)} \left(s + \varepsilon(x) \right)$. Under D\ref{D3}, the second term is upper bounded by $V_s(x) \; |x|^{2(m-1)} \; \varepsilon(x) $ with
$\lim_{|x| \to +\infty} \varepsilon(x) = 0$ uniformly in $\theta$ for $\theta \in \Theta$, and in $s$ for $s \leq s_\star$. Since $q_\theta$ is a multivariate Gaussian distribution, there exists $\lambda_\star>0$ such that
$\sup_{\theta \in\Theta} \int \exp(\lambda_\star |z|^2) q_\theta(z) \mu_{Leb}(dz)< +\infty$. Under D\ref{D3}, the third term is upper bounded by $C
\; V_s(x) \; |x|^{2(m-1)} \; \exp(-\lambda \eta^2 |x|^{2 \upsilon})$ for some $\lambda \in (0,\lambda_\star)$, uniformly in $\theta$ for $\theta \in \Theta$, and in $s$ for $s \leq s_\star$. Hence, we proved that there exists $C_\star< \infty$ such that for any $s \leq s_\star$, \[
\sup_{\theta \in \Theta} |I_2(x,\theta,s)|\leq C_\star \; V_s(x) \;
|x|^{2(m-1)} \; \left(s^2+ \varepsilon(x) \right) \;, \] for a positive function $\varepsilon$ independent of $s$ and such that
$\lim_{|x| \to +\infty} \varepsilon(x) = 0$.
\subsubsection{Third term} Following the same lines as in the control of $I_2(x,\theta,s)$, it may be proved that \begin{multline*}
I_3(x,\theta,s) \leq s V_s(x) D_1 |x|^{m-1} \int_{\{z, |z| \geq \eta
|x|^\upsilon \}} |z | \left( 1 + D_1 |z| |x|^{m-1} \right) q_\theta(z) \mu_{Leb}(dz) \\
+ C \ V_s(x) |x|^{3(m-1)} \; \int_{\{z, |z| \leq \eta |x|^\upsilon \}} |z|^3
\ q_\theta(z) \mu_{Leb}(dz) \leq C \ V_s(x) |x|^{2(m-1)} \varepsilon(x) \end{multline*} for a positive function $\varepsilon$ independent of $s,\theta$ and such that
$\lim_{|x| \to +\infty} \varepsilon(x) = 0$.
\subsubsection{Conclusion} Let $\alpha \in (0,1)$. Combining the above estimates and choosing $s$ small enough that $c_\star \eqdef b d_1^2 - C_\star s>0$, we obtain \begin{align}
\sup_{\theta \in \Theta} P_\theta V_s(x) & \leq V_s(x) - c_\star V_s(x)
|x|^{2(m-1)} + b_\star \ensuremath{\mathbbm{1}}_\mathcal{C}(x) \label{eq:drift:sous-geom} \\
& \leq V_s(x) - 0.5 c_\star V_s^{1-\alpha}(x) + b_\star \ensuremath{\mathbbm{1}}_\mathcal{C}(x) \end{align} for a compact set $\mathcal{C}$. This proves A\ref{Adrift}(ii) and A\ref{A5}. A\ref{A6} follows from the results of Appendix~\ref{app:UniformControl}. A\ref{Adrift}(iii) and A\ref{A2} follow from Lemma~\ref{lem:example:smallset}.
\subsection{Proof of Lemma~\ref{ex:lem:HypB}} An easy modification of the proof of \cite[Proposition 11]{andrieuetal06} (to adjust for the difference in the drift function) shows that
$D(\theta,\theta')\leq 2\int_\mathsf{X} |q_{e^c\Sigma}(x)-q_{e^{c'}\Sigma'}(x)| \mu_{Leb}(dx)$. We then apply \cite[Lemma 12]{andrieuetal06} to obtain that
$D(\theta,\theta')\leq C \, |e^c \Sigma-e^{c'}\Sigma'|_\mathrm{s}$ where $C$ is a finite constant depending upon the compact $\Theta$. Hereafter, $C$ is finite and its value may change upon each appearance. For any $l,n\geq 0$, $\epsilon>0$, $x \in \mathbb R^p$ and $\theta\in\Theta$, we have \begin{eqnarray*} \mathbb{P}^{(l)}_{x,\theta} \left(D(\theta_n,\theta_{n+1})\geq \epsilon\right) &\leq& \epsilon^{-1}\mathbb{E}^{(l)}_{x,\theta}\left[D(\theta_n, \theta_{n+1})\right]\\
&\leq& C \, \mathbb{E}^{(l)}_{x,\theta}\left[ |c_{n+1}-c_n| + |\Sigma_{n+1} - \Sigma_n|_\mathrm{s} \right]\\
&\leq& C \, (l+n+1)^{-1}\left(1+\mathbb{E}^{(l)}_{x,\theta}\left[|X_{n+1}|^2\right]+\sqrt{\mathbb{E}^{(l)}_{x,\theta}\left[|X_{n+1}|^2\right]}\right) \;. \end{eqnarray*}
D\ref{D2}(\ref{D2a}) implies that we can find $C<\infty$ such that $|x|^2\leq C \; \phi(V_s(x))$ for all $x\in\mathsf{X}$ where $\phi(t) = [\ln t]^{2/m}$. From the drift condition (Lemma~\ref{driftRWM}), Proposition~\ref{prop:ComparaisonGal}(\ref{prop:CpG1}) and the concavity of $\phi$, we deduce that there exists $C$ such that
$\mathbb{E}^{(l)}_{x,\theta}\left[|X_n|^2\right]\leq C\; [\ln V_s(x) ]^{2/m} \; [\ln n]^{2/m}$. We conclude that for any probability $\xi_1$ such that $\xi_1([\ln V_s]^{2/m}) < +\infty$, $\lim_n \mathbb{P}_{\xi_1,\xi_2} \left(D(\theta_n,\theta_{n+1}) \geq \epsilon\right)=0$ and for any level set $\mathcal{D}$ of $V_s$, \[ \lim_{n\to\infty}\sup_{l\geq 0}\sup_{\mathcal{D}\times
\Theta}\mathbb{P}^{(l)}_{x,\theta} \left(D(\theta_n,\theta_{n+1}) \geq
\epsilon\right)=0 \;.\]
{\bf Acknowledgment:} We would like to thank Michael Woodroofe for helpful discussions on the resolvent approach to limit theorems and Prof. Pierre Priouret and Christophe Andrieu for helpful discussions. We also thank M. Vihola for helpful comments.
\end{document} |
\begin{document}
\title[W&R]{On the reach of isometric embeddings into Wasserstein type spaces}
\author[J.~Casado]{Javier Casado$^{\ast}$}
\author[M.~Cuerno]{Manuel Cuerno$^{\ast\ast}$}
\author[J.~Santos-Rodr\'iguez]{Jaime Santos-Rodr\'iguez$^{\ast\ast\ast}$}
\thanks{$^*$Supported in part by the FPU Graduate Research Grant FPU20/01444, and by research grants
MTM2017-85934-C3-2-P and PID2021-124195NB-C32 from the Ministerio de Econom\'ia y Competitividad de Espa\~{na} (MINECO)}
\thanks{$^{\ast\ast}$Supported in part by the FPI Graduate Research Grant PRE2018-084109, and by research grants
MTM2017-85934-C3-2-P and PID2021-124195NB-C32 from the Ministerio de Econom\'ia y Competitividad de Espa\~{na} (MINECO)} \thanks{$^{\ast\ast\ast}$ Supported in part by a Margarita Salas Fellowship CA1/RSUE/2021-00625, and by research grants
MTM2017-85934-C3-2-P, PID2021-124195NB-C32 from the Ministerio de Econom\'ia y Competitividad de Espa\~{na} (MINECO)}
\address[J.~Casado]{Department of Mathematics, Universidad Aut\'onoma de Madrid and ICMAT CSIC-UAM-UC3M, Spain} \email{[email protected]}
\address[M.~Cuerno]{Department of Mathematics, Universidad Aut\'onoma de Madrid and CSIC-UAM-UC3M, Spain and Department of Mathematical Sciences, Durham University, UK}
\email{[email protected], [email protected]}
\address[J.~Santos-Rodr\'iguez]{Department of Mathematics, Universidad Aut\'onoma de Madrid, Spain and Department of Mathematical Sciences, Durham University, UK} \email{[email protected], [email protected]}
\date{\today}
\subjclass[2020]{49Q20, 28A33, 30L15, 49Q22, 53C21, 55N31} \keywords{Wasserstein distance, Optimal transport, Metric geometry, TDA}
\begin{abstract} We study the reach (in the sense of Federer) of the natural isometric embedding $X\hookrightarrow W_p(X)$ of $X$ inside its $p$-Wasserstein space, where $(X,\dist)$ is a geodesic metric space. We prove that if a point $x\in X$ can be joined to another point $y\in X$ by two minimizing geodesics, then $\reach(x, X\subset W_p(X)) = 0$. This includes the cases where $X$ is a compact manifold or a non-simply connected one. On the other hand, we show that $\reach(X\subset W_p(X)) = \infty$ when $X$ is a CAT(0) space. The infinite reach enables us to examine the regularity of the projection map. Furthermore, we replicate these findings by considering the isometric embedding $X\hookrightarrow W_\vartheta(X)$ into an Orlicz--Wasserstein space, a generalization by Sturm of the classical Wasserstein space. Lastly, we establish the nullity of the reach for the isometric embedding of $X$ into $\dgm_\infty$, the space of persistence diagrams equipped with the bottleneck distance. \end{abstract} \setcounter{tocdepth}{1}
\maketitle
\section{Introduction} The concept of the reach of a subset in Euclidean space was first introduced by Federer in \cite{federer}. It is used as a way to measure how much the subset folds in on itself (i.e. how close two pieces of the set can come in the ambient space despite being far apart in the intrinsic metric of the set). Loosely speaking (see Definition \ref{def.reach}), a subset $A\subset X$ has positive reach if there is a neighbourhood of any point of $A$ such that every point in this neighbourhood has a unique metric projection into $A$. That is, every $x\in X$ inside that neighbourhood is sent by the projection to its unique nearest point in $A$.
The reach of a subset has been of interest not only for its geometric and topological properties (see for example \cite{lytchakkapovitchreach,lytchakreach2,lytchakreach1}) but also for its application as a useful parameter for manifold learning and topological data analysis (see \cite{harvey,latschevreach} and references therein). In the survey \cite{surveyreach}, the interested reader can also find a summary of some results for sets of positive reach.
Given a geodesic metric space $(X, \dist),$ one can equip the space of probability measures supported on $X$ with a distance induced by the solutions to an optimal transport problem. Usually the cost comes from taking the $p-$power of the distance function, the so called $p-$\textit{Wasserstein spaces.} One advantage of considering these ambient spaces is that they share many geometrical properties with the base space $X$ such as non-branching of geodesics, compactness, and lower sectional curvature bounds amongst others.
In this article we focus on determining the reach of the image of the natural isometric embedding, given by mapping each point $x \in X$ to the corresponding Dirac delta $\delta_x\in W_p(X).$ We denote this by \(\reach(X\subset W_p(X))\), where $W_p(X)$ is the $p$--Wasserstein space of $X$ (see Section \ref{wassersteintype}).
Our first result shows that the cost considered affects the reach of the embedding significantly:
\begingroup \def\ref{reachPD}{\ref{reach1wasserstein}} \begin{theorem} Let $(X,\dist)$ be a metric space, and consider its $1$--Wasserstein space, $W_1(X).$ Then, for every accumulation point $x\in X$, $\reach(x, X\subset W_1(X)) = 0$. In particular, if $X$ is not discrete, $\reach(X\subset W_1(X)) = 0$.
\end{theorem} \addtocounter{theorem}{-1} \endgroup
Geometric features of $X$ also play an important role. In the presence of multiple geodesics joining the same pair of points we obtain the following:
\begingroup \def\ref{reachPD}{\ref{teorema22}} \begin{theorem}
Let $X$ be a geodesic metric space, and $x\in X$ a point such that there exists another $y\in X$ with the property that there exist at least two different minimising geodesics from $x$ to $y$. Then, for every $p>1,$ \[ \reach(x, X\subset W_p(X))=0. \] In particular, if there exists a point $x\in X$ satisfying that property, $\reach(X\subset W_p(X))=0$ for every $p>1$. \end{theorem} \addtocounter{theorem}{-1} \endgroup
This theorem leads us to obtain two interesting corollaries related to two important classes of manifolds:
\begingroup \def\ref{corollarynotsimply}{\ref{corollarycompact}} \begin{coro} If $M$ is a compact manifold, then $\reach(x,M\subset W_p(M)) =0$ for every $x\in M$ and every $p>1$. \end{coro} \addtocounter{coro}{-1} \endgroup
\begingroup \def\ref{corollarynotsimply}{\ref{corollarynotsimply}} \begin{coro} If $M$ is a not simply connected complete manifold, then $\reach(x,M\subset W_p(M)) =0$ for every $x\in M$ and $p>1$. \end{coro} \addtocounter{coro}{-1} \endgroup
In \cite{kell}, Kell studied several convexity conditions on the distance of a geodesic metric space, such as \textit{(resp. strictly, uniformly) $p$--convexity} or the \textit{Busemann} condition, as well as some more general conditions on metric spaces, such as \textit{reflexivity} (see Definitions \ref{kelldefinitions1} and \ref{kelldefinitions2}), obtaining existence and uniqueness of \textit{barycenters}, i.e., certain points of the metric space that minimise the distance to a given measure/density. In order to formalise that concept, we define a \emph{barycenter} as a point in $X$ that minimises the distance between a given element of a Wasserstein type space and the isometric embedding of the metric space inside that space. Kell's conditions allow us to determine that the reach is infinite for a broad class of spaces:
\begingroup \def\ref{reachPD}{\ref{reachpositivowass}} \begin{theorem} Let $(X,\dist)$ be a reflexive metric space. Then the following assertions hold:
\begin{enumerate}
\item If $X$ is strictly $p$--convex for $p\in[1,\infty)$ or uniformly $\infty$--convex if $p=\infty$, then\begin{equation}
\reach(X\subset W_r(X))=\infty\text{ for }r>1.
\end{equation}
\item If $X$ is Busemann, strictly $p$--convex for some $p\in[1,\infty],$ and uniformly $q$--convex for some $q\in[1,\infty]$, then\begin{equation}
\reach(X\subset W_r(X))=\infty\text{ for }r>1.
\end{equation}
\end{enumerate} \end{theorem} \addtocounter{theorem}{-1} \endgroup
We also study properties of the \textit{projection map}, i.e., \begin{align*}
\proj_2: W_2(X)&\to X\\
\mu&\mapsto r_\mu, \end{align*}that sends each measure to its $2-$barycenter (i.e. the barycenter on the $2$--Wasserstein space), and we show that this map is in fact a submetry for a certain class of spaces.
\begingroup \def\ref{reachPD}{\ref{thmsubmetry}} \begin{theorem}
Let $(\mathbb{E}^n,\dist)$ be the Euclidean space with the canonical distance. Then $\proj_2$ is a submetry. \end{theorem} \addtocounter{theorem}{-1} \endgroup
Our next results focus on the embedding into Wasserstein type spaces with more general metrics, such as the Orlicz-Wasserstein spaces defined by Sturm in \cite{sturm} and the space of persistence diagrams, the key tool in Topological Data Analysis \cite{chazalintro}.
For the Orlicz-Wasserstein spaces we require reasonable assumptions on the cost (stated in the theorem) in order to ensure that the natural embedding using Dirac deltas is indeed isometric. In a similar fashion to the case of $p-$Wasserstein spaces, we obtain:
\begingroup \def\ref{reachPD}{\ref{reachceroorlicz}} \begin{theorem} Let $X$ be a geodesic metric space, and $x\in X$ a point such that there exists another $y\in X$ with the property that there exist at least two different minimising geodesics from $x$ to $y$. Suppose $X$ is isometrically embedded into an Orlicz-Wasserstein space $W_\vartheta(X)$. Then, for every $\varphi$ (as explained in Subsection \ref{subseccionorlicz}) such that $\varphi(t_0) \neq t_0$ for some $t_0>1$, \[ \reach(x, X\subset W_\vartheta(X))=0. \] In particular, if there exists a point $x\in X$ satisfying that property, $\reach(X\subset W_\vartheta(X))=0$ for every such $\vartheta$. \end{theorem} \addtocounter{theorem}{-1} \endgroup
The last case of isometric embedding into a Wasserstein type space is the one into $\dgm$, the space of persistence diagrams. We can equip $\dgm$ with Wasserstein type distances, involving a minimisation process as in an optimal transport problem. The \textit{bottleneck distance}, $w_\infty$, is one of the most used distances in $\dgm$. Bubenik and Wagner proved in \cite{bubenik} the existence of an isometric embedding of separable and bounded metric spaces into $(\dgm_\infty,w_\infty)$. We have studied the reach of these embeddings:
\begingroup \def\ref{reachPD}{\ref{reachPD}} \begin{theorem} Let $(X,\dist)$ be a separable, bounded metric space and $(\dgm_{\infty},w_\infty)$ the space of persistence diagrams with the bottleneck distance. If $x\in X$ is an accumulation point, then \[
\reach(x, X\subset \dgm_\infty)=0.\]
In particular, if $X$ is not discrete, $\reach(X\subset \dgm_\infty)=0.$ \end{theorem} \addtocounter{theorem}{-1} \endgroup
The paper is organized as follows: In Section \ref{metricdefinitions}, we state the technical definitions that we need. Section \ref{wassersteintype} is devoted to presenting the Wasserstein type spaces we use and some of their properties. Sections \ref{reachwasserstein}, \ref{reachwassersteinorliz} and \ref{reachpersistence} contain the results about the reach of spaces embedded into their $p-$Wasserstein space, their Orlicz--Wasserstein space and the persistence diagram space respectively.
The authors would like to express their sincere gratitude to Professor Luis Guijarro for his invaluable comments and insights during the elaboration of this paper. They would also like to extend their appreciation to Professors Fernando Galaz-Garc\'ia and David Gonz\'alez for their enlightening discussions and contributions to the final manuscript.
Additionally, the authors wish to acknowledge the Department of Mathematical Sciences at Durham University for their warm hospitality and the excellent working conditions provided during the final months of preparing this paper.
\section{Preliminaries}{\label{metricdefinitions}} \subsection{Reach} First we recall the definition of the reach of a subset of a metric space. \begin{definition}[Unique points set and reach, \cite{federer}]\label{def.reach}
Let $(X, \dist)$ be a metric space and $A\subset X$ a subset. We define the set of points having a unique metric projection in $A$ as
\[\unp(A) = \{x \in X : \text{there exists a unique $a\in A$ such that } \dist(x,A) = \dist(x,a)\}.\]
For $a\in A$, we define the \emph{reach} of $A$ at $a$, denoted by $\reach(a, A)$, as
\[\reach(a, A) = \sup \{ r\ge 0 : B_r(a) \subset \unp(A)\}.\]
Finally, we define the \emph{global reach} by
\[\reach(A) = \inf_{a\in A} \reach(a, A).\] \end{definition} The intuitive idea is that $\reach(A)=0$ if and only if there is \textit{no} $\varepsilon$--neighbourhood of $A$ all of whose points have a unique metric projection onto $A$. Conversely, $\reach(A)=\infty$ occurs if and only if every point of $X$ has a unique metric projection onto $A$.
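As an illustration of Definition \ref{def.reach}, recall the classical example of a round circle of radius $r$ in the Euclidean plane, whose reach equals $r$: every point at distance less than $r$ from the circle has a unique nearest point on it, while the centre is equidistant from all of its points. The following Python sketch (purely illustrative; the discretisation of the circle and the tolerance are ad hoc choices, so uniqueness is detected only approximately, through the spread of the near-minimisers) reproduces this behaviour numerically.
\begin{verbatim}
import numpy as np

# A discretised unit circle A in the plane (720 sample points).
theta = np.linspace(0.0, 2.0 * np.pi, 720, endpoint=False)
A = np.stack([np.cos(theta), np.sin(theta)], axis=1)

def near_min_spread(q, tol=1e-3):
    # Diameter of the set of sample points of A lying within tol
    # of the minimal distance from the query point q to A.
    d = np.linalg.norm(A - q, axis=1)
    near = A[d <= d.min() + tol]
    diffs = near[:, None, :] - near[None, :, :]
    return np.linalg.norm(diffs, axis=2).max()

print(near_min_spread(np.array([0.5, 0.0])))  # small: essentially one projection point
print(near_min_spread(np.array([1.5, 0.0])))  # small: unique projection from outside as well
print(near_min_spread(np.array([0.0, 0.0])))  # about 2.0: the centre has no unique nearest point
\end{verbatim}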
\subsection{CAT(0) spaces} We recall the definition of a CAT(0) metric space.
\begin{definition}
A complete metric space $(X,\dist)$ is CAT(0) if for all $z$, $y \in X$ there exists $m\in X$ such that for all $x\in X$,
\[\dist(x,m)^2\leq \cfrac{\dist(x,y)^2 + \dist(x,z)^2 }2 - \cfrac{\dist(y,z)^2}4.\] \end{definition} This is a generalization of the concept of nonpositive curvature for Riemannian manifolds to metric spaces. So, in particular, the Euclidean space or the hyperbolic space are examples of CAT(0) spaces. A few basic properties of these spaces are: \begin{enumerate}
\item For any two points in $X$, there exists a unique geodesic segment between them.
\item $X$ is simply connected. \end{enumerate}
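In Euclidean space the defining inequality above is in fact an equality (the classical median, or parallelogram, identity), which in particular shows that $\mathbb{E}^n$ is CAT(0). The following Python sketch (a numerical illustration only, with randomly generated points in $\mathbb{R}^3$) verifies this identity.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(2)

# Median identity in Euclidean space: with m the midpoint of y and z,
#   |x-m|^2 = (|x-y|^2 + |x-z|^2)/2 - |y-z|^2/4.
for _ in range(5):
    x, y, z = rng.normal(size=(3, 3))
    m = (y + z) / 2
    lhs = np.sum((x - m) ** 2)
    rhs = (np.sum((x - y) ** 2) + np.sum((x - z) ** 2)) / 2 - np.sum((y - z) ** 2) / 4
    print(np.isclose(lhs, rhs))   # True in every trial
\end{verbatim}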
\subsection{General metric definitions}
Most of the definitions in this section appear in \cite[Section 1]{kell}. First, we recall the well known definition of existence of \textit{midpoints}:
\begin{definition}[Midpoints]
We say that $(X,\dist)$ admits \textit{midpoints} if, for every $x,y\in X$, there is $m(x,y)\in X$ such that \[\dist(x,m(x,y))=\dist(y,m(x,y))=\frac12\dist(x,y).\] \end{definition}
This technical detail allows us to present the following definitions:
\begin{definition}[$p$--convex, $p$--Busemann curvature and uniformly $p$--convex]{\label{kelldefinitions1}} Let $(X,\dist)$ be a metric space that admits midpoints. \begin{enumerate}
\item $X$ is \textit{$p$--convex} for some $p\in[1,\infty]$ if, for each triple $x,y,z\in X$ and each midpoint $m(x,y)$ of $x$ and $y$, \[
\dist(m(x,y),z)\leq\left(\frac12\dist(x,z)^p+\frac12\dist(y,z)^p\right)^{1/p}.
\]The space $X$ is called \textit{strictly $p$--convex} for $p\in(1,\infty]$ if the inequality is strict for $x\neq y$ and \textit{strictly $1$--convex} if the inequality is strict whenever $\dist(x,y)>|\dist(x,z)-\dist(y,z)|$.
\item $X$ satisfies the \textit{$p$--Busemann curvature condition} if, for all $x_0,x_1,y_0,y_1\in X$ with midpoints $m_x=m(x_0,x_1)$ and $m_y=m(y_0,y_1)$, \[
\dist(m_x,m_y)\leq\left(\frac12\dist(x_0,y_0)^p+\frac12\dist(x_1,y_1)^p\right)^{1/p}
\]for some $p\in[1,\infty]$. If $X$ satisfies the $p$--Busemann condition, we say that $(X,\dist)$ is \textit{$p$--Busemann}. In particular, if $p=1$, we say that $(X,\dist)$ is \textit{Busemann}.
It turns out that $(X,\dist)$ is a Busemann space if and only if \[
\dist(m(x,z),m(x,y))\leq\frac12\dist(z,y).
\]
\item $X$ is \textit{uniformly $p$--convex} for some $p\in[1,\infty]$ if, for all $\epsilon>0$, there exists $\rho_p(\epsilon)\in(0,1)$ such that, for every $x,y,z\in X$ satisfying \[
\dist(x,y)>\epsilon\left(\frac12\dist(x,z)^p+\frac12\dist(y,z)^p\right)^{1/p}\text{, for some }p>1,
\]or \[
\dist(x,y)>|\dist(x,z)-\dist(y,z)|+\epsilon\left(\frac12\dist(x,z)+\frac12\dist(y,z)\right)\text{, for }p=1,
\]the following inequality holds: \[
\dist(m(x,y),z)\leq(1-\rho_p(\epsilon))\left(\frac12\dist(x,z)^p+\frac12\dist(y,z)^p\right)^{1/p}.
\]For example, every $\mathrm{CAT}(0)$--space is uniformly $2$--convex. \end{enumerate} \end{definition}
\begin{rema}
By \cite[Lemma 1.4., Corollary 1.5.]{kell}, the following assertions hold: \begin{itemize}
\item A uniformly $p$--convex metric space is uniformly $p'$--convex for all $p'\geq p$.
\item Assume $(X,\dist)$ is Busemann. Then $(X,\dist)$ is strictly (resp. uniformly) $p$--convex for some $p\in[1,\infty]$ if and only if it is strictly (resp. uniformly) $p$--convex for all $p\in[1,\infty]$.
\item Any $\mathrm{CAT}(0)$--space is both Busemann and uniformly $2$--convex, thus uniformly $p$--convex for every $p\in[1,\infty]$.
\end{itemize} \end{rema}
In order to apply some of Kell's results, we introduce the notion of \textit{reflexivity} on metric spaces.
\begin{definition}[Reflexive metric space, Definition 2.1. \cite{kell}]{\label{kelldefinitions2}}
Let $I$ be a directed set. A metric space $(X,\dist)$ is \textit{reflexive} if, for every non--increasing family $\{C_i\}_{i\in I}$ of non--empty bounded closed convex subsets of $X$ (i.e. $C_i\subset C_j$ whenever $i\geq j$), we have \[
\bigcap_{i\in I}C_i\neq\emptyset.
\] \end{definition}
\section{Wasserstein type spaces and distances}{\label{wassersteintype}}
In this section we will recall the standard notions of optimal transport and Wasserstein distance. Then we will provide an introduction to the Orlicz--Wasserstein spaces initially proposed by Sturm in \cite{sturm}. Finally, we present our last Wasserstein-type space, the one formed by persistence diagrams, the key element in the field of Topological Data Analysis \cite{chazalintro}.
\subsection{Wasserstein space}
From now on, $X$ will be a metric space with distance function $\dist$. Denote by $\mathcal P(X)$ the set of probability measures on $X$ and by $\mathcal P _p (X)$ the probability measures with finite $p$-moment, i.e. \[ \mathcal P_p(X) := \{ \mu \in \mathcal P(X) : \int_X \dist(x, x_0)^p d \mu(x) < \infty \text{ for some $x_0\in X$} \}. \]
\begin{definition}[Transference plan] A \emph{transference plan} between two positive measures $\mu, \nu \in \mathcal P(X)$ is a finite positive measure $\pi \in \mathcal P (X \times X)$ which satisfies that, for all Borel subsets $A, B$ of $X$, \[ \pi(A\times X) = \mu(A), \quad \text{and} \quad \pi(X \times B) = \nu(B). \] \end{definition}
Note that we require $1=|\mu| = | \nu| = \pi( X \times X)$, so we are not considering all measures of the product space. We denote by $\Gamma(\mu, \nu)$ the set of transference plans between the measures $\mu$ and $\nu$. Then, we define the $p$--Wasserstein distance for $p\ge 1$ between two probability measures as \[ W_p( \mu, \nu) := \left( \min_{\pi \in \Gamma(\mu, \nu)} \int_{X \times X }\dist(x,y)^p d\pi (x, y) \right) ^{\frac{1}{p}} . \] The metric space $(\mathcal P_p (X) , W_p)$ is denoted as the \emph{$p$--Wasserstein space of $X$.}
It is easy to see that, for $x, y \in X$, $W_p(\delta_x, \delta_y) = \dist(x,y)$. Therefore the inclusion $x \mapsto \delta_x$ is an isometric embedding of $X$ inside $W_p(X)$.
\begin{rema}
When we are calculating $W_p(\delta_x, \mu)$, there exists only one pairing $\pi = \delta_x \otimes \mu \in \Gamma(\delta_x, \mu)$ between a delta and a general probability measure. Therefore the Wasserstein distance can be easily computed by
\[
W_p^p(\delta_x, \mu) = \int_X \dist(x, y) ^p d\mu(y).
\]
In particular, fixing $a, x, y\in X$, and $0 \le \lambda \le 1$, we have
\[
W_p^p(\delta_a, \lambda \delta_x + (1-\lambda) \delta_y) = \lambda \dist(a, x)^p + (1-\lambda)
\dist (a, y) ^p.
\] \end{rema}
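The closed formula above is straightforward to verify numerically, since against a Dirac mass the transport cost is a plain weighted sum. The following Python sketch (illustrative only; it uses a finitely supported measure on the real line and plain NumPy, with ad hoc values of $a$, $x$, $y$, $\lambda$ and $p$) evaluates $W_p^p(\delta_a, \lambda\delta_x+(1-\lambda)\delta_y)$ from the definition and compares it with the formula.
\begin{verbatim}
import numpy as np

def wp_p_dirac(a, points, weights, p):
    # W_p^p(delta_a, mu) for mu = sum_i weights[i] * delta_{points[i]}:
    # the only coupling is delta_a (x) mu, so the cost is a weighted sum.
    points = np.asarray(points, dtype=float)
    weights = np.asarray(weights, dtype=float)
    return float(np.sum(weights * np.abs(points - a) ** p))

a, x, y, lam, p = 0.3, 0.0, 1.0, 0.25, 2.0
from_definition = wp_p_dirac(a, [x, y], [lam, 1 - lam], p)
closed_form = lam * abs(a - x) ** p + (1 - lam) * abs(a - y) ** p
print(from_definition, closed_form)   # both equal 0.39
\end{verbatim}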
\subsection{Orlicz--Wasserstein space}\label{subseccionorlicz}
Let $\vartheta: \mathbb{R}^+ \to \mathbb{R}^+$ be a strictly increasing, continuous function. Assume $\vartheta$ admits a representation $\vartheta = \varphi \circ \psi$ as a composition of a convex and a concave function $\varphi$ and $\psi$, respectively. This includes all $\mathcal C^2$ functions \cite[Example 1.3.]{sturm}.
\begin{definition}[$L^\vartheta$--Wasserstein space and distance]
Let $(X, \dist)$ be a complete separable metric space. The $L^\vartheta$--Wasserstein space $\mathcal P_\vartheta (X) $ is defined by all probability measures $\mu$ in $X$ such that
\[ \int_X \varphi \left(\frac{1}{t} \psi(\dist(x,y))\right) d \mu(x) < \infty \quad \text{for some } t>0 \text{ and some } y\in X.\]
The $L^\vartheta$--Wasserstein distance of two probability measures $\mu, \nu \in \mathcal P_\vartheta (X)$ is defined as
\[
W_\vartheta (\mu, \nu) = \inf \left\{ t>0 :
\inf_{\pi \in \Gamma(\mu, \nu) } \int_{X\times X} \varphi \left( \frac1t \psi(\dist(x,y))\right) d\pi(x,y) \le 1 \right\}.
\] \end{definition} The function $W_\vartheta$ is a complete metric on $\mathcal P_\vartheta(X)$ (see \cite{sturm}, Proposition 3.2). The metric space $(\mathcal P_\vartheta (X), W_\vartheta)$ is known as the \textit{$\vartheta$-Orlicz--Wasserstein space of $X$}.
Notice that for every $x\in X$, the probability measure $\delta_x$ belongs to $\mathcal P_\vartheta(X)$. Therefore, we can embed the metric space $X$ inside its Orlicz--Wasserstein space by mapping $x \mapsto \delta_x$. In addition, this map is an isometric embedding if and only if $\psi \equiv \operatorname{Id}$ and $\varphi(1) = 1$.
\subsection{Space of persistence diagrams and bottleneck distance}
We will now define $(\dgm_p,w_p)$, the space of persistence diagrams with a Wasserstein metric. For that purpose we begin with the basic notion of the elements of our metric space:
\begin{definition}[Persistence diagram, \cite{bubenik}]
A \textit{persistence diagram} is a function from a countable set $I$ to $\mathbb{R}^2_<$, i.e. $D\colon I\to\mathbb{R}^2_<$, where $\mathbb{R}^2_<=\{(x,y)\in\mathbb{R}^2\colon x<y\}$. \end{definition}
\begin{rema}
In this definition, all the points have multiplicity one. Other authors suggest considering persistence diagrams as multisets of points, i.e. sets of points in which points may be repeated (see \cite{guijarro1,guijarro2, Mileyko2011,turnerfrechet}). This convention is closer to how persistence diagrams behave in the TDA setting, as several homological features can share the same birth and death.
Also, in \cite{guijarro1,guijarro2}, the authors extend the notion of persistence diagrams beyond the Euclidean setting and present a general definition for points in metric spaces. \end{rema}
Once we have the points of our metric space, we want to define distance functions on it. For that purpose, first, we present two preliminary definitions.
\begin{definition}[Partial matching, \cite{bubenik}]
Let $D_1\colon I_1\to\mathbb{R}^2_<$ and $D_2:I_2\to\mathbb{R}_<^2$ be persistence diagrams. A \textit{partial matching} between them is a triple $(I_1',I_2',f)$ such that $I_1'\subseteq I_1$, $I_2'\subseteq I_2$, and $f\colon I_1'\to I_2'$ is a bijection. \end{definition}
In the same spirit as the original $p$--Wasserstein distance, we want to define a new one between persistence diagrams $D_1$ and $D_2$ as the minimal cost of a partial matching between them. In particular, the cost of a partial matching will be the $\ell^p$ norm of the distances between matched pairs together with the distances between unmatched points and $\Delta$, as Bubenik stated in \cite[Section 2.1.]{bubenik}, where $\Delta$ denotes the diagonal in $\mathbb{R}^2$.
\begin{definition}[Cost of a partial matching, \cite{bubenik}]
Let $D_1\colon I_1\to\mathbb{R}^2_<$ and $D_2\colon I_2\to\mathbb{R}^2_<$ be persistence diagrams and $(I_1',I_2',f)$ a partial matching between them. We endow $\mathbb{R}^2$ with the infinity metric $\dist_\infty(a,b)=\norm{a-b}_{\infty}=\max(|a_x-b_x|,|a_y-b_y|)$. Observe that, for $a\in\mathbb{R}^2_<$, we have that $\dist_\infty(a,\Delta)=\inf_{t\in\Delta}\dist_\infty(a,t)=(a_y-a_x)/2$. We denote by $\cost_p(f)$ the \textit{$p$--cost of $f$}, defined as follows. For $p<\infty$, let \[
\cost_p(f)=\left(\sum_{i\in I_1'}\dist_\infty(D_1(i),D_2(f(i)))^p+\sum_{i\in I_1\backslash I_1'}\dist_{\infty}(D_1(i),\Delta)^p+\sum_{I_2\backslash I_2'}\dist_\infty(D_2(i),\Delta)^p\right)^{1/p},
\]and for $p=\infty$, let \[
\cost_\infty(f)=\max\left\{\sup_{i\in I_1'}\dist_\infty(D_1(i),D_2(f(i))),\sup_{i\in I_1\backslash I_1'}\dist_\infty(D_1(i),\Delta),\sup_{i\in I_2\backslash I_2'}\dist_\infty(D_2(i),\Delta)\right\}.
\]
If any of the terms in either expression is unbounded, we declare the cost to be infinity. \end{definition}
Now we can define the distance functions and the metric space of persistence diagrams:
\begin{definition}[$p$--Wasserstein distance and bottleneck distance of persistence diagrams, \cite{cohenbottleneck}]
Let $1\leq p\leq\infty$ and $D_1$, $D_2$ persistence diagrams. Define \[
\tilde{w}_p(D_1,D_2)=\inf\{\cost_p(f)\colon f\text{ is a partial matching between }D_1\text{ and }D_2\}.
\]Let $(\dgm_p,w_p)$ denote the metric space of persistence diagrams $D$ such that $\tilde{w}_p(D,\emptyset)<\infty$, quotiented by the relation $D_1\sim D_2$ if $\tilde{w}_p(D_1,D_2)=0$, where $\emptyset$ is the unique persistence diagram with empty indexing set. The metric $w_p$ is called the \textit{$p$--Wasserstein distance} and $w_\infty$ is called the \textit{bottleneck distance}. \end{definition}
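For small diagrams the infimum in the definition of $\tilde{w}_p$ can be computed by brute force, enumerating all partial matchings. The following Python sketch (illustrative only; it assumes finite diagrams given as lists of points $(x,y)$ with $x<y$, and the function names are ours) does this for the bottleneck cost $\cost_\infty$.
\begin{verbatim}
from itertools import combinations, permutations

def dinf(a, b):
    # l^infinity distance between two points of R^2
    return max(abs(a[0] - b[0]), abs(a[1] - b[1]))

def to_diag(a):
    # l^infinity distance from a point above the diagonal to Delta
    return (a[1] - a[0]) / 2

def bottleneck(D1, D2):
    # Brute-force bottleneck distance between two finite diagrams.
    best = float("inf")
    for k in range(min(len(D1), len(D2)) + 1):
        for I1 in combinations(range(len(D1)), k):
            for I2 in permutations(range(len(D2)), k):
                matched = [dinf(D1[i], D2[j]) for i, j in zip(I1, I2)]
                un1 = [to_diag(D1[i]) for i in range(len(D1)) if i not in I1]
                un2 = [to_diag(D2[j]) for j in range(len(D2)) if j not in I2]
                best = min(best, max(matched + un1 + un2, default=0.0))
    return best

D1 = [(0.0, 2.0)]
D2 = [(0.0, 1.5), (1.0, 1.6)]
print(bottleneck(D1, D2))  # 0.5: match (0,2) with (0,1.5), send (1,1.6) to the diagonal
\end{verbatim}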
\section{Reach of the Wasserstein space}{\label{reachwasserstein}}
The $p$--Wasserstein space of a metric space $(X,\dist)$ gives us an example of a well-known isometric embedding between a total space and an infinite dimensional space (in this sense, there also exists the Kuratowski embedding between a compact metric space $(Y,\dist_Y)$ and the space of functions $L^\infty(Y)$).
Recall from Definition \ref{def.reach} the notions of the \textit{set of unique points} $\unp(A)$ and of the \textit{reach} $\reach(a,A)$, $\reach(A)$.
The set of unique points of the isometric embedding of a metric space into its $p$--Wasserstein space is dense in the total space:
\begin{prop}
Let $(X,\dist)$ be a non-branching metric space and $W_p(X)$ with $p>1$ its $p$--Wasserstein space. Then the set of unique points $\unp(X\subset W_p(X))$ is dense in $W_p(X)$.
\end{prop}
\begin{proof}
Let $\mu \in W_p(X)$ be a measure with $x \in X$ a barycenter. Take $\nu$ inside a geodesic between $\delta_x$ and $\mu.$ Suppose that there exists some other point $z\in X$, $z\neq x$, that is a barycenter for $\nu.$ This implies that $W_p(\nu, \delta_z) \leq W_p(\nu, \delta_x)$ and with this we get
\[W_p(\mu,\delta_z)\leq W_p(\mu,\nu)+W_p(\nu,\delta_z)\leq W_p(\mu,\nu)+W_p(\nu,\delta_x)= W_p(\mu,\delta_x). \]
Then $z$ is also a barycenter for $\mu.$ Furthermore, we notice that there is a branching geodesic joining $\mu$ with $\delta_z.$ This gives us a contradiction, as $W_p(X)$ is non-branching.
Then $\nu$ is a measure in $\unp(X\subset W_p(X))$ which can be taken arbitrarily close to $\mu.$
\end{proof}
This density fact motivates the question of the existence of metric spaces with positive reach into its $p$--Wasserstein space.
\subsection{Null reach} The first result of this paper is that the reach of a metric space inside its $1$--Wasserstein space vanishes at every accumulation point. The proof follows the idea of the proof of Theorem 1.6 of \cite{cuerno}.
\begin{thm}\label{reach1wasserstein}
Let $(X,\dist)$ be a metric space, and consider its $1$--Wasserstein space, $W_1(X).$ Then, for every accumulation point $x\in X$, $\reach(x, X\subset W_1(X)) = 0$. In particular, if $X$ is not discrete, $\reach(X\subset W_1(X)) = 0$. \end{thm}
\begin{proof}
Following \cite{cuerno}, let $\epsilon>0$. We will show that inside $B_\epsilon(x)\subset W_1(X)$ there exists at least one measure $\mu\notin\unp(X)$.
By hypothesis, there exists $y\in X$, $y\neq x$, such that $\dist(x,y)<\epsilon$. Define \[
\mu:=\frac12\delta_x+\frac12\delta_y.
\]First, notice that $\mu\neq\delta_z$ for any $z\in X$ because the support of $\mu$ is different from the support of any of the $\delta_z\in X$.
In addition, due to the triangle inequality, \begin{equation}{\label{puntomedio}}
W_1(\delta_a,\mu)=\frac{1}{2}\dist(a,x)+\frac12\dist(a,y)\geq\frac12\dist(x,y).
\end{equation}
By inequality \eqref{puntomedio} above, we can clearly see that $\mu\in B_\epsilon(x)$, because \[
W_1(\delta_x,\mu)=\frac12\dist(x,y)<\epsilon.
\]
Finally, we observe that both $a=x$ and $a=y$ minimize the distance to $\mu$. Therefore, $\mu\notin \unp (X)$ and $\reach(x, X\subset W_1(X)) = 0$. \end{proof} Note that the hypothesis of the point being an accumulation point is necessary, because, if $x_0\in X$ is an isolated point, then the quantity $\ell = \inf_{x\neq x_0} \dist(x, x_0)$ is strictly positive, and every measure in $B_{\ell/2}(x_0)$ has a unique metric projection to $X$.
An interesting observation is that, combining the same argument in the proof of Theorem \ref{reach1wasserstein} with the previous remark, if $X$ is a discrete metric space isometrically embedded into another metric space $Y$, then $\reach(X \subset Y) = \inf_{x_1\neq x_2} \dist(x_1, x_2)/2>0$.
Now we will provide results about the reach of a geodesic metric space inside its $p$--Wasserstein space with $p>1$. We have found that these results are closely related to the uniqueness of the geodesics. This next proposition has important consequences about the reach inside a Wasserstein space, as it constructs measures with possibly several projections in $X$.
\begin{prop}\label{proposicion21} Let $(X,\dist)$ be a geodesic metric space, and $x, y\in X$ two points with $x\neq y$. Consider the probability measure $\mu= \lambda \delta_x + (1-\lambda) \delta_y$, for $0<\lambda<1$. Then $\mu$ minimizes its $p$--Wasserstein distance to $X$ exactly once for every minimizing geodesic between $x$ and $y$. \end{prop} \begin{proof}
The proof is structured in the following way: First, we choose a candidate for the distance--minimizer of $\mu$, supposing it lies inside a minimizing geodesic. Then, we show that the global minimum distance can only be achieved inside a minimizing geodesic.
Choose $\gamma(t) \colon [0,1] \to X$ a minimizing geodesic from $x$ to $y$. We can compute the cost $W_p^p( \delta_{\gamma(t)}, \mu)$ and then minimize in $t$. Indeed,
\begin{equation}{\label{ecuacionwassers}} W_p^p(\delta_{\gamma(t)}, \mu) = \lambda \dist(\gamma(t), x)^p + (1-\lambda) \dist(\gamma(t), y) ^p = (\lambda t^p + (1-\lambda) (1-t)^p )\dist( x, y)^p. \end{equation}
The minimum will be achieved at the parameter $t_0$ which verifies $\dfrac{d}{dt}\bigg|_{t=t_0} W_p^p (\delta_{\gamma(t)}, \mu) = 0$. We know this because that derivative is negative for $t=0$, positive for $t=1$, and vanishes at only one point $t=t_0$. An easy computation shows that the only solution in our interval is \[ t_0 = \frac{(1-\lambda)^{1/(p-1)}}{\lambda^{1/(p-1)}+(1-\lambda)^{1/(p-1)}}. \] Thus, the Wasserstein distance between $\mu$ and this \textit{geodesic minimum} is \[W_p^p(\delta_{\gamma(t_0)}, \mu ) = \frac{\lambda(1-\lambda)^{p/(p-1)} + (1-\lambda) \lambda^{p/(p-1)}}{(\lambda^{1/(p-1)}+ (1-\lambda)^{1/(p-1)})^p} \cdot \dist^p(x,y).\] Observe that this value is independent of the minimizing geodesic $\gamma$ of our choice.
Finally, we only have to prove that the minimum can only be achieved inside a minimizing geodesic. For that purpose, we will choose any $a \in X$, and we will construct another point $a'$ inside a minimizing geodesic segment $\gamma$ verifying $W_p^p(\delta_{a}, \mu) \ge W_p^p(\delta_{a'}, \mu) $.
The case $\dist(a, y) \ge \dist(x, y)$ is straightforward, as choosing $a' = x$ we have \begin{align*}
W_p^p (\delta_a, \mu) &= \lambda \dist(a, x) ^p + (1-\lambda) \dist(a, y) ^p\\
&\ge (1-\lambda) \dist(a, y) ^p \\
&\ge (1-\lambda) \dist(x, y) ^p = W_p^p (\delta_x, \mu). \end{align*}
Now, if $\dist(a, y) < \dist(x, y)$, we can pick $a'$ inside $\gamma$ at distance $\dist(a, y)$ to $y$. Observe that $\dist(a, x) \ge \dist(a', x)$ or $\gamma$ would not be minimizing. Then \begin{align*} W_p^p (\delta_a, \mu) &= \lambda \dist(a, x) ^p + (1-\lambda) \dist(a, y) ^p \\ &= \lambda \dist(a, x) ^p + (1-\lambda) \dist(a', y) ^p \\ &\ge \lambda \dist(a', x) ^p + (1-\lambda) \dist(a', y) ^p = W_p^p (\delta_{a'}, \mu). \end{align*} Therefore, the minimum can only be achieved inside minimizing geodesics between $x$ and $y$ and our proof is complete. \end{proof}
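The location of the minimiser can also be checked numerically. The sketch below (a simple grid search, for illustration only; the parameter values are ad hoc) minimises $t\mapsto \lambda t^p+(1-\lambda)(1-t)^p$ over $[0,1]$ and compares the result with the closed-form expression for $t_0$ obtained above.
\begin{verbatim}
import numpy as np

def t0_closed_form(lam, p):
    q = 1.0 / (p - 1.0)
    return (1 - lam) ** q / (lam ** q + (1 - lam) ** q)

def t0_grid(lam, p, n=200001):
    t = np.linspace(0.0, 1.0, n)
    f = lam * t ** p + (1 - lam) * (1 - t) ** p  # W_p^p(delta_gamma(t), mu) / dist(x,y)^p
    return t[np.argmin(f)]

for lam, p in [(0.3, 2.0), (0.7, 3.0), (0.9, 1.5)]:
    print(lam, p, t0_closed_form(lam, p), t0_grid(lam, p))
    # the two values agree up to the grid resolution
\end{verbatim}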
Now, we will apply the preceding proposition to construct measures with multiple projections close to any point in $X$. We will use this to derive sufficient conditions for attaining $\reach(x, X\subset W_p(X))=0$ at every point $x\in X$.
\begin{thm}\label{teorema22}
Let $X$ be a geodesic metric space, and $x\in X$ a point such that there exists another $y\in X$ with the property that there exist at least two different minimising geodesics from $x$ to $y$. Then, for every $p>1,$ \[ \reach(x, X\subset W_p(X))=0. \] In particular, if there exists a point $x\in X$ satisfying that property, $\reach(X\subset W_p(X))=0$ for every $p>1$. \end{thm}
\begin{proof} The probability measure $\mu_\lambda = \lambda \delta_x + (1-\lambda )\delta_y$ will have at least two different points minimizing its distance to $X$ by Proposition \ref{proposicion21}.
Now simply observe that $W_p^p(\mu_\lambda, \delta_x) = (1-\lambda) \dist(x, y)^p$, which decreases to 0 when $\lambda \to 1 $. Hence $\reach(x, X) = 0$ for every $x\in X$ satisfying that property, and therefore $\reach(X\subset W_p(X))=0$. \end{proof} When $X$ is a Riemannian manifold, some common hypotheses will grant us reach $0$. For example, a classic result by Berger (see for example \cite[Chapter 13, Lemma 4.1]{docarmo}) proves that our theorem can be applied when $X$ is compact. In this case, for any $x\in X$, there always exists another $q\in X$ such that there exist two minimizing geodesics from $x$ to $q$. More precisely, for every $x\in X$ we can choose a maximum $q$ of the function $\dist(x, \cdot)$ and there will be at least two minimal geodesics from $x$ to $q$. There is a similar result in \cite{luisyfernando}, where it is shown that for every $x$, there exists $q\in X$ such that $x$ and $q$ are joined by several minimizing geodesics.
\begin{cor}{\label{corollarycompact}} If $M$ is a compact Riemannian manifold, then $\reach(x, M\subset W_p(M)) =0$ for every $p>1$ and $x\in M$. \end{cor}
Also, we can apply our Theorem \ref{teorema22} to the non simply connected case:
\begin{cor}{\label{corollarynotsimply}}
If $M$ is a complete Riemannian manifold with non--trivial fundamental group (i.e. not simply connected), then $\reach(x, M\subset W_p(M)) =0$ for every $p>1$ and $x\in M$. \end{cor} \begin{proof}
Consider the universal cover $\pi \colon \tilde{M} \to M$. Let $x\in M$, and let $\tilde{x}$ be a point with $\pi(\tilde{x}) = x$. Denote by $G$ the fundamental group of $M$. We know that $G$ acts on $\tilde{M}$ by isometries and that $G\tilde{x}$ is a discrete, locally finite set. Then, we may take $\tilde{x}'\in G\tilde{x}\setminus\{\tilde{x}\}$ at minimal distance from $\tilde{x}$.
Then we can take a minimizing geodesic $\tilde{\gamma}: [0, \ell] \to \tilde{M}$ from $\tilde{x}$ to $\tilde{x}'$, and the projection $\gamma= \pi \circ \tilde{\gamma}$ will be a geodesic loop such that $\gamma(0)= \gamma(\ell) = x$, and $\gamma$ is globally minimizing on $[0, \ell/2]$ and $[\ell/2, \ell]$. Otherwise, by taking a shorter curve to the midpoint $\gamma(\ell/2)$ and lifting it we could construct a shorter geodesic from $\tilde{x}$ to another point in $G\tilde{x}$ and our two points would not be at minimal distance. Hence $x$ and $\gamma(\ell/2)$ are joined by two distinct minimizing geodesics, and Theorem \ref{teorema22} applies. \end{proof}
\subsection{Infinite reach}
For this subsection, we will use results obtained by Kell \cite{kell}, employing the metric definitions presented in Section \ref{metricdefinitions}. The combination of these elements yields results that imply infinite reach for certain metric spaces.
\begin{thm}{\label{reachpositivowass}} Let $(X,\dist)$ be a reflexive metric space. Then the following assertions hold:
\begin{enumerate}
\item If $X$ is strictly $p$--convex for $p\in[1,\infty)$ or uniformly $\infty$--convex if $p=\infty$, then\begin{equation}
\reach(X\subset W_r(X))=\infty\text{, for }r>1.
\end{equation}
\item If $X$ is Busemann, strictly $p$--convex for some $p\in[1,\infty]$ and uniformly $q$--convex for some $q\in[1,\infty]$, then\begin{equation}
\reach(X\subset W_r(X))=\infty\text{, for }r>1.
\end{equation}
\end{enumerate} \end{thm}
\begin{proof}
In \cite[Theorem 4.4.]{kell}, Kell establishes that any $p$-convex and reflexive metric space possesses $p$-barycenters, as defined in \cite[Definition 4.3.]{kell}. His Theorem 4.4. establishes the existence of such barycenters but not their uniqueness. To establish uniqueness, we require the conditions stated in the two cases of our theorem. Now we explain how these restrictions give us infinite reach.
\begin{enumerate}
\item Following \cite[Corollary 4.5.]{kell}, the spaces $(X,\dist)$ which satisfy the hypotheses in item (1) of the theorem have unique $r$--barycenters for $r>1$. In other words, every $\mu\in W_r(X)$ has a unique barycenter. This finishes the proof of the first assertion of the theorem.
\item Following \cite[Lemma 1.4.]{kell}, if $(X,\dist)$ is strictly (resp. uniformly) $p$--convex for some $p$, then it is strictly (resp. uniformly) $p$--convex for all $p$. Hence, we are in the case (1). \qedhere
\end{enumerate} \end{proof}
As we pointed out in Section \ref{metricdefinitions}, $\mathrm{CAT}(0)$--spaces are a well--known example of metric spaces satisfying some of the hypotheses in Theorem \ref{reachpositivowass}. In that sense, there is a straightforward corollary to our Theorem \ref{reachpositivowass} in terms of $\mathrm{CAT}(0)$--spaces:
\begin{cor}{\label{reachinfinitocat0}}
Let $(X,\dist)$ be a reflexive $\mathrm{CAT}(0)$--space. Then \[
\reach(X\subset W_p(X))=\infty, \text{ for }p>1.
\] \end{cor}
\begin{proof}
As Kell stated in \cite[Last line of Introduction]{kell}, $\mathrm{CAT}(0)$-spaces are both Busemann spaces and uniformly $p$--convex for every $p\in[1,\infty]$.
Moreover, from the definition of $\mathrm{CAT}(\kappa)$--spaces, with $\kappa=0$, denoting by $x',y',z'\in\mathbb{E}^n$ the comparison points of $x,y,z$, we have that
\begin{align*}
\dist(m(x,y),z)&\leq\dist_{\mathbb{E}^n}(m(x',y'),z')\\
&\leq\frac12\left(\dist_{\mathbb{E}^n}(x',z')+\dist_{\mathbb{E}^n}(y',z')\right)=\frac12\left(\dist(x,z)+\dist(y,z)\right).
\end{align*} Hence, $\mathrm{CAT}(0)$--spaces are strictly $1$--convex and, by \cite[Lemma 1.4]{kell} they are strictly $p$--convex for all $p$. The conclusion now follows from item (2) in Theorem \ref{reachpositivowass}. \end{proof}
\begin{rema} It is easy to check, from the definition, that $\mathrm{CAT}(0)$--spaces are contractible, and, therefore, simply connected. This is a necessary condition for Theorem \ref{reachpositivowass}, because if this were not the case, we would have a closed geodesic and Proposition \ref{proposicion21} would give us zero reach for the points inside that geodesic. \end{rema}
As particular cases of $\mathrm{CAT}(0)$--spaces, we have Hadamard manifolds (complete, simply connected Riemannian manifolds with non-positive sectional curvature everywhere) and, in particular, Euclidean $n$--space. So, as a corollary, we obtain the following:
\begin{cor}\mbox{}
\begin{enumerate}
\item Let $(M^n,g)$ be a Hadamard manifold. Then \[
\reach(M^n\subset W_p(M^n))=\infty, \textit{ for }p>1.
\]
\item Let $\mathbb{E}^n$ be the Euclidean $n$--space. Then \[
\reach(\mathbb{E}^n\subset W_p(\mathbb{E}^n))=\infty, \textit{ for }p>1.
\] \end{enumerate} \end{cor}
Other authors have considered the existence of barycenters in the $\mathrm{CAT}(\kappa)$--space context, specifically $\kappa=0$. In \cite[Proposition 4.3.]{sturmnonpositive}, Sturm proved the existence and uniqueness of barycenters for $\mathrm{CAT}(0)$--spaces only for the $2$--Wasserstein space. In \cite[Theorem B]{yokota}, Yokota stated a condition on $\mathrm{CAT}(\kappa)$--spaces, with $\kappa>0$, to have unique barycenters. This condition is related to the size of the diameter of the $\mathrm{CAT}(\kappa)$--space, which needs to be small in order to have unique barycenters.
\subsection{Projection map}
The fact that the reach is infinite leads to a natural question about the regularity of the \textit{projection map}, i.e., \begin{align*}
\proj_p: W_p(X)&\to X\\
\mu&\mapsto r_\mu, \end{align*}where $r_\mu\in X$ denotes the barycenter of the measure $\mu$, that is, the only point in $X$ that minimizes the distance to $\mu$.
Let $(X,\dist)$ be a metric space for which Theorem \ref{reachpositivowass} holds. Then $X$ has infinite reach; in other words, every measure has a unique barycenter and $\proj_p$ is well--defined. Moreover, the fibres of the map are convex. Let $\mu$, $\nu\in\{\sigma\in W_p(X)\colon r_\sigma=a\}$, $\lambda\in(0,1)$ and $b\in X$. Then \[ W_p^p(\lambda\mu+(1-\lambda)\nu,\delta_b)=\lambda W_p^p(\mu,\delta_b)+(1-\lambda)W_p^p(\nu,\delta_b)\geq\lambda W_p^p(\mu,\delta_a)+(1-\lambda)W_p^p(\nu,\delta_a), \]since $\mu$, $\nu\in\{\sigma\in W_p(X)\colon r_\sigma=a\}$.
A \textit{submetry} between two metric spaces $X, Y,$ is a map $f\colon Y \to X$ such that, for every $a\in Y$ and every $r\ge 0$, we have $f(B_Y(a, r)) = B_X(f(a), r)$. For more information about this type of maps, we refer the reader to \cite{beressub,guijarrobere,guijarrosub,kaplytchaksub}.
We briefly recall Kuwae's property \textbf{B} (see Section $4.3$ in \cite{kell} and references therein). Take two geodesics $\gamma, \eta$ such that they intersect at a unique point $p_0.$ Assume that for all points $z \in \gamma [0,1]$
the minimum of the map $t \mapsto \|z-\eta_t\|$ is achieved only by the point $p_0.$ Then for every point $w \in \eta[0,1]$ the minimum of the map $t \mapsto \|w-\gamma_t\|$ is achieved only by $p_0.$
\begin{thm}{\label{thmsubmetry}}
Let $(X,\|\cdot\|)$ be a reflexive Banach space equipped with a strictly convex norm and satisfying property \textbf{B}. Then $\proj_2$ is a submetry. \end{thm}
\begin{proof}
First let us make a couple of observations. From the strict convexity of the norm it follows that between any two points $x,y \in X$ there is a unique geodesic joining them; more precisely, it is the curve $[0,1]\ni t \mapsto (1-t)x+ty.$
In particular this tells us that $m(x,y)= \frac{1}{2}x+\frac{1}{2}y.$
Let $p>1$ and $x,y,z\in X$ then
\begin{align*}
\|m(x,y)-z\|^p &= \|\frac{1}{2}x+\frac{1}{2}y-z\|^p \\
&< 2^{p-1}\left(\|\frac{1}{2}(x-z)\|^p+\|\frac{1}{2}(y-z)\|^p\right)\\
&= 2^{p-1}\left(\frac{1}{2^p}\|x-z\|^p+\frac{1}{2^p}\|y-z\|^p\right)\\
&= \frac{1}{2}\|x-z\|^p+\frac{1}{2}\|y-z\|^p
\end{align*}
Hence $(X,\|\cdot\|)$ is strictly $p-$convex and so it satisfies the conditions of Theorem \ref{reachpositivowass}, with this barycenters exist and are unique. Therefore the projection map $\proj_2$ is well defined.
Now notice that
\begin{align*}
\|m(x,z)-m(y,z)\| &= \|\frac{1}{2}(x+z)-\frac{1}{2}(y+z)\|\\
&= \frac{1}{2}\|x-y\|.
\end{align*} This implies that, for $p>1$ and $x\neq y$,
\[\|m(x,z)-m(y,z)\|^p < \frac{1}{2}\|x-y\|^p, \] i.e. it is $p-$Busemann. Then the $2-$Jensen inequality (see Section $4.3$ in \cite{kell}) holds and so, by Proposition $4.8$ in \cite{kell}, $\proj_2$ is $1-$Lipschitz. Let $B_r(\mu)$ be a ball in the Wasserstein space. We just proved that \[\proj_2(B_r(\mu) )\subset B_r(\proj_2(\mu)).\] Then, it suffices to see that every point in $B_r(\proj_2(\mu))$ is the image of a point (the barycenter of a measure) in $B_r(\mu)$. Fix $\mu$, $r\ge 0$ and let $b\in B_r(\proj_2(\mu))$. Let $T$ be the translation from $\proj_2(\mu)$ to $b$. Let us show that $T_\# \mu$ has $b$ as a barycenter. For any $a\in X$,
\begin{align*}
W_2^2(T_\# \mu, \delta_{T(a)}) &= \int_{X} \|x-T(a)\|^2 \, d(T_\# \mu )(x)\\
&= \int_{X} \|T(x)-T(a)\|^2 \, d\mu (x)
\\ &= \int_{X} \|x-a\|^2 \,d\mu(x) = W_2^2(\mu, \delta_a).
\end{align*}
Hence, if $a=\proj_2(\mu)$, then $a$ minimizes the distance from $X$ to $\mu$, and then $T(a)=b$ minimizes the distance to $T_\#\mu$.
It remains to see that $T_\#\mu$ belongs to $B_r(\mu)$. Choosing $(\operatorname{Id}, T)_\# \mu$ as a transport plan in $\Gamma(\mu, T_\#\mu)$,
\begin{align*}
W_2^2(\mu, T_\#\mu) &= \inf_{\pi \in \Gamma(\mu, T_\#\mu)} \int_{X \times X} \|x-y\|^2 \, d\pi(x,y) \\ &\le \int_{X} \|x-T(x)\|^2 \, d\mu(x) = \| \proj_2(\mu)-b\|^2 < r^2.
\end{align*}
Therefore, $T_{\#}\mu\in B_r(\mu).$ \end{proof}
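The two computations at the end of the proof only involve couplings with a Dirac mass or the explicit plan $(\operatorname{Id}, T)_\#\mu$, so they can be reproduced numerically for discrete measures. The following sketch (illustrative only; the empirical measure, the reference point and the translation vector are ad hoc choices) checks that a translation preserves the cost to a translated Dirac mass, and that the cost of the plan $(\operatorname{Id}, T)_\#\mu$, which bounds $W_2^2(\mu, T_\#\mu)$ from above, equals the squared length of the translation vector.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)
pts = rng.normal(size=(100, 2))     # support of an empirical measure mu in R^2
w = np.full(100, 1 / 100)           # uniform weights
a = np.array([0.2, -0.4])           # an arbitrary reference point
v = np.array([1.3, 0.7])            # translation vector, T(x) = x + v

def w2_sq_to_dirac(points, weights, z):
    # W_2^2(mu, delta_z): the only coupling is mu (x) delta_z.
    return float(np.sum(weights * np.sum((points - z) ** 2, axis=1)))

lhs = w2_sq_to_dirac(pts + v, w, a + v)   # W_2^2(T_# mu, delta_{T(a)})
rhs = w2_sq_to_dirac(pts, w, a)           # W_2^2(mu, delta_a)
print(np.isclose(lhs, rhs))               # True: translations preserve the cost

# Cost of the explicit plan (Id, T)_# mu, an upper bound for W_2^2(mu, T_# mu).
plan_cost = float(np.sum(w * np.sum(((pts + v) - pts) ** 2, axis=1)))
print(np.isclose(plan_cost, float(v @ v)))  # True: the bound equals |v|^2
\end{verbatim}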
Examples of spaces satisfying the assumptions of Theorem \ref{thmsubmetry} include Hilbert spaces and $L^p$ spaces (see Examples $4.5$ and $4.6$ in \cite{kuwae}).
\section{Reach of the Orlicz--Wasserstein space}{\label{reachwassersteinorliz}} An introduction to Orlicz--Wasserstein spaces can be found in subsection \ref{subseccionorlicz}. More information about this type of spaces along with a proof of their completeness can be found in \cite{sturm}. \subsection{Null reach}
We start this section with a simple remark. \begin{rema} Let $\varphi\equiv \operatorname{Id}$. Observe that $\psi \circ \dist$ is a distance when $\psi$ is a positive concave function with $\psi(0)=0$. Then $W_\vartheta$ is the $1$--Wasserstein distance for the metric space $(X, \psi \circ \dist)$. Therefore, \[
\reach(x, X\subset W_{\vartheta}(X))=0
\]
whenever $x\in X$ is an accumulation point, by Theorem \ref{reach1wasserstein}. \end{rema}
We can replicate Proposition \ref{proposicion21} for the case where $X$ is isometrically embedded into an Orlicz--Wasserstein space using a more delicate argument.
\begin{prop}\label{proposicion33} Let $X$ be a geodesic metric space, and let $x, y\in X$ be two points with $x\neq y$. Consider the probability measure $\mu= \lambda \delta_x + (1-\lambda) \delta_y$, for $0<\lambda<1$. Then, the following assertions hold: \begin{enumerate} \item $\mu$ can only minimize its $\vartheta$--Wasserstein distance to $X$ inside a minimizing geodesic between $x$ and $y$. \item If $\lambda$ is close to one, and there exists a constant $c>1$ such that $\varphi^{-1}(t)< t $ for every $t>c$, then the minimum will be attained inside the interior of each geodesic. \end{enumerate} \end{prop} \begin{proof}
First we will see that the minimum can only be attained inside a geodesic. For that purpose, we will replicate the argument in the proof of Proposition \ref{proposicion21}. That is, given $a\in X$, we construct $a'\in\gamma([0, \ell])$, where $\gamma$ is a minimizing geodesic, with
\[
W_\vartheta(\delta_a, \mu) > W_\vartheta(\delta_{a'}, \mu) .
\]
Again, it suffices to consider the case $\dist(a, y) \le \dist(x, y)$. We can pick $a' \in \gamma([0,\ell])$ such that $\dist(a', y) = \dist(a, y)$. Then, $\dist(a', x) < \dist(a, x)$ or $a$ is also inside a minimizing geodesic.
Let \[
S=\left\{ t> 0: \lambda \varphi\left( \frac{1}{t} \dist(a, x) \right) + (1-\lambda) \varphi \left( \frac{1}{t} \dist(a, y) \right) \le 1 \right\}.
\]As we have only one transport plan $\pi = \delta_a \otimes \mu$, we can write
\[
W_\vartheta(\delta_a, \mu) = \inf S.
\]
Thus, it is enough to see that, if $t_0$ verifies the inequality inside that infimum for $a$, then it will verify it for $a'$. Indeed,
\begin{align*}
1 &\ge \lambda \varphi\left( \frac1{t_0}\dist(a, x) \right) + (1-\lambda) \varphi \left( \frac{1}{{t_0}} \dist(a, y) \right) \\ &= \lambda \varphi\left( \frac1{t_0} \dist(a, x) \right) + (1-\lambda) \varphi \left( \frac{1}{{t_0}} \dist(a', y) \right)\\
&> \lambda \varphi\left( \frac1{t_0}\dist(a', x) \right) + (1-\lambda) \varphi \left( \frac{1}{{t_0}} \dist(a', y) \right).
\end{align*}
The last inequality comes from the monotonicity of $\varphi$, and the assumption $\dist(a', x) < \dist(a,x)$. Observe that, because the previous inequality is strict, we will have a strict inequality in $W_\vartheta(\delta_a, \mu) > W_\vartheta(\delta_{a'}, \mu) $.
Now we will prove the second part of our proposition. Assuming $\lambda$ close to 1, and that $\varphi$ differs from the identity for big enough values, we will see that there are points $a \in \gamma((0, \ell) )$ with
\begin{equation}\label{ecuacionprop33}
W_\vartheta(\delta_a, \mu) \le \min \{W_\vartheta(\delta_{x}, \mu),W_\vartheta(\delta_{y}, \mu) \}. \end{equation}
First, we observe that the right hand side in inequality \eqref{ecuacionprop33} above is easy to compute. Using that $\varphi^{-1}$ is an increasing function,
\begin{align*}
W_\vartheta(\delta_x, \mu) &= \inf \left\{ t>0 : (1-\lambda) \varphi\left( \frac{1}{t} \dist(x, y) \right) \le 1 \right\} \\
&=\inf \left\{ t>0 : \varphi\left( \frac{1}{t} \dist(x, y) \right) \le \frac{1}{1-\lambda} \right\} \\
&=\inf \left\{ t>0 : \frac{1}{t} \le \frac{\varphi^{-1} \left( \frac{1}{1-\lambda}\right)}{\dist(x,y)} \right\}
\\ &= \inf \left\{ t>0 : \frac{\dist(x,y)}{\varphi^{-1} \left( \frac{1}{1-\lambda}\right)} \le t \right\} \\
&= \frac{\dist(x,y)}{\varphi^{-1} \left( \frac{1}{1-\lambda}\right)}.
\end{align*} Similarly, $W_\vartheta(\delta_y, \mu) =\dfrac{\dist(x,y)}{\varphi^{-1} \left( 1/\lambda\right)} $. If we want $\lambda$ close to one, we can suppose $\lambda > 1- \lambda$. Therefore, $1/{(1-\lambda)} > {1}/{\lambda}$, and because $\varphi^{-1}$ is increasing, \[ \varphi^{-1} \left(1/{(1-\lambda)} \right)> \varphi^{-1} \left({1}/{\lambda} \right). \] Thus, we know that \[t_0 := \min \{W_\vartheta(\delta_{x}, \mu),W_\vartheta(\delta_{y}, \mu) \} = \frac{\dist(x,y)}{\varphi^{-1} \left( \frac{1}{1-\lambda}\right)}.\] Now, we will show that we can find a point inside the geodesic $a = \gamma(s)$, $s\in (0, \ell)$ verifying inequality \eqref{ecuacionprop33}. It suffices to see that $t_0 \in S$, because $W_\vartheta(\delta_a, \mu)$ is the infimum of $S$ and by definition will be smaller. First, observe that, by monotonicity of $\varphi^{-1}$ the inequality defining $S$ is equivalent to \[ \varphi^{-1} \left( \lambda \varphi\left( \frac1t \dist(a, x) \right) + (1-\lambda) \varphi \left( \frac{1}{t} \dist(a, y) \right) \right) \le \varphi^{-1} (1) = 1. \] By concavity of $\varphi^{-1}$, it is enough to have \[ \lambda \frac{1}{t} \dist(a, x) + (1-\lambda) \frac{1}{t} \dist(a, y) \le 1. \]
We will evaluate $t=t_0$ and look for a condition in $s$ so the preceding inequality is verified. Observe that $\dist(a, x) = s$, $\dist(a, y) = \ell - s$ and $\dist(x,y) = \ell$. Then
\begin{align*}
\lambda \frac{1}{t_0} \dist(a, x) + (1-\lambda) \frac{1}{t_0} \dist(a, y) \le 1 &\iff
\lambda \frac{s}{\ell} \cdot \varphi^{-1} (1/(1-\lambda)) + (1-\lambda) \frac{\ell-s}{\ell} \cdot \varphi^{-1} (1/(1-\lambda)) \le 1 \\ &\iff \frac{\lambda s}{\ell} + \frac{\ell-s}{\ell} - \lambda \cdot \frac{\ell-s}{\ell} \le \frac{1}{\varphi^{-1} (1/(1-\lambda))}
\\ &\iff
s \cdot ( 2\lambda -1) \le \ell \left( \frac{1}{\varphi^{-1} (1/(1-\lambda))} -(1-\lambda)
\right) \\
&\iff s \le \ell \left( \frac{1}{\varphi^{-1} (1/(1-\lambda))} -(1-\lambda)
\right) \cdot (2\lambda-1) ^{-1}. \end{align*} If we show that our bound for $s$ is strictly positive, the minimum will be attained inside the geodesic and we will finish the proof. Choosing $\lambda$ close enough to 1, we have $(2\lambda-1) >0$ and $1/(1-\lambda) > c$. Therefore, $\varphi^{-1}( 1/(1-\lambda)) - 1/(1-\lambda) < 0 $ and, because the function $t \mapsto 1/t$ is decreasing, $\dfrac1{\varphi^{-1}( 1/(1-\lambda))} - (1-\lambda) > 0 $ and we have finished our proof. \end{proof} An immediate consequence from our proposition is the following theorem, providing us with examples of manifolds with zero reach inside their Orlicz--Wasserstein space:
\begin{thm}{\label{reachceroorlicz}} Let $X$ be a geodesic metric space, and $x\in X$ a point such that there exists another $y\in X$ with the property that there exist at least two different minimising geodesics from $x$ to $y$. Suppose $X$ is isometrically embedded into an Orlicz--Wasserstein space $W_\vartheta(X)$. Then, for every $\varphi$ such that $\varphi(t_0) \neq t_0$ for some $t_0>1$, \[ \reach(x, X\subset W_\vartheta(X))=0. \] In particular, if there exists a point $x\in X$ satisfying that property, then $\reach(X\subset W_\vartheta(X))=0$ for every such $\varphi$. Also, in compact manifolds and in non-simply connected manifolds, $\reach(x, X\subset W_\vartheta(X)) = 0$ for every $x\in X$. \end{thm} \begin{proof}
The proof is identical to the one from Theorem \ref{teorema22}. It remains to see that $\varphi(t_0) \neq t_0$ implies the condition we ask for in Proposition \ref{proposicion33}. Indeed, the convexity and $\varphi(1) = 1$ imply $\varphi(t) > t$ for every $t>t_0$. And, because $\varphi^{-1}$ is increasing, we also have $t > \varphi^{-1}(t)$ for every $t>t_0$, which is what we need to apply Proposition \ref{proposicion33}. \end{proof}
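For instance (an illustration of the theorem above, not part of its statement): take $\varphi(t)=t^p$ with $p>1$, so that $\varphi(1)=1$ and $\varphi(t_0)=t_0^p>t_0$ for every $t_0>1$. If $X=\mathbb{S}^n$ is the round sphere, every $x\in X$ is joined to its antipodal point by infinitely many minimising geodesics, hence $\reach(x,\mathbb{S}^n\subset W_\vartheta(\mathbb{S}^n))=0$ for every $x$, and in particular $\reach(\mathbb{S}^n\subset W_\vartheta(\mathbb{S}^n))=0$.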
\subsection{Positive Reach} Similarly to the $p$--Wasserstein case, several results by Kell \cite{kell} imply that reflexive $\mathrm{CAT}(0)$--spaces inside some Orlicz--Wasserstein spaces have infinite reach.
\begin{thm}{\label{cat0orlicz}}
Let $(X,\dist)$ be a reflexive $\mathrm{CAT}(0)$--space. Suppose $\varphi$ is a convex function which can be expressed as $\varphi(r) = \psi(r^p)$, where $\psi$ is another convex function and $p>1$. Then \begin{equation}{\label{reachpositivoorlicz}}
\reach(X\subset W_{\vartheta}(X))=\infty,
\end{equation} In particular, for $\psi\equiv\identity$ and $\varphi(1)=1$ this covers the $p$--Wasserstein case. \end{thm}
\begin{proof}
As we pointed out in the proof of Corollary \ref{reachinfinitocat0}, $\mathrm{CAT}(0)$ spaces are strictly $p$-convex, so by \cite[Lemma A.2.]{kell} they are strictly Orlicz $\varphi$-convex. Thus, the result follows directly from \cite[Theorem A.4.]{kell}, which ensures the existence of unique barycenters for every $\mu\in W_{\vartheta}(X)$. \end{proof}
\begin{rema}
All proper metric spaces (i.e., those where every bounded closed set is compact) are reflexive \cite{huff, kell}. As Caprace pointed out in \cite{caprace}, \textit{symmetric spaces} of non--compact type (i.e.\ with non-positive sectional curvature and no non-trivial Euclidean factor) and \textit{Euclidean buildings} are proper $\mathrm{CAT}(0)$ spaces and are examples for which Theorem \ref{cat0orlicz} holds. \end{rema}
\section{Reach of the Persistence Diagram space}{\label{reachpersistence}}
In \cite[Theorem 19]{bubenik}, Bubenik and Wagner construct an explicit isometric embedding of bounded separable metric spaces into $(\dgm_\infty,w_\infty)$. \begin{align*}
\varphi:(X,\dist)&\to(\dgm_\infty,w_\infty)\\
x&\mapsto\{(2c(k-1),2ck+\dist(x,x_k))\}_{k=1}^\infty, \end{align*}where $c>\diam(X)=\sup\{\dist(x,y)\colon x,y\in X\}$ and $\{x_k\}_{k=1}^\infty$ is a countable, dense subset of $(X,\dist)$. The authors stated that this embedding can be thought of as a shifted version of the Kuratowski embedding (for more information about this embedding see \cite{cuerno}).
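To illustrate the embedding (a sketch of ours, not taken from \cite{bubenik}), consider a finite metric space, for which the dense set $\{x_k\}$ can be taken to be the whole space. Pairing the points of two embedded diagrams that lie on the same vertical line has cost $\max_k|\dist(x,x_k)-\dist(y,x_k)|=\dist(x,y)$, and, as recalled in the proof of Theorem \ref{reachPD} below, no cheaper pairing is available once $c>\diam(X)$; the following snippet checks this numerically.
\begin{verbatim}
import numpy as np

# Illustration only: the shifted Kuratowski-type embedding
# phi(x) = {(2c(k-1), 2ck + dist(x, x_k))}_k for a finite metric space,
# with {x_k} = X and c > diam(X).
D = np.array([[0.0, 1.0, 2.0],        # distance matrix of a 3-point space
              [1.0, 0.0, 1.5],
              [2.0, 1.5, 0.0]])
n = D.shape[0]
c = D.max() + 1.0                     # any c > diam(X) works

def phi(i):
    # one off-diagonal point per reference point x_k, k = 0, ..., n-1
    return np.array([(2.0 * c * k, 2.0 * c * (k + 1) + D[i, k])
                     for k in range(n)])

def same_line_cost(i, j):
    # cost of the pairing matching points on the same vertical line;
    # it equals max_k |D[i, k] - D[j, k]| = D[i, j]
    return float(np.max(np.abs(phi(i)[:, 1] - phi(j)[:, 1])))

for i in range(n):
    for j in range(n):
        assert np.isclose(same_line_cost(i, j), D[i, j])
\end{verbatim}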
\begin{thm}{\label{reachPD}}
Let $(X,\dist)$ be a separable, bounded metric space and $(\dgm_{\infty},w_\infty)$ the space of persistence diagrams with the bottleneck distance. If $x\in X$ is an accumulation point, then \[
\reach(x, X\subset \dgm_\infty)=0.\]
In particular, if $X$ is not discrete, $\reach(X\subset \dgm_\infty)=0.$ \end{thm} \begin{proof}
For any two points $x, y\in X$, we can construct a persistence diagram $P$ such that at least the two points $\varphi(x)$ and $\varphi(y)$ minimize the bottleneck distance from $P$ to the embedded space $\varphi(X)$. This $P$ will be a midpoint between $\varphi(x)$ and $\varphi(y)$, so by choosing $y$ arbitrarily close to $x$ we obtain a diagram with several closest points in $\varphi(X)$ (namely $\varphi(x)$ and $\varphi(y)$) that is also arbitrarily close to $\varphi(x)$. Therefore, $ \reach(x,X\subset \dgm_\infty)=0$ for every accumulation point $x\in X$, and, thus, $\reach(X\subset\dgm_\infty)=0$.
Then, it suffices to prove our first claim. For $x, y \in X$, choose the diagram
\[P = \left\{\left(2c(k-1), 2ck + \frac{\dist(x, x_k) + \dist(y, x_k)}{2}\right)\right\}_{k=1}^\infty.\]
Now, observe that
\begin{align*}
w_\infty(\varphi(x), P) &=
\sup_{k\in \mathbb{N}}\left| \dist(x, x_k) - \frac{\dist(y, x_k) + \dist(x, x_k)}{2} \right|
\\ &=\sup_{k\in \mathbb{N}} \frac{|\dist(x, x_k) - \dist(y, x_k)|}{2} = \frac12 w_\infty(\varphi(x), \varphi(y)) = \frac {\dist(x,y)}{2}.
\end{align*}
And, by a symmetric argument,
\[w_\infty(\varphi(y), P)=\frac {\dist(x,y)}{2}.\]
Note that, similarly to the end of the proof of \cite[Theorem 19]{bubenik}, any other pairing between points of the diagrams would pair two points from different vertical lines. Those points would be at distance at least $2c$. On the other hand, any possibly unpaired points are at distance at least $c$ from the diagonal. So those pairings would have a cost bigger than $c>\dist(x,y)/2$, and therefore we always pair points in the same vertical lines.
Now, if $z\in X$, we will see that $P$ is at distance at least $\frac12 \dist(x,y)$ from $\varphi(z)$. Indeed, we can give a lower bound for the distance simply by omitting the supremum: \begin{align*}
w_\infty(\varphi(z), P) &= \sup_{k\in \mathbb{N}} \left|\dist(z, x_k) - \frac{\dist(x, x_k) + \dist(y, x_k)}{2} \right|\\&\ge \left|\dist(z, x_k) - \frac{\dist(x, x_k) + \dist(y, x_k)}{2} \right|.
\end{align*}
Taking $x_k$ arbitrarily close to $z$ (recall that $\{x_k\}_{k=1}^\infty$ is dense in $X$) and using the triangle inequality, we get that
\[
w_\infty(\varphi(z), P) \ge \left| \frac{\dist(x, z) + \dist(y, z)}{2}\right| \ge \frac{\dist(x,y)}{2}.
\]
This proves that $P$ is not in the image of $\varphi$, and that $\varphi(x), \varphi(y)$ both minimize the distance from $P$ to $\varphi(X)$, as we wanted to see. \end{proof}
\nocite{*} \printbibliography
\end{document}
\begin{document}
\title{Asymptotic stability of ground states in 2D nonlinear Schr\" odinger equation including subcritical cases}
\begin{abstract}
\noindent We consider a class of nonlinear Schr\"{o}dinger equation in two space
dimensions with an attractive potential. The nonlinearity is local but rather general
encompassing for the first time both subcritical and supercritical (in $L^2$) nonlinearities. We study the asymptotic
stability of the nonlinear bound states, i.e. periodic in time
localized in space solutions. Our result shows that all solutions
with small initial data converge to a nonlinear bound state.
Therefore, the nonlinear bound states are asymptotically stable. The proof hinges on dispersive estimates that we
obtain for the time dependent, Hamiltonian, linearized dynamics
around a carefully chosen one parameter family of bound states that ``shadows'' the nonlinear evolution of the
system. Due to the generality of the methods we develop we expect
them to extend to the case of perturbations of large bound states and to other nonlinear dispersive wave type equations.
\end{abstract}
\makeatletter \@addtoreset{equation}{section} \makeatother \renewcommand{\theequation}{\thesection.\arabic{equation}}
\section{Introduction}
In this paper we study the long time behavior of solutions of the nonlinear Schr\" odinger equation (NLS) with potential in two space dimensions (2-d): \begin{eqnarray} i\partial_t u(t,x)&=&[-\Delta_x+V(x)]u+g(u), \quad t\in\mathbb{R},\quad x\in\mathbb{R}^2\label{eq:ufull}\\ u(0,x)&=&u_0(x)\label{eq:ic} \end{eqnarray} where the local nonlinearity is constructed from the real valued, odd, $C^2$ function $g:\mathbb{R}\mapsto\mathbb{R}$ satisfying \begin{equation}\label{gest}
g(0)=g'(0)=0\quad {\rm and}\quad |g''(s)|\leq C(|s|^{\alpha_1}+|s|^{\alpha_2}),\quad s\in\mathbb{R},\ \frac{1}{2}<\alpha_1\le\alpha_2<\infty\end{equation} which is then extended to a complex function via the gauge symmetry: \begin{equation}\label{gsym} g(e^{i\theta}s)=e^{i\theta}g(s),\qquad \theta\in\mathbb{R}. \end{equation} The equation has important applications in statistical physics, optics and water waves. It describes certain limiting behavior of Bose-Einstein condensates \cite{dgps:bec,lsy:2d} and propagation of time harmonic waves in wave guides \cite{kn:kp,kn:Marcuse,nm:no}. In the latter, $t$ plays the role of the coordinate along the axis of symmetry of the wave guide.
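For orientation (an illustration of hypothesis \eqref{gest}, not an additional assumption), the model pure power nonlinearities
\[
g(s)=|s|^{p-1}s,\qquad p>\frac52,
\]
are real valued, odd and $C^2$ with $g(0)=g'(0)=0$ and $|g''(s)|=p(p-1)|s|^{p-2}$, hence they satisfy \eqref{gest}--\eqref{gsym} with $\alpha_1=\alpha_2=p-2$. The cubic case $p=3$ corresponds to $\alpha_1=1$, the $L^2$--critical exponent in two space dimensions, while $\frac52<p<3$ gives the subcritical range $\frac12<\alpha_1<1$ covered by Theorem \ref{th:main} below.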
It is well known that this nonlinear equation admits periodic in time, localized in space solutions (bound states or solitary waves). They can be obtained via both variational techniques \cite{bl:i,str:sw,rw:bs} and bifurcation methods \cite{pw:cm,rw:bs,kz:as2d}. Moreover the set of periodic solutions can be organized as a $C^2$ manifold (center manifold), see \cite{gnt:as,km:as3d} or next section. Orbital stability of solitary waves, i.e. stability modulo the group of symmetries $u\mapsto e^{-i\theta}u,$ was first proved in \cite{rw:bs,mw:ls}, see also \cite{gss:i,gss:ii,ss:ins}.
The main result of this paper is that solutions of \eqref{eq:ufull}-\eqref{eq:ic} with small initial data asymptotically converge to a bound state, see Theorem \ref{th:main}. While asymptotic stability results for bound states in NLS first appeared in the work of A. Soffer and M. I. Weinstein \cite{sw:mc1,sw:mc2}, and were continued in \cite{pw:cm,kn:Wed,bp:asi,bp:asii,bs:as,sc:as,gnt:as}, our main contribution is to allow for subcritical and critical ($L^2$) nonlinearities, $\frac{1}{2}<\alpha_1\le 1$ in \eqref{gest}. To accomplish this we carefully project the nonlinear dynamics onto the center manifold of bound states and use linearization around this time-changing projection to study the motion in the radiative directions, i.e. directions that are not in the tangent space of the center manifold. Previously, linearization around a fixed bound state has been used, see the papers cited above. By continuously adapting the linear dynamics to the actual nonlinear evolution of the solution we can more precisely capture the effective potential induced by the nonlinearity $g$ into a time dependent linear operator. Once we have a good understanding of this time dependent linear dynamics, i.e. we have good dispersive estimates for its semigroup of operators, see Section \ref{se:lin}, we obtain information for the nonlinear dynamics via the Duhamel formula and contraction principles for integral equations, see Section \ref{se:main}. Note that we have recently used a similar technique to show that in the critical (cubic) case, \eqref{eq:ufull} with $g(s)=s^3,\ s\in\mathbb{R},$ the center manifold of bound states is an attractor for small initial data, see \cite{kz:as2d}. In this paper the technique is much refined: we use a better projection of the dynamics on the center manifold and sharper estimates for the linear dynamics. The refinements not only allow us to treat a much larger class of nonlinearities, including, for the first time, the subcritical ones, but also allow us to obtain actual convergence of the solution to a bound state.
However, the main challenge for our approach is to obtain good dispersive estimates for the semigroup of operators generated by the time dependent linearization that we use. This is accomplished in Section \ref{se:lin} via a perturbative method similar to the one we developed in \cite{kz:as2d}. As described in that section, we could have obtained sharper estimates by using a generalized Fourier multiplier technique to remove the singularity of
$$\|e^{i(\Delta -V) t}\|_{L^{1}\mapsto L^\infty}\sim |t|^{-1},$$ see \cite[Section 4]{km:as3d}. We chose not to do it because it requires stronger hypotheses on $V$ without allowing us to enlarge the spectrum of nonlinearities that we can treat.
Finally, we remark that our method is quite general, based solely on linearization around nonlinear bound states and estimates for integral operators with dispersive kernels. Therefore we expect it to generalize to the case of large nonlinear 2D ground states, see for example \cite{sc:as}, the presence of multiple families of bound states, see for example \cite{sw:sgs}, or to the case of time dependent nonlinearity, see \cite{ckp:res}. In all three cases our method will not only allow to treat the less dispersive environment, 2D compared to 3D, but it should greatly reduce the restrictions on the nonlinearity. The first author and collaborators are currently working on adapting the method to other dimensions and other dispersive wave type equations. The work in 3-D is complete, see \cite{km:as3d}.
\noindent{\bf Notations:} $H=-\Delta+V;$
$L^p=\{f:\mathbb{R}^2\mapsto \mathbb{C}\ |\ f\ {\rm measurable\
and}\ \int_{\mathbb{R}^2}|f(x)|^pdx<\infty\},\ 1\le p<\infty,$ endowed with the standard norm
$\|f\|_{L^p}=\left(\int_{\mathbb{R}^2}|f(x)|^pdx\right)^{1/p},$ while for $p=\infty,$ $L^\infty=\{f:\mathbb{R}^2\mapsto \mathbb{C}\
|\ f\ {\rm measurable\ and}\ {\rm essup}|f(x)|<\infty\},$ and it is endowed with the norm: $\|f\|_{L^\infty}={\rm essup}|f(x)|$;
$<x>=(1+|x|^2)^{1/2},$ and for $\sigma\in\mathbb{R},\ 1\le p<\infty,$ $L^p_\sigma$ denotes the $L^p$ space with weight $<x>^{p\sigma},$ i.e.\ the space of measurable functions $f(x)$ such that $<x>^{\sigma}f(x)$ belongs to $L^p,$ endowed with the norm
$\|f(x)\|_{L^p_\sigma}=\|<x>^{\sigma}f(x)\|_p,$ while for
$p=\infty,$ $L^\infty_\sigma$ denotes the vector space of measurable functions $f(x)$ such that ${\rm essup}|<x>^{\sigma}f(x)|<\infty$ endowed with the norm
$\|f(x)\|_{L^\infty_\sigma}=\|<x>^{\sigma}f(x)\|_{L^\infty};$
$\langle f,g\rangle =\int_{\mathbb{R}^2}\overline f(x)g(x)dx$ is the scalar product in $L^2,$ where $\overline f(x)$ denotes the complex conjugate of the complex number $f(x);$
$P_c$ is the projection associated to the continuous spectrum of the self adjoint operator $H$ on $L^2,\ {\rm range} P_c={\cal H}_0;$
$H^n$ denotes the Sobolev space of measurable functions having all distributional partial derivatives up to order $n$ in $L^2;$
$\|\cdot\|_{H^n}$ denotes the standard norm on these spaces.
\section{Preliminaries. The center manifold.}\label{se:prelim}
The center manifold is formed by the collection of periodic solutions for (\ref{eq:ufull}): \begin{equation}\label{eq:per}
u_E(t,x)=e^{-iEt}\psi_E(x) \end{equation} where $E\in\mathbb{R}$ and $0\not\equiv\psi_E\in H^2(\mathbb{R}^2)$ satisfy the time independent equation: \begin{equation}\label{eq:ev} [-\Delta+V]\psi_E+g(\psi_E)=E\psi_E \end{equation} Clearly the function constantly equal to zero is a solution of (\ref{eq:ev}) but (iii) in the following hypotheses on the potential $V$ allows for a bifurcation with a nontrivial, one (complex) parameter family of solutions:
\noindent{\bf (H1)} Assume that \begin{itemize}
\item[(i)] There exist $C>0$ and $\rho >3$ such that:
$$|V(x)|\le C<x>^{-\rho},\quad {\rm for\ all}\ x\in\mathbb{R}^2;$$
\item[(ii)] $0$ is a regular point\footnote{see
\cite[Definition 7]{ws:de2} or $M_\mu=\{0\}$ in relation (3.1) in \cite{mm:ae}}
of the spectrum of
the linear operator $H=-\Delta+V$ acting on $L^2;$
\item [(iii)]$H$ acting on $L^2$ has exactly one
negative eigenvalue $E_0<0$ with corresponding normalized
eigenvector $\psi_0.$ It is well known that $\psi_0(x)$ can be
chosen strictly positive and exponentially decaying as
$|x|\rightarrow\infty.$ \end{itemize}
\par\noindent Conditions (i)-(ii) guarantee the applicability of dispersive estimates of Murata \cite{mm:ae} and Schlag \cite{ws:de2} to the Schr\" odinger group $e^{-iHt}.$ These estimates are used for obtaining Theorems \ref{th:lin1} and \ref{th:lin2}, see also \cite[section 4]{kz:as2d}. In particular (i) implies the local well posedness in $H^1$ of the initial value problem (\ref{eq:ufull}-\ref{eq:ic}), see section \ref{se:main}.
By the standard bifurcation argument in Banach spaces \cite{ln:fa} for (\ref{eq:ev}) at $E=E_0,$ condition (iii) guarantees existence of nontrivial solutions. Moreover, these solutions can be organized as a $C^2$ manifold (center manifold) for $x\in\mathbb{R}^n,$ see \cite[section 2]{km:as3d} or \cite{gnt:as}. The proofs for the following results can be found in \cite[section 2]{km:as3d} or \cite{gnt:as}:
\begin{proposition}\label{pr:cm} There exist $\delta>0,$ the $C^2$ function
$$h:\{a\in\mathbb{R}\times\mathbb{R}\ :\ |a|<\delta\}\mapsto L^2_\sigma\cap H^2 ,\ \sigma\in\mathbb{R}$$ and the $C^1$ function
$E:(-\delta,\delta)\mapsto\mathbb{R}$ such that for $|E-E_0|<\delta$
and $|\langle\psi_0,\psi_E\rangle |<\delta$ the eigenvalue problem (\ref{eq:ev}) has a unique solution up to multiplication with $e^{i\theta},\ \theta\in [0,2\pi),$ which can be represented as a center manifold:
\begin{equation}\label{eq:cm}
\psi_E=a\psi_0+h(a),\ E=E(|a|), \quad \langle\psi_0,h(a)\rangle =0,\quad
h(e^{i\theta}a)=e^{i\theta}h(a),\
|a|<\delta .\end{equation} Moreover
$E(|a|)-E_0=\mathcal{O}(|a|^{1+\alpha_1})$,
$h(a)=\mathcal{O}(|a|^{2+\alpha_1}),$ and for $a\in\mathbb{R},\
|a|<\delta,$ $h(a)$ is a real valued function with
$\frac{d^2h}{da^2}(a)=\mathcal{O}(|a|^{\alpha_1})$ and $\frac{dh}{da}(0)=0.$ \end{proposition}
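For example (our illustration, specialising the proposition): in the cubic case $\alpha_1=\alpha_2=1$ the above estimates read
\[
E(|a|)-E_0=\mathcal{O}(|a|^{2}),\qquad h(a)=\mathcal{O}(|a|^{3}),
\]
so, to leading order, $\psi_E$ is a small multiple of the linear eigenvector $\psi_0$.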
Since $\psi_0(x)$ is exponentially decaying as
$|x|\rightarrow\infty$ the proposition implies that $\psi_E\in L^2_\sigma .$ A regularity argument, see \cite{sw:mc1}, gives a stronger result:
\begin{corollary}\label{co:decay} For any $\sigma\in\mathbb{R},$ there exists a finite constant $C_\sigma$ such that:
$$\|<x>^\sigma\psi_E\|_{H^2}\le C_\sigma\|\psi_E\|_{H^2}.$$ \end{corollary}
We are going to decompose the solution of \eqref{eq:ufull}-\eqref{eq:ic} into a projection onto the center manifold and a correction. To insure that the correction disperses to infinity on long times we require that the correction is always in the invariant subspace of the linearized dynamics at the projection that complements the tangent space to the center manifold. A short description of the decomposition follows, for more details and the proofs see \cite{km:as3d}.
Consider the linearization of \eqref{eq:ufull} at a function on the center manifold $\psi_E=a\psi_0+h(a),\ a=a_1+ia_2\in\mathbb{C},\
|a|<\delta:$
\begin{equation}\label{eq:ldE}
\frac{\partial w}{\partial t}=-iL_{\psi_E}[w]-iEw
\end{equation} where
\begin{equation}\label{def:linop}
L_{\psi_E}[w]=(-\Delta+V-E)w+Dg_{\psi_E}[w]=(-\Delta+V-E)w+\lim_{\varepsilon\in\mathbb{R},\ \varepsilon\rightarrow
0}\frac{g(\psi_E+\varepsilon w)-g(\psi_E)}{\varepsilon}
\end{equation}
\begin{remark}\label{rmk:dgest} Note that for $a\in\mathbb{R}$ we have $\psi_E=a\psi_0+h(a)$ is real valued and
$$Dg_{\psi_E}[w]=g'(\psi_E)\Re w+i\frac{g(\psi_E)}{\psi_E}\Im w
=\frac{1}{2}\left(g'(\psi_E)+\frac{g(\psi_E)}{\psi_E}\right)w
+\frac{1}{2}\left(g'(\psi_E)-\frac{g(\psi_E)}{\psi_E}\right)\overline{w}$$ hence
\begin{equation}\label{est:dg}
|Dg_{\psi_E}[w]|\le |w|\max\left\{|g'(\psi_E)|,
\left|\frac{g(\psi_E)}{\psi_E}\right|\right\}\le
C(|\psi_E|^{1+\alpha_1}+|\psi_E|^{1+\alpha_2})|w|
\end{equation}
where we used \eqref{gest}. For $a=|a|e^{i\theta}\in\mathbb{C}$ we have, using the equivariant symmetry \eqref{gsym},
$\psi_E=a\psi_0+h(a)=e^{i\theta}(|a|\psi_0+h(|a|))=e^{i\theta}\psi_E^{\rm real},$ where $\psi_E^{\rm real}$ is real valued, and $Dg_{\psi_E}[w]=e^{i\theta}Dg_{\psi_E^{\rm real}}[e^{-i\theta}w],$ hence \eqref{est:dg} is valid for any $\psi_E$ on the manifold of ground states. \end{remark}
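As an illustration (our computation for the model cubic nonlinearity $g(u)=|u|^2u$): for real valued $\psi_E$ one has $g'(\psi_E)=3\psi_E^2$ and $g(\psi_E)/\psi_E=\psi_E^2$, so
\[
Dg_{\psi_E}[w]=3\psi_E^2\,\Re w+i\,\psi_E^2\,\Im w=2\psi_E^2\,w+\psi_E^2\,\overline{w},
\]
which is real linear but not complex linear, and satisfies \eqref{est:dg} with $\alpha_1=\alpha_2=1$.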
{\bf Properties of the linearized operator}:
\begin{enumerate}
\item $L_{\psi_E}$ is real linear and symmetric with respect to the
real scalar product $\Re\langle\cdot,\cdot\rangle,$ on
$L^2(\mathbb{R}^2),$ with domain $H^2(\mathbb{R}^2).$
\item Zero is an eigenvalue of $-iL_{\psi_E}$ and its generalized
eigenspace includes $\left\{\frac{\partial\psi_E}{\partial a_1},\frac{\partial\psi_E}{\partial
a_2}\right\}$
\item ${\rm span}_{\mathbb{R}}\left\{\frac{\partial\psi_E}{\partial a_1},\frac{\partial\psi_E}{\partial
a_2}\right\}$ and
${\cal H}_a=\left\{-i\frac{\partial\psi_E}{\partial a_2},i\frac{\partial\psi_E}{\partial
a_1}\right\}^\perp,$
where orthogonality is with respect to the real scalar product in $L^2(\mathbb{R}^2),$ are
invariant subspaces for $-iL_{\psi_E}$ and, by possibly choosing
$\delta>0$ smaller than the one in Proposition \ref{pr:cm}, we
have:
$$L^2(\mathbb{R}^2)={\rm span}_{\mathbb{R}}\left\{\frac{\partial\psi_E}{\partial a_1},\frac{\partial\psi_E}{\partial
a_2}\right\}\oplus {\cal H}_a,\qquad {\rm for\ all}\ a\in\mathbb{C},\ |a|<\delta.$$ Note that ${\cal H}_0$ coincides
with the subspace of $L^2$ associated to the continuous spectrum of
the self-adjoint operator
$H=-\Delta+V.$
\item the above decomposition can be extended to
$H^{-1}(\mathbb{R}^2):$
\begin{equation}\label{h-1decomp}H^{-1}(\mathbb{R}^2)={\rm span}_{\mathbb{R}}\left\{\frac{\partial\psi_E}{\partial a_1},\frac{\partial\psi_E}{\partial
a_2}\right\}\oplus {\cal H}_a,\qquad {\rm for\ all}\ a\in\mathbb{C},\ |a|<\delta,\end{equation}
where
$${\cal H}_a=\left\{\phi\in H^{-1}\ |\ \Re\langle -i\frac{\partial\psi_E}{\partial
a_2},\ \phi\rangle=0,\ {\rm and}\ \Re\langle i\frac{\partial\psi_E}{\partial
a_1},\ \phi\rangle=0\right\}$$
\end{enumerate}
Our goal is to decompose the solution of \eqref{eq:ufull} at each time into:
$$u=\psi_E+\eta=a\psi_0+h(a)+\eta,\qquad \eta\in{\cal H}_a$$ which ensures that $\eta$ is not in the non-decaying directions of the linearized equation \eqref{eq:ldE} at $\psi_E.$ The fact that this can be done in a unique manner is a consequence of the following lemma:
\begin{lemma}\label{lem:decomp} There exists $\delta /2>\delta_1>0$ such that
any $\phi\in H^{-1}(\mathbb{R}^2)$ satisfying $\|\phi\|_{H^{-1}}\le\delta_1$ can be uniquely decomposed:
$$\phi =\psi_E+\eta=a\psi_0+h(a)+\eta$$
where $a=a_1+ia_2\in\mathbb{C},\ |a|<\delta,\ \eta\in {\cal H}_a.$ Moreover the maps $\phi\mapsto a$ and $\phi\mapsto \eta$ are $C^1$ and there exists a constant $C,$ independent of $\phi,$ such that
$$|a|\le 2\|\phi\|_{H^{-1}},\qquad \|\eta\|_{H^{-1}}\le C\|\phi\|_{H^{-1}},$$ while for $\phi\in L^2(\mathbb{R}^2)$ we have $\eta\in L^2(\mathbb{R}^2)$ and:
$$|a|\le 2\|\phi\|_{L^2},\qquad \|\eta\|_{L^2}\le C\|\phi\|_{L^2}.$$ \end{lemma}
\begin{remark}\label{rmk:inv} The above lemma uses the implicit function theorem applied to $$F:\mathbb{R}^2\times H^{-1}(\mathbb{R}^2)\mapsto\mathbb{R}^2\qquad
F(a_1,a_2,\phi)=\left[\begin{array}{c}\Re\langle \Psi_1,\ \psi_E-\phi\rangle\\
\Re\langle \Psi_2,\ \psi_E-\phi\rangle\end{array}\right]$$ where $\psi_E=(a_1+ia_2)\psi_0+h(a_1+ia_2)$ and
\begin{eqnarray}
\Psi_1(a_1,a_2)&=&-i\frac{\partial\psi_E}{\partial
a_2}\left(\Re\langle -i\frac{\partial\psi_E}{\partial
a_2},\ \frac{\partial\psi_E}{\partial
a_1}\rangle\right)^{-1}\nonumber\\
\Psi_2(a_1,a_2)&=&i\frac{\partial\psi_E}{\partial
a_1}\left(\Re\langle i\frac{\partial\psi_E}{\partial
a_1},\ \frac{\partial\psi_E}{\partial
a_2}\rangle\right)^{-1}\nonumber
\end{eqnarray} form the dual basis of $\left\{\frac{\partial\psi_E}{\partial a_1},\frac{\partial\psi_E}{\partial a_2}\right\}$ with respect to the decomposition \eqref{h-1decomp}. Note that $$\frac{\partial F}{\partial (a_1,a_2)}(a_1,a_2,\phi)=\mathbb{I}_{\mathbb{R}^2}-M(a_1,a_2,\phi)$$ where the entries of the two by two matrix $M$ are $$M_{ij}=\Re\langle \frac{\partial\Psi_i}{\partial a_j},\ \phi-\psi_E\rangle$$ and, consequently, $M(0,0,0)$ is the zero matrix. Thus the implicit function theorem applies to $F=0,$ in a neighborhood of $(a_1,a_2,\phi)=(0,0,0)$ and the number $\delta_1$ in the above lemma is chosen such that:
$$\left|\Re\langle i\frac{\partial\psi_E}{\partial
a_1},\ \frac{\partial\psi_E}{\partial
a_2}\rangle\right|\ge \frac{1}{2},\qquad {\rm whenever}\
|(a_1,a_2)|\le 2\delta_1,$$ and the norm of the matrix $M$ as a linear, bounded operator on $\mathbb{R}^2$ satisfies: \begin{equation}\label{M-bound}
\|M_\phi\|=\|M(a_1(\phi),a_2(\phi),\phi)\|\le\frac{1}{2},\qquad {\rm whenever}\ \|\phi\|_{H^{-1}}\le\delta_1, \end{equation} see \cite[section 2]{km:as3d} for details. \end{remark}
We need one more technical result relating the spaces ${\cal H}_a$ and the space corresponding to the continuous spectrum of $-\Delta+V:$
\begin{lemma}\label{le:pcinv} With $\delta_1$ given by the previous lemma we have that for any
$a\in\mathbb{C},\ |a|\le 2\delta_1,$ the linear map $P_c|_{{\cal H}_a}:{\cal H}_a\mapsto {\cal
H}_0$ is invertible, and its inverse $R_a :{\cal H}_0\mapsto {\cal
H}_a$ satisfies:
\begin{eqnarray}
\|R_a\zeta\|_{L^2_{-\sigma}}&\le
&C_{-\sigma}\|\zeta\|_{L^2_{-\sigma}},\qquad \sigma\in\mathbb{R}\ {\rm and\ for\ all}\ \zeta\in {\cal H}_0\cap L^2_{-\sigma}\nonumber\\%\label{raest1}
\|R_a\zeta\|_{L^p}&\le
&C_p\|\zeta\|_{L^p},\qquad 1\le p\le\infty\ {\rm and\ for\ all}\ \zeta\in {\cal H}_0\cap
L^p\nonumber\\%\label{raest2}
\overline{R_a\zeta}&=&R_a\overline\zeta\nonumber
\end{eqnarray} where the constants $C_{-\sigma},\ C_p>0$ are independent of
$a\in\mathbb{C},\ |a|\le 2\delta_1.$
\end{lemma}
We are now ready to prove our main result.
\section{The Main Result}\label{se:main}
\begin{theorem}\label{th:main} If hypothesis \eqref{gest}, \eqref{gsym}, $(H1)$ hold and $$\frac{1}{2}<\alpha_1$$ then there
exists $q'_0<\frac{4+2\alpha_2}{3+2\alpha_2}$ and $\varepsilon_0>0$ such that for all initial conditions
$u_0(x)$ satisfying
$$\max\{\|u_0\|_{L^{q'_0}},\|u_0\|_{H^1}\}\le \varepsilon_0
$$ the initial value problem (\ref{eq:ufull})-(\ref{eq:ic}) is
globally well-posed in $H^1,$ and the solution decomposes into a radiative part and a part that asymptotically converges to a ground state.
More precisely, there exists a $C^1$ function $a:\mathbb{R}\mapsto\mathbb{C}$ such that, for all $t\in\mathbb{R},$ we have: $$ u(t,x)=\underbrace{a(t)\psi_0(x)+h(a(t))}_{\psi_E(t)}+\eta(t,x) $$ where $\psi_E(t)$ is on the center manifold (i.e.\ it is a ground state) and $\eta(t,x)\in {\cal H}_{a(t)},$ see Proposition \ref{pr:cm} and Lemma \ref{lem:decomp}. Moreover, there exist ground states $\psi_{E_{\pm\infty}}$ and a $C^1$ function $\tilde\theta:\mathbb{R}\mapsto\mathbb{R}$ such that
$\lim_{|t|\rightarrow\infty}\tilde\theta(t)=0$ and:
\begin{equation}\label{conv:psie}\lim_{t\rightarrow\pm\infty}\|\psi_E(t)-e^{-it(E_{\pm\infty}+\tilde\theta(t))}\psi_{E_{\pm\infty}}\|_{H^2\bigcap
L^2_\sigma}=0,\ \sigma\in\mathbb{R}
\end{equation} while $\eta$ satisfies the following decay estimates. Fix $p_0>\max\{\frac{2}{\alpha_1-1/2},\ (4+2\alpha_2)\frac{q_0-2}{q_0-(4+2\alpha_2)}\},$ where $q_0=\frac{q'_0}{q'_0-1}>4+2\alpha_2.$ Then for $2\le p\le\frac{p_0q_0}{p_0+q_0-2}$ we have:
\begin{equation}\label{lpdecay}
\|\eta(t)\|_{L^p}\le \left\{\begin{array}{cl}
C\varepsilon_0\frac{\log^{\frac{1-2/p}{1-2/p_0}}(2+|t|)}{(1+|t|)^{1-2/p}}
& {\rm if}\ \alpha_1\ge 1\ {\rm or}\ \alpha_1<1\ {\rm and}\ \ p\le \frac{2}{1-\alpha_1+2/p_0}, \\
& \\
C\varepsilon_0\frac{\log^{\frac{\alpha_1-2/p_0}{1-2/p_0}}(2+|t|)}{(1+|t|)^{\alpha_1-2/p_0}}
& {\rm if}\ \alpha_1<1\ {\rm and}\ p>
\frac{2}{1-\alpha_1+2/p_0},
\end{array}\right.
\end{equation} for some constant $C=C(p_0).$
\end{theorem} \begin{remark}\label{rmk:rad} The estimates on $\eta$ show that the component of the solution that does not converge to a ground state disperses like the solution of the free Schr\" odinger equation except for a logarithmic correction in $L^p$ spaces for critical and supercritical regimes, $\alpha_1\ge 1.$ In subcritical regimes, $\alpha_1<1,$ the decay rate remains comparable to the free Schr\" odinger one in $L^p$ spaces for $2\le p <2/(1-\alpha_1),$
while it saturates, up to logarithms, at approximately $|t|^{-\alpha_1}$ (more precisely at $|t|^{-(\alpha_1-2/p_0)},$ see \eqref{lpdecay}) in $L^p,\ p\ge 2/(1-\alpha_1).$ \end{remark}
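For a concrete reading of these rates (our numerical illustration of \eqref{lpdecay}): take $\alpha_1=\alpha_2=\frac34$ and $p_0$ very large. The threshold exponent is then $\frac{2}{1-\alpha_1+2/p_0}\approx\frac{2}{1/4}=8$, so for $2\le p\le\min\{8,\frac{p_0q_0}{p_0+q_0-2}\}$ the bound \eqref{lpdecay} gives essentially the free rate $|t|^{-(1-2/p)}$ up to logarithms, while for larger admissible $p$ it gives $|t|^{-(\alpha_1-2/p_0)}\approx|t|^{-3/4}$, again up to logarithms.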
\par{\bf Proof of Theorem \ref{th:main}.} It is well known that under hypothesis $(H1)(i)$ the initial value problem (\ref{eq:ufull})-(\ref{eq:ic}) is locally well posed in the energy space $H^1$ and its $L^2$ norm is conserved, see for example \cite[
Corollary 4.3.3. at p. 92]{caz:bk}. Global well posedness follows via energy estimates from $\|u_0\|_{H^1}$ small, see \cite[Corollary 6.1.5 at p. 165]{caz:bk}.
We choose $\varepsilon_0\le \delta_1$ given by Lemma
\ref{lem:decomp}. Then, for all times, $\|u(t)\|_{H^{-1}}\le
\|u(t)\|_{L^2}\le\varepsilon_0\le\delta_1$ and, via Lemma \ref{lem:decomp}, we can decompose the solution into a solitary wave and a dispersive component: \begin{equation}\label{dc}
u(t)=a(t)\psi_0+h(a(t))+\eta(t)=\psi_E(t)+\eta(t),\qquad {\rm where}\ |a(t)|=|a_1(t)+ia_2(t)|\le 2\varepsilon_0\le 2\delta_1\ \forall t\in\mathbb{R}. \end{equation} Note that since $a\mapsto h(a)$ is $C^2,$ see Proposition \ref{pr:cm}, and $a$ is uniformly bounded in time we deduce that there exists the constant $C_H>0$ such that:
$$\max\left\{\|\psi_E(t)\|_{H^2},\|\frac{\partial\psi_E}{\partial a_1}(t)\|_{H^2},\|\frac{\partial\psi_E}{\partial a_2}(t)\|_{H^2}\right\}\le C_H\varepsilon_0,\qquad {\rm for\ all}\ t\in\mathbb{R},$$ which combined with Corollary~\ref{co:decay} implies that for any $\sigma\in\mathbb{R}$ there exists a constant $C_{H,\sigma}>0$ such that:
\begin{equation}\label{est:psieh2}
\max\left\{\|<x>^\sigma\psi_E(t)\|_{H^2},\|<x>^\sigma\frac{\partial\psi_E}{\partial a_1}(t)\|_{H^2},\|<x>^\sigma\frac{\partial\psi_E}{\partial a_2}(t)\|_{H^2}\right\}\le C_{H,\sigma}\varepsilon_0,\qquad {\rm for\ all}\ t\in\mathbb{R}.
\end{equation} Consequently, using the continuous imbedding $H^2(\mathbb{R}^2)\hookrightarrow L^p(\mathbb{R}^2),\ 2\le p\le\infty$ and $L^2_{\sigma}(\mathbb{R}^2)\hookrightarrow L^1(\mathbb{R}^2),\ \sigma>1$ we have that for all $1\le p\le\infty$ and all $\sigma\in\mathbb{R},$ there exists the constants $C_{p,\sigma}$ such that \begin{equation}\label{est:pPsi}
\sup_{t\in\mathbb{R}}\max\left\{\|\psi_E(t)\|_{L^p_\sigma},\|\frac{\partial\psi_E}{\partial a_1}(t)\|_{L^p_\sigma},\|\frac{\partial\psi_E}{\partial a_2}(t)\|_{L^p_\sigma},\|\Psi_1(a(t))\|_{L^p_\sigma},\|\Psi_2(a(t))\|_{L^p_\sigma}\right\}\le C_{p,\sigma}\varepsilon_0,\end{equation} see Remark \ref{rmk:inv} for the definitions of $\Psi_j(a),\ j=1,2.$
In addition,
since
$$u\in C(\mathbb{R},H^{1}(\mathbb{R}^2))\cap
C^1(\mathbb{R},H^{-1}(\mathbb{R}^2)),$$ and $u\mapsto a$ respectively $u\mapsto \eta$ are $C^1,$ see Lemma \ref{lem:decomp}, we get that $a(t)$ is $C^1$ and $\eta\in C(\mathbb{R},H^{1})\cap
C^1(\mathbb{R},H^{-1}).$
The solution is now described by the $C^1$ function $a:\mathbb{R}\mapsto\mathbb{C}$ and $\eta(t)\in C(\mathbb{R},H^1)\cap C^1(\mathbb{R},H^{-1}).$ To obtain estimates for them it is useful to first remove their dominant phase. Consider the $C^2$ function: \begin{equation}\label{def:theta}
\theta(t)=\int_0^tE(|a(s)|)ds \end{equation} and
\begin{equation}\label{def:tu}
\tilde u(t)=e^{i\theta(t)}u(t),
\end{equation} then $\tilde u(t)$ satisfies the differential equation:
\begin{equation}\label{eq:tu}
i\partial_t\tilde u(t)=-E(|a(t)|)\tilde u(t)+(-\Delta+V)\tilde u(t)+g(\tilde u(t)),
\end{equation} see \eqref{eq:ufull} and \eqref{gsym}. Moreover, like $u(t),$ $\tilde u(t)$ can be decomposed:
\begin{equation}\label{decomp:tu}
\tilde u(t)=\underbrace{\tilde a(t)\psi_0+h(\tilde a(t))}_{\tilde\psi_E(t)}+\tilde\eta(t)
\end{equation} where
\begin{equation}\label{def:taeta}
\tilde a(t)=e^{i\theta(t)}a(t),\qquad
\tilde\eta(t)=e^{i\theta(t)}\eta(t)\in {\cal H}_{\tilde a(t)}
\end{equation} By plugging in \eqref{decomp:tu} into \eqref{eq:tu} we get
\begin{eqnarray}
i\frac{\partial\tilde\eta}{\partial
t}+iD\tilde\psi_E|_{\tilde a} \frac{d\tilde a}{dt}&=&(-\Delta+V-E(|a|))(\tilde\psi_E+\tilde\eta)+g(\tilde\psi_E)+g(\tilde\psi_E+\tilde\eta)-g(\tilde\psi_E)\nonumber\\
&=&L_{\tilde\psi_E}\tilde\eta+g_2(\tilde\psi_E,\tilde\eta)\nonumber
\end{eqnarray} or, equivalently,
\begin{equation}\label{eq:tudecomp}
\frac{\partial\tilde\eta}{\partial t}+
\underbrace{\frac{\partial\tilde\psi_E}{\partial a_1}\frac{d\tilde a_1}{dt}+
\frac{\partial\tilde\psi_E}{\partial a_2}\frac{d\tilde a_2}{dt}}
_{\in {\rm span}_\mathbb{R}\{\frac{\partial\tilde\psi_E}{\partial a_1},\frac{\partial\tilde\psi_E}{\partial
a_2}\}}=\underbrace{-iL_{\tilde\psi_E}\tilde\eta}_{\in {\cal H}_{\tilde a}}-ig_2(\tilde\psi_E,\tilde\eta)
\end{equation} where $L_{\tilde\psi_E}$ is defined by \eqref{def:linop}:
$$L_{\tilde\psi_E}\tilde\eta=
(-\Delta+V-E(|\tilde a|))\tilde\eta+\frac{d}{d\varepsilon}g(\tilde\psi_E+\varepsilon\tilde\eta)|_{\varepsilon=0}$$
and we used $|a|=|\tilde a|,$ while $g_2$ is defined by:
\begin{equation}\label{g2}
g_2(\tilde\psi_E,\tilde\eta)=
g(\tilde\psi_E+\tilde\eta)-g(\tilde\psi_E)-\frac{d}{d\varepsilon}g(\tilde\psi_E+\varepsilon\tilde\eta)|_{\varepsilon=0}
\end{equation} and we also used the fact that $\tilde\psi_E$ is a solution of the eigenvalue problem \eqref{eq:ev}. Note that $g_2$ is at least quadratic in the second variable, more precisely:
\begin{lemma}\label{le:g2est} There exists a constant $C>0$ such that for all $a,z\in\mathbb{C}$ we have:
$$|g_2(a,z)|\le
C(|a|^{\alpha_1}+|a|^{\alpha_2}+|z|^{\alpha_1}+|z|^{\alpha_2})|z|^2$$ \end{lemma}
\noindent{\bf Proof:} From the definition \eqref{g2} of $g_2$ we have:
$$
g_2(a,z)=g(a+z)-g(a)-Dg_{a}[z]=\int_0^1\left(Dg_{a+\tau z}-Dg_{a}\right)[z]d\tau
=\int_0^1\int_0^1D^2g_{a+s\tau z}[\tau z][z]d\tau ds.$$ Now \eqref{gest} and \eqref{gsym} imply that there exists a constant $C_1>0$ such that the bilinear form $D^2g_b$ on $\mathbb{C}\times \mathbb{C}$ satisfies:
\begin{equation}\label{est:d2g}
\|D^2g_b\|\le C_1(|b|^{\alpha_1}+|b|^{\alpha_2}),\qquad \forall
b\in\mathbb{C}.\end{equation} Hence
$$|g_2(a,z)|\le C_1\left((2\max (|a|,|z|))^{\alpha_1}+(2\max
(|a|,|z|))^{\alpha_2}\right)\frac{1}{2}|z|^2,$$ which proves the lemma.\ $\Box$
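For instance (our illustration), for the cubic nonlinearity $g(s)=s^3$ and real arguments $a,z\in\mathbb{R}$ one has $Dg_a[z]=3a^2z$ and
\[
g_2(a,z)=(a+z)^3-a^3-3a^2z=3az^2+z^3,
\]
which is quadratic in $z$ and obeys the bound of Lemma \ref{le:g2est} with $\alpha_1=\alpha_2=1$.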
We now project \eqref{eq:tudecomp} onto the invariant subspaces of $-iL_{\tilde\psi_E},$ namely ${\rm span}_\mathbb{R} \{\frac{\partial\tilde\psi_E}{\partial a_1},\frac{\partial\tilde\psi_E}{\partial
a_2}\},$ and ${\cal H}_{\tilde a}.$ More precisely, we evaluate both the left and right hand side of \eqref{eq:tudecomp} which are functionals in $H^{-1}(\mathbb{R}^2)$ at $\Psi_j=\Psi_j(\tilde a(t)),\ j=1,2,$ see Remark \ref{rmk:inv}, and take the real parts. We obtain:
$$\left[\begin{array}{c}\Re\langle\Psi_1,\frac{\partial\tilde\eta}{\partial
t}\rangle\\ \Re\langle\Psi_2,\frac{\partial\tilde\eta}{\partial
t}\rangle\end{array}\right]+\frac{d}{dt}\left[\begin{array}{c}\tilde
a_1\\ \tilde a_2\end{array}\right]=\left[\begin{array}{c}
g_{21}(\tilde\psi_E,\tilde\eta)\\
g_{22}(\tilde\psi_E,\tilde\eta)\end{array}\right]$$ where
\begin{equation}\label{def:g2j}
g_{2j}(\tilde\psi_E,\tilde\eta)=\Re\langle\Psi_j,-ig_2(\tilde\psi_E,\tilde\eta)\rangle ,\qquad j=1,2.
\end{equation} Note that from Lemma~\ref{le:g2est} and H\" older inequality we have for all $t\in\mathbb{R}:$
\begin{eqnarray}
\lefteqn{|g_{2j}(\tilde\psi_E(t),\tilde\eta(t))|\le
C\int_{\mathbb{R}^2}|\Psi_j(t,x)|\left(|\tilde\psi_E(t,x)|^{\alpha_1}+|\tilde\psi_E(t,x)|^{\alpha_2}
+|\tilde\eta(t,x)|^{\alpha_1}+|\tilde\eta(t,x)|^{\alpha_2}\right)\
|\tilde\eta(t,x)|^{2}dx}\label{est:g2j}\\
&\le & C\left[
\|\Psi_j(t)\|_{L^{r_0}}\left(\|\tilde\psi_E(t)\|_{L^\infty}^{\alpha_1}+\|\tilde\psi_E(t)\|_{L^\infty}^{\alpha_2}\right)
\|\tilde\eta(t)\|_{L^{p_2}}^2+\|\Psi_j(t)\|_{L^{r_1}}\|\tilde\eta(t)\|_{L^{p_2}}^{2+\alpha_1}
+\|\Psi_j(t)\|_{L^{r_2}}\|\tilde\eta(t)\|_{L^{p_2}}^{2+\alpha_2}\right],\nonumber
\end{eqnarray} where $r_0^{-1}+(p_2/2)^{-1}=1,\ r_j^{-1}+(p_2/(2+\alpha_j))^{-1}=1,\ j=1,2.$
To calculate $\Re\langle\Psi_j,\frac{\partial\tilde\eta}{\partial
t}\rangle,\ j=1,2$ we use the fact that $\tilde\eta(t)\in {\cal
H}_{\tilde a},$ for all $t\in\mathbb{R},$ i.e. $$\Re\langle\Psi_j(\tilde
a(t)),\tilde\eta(t)\rangle\equiv 0.$$ Differentiating the latter with respect to $t$ we get:
$$\Re\langle\Psi_j,\frac{\partial\tilde\eta}{\partial
t}\rangle=-\Re\langle\frac{\partial\Psi_j}{\partial a_1}\frac{d\tilde a_1}{dt}+
\frac{\partial\Psi_j}{\partial a_2}\frac{d\tilde a_2}{dt},\tilde\eta\rangle\qquad j=1,2$$ which, substituted into the above, leads to:
\begin{equation}\label{eq:ta}
\frac{d}{dt}\left[\begin{array}{c}\tilde
a_1\\ \tilde
a_2\end{array}\right]=(\mathbb{I}_{\mathbb{R}^2}-M_{\tilde u})^{-1}\left[\begin{array}{c}
g_{21}(\tilde\psi_E,\tilde\eta)\\
g_{22}(\tilde\psi_E,\tilde\eta)\end{array}\right],
\end{equation} where the two by two matrix $M_{\tilde u}$ is defined in Remark \ref{rmk:inv}. In particular
$$\left[\begin{array}{c}\Re\langle\Psi_1,\frac{\partial\tilde\eta}{\partial
t}\rangle\\ \Re\langle\Psi_2,\frac{\partial\tilde\eta}{\partial
t}\rangle\end{array}\right]=-M_{\tilde u}(\mathbb{I}_{\mathbb{R}^2}-M_{\tilde u})^{-1}\left[\begin{array}{c}
g_{21}(\tilde\psi_E,\tilde\eta)\\
g_{22}(\tilde\psi_E,\tilde\eta)\end{array}\right],$$ which we use to obtain the component in ${\cal H}_{\tilde a}=\{\Psi_1(\tilde a),\Psi_2(\tilde a)\}^\perp$ of \eqref{eq:tudecomp}:
$$\frac{\partial\tilde\eta}{\partial
t}=-iL_{\tilde\psi_E}\tilde\eta-ig_2(\tilde\psi_E,\tilde\eta)
-(\mathbb{I}-M_{\tilde u})^{-1}g_3(\tilde\psi_E,\tilde\eta),$$ where $g_3$ is the projection of $-ig_2$ onto ${\rm span}_\mathbb{R}\{\frac{\partial\tilde\psi_E}{\partial
a_1},\frac{\partial\tilde\psi_E}{\partial
a_2}\}$ relative to the decomposition \eqref{h-1decomp}:
\begin{equation}\label{def:g3}
g_3(\tilde\psi_E,\tilde\eta)=g_{21}(\tilde\psi_E,\tilde\eta)\frac{\partial\tilde\psi_E}{\partial
a_1}+g_{22}(\tilde\psi_E,\tilde\eta)\frac{\partial\tilde\psi_E}{\partial
a_2},
\end{equation} see \eqref{def:g2j} for the definitions of $g_{2j},\ j=1,2,$ and $\mathbb{I}-M_{\tilde u}$ is the linear operator on the two dimensional real vector space ${\rm span}_\mathbb{R}\{\frac{\partial\tilde\psi_E}{\partial a_1},\frac{\partial\tilde\psi_E}{\partial a_2}\}$ whose matrix representation relative to the basis $\{\frac{\partial\tilde\psi_E}{\partial a_1},\frac{\partial\tilde\psi_E}{\partial a_2}\}$ is $\mathbb{I}_{\mathbb{R}^2}-M_{\tilde u}.$ It is easier to switch back to the variable $\eta(t)=e^{-i\theta(t)}\tilde\eta(t)\in {\cal H}_a:$
\begin{equation}\label{eq:eta}
\frac{\partial\eta}{\partial
t}=-i(-\Delta+V)\eta-iDg_{\psi_E}\eta-ig_2(\psi_E,\eta)
-(\mathbb{I}-M_u)^{-1}g_3(\psi_E,\eta),
\end{equation} where we used the equivariant symmetry \eqref{gsym} and its obvious consequences for the symmetries of $Dg,\ g_2,\ g_3$ and $M.$ Since by Lemma \ref{le:pcinv} it is sufficient to get estimates for $\zeta(t)=P_c\eta (t),$ we now project \eqref{eq:eta} onto the continuous spectrum of $-\Delta+V:$
\begin{equation}\label{eq:zeta}
\frac{\partial\zeta}{\partial
t}=-i(-\Delta+V)\zeta-iP_cDg_{\psi_E}R_a\zeta-iP_cg_2(\psi_E,R_a\zeta)
-P_c(\mathbb{I}-M_u)^{-1}g_3(\psi_E,R_a\zeta),
\end{equation} where $R_a:{\cal H}_0\mapsto {\cal H}_a$ is the inverse of $P_c$ restricted to ${\cal H}_a,$ see Lemma \ref{le:pcinv}.
Consider the initial value problem for the linear part of \eqref{eq:zeta}:
\begin{eqnarray}
\frac{\partial z}{\partial
t}&=&-i(-\Delta+V) z-iP_cDg_{\psi_E(t)}R_{a(t)} z(t)\label{eq:zm}\\
z(s)&=&v\in {\cal H}_0\nonumber
\end{eqnarray} and write its solution in terms of a family of operators:
\begin{equation}\label{def:Omega}
\Omega(t,s): {\cal H}_0\mapsto {\cal H}_0,\qquad \Omega(t,s)v=z(t),\ \ t,\ s\in\mathbb{R}.\end{equation} In Section \ref{se:lin} we show that such a family of operators exists, is uniformly bounded in $t,\ s$ with respect to the $L^2$ norm and it has very similar properties with the unitary group of operators $e^{-i(-\Delta+V)(t-s)}P_c$ generated by the Schr\" odinger operator $-i(-\Delta+V)P_c.$ In particular $\Omega(t,s)$ satisfies certain dispersive decay estimates in weighted $L^2$ spaces and $L^p,\ p>2$ spaces, see Theorem \ref{th:lin1} and Theorem \ref{th:lin2}. For all these results to hold we only need to choose $\varepsilon_0$ small enough such that $\varepsilon_0 C_{H,4\sigma /3}\le \varepsilon_1,$ where $\sigma >1 $ and $\varepsilon_1>0$ are fixed in Section 4 and the constant $C_{H,4\sigma /3}$ is the one from \eqref{est:psieh2}.
Using Duhamel formula, the solution $\zeta\in C(\mathbb{R},H^{1}\cap {\cal H}_0)\cap
C^1(\mathbb{R},H^{-1}(\mathbb{R}^2)\cap {\cal H}_0)$ of \eqref{eq:zeta} also satisfies: \begin{eqnarray} \zeta(t)&=&\Omega(t,0)\zeta(0)-i\int_0^t\Omega(t,s)P_cg_2(\psi_E(s),R_{a(s)}\zeta(s))ds\nonumber\\
&&-\int_0^t\Omega(t,s)P_c(\mathbb{I}-M_{u(s)})^{-1}g_3(\psi_E(s),R_{a(s)}\zeta(s))ds.\label{int:zeta} \end{eqnarray} Note that the right hand side of \eqref{int:zeta} contains only terms that are quadratic and higher order in $\zeta,$ see Lemma~\ref{le:g2est} and \eqref{est:g2j}. As in \cite{kz:as2d,km:as3d} this is essential in controlling low power nonlinearities and it is the main difference between our approach and the existing literature on asymptotic stability of coherent structures for dispersive nonlinear equations, see \cite[p. 449]{kz:as2d} for a more detailed discussion.
To obtain estimates for $\zeta$ we apply a contraction mapping argument to the fixed point problem (\ref{int:zeta}) in the following Banach space. Fix $p_0>2$ such that
\begin{equation}\label{def:p0}
p_0>\max\left\{\frac{2}{\alpha_1-1/2},\
(4+2\alpha_2)\frac{q_0-2}{q_0-(4+2\alpha_2)}\right\},
\end{equation} and let
\begin{equation}\label{def:p2}
p_2=\frac{p_0q_0}{p_0+q_0-2},
\end{equation} and
\begin{equation}\label{def:p1}
p_1=\frac{2}{1-\alpha_1+2/p_0},\qquad {\rm if}\ \alpha_1<1,
\end{equation} then \begin{itemize}
\item[Case I] if $\alpha_1\ge 1,$ or $1/2<\alpha_1<1$ and $p_1\ge
p_2,$ let:
$$
Y=\left\{v\in C(\mathbb{R},L^2\cap L^{p_2})\ :\ \sup_{t\in\mathbb{R}}
\|v(t)\|_{L^2}<\infty,
\ \sup_{t\in\mathbb{R}}
\frac{(1+|t|)^{1-\frac{2}{p_2}}}{[\log(2+|t|)]^{\frac{1-\frac{2}{p_2}}{1-\frac{2}{p_0}}}}\|v(t)\|_{L^{p_2}}<\infty
\right\};$$
\item[Case II] if $1/2<\alpha_1<1$ and $p_1<
p_2,$ let:
\begin{eqnarray}
Y&=&\left\{v\in C\left(\mathbb{R},L^2\cap L^{p_1}\cap L^{p_2}\right)\ :\ \sup_{t\in\mathbb{R}}
\|v(t)\|_{L^2}<\infty,\right.\nonumber\\
&&\sup_{t\in\mathbb{R}}
\frac{(1+|t|)^{1-\frac{2}{p_1}}}{[\log(2+|t|)]^{\frac{1-\frac{2}{p_1}}{1-\frac{2}{p_0}}}}\|v(t)\|_{L^{p_1}}<\infty,\
\left.\sup_{t\in\mathbb{R}}
\frac{(1+|t|)^{\alpha_1-\frac{2}{p_0}}}{[\log(2+|t|)]^{\frac{\alpha_1-\frac{2}{p_0}}{1-\frac{2}{p_0}}}}\|v(t)\|_{L^{p_2}}<\infty
\right\};\nonumber\end{eqnarray}
\end{itemize} endowed with the norm $$
\|v\|_{Y}=\max\left\{\sup_{t\in\mathbb{R}} \|v(t)\|_{L^2},\ \sup_{t\in\mathbb{R}}
\frac{(1+|t|)^{1-\frac{2}{p_2}}}{[\log(2+|t|)]^{\frac{1-\frac{2}{p_2}}{1-\frac{2}{p_0}}}}\|v(t)\|_{L^{p_2}}
\right\}$$ in Case I, while in Case II
$$
\|v\|_{Y}=\max\left\{\sup_{t\in\mathbb{R}}
\|v(t)\|_{L^2},\ \sup_{t\in\mathbb{R}}
\frac{(1+|t|)^{1-\frac{2}{p_1}}}{[\log(2+|t|)]^{\frac{1-\frac{2}{p_1}}{1-\frac{2}{p_0}}}}\|v(t)\|_{L^{p_1}},\ \sup_{t\in\mathbb{R}}
\frac{(1+|t|)^{\alpha_1-\frac{2}{p_0}}}{[\log(2+|t|)]^{\frac{\alpha_1-\frac{2}{p_0}}{1-\frac{2}{p_0}}}}\|v(t)\|_{L^{p_2}}
\right\}.$$
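The time weights in $Y$ are modeled on the standard two dimensional dispersive estimate for the free Schr\" odinger group (a known fact, recalled here only for orientation):
\[
\|e^{it\Delta}f\|_{L^p(\mathbb{R}^2)}\le C|t|^{-\left(1-\frac2p\right)}\|f\|_{L^{p'}(\mathbb{R}^2)},\qquad \frac1p+\frac1{p'}=1,\quad 2\le p\le\infty,
\]
so, apart from the logarithmic factors and, in Case II, the saturation of the $L^{p_2}$ rate at $\alpha_1-2/p_0$, membership in $Y$ asks $\|v(t)\|_{L^{p_j}}$ to decay exactly like a free wave.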
Consider now the nonlinear operator in (\ref{int:zeta}):
\begin{equation}\label{def:N}
N(v)(t)=-i\int_0^t\Omega(t,s)P_cg_2(\psi_E(s),R_{a(s)}v(s))ds
-\int_0^t\Omega(t,s)P_c(\mathbb{I}-M_{u(s)})^{-1}g_3(\psi_E(s),R_{a(s)}v(s))ds.\end{equation} We have:
\begin{lemma}\label{lm:lip}
$N : Y\rightarrow Y$ is well defined, and locally Lipschitz,
i.e. there exists $\tilde{C}>0$, such that $$\|Nv_1
-Nv_2\|_{Y}\leq\tilde{C}(\|v_1\|_{Y}+\|v_2\|_{Y}+\|v_1\|_{Y}^{1+\alpha_1}+\|v_2\|_{Y}^{1+\alpha_1}+
\|v_1\|_{Y}^{1+\alpha_2}+\|v_2\|_{Y}^{1+\alpha_2})\|v_1 -v_2\|_{Y}. $$ \end{lemma}
Assuming that the lemma has been proven then we can apply the contraction principle for \eqref{int:zeta} in a closed ball in the Banach space $Y$ in the following way. Let $$v=\Omega(t,0)\zeta(0)$$ then by Theorem \ref{th:lin2}
$$\|v\|_Y\le
\max\{C_2,C_{p_0,p_1},C_{p_0,p_2}\}\|\zeta(0)\|_{L^2\cap L^{q_0'}}$$ where we used the interpolation
$\|\zeta(0)\|_{L^r}\le\|\zeta(0)\|_{L^2\cap L^{q_0'}},\ q_0'\le r\le 2$ with $r=q'$ and $r=p'$ defined in theorem \ref{th:lin2} for $p=p_j,\ j=1,2.$ Recall that
$$\zeta(0)=P_c\eta(0)=P_cu_0-h(a(0))=u_0-\langle\psi_0,u_0\rangle\psi_0-h(a(0))$$ where $u_0=u(0)$ is the initial data, see also \eqref{dc}. Hence
$$\|\zeta(0)\|_{L^2\cap L^{q_0'}}\le\|u_0\|_{L^2\cap
L^{q_0'}}+\|u_0\|_{L^2}\|\psi_0\|_{L^{q_0'}}+D_1\|u_0\|_{L^2}\le
D\varepsilon_0$$ where $D_1,\ D>0$ are constants independent of $u_0$ and the estimate on $h(a(0))$ follows from Proposition
\ref{pr:cm} and $|a(0)|\le 2\|u_0\|_{L^2}$ see Lemma \ref{lem:decomp}.
Therefore we can choose $\varepsilon_0$ small enough such that
$R=2\|v\|_Y$ satisfies $$Lip\stackrel{def}{=}2\tilde{C}(R+R^{1+\alpha_1}+ R^{1+\alpha_2})<1.$$ In this case the integral operator given by the right hand side of \eqref{int:zeta}: $$K(\zeta)=v+N(\zeta)$$
leaves $B(0,R)=\{\zeta\in Y:\|\zeta\|_Y\le R\}$ invariant and is a contraction on it with Lipschitz constant $Lip$ defined above. Consequently the equation \eqref{int:zeta} has a unique solution in $B(0,R)$ and, because $\zeta(t)\in C(\mathbb{R},H^1)\hookrightarrow C(\mathbb{R},L^2\cap L^{p_1}\cap L^{p_2})$ already satisfies the equation, we deduce that $\zeta(t)$ is in $B(0,R)$; in particular it satisfies the estimates \eqref{lpdecay}.
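Concerning the smallness requirement above, a simple sufficient (by no means optimal) choice is the following: if $\varepsilon_0$ is small enough that $R=2\|v\|_Y\le\min\{1,\frac{1}{8\tilde C}\}$, then $R^{1+\alpha_1},R^{1+\alpha_2}\le R$ and hence $Lip\le 6\tilde CR\le\frac34<1$.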
Then $\eta(t)=R_{a(t)}\zeta(t)$ satisfies \eqref{lpdecay} because of Lemma \ref{le:pcinv}. Moreover, the system of ODE's \eqref{eq:ta} has an integrable in time right hand side because the matrix $(\mathbb{I}_{\mathbb{R}^2}-M_{\tilde u})^{-1}$ has norm bounded by $2,$ see \eqref{M-bound}, while $g_{2j}$ satisfy \eqref{est:g2j}, where $\tilde\eta(t)$ differs from $\eta(t)$ only by a phase and the $L^p,\ 1\le p\le\infty,$ norms of $\Psi_j(t),\ \psi_E(t)$ are uniformly bounded in time, see \eqref{est:pPsi}. Consequently $\tilde a_1(t)$ and $\tilde a_2(t)$ converge as $t\rightarrow\pm\infty,$ and there exist constants $C, \epsilon >0$ such that:
$$\lim_{t\rightarrow\pm\infty}\tilde a(t)=\lim_{t\rightarrow\pm\infty}\tilde
a_1(t)+i\tilde a_2(t)\stackrel{def}{=} a_{\pm\infty},\qquad
|\tilde a(\pm t)-a_{\pm\infty}|\le C(1+t)^{-\epsilon},\ {\rm for\
all}\ t\ge 0.$$
We can now define
\begin{equation}\label{def:psieinfty}
\psi_{E_{\pm\infty}}=a_{\pm\infty}\psi_0+h(a_{\pm\infty}),
\end{equation} and we have
\begin{equation}\label{conv:tpsie}\lim_{t\rightarrow\pm\infty}\|\tilde\psi_E(t)-\psi_{E_{\pm\infty}}\|_{H^2\cap
L^2_\sigma}=0,\ {\rm for}\ \sigma\in\mathbb{R}
\end{equation} where we used \eqref{decomp:tu} and the continuity of $h(a),$ see Proposition \ref{pr:cm}. In addition, since $E:[-2\delta_1,\delta_1]\mapsto (-\delta,\delta)$ is a $C^1$ function, see Proposition \ref{pr:cm}, the following limits exist together with the constant $C_1>0$ such that:
$$\lim_{t\rightarrow\pm\infty}E(|\tilde a(t)|)=E_{\pm\infty},\qquad
|E(|\tilde a(\pm t)|)-E_{\pm\infty}|\le C_1(1+t)^{-\epsilon} \ {\rm for\
all}\ t\ge 0.$$ If we define
\begin{equation}\label{def:ttheta}
\tilde\theta(t)=\left\{\begin{array}{lr}\frac{1}{t}\int_0^{t}E(|\tilde
a(s)|)-E_{+\infty}ds & {\rm if}\ t>0\\ 0 & {\rm if}\
t=0\\ \frac{1}{t}\int_0^{t}E(|\tilde
a(s)|)-E_{-\infty}ds & {\rm if}\ t<0\end{array}\right.
\end{equation} then
$$\lim_{|t|\rightarrow\infty}\tilde\theta(t)=0$$ and
\begin{equation}\label{rel:theta1}\theta(t)=\int_0^{t}E(|a(s)|)ds=\left\{\begin{array}{lr}
t(E_{+\infty}+\tilde\theta(t)) & {\rm if}\ t\ge 0\\ t(E_{-\infty}+\tilde\theta(t)) & {\rm if}\ t< 0
\end{array}\right.\end{equation}
where we used $|a(t)|=|\tilde a(t)|,$ see \eqref{def:taeta}.
In conclusion, since $\psi_E(t)=e^{-i\theta(t)}\tilde\psi_E(t),$ see \eqref{dc}, \eqref{decomp:tu} and \eqref{def:taeta}, we get from \eqref{conv:tpsie} and \eqref{rel:theta1} the convergence \eqref{conv:psie}.
It remains to prove Lemma \ref{lm:lip}:
\noindent{\bf Proof of Lemma \ref{lm:lip}:} It suffices to prove the estimate:
\begin{equation}\label{est:N}
\|Nv_1-Nv_2\|_{Y}\leq\tilde{C}(\|v_1\|_{Y}+\|v_2\|_{Y}+\|v_1\|_{Y}^{1+\alpha_1}+\|v_2\|_{Y}^{1+\alpha_1}+
\|v_1\|_{Y}^{1+\alpha_2}+\|v_2\|_{Y}^{1+\alpha_2})\|v_1 -v_2\|_{Y},
\end{equation} because plugging in $v_2\equiv 0$ and using $N(0)\equiv 0,$ see \eqref{def:N}, will then imply $N(v_1)\in Y$ whenever $v_1\in Y.$
Note that, interpolating $\|v(t)\|_{L^p}\le\|v(t)\|_{L^2}^{1-\theta}\|v(t)\|_{L^{p_2}}^{\theta}$ with $\frac1p=\frac{1-\theta}{2}+\frac{\theta}{p_2}$ (respectively using $L^{p_1}$ and $L^{p_2}$ in Case II, where $1-2/p_1=\alpha_1-2/p_0$), we have for all $v\in Y$ and any $2\le p\le p_2:$
\begin{equation}\label{est:lp}
\|v(t)\|_{L^p}\le \left\{\begin{array}{cl}
\|v\|_Y\frac{\log^{\frac{1-2/p}{1-2/p_0}}(2+|t|)}{(1+|t|)^{1-2/p}}
& {\rm if}\ \alpha_1\ge 1\ {\rm or}\ \alpha_1<1\ {\rm and}\ \ p\le \frac{2}{1-\alpha_1+2/p_0}, \\
& \\
\|v\|_Y\frac{\log^{\frac{\alpha_1-2/p_0}{1-2/p_0}}(2+|t|)}{(1+|t|)^{\alpha_1-2/p_0}}
& {\rm if}\ \alpha_1<1\ {\rm and}\ p>
\frac{2}{1-\alpha_1+2/p_0}.
\end{array}\right.
\end{equation} Now, from \eqref{g2}, we have for any $v_1,\ v_2\in Y:$
\begin{eqnarray}
g_2(\psi_E,R_av_1)-g_2(\psi_E,R_av_2)&=&g(\psi_E+R_av_1)-g(\psi_E+R_av_2)-Dg_{\psi_E}[R_a(v_1-v_2)]\nonumber\\
&=&\int_0^1\left(Dg_{\psi_E+R_a(\tau
v_1+(1-\tau)v_2)}-Dg_{\psi_E}\right)[R_a(v_1-v_2)]d\tau\nonumber\\
&=&\int_0^1\int_0^1D^2g_{\psi_E+sR_a(\tau
v_1+(1-\tau)v_2)}[R_a(\tau v_1+(1-\tau)v_2)][R_a(v_1-v_2)]d\tau
ds\nonumber\\
&=&
A_1(\psi_E,v_1,v_2)+A_2(\psi_E,v_1,v_2)+A_3(\psi_E,v_1,v_2),\label{def:A123}
\end{eqnarray} where we consider $\chi_j(t,x),\ j=1,2$ to be the characteristic function of the set $S_1=\{(t,x)\in\mathbb{R}\times\mathbb{R}^2\ :\
|\psi_E(t,x)|\ge\max (|R_{a(t)}v_1(t,x)|,|R_{a(t)}v_2(t,x)|)\},$ respectively\newline $S_2= \{(t,x)\in\mathbb{R}\times\mathbb{R}^2\
:\ \max (|R_{a(t)}v_1(t,x)|,|R_{a(t)}v_2(t,x)|)\le 1\}$ and
\begin{eqnarray}
A_1(\psi_E,v_1,v_2)&=&\int_0^1\int_0^1\chi_1 D^2g_{\psi_E+sR_a(\tau
v_1+(1-\tau)v_2)}[R_a(\tau v_1+(1-\tau)v_2)][R_a(v_1-v_2)]d\tau
ds,\nonumber\\
A_2(\psi_E,v_1,v_2)&=&\int_0^1\int_0^1(1-\chi_1)\chi_2 D^2g_{\psi_E+sR_a(\tau
v_1+(1-\tau)v_2)}[R_a(\tau v_1+(1-\tau)v_2)][R_a(v_1-v_2)]d\tau
ds,\nonumber\\
A_3(\psi_E,v_1,v_2)&=&\int_0^1\int_0^1(1-\chi_1)(1-\chi_2) D^2g_{\psi_E+sR_a(\tau
v_1+(1-\tau)v_2)}[R_a(\tau v_1+(1-\tau)v_2)][R_a(v_1-v_2)]d\tau
ds.\nonumber
\end{eqnarray} Note that there exists a constant $C>0$ such that for any $\psi_E,\ v_1,\ v_2\in Y,$ any $t\in\mathbb{R}$ and almost all $x\in\mathbb{R}^2$ we have the pointwise estimates:
\begin{eqnarray}
|A_1(\psi_E(t,x),v_1(t,x),v_2(t,x))|&\le
&C\left(2^{\alpha_1}|\psi_E(t,x)|^{\alpha_1}+2^{\alpha_2}|\psi_E(t,x)|^{\alpha_2}\right)(|R_{a(t)}v_1(t,x)|+|R_{a(t)}v_2(t,x)|)\nonumber\\
&&\times |R_{a(t)}(v_1(t,x)-v_2(t,x))|\nonumber\\
|A_2(\psi_E(t,x),v_1(t,x),v_2(t,x))|&\le
&2^{\alpha_1}C\left(|R_{a(t)}v_1(t,x)|^{1+\alpha_1}+|R_{a(t)}v_2(t,x)|^{1+\alpha_1}\right)|R_{a(t)}(v_1(t,x)-v_2(t,x))|\nonumber\\
|A_3(\psi_E(t,x),v_1(t,x),v_2(t,x))|&\le
&2^{\alpha_2}C\left(|R_{a(t)}v_1(t,x)|^{1+\alpha_2}+|R_{a(t)}v_2(t,x)|^{1+\alpha_2}\right)|R_{a(t)}(v_1(t,x)-v_2(t,x))|\nonumber
\end{eqnarray} where we used \eqref{est:d2g}. Consequently, for any $\sigma\in\mathbb{R}$ there exists a constant $C_{\sigma}>0$ such that for any $t\in\mathbb{R}:$
\begin{eqnarray}
\|A_1(\psi_E(t),v_1(t),v_2(t))\|_{L^2_\sigma}&\le
&C\|2^{\alpha_1}|\psi_E(t)|^{\alpha_1}+2^{\alpha_2}|\psi_E(t)|^{\alpha_2}\|_{L^s_\sigma}(\|R_{a(t)}v_1(t)\|_{L^{p_2}}+\|R_{a(t)}v_2(t)\|_{L^{p_2}})\nonumber\\
&&\times \|R_{a(t)}(v_1(t)-v_2(t))\|_{L^{p_2}}\nonumber\\
&\le &\frac{C_\sigma
\log^{a_1}(2+|t|)}{(1+|t|)^{b_1}}
(\|v_1\|_Y+\|v_2\|_Y)\|v_1-v_2\|_Y\label{est:A1}\\
\end{eqnarray} where $\frac{1}{s}+\frac{2}{p_2}=\frac{1}{2},$ and, for $\Psi_j,\ j=1,2$ defined in Remark~\ref{rmk:inv}:
\begin{eqnarray}
|\Re\langle\Psi_j(a(t)),-iA_1(\psi_E(t),v_1(t),v_2(t))\rangle|&\le &\|\Psi_j(a(t))\|_{L^2_{-\sigma}}\|A_1(\psi_E(t),v_1(t),v_2(t))\|_{L^2_\sigma}\nonumber\\
&\le & C_{2,-\sigma}\frac{C_\sigma
\log^{a_1}(2+|t|)}{(1+|t|)^{b_1}}
(\|v_1\|_Y+\|v_2\|_Y)\|v_1-v_2\|_Y\label{est:g2ja1}
\end{eqnarray} where
\begin{equation}\label{def:ab1}
b_1=\left\{\begin{array}{lr} 2-\frac{4}{p_2} & {\rm in\ Case\ I},\\
2\alpha_1-\frac{4}{p_0} & {\rm in\ Case\
II},\end{array}\right.\qquad a_1=\left\{\begin{array}{lr} 2\frac{1-2/p_2}{1-2/p_0} & {\rm in\ Case\ I},\\
2\frac{\alpha_1-2/p_0}{1-2/p_0} & {\rm in\ Case\
II},\end{array}\right.
\end{equation} see the definition of the Banach space $Y,$ and we used H\" older inequality together with \eqref{est:pPsi} and Lemma \ref{le:pcinv}.
Similarly, for any $1\le r'\le 2$ we have $(2+\alpha_1)r'\le (2+\alpha_2)r'\le p_2,$ hence the above pointwise estimates and \eqref{est:lp} imply that there exists a constant $C_{r'}>0$ such that for any $t\in\mathbb{R}:$
\begin{eqnarray}
\|A_2(\psi_E(t),v_1(t),v_2(t))\|_{L^{r'}}&\le &
2^{\alpha_1}C\|\ |R_{a(t)}v_1(t)|^{1+\alpha_1}+|R_{a(t)}v_2(t)|^{1+\alpha_1}\|_{L^{\frac{(2+\alpha_1)r'}{1+\alpha_1}}}\|R_{a(t)}(v_1(t)-v_2(t))\|_{L^{(2+\alpha_1)r'}}\nonumber\\
&\le & \frac{C_{r'}
\log^{a_2(r')}(2+|t|)}{(1+|t|)^{b_2(r')}}
(\|v_1\|_Y^{1+\alpha_1}+\|v_2\|_Y^{1+\alpha_1})\|v_1-v_2\|_Y,\label{est:A2}
\end{eqnarray} where
\begin{equation}\label{def:ab2}
\begin{array}{lll} b_2(r')=\alpha_1+\frac{2}{r}, & a_2(r')=\frac{\alpha_1+2/r}{1-2/p_0}, & {\rm if}\ \alpha_1\ge 1\ {\rm or}\ \alpha_1<1\ {\rm and}\ \ (2+\alpha_1)r'\le p_1,\\
b_2(r')=(2+\alpha_1)(\alpha_1-\frac{2}{p_0}), & a_2(r')=(2+\alpha_1)\frac{\alpha_1-2/p_0}{1-2/p_0}, & {\rm if}\ \alpha_1<1\ {\rm and}\ \ (2+\alpha_1)r'>
p_1,
\end{array}
\end{equation} with $1/r+1/r'=1,$ and
\begin{eqnarray}
\|A_3(\psi_E(t),v_1(t),v_2(t))\|_{L^{r'}}&\le
&2^{\alpha_2}C\|\ |R_{a(t)}v_1(t)|^{1+\alpha_2}+|R_{a(t)}v_2(t)|^{1+\alpha_2}\|_{L^{\frac{(2+\alpha_2)r'}{1+\alpha_2}}}\|R_{a(t)}(v_1(t)-v_2(t))\|_{L^{(2+\alpha_2)r'}}\nonumber\\
&\le &\frac{C_{r'}
\log^{a_3(r')}(2+|t|)}{(1+|t|)^{b_3(r')}}
(\|v_1\|_Y^{1+\alpha_2}+\|v_2\|_Y^{1+\alpha_2})\|v_1-v_2\|_Y,\label{est:A3}
\end{eqnarray} where
\begin{equation}\label{def:ab3}
\begin{array}{lll} b_3(r')=\alpha_2+\frac{2}{r}, & a_3(r')=\frac{\alpha_2+2/r}{1-2/p_0}, & {\rm if}\ \alpha_1\ge 1\ {\rm or}\ \alpha_1<1\ {\rm and}\ \ (2+\alpha_2)r'\le p_1,\\
b_3(r')=(2+\alpha_2)(\alpha_1-\frac{2}{p_0}), & a_3(r')=(2+\alpha_2)\frac{\alpha_1-2/p_0}{1-2/p_0}, & {\rm if}\ \alpha_1<1\ {\rm and}\ \ (2+\alpha_2)r'>
p_1.
\end{array}
\end{equation} Moreover, using the Cauchy-Schwarz inequality and \eqref{est:pPsi} we have:
\begin{eqnarray}
|\Re\langle\Psi_j(a(t)),-iA_2(\psi_E(t),v_1(t),v_2(t))\rangle|&\le &\|\Psi_j(a(t))\|_{L^2}\|A_2(\psi_E(t),v_1(t),v_2(t))\|_{L^{2}}\nonumber\\
&\le & C_{2,0}\frac{C_{2}
\log^{a_2(2)}(2+|t|)}{(1+|t|)^{b_2(2)}}
(\|v_1\|_Y^{1+\alpha_1}+\|v_2\|_Y^{1+\alpha_1})\|v_1-v_2\|_Y,\label{est:g2ja2}
\end{eqnarray} and
\begin{equation}\label{est:g2ja3}
|\Re\langle\Psi_j(a(t)),-iA_3(\psi_E(t),v_1(t),v_2(t))\rangle|\le C_{2,0}\frac{C_{2}
\log^{a_3(2)}(2+|t|)}{(1+|t|)^{b_3(2)}}
(\|v_1\|_Y^{1+\alpha_2}+\|v_2\|_Y^{1+\alpha_2})\|v_1-v_2\|_Y.
\end{equation}
Now, from \eqref{def:g3} and \eqref{def:g2j} we have
\begin{eqnarray}
\lefteqn{g_3(\psi_E,R_av_1)-g_3(\psi_E,R_av_2)}\nonumber\\
&=&\Re\langle\Psi_1(a),-i(g_2(\psi_E,R_av_1)-g_2(\psi_E,R_av_2))\rangle
\frac{\partial\psi_E}{\partial a_1}+\Re\langle\Psi_2(a),-i(g_2(\psi_E,R_av_1)-g_2(\psi_E,R_av_2))\rangle
\frac{\partial\psi_E}{\partial a_2}\nonumber\\
&=&\Re\langle\Psi_1(a),-i(A_1+A_2+A_3)(\psi_E,v_1,v_2)\rangle
\frac{\partial\psi_E}{\partial a_1}+\Re\langle\Psi_2(a),-i(A_1+A_2+A_3)(\psi_E,v_1,v_2)\rangle
\frac{\partial\psi_E}{\partial a_2}.\nonumber
\end{eqnarray} Consequently, for
\begin{equation}\label{def:A4}
A_4(\psi_E,v_1,v_2)\stackrel{def}{=}(\mathbb{I}-M_u)^{-1}(g_3(\psi_E,R_av_1)-g_3(\psi_E,R_av_2))
\end{equation} we have that for any $\sigma\in\mathbb{R}$ there exists a constant $C_\sigma>0$ such that:
\begin{eqnarray}
\lefteqn{\|A_4(\psi_E(t),v_1(t),v_2(t))\|_{L^2_\sigma}\le \max\left\{\|\frac{\partial\psi_E}{\partial
a_1}(t)\|_{L^2_\sigma},\|\frac{\partial\psi_E}{\partial
a_2}(t)\|_{L^2_\sigma}\right\}\sqrt{2}\|(\mathbb{I}-M_{u(t)})^{-1}\|_{\mathbb{R}^2\mapsto\mathbb{R}^2}}\nonumber\\
&&\times\sqrt{|\Re\langle\Psi_1(a(t)),-i(A_1+A_2+A_3)(t)\rangle|^2+|\Re\langle\Psi_2(a(t)),-i(A_1+A_2+A_3)(t)\rangle|^2}\nonumber\\
&\le &\frac{C_\sigma
\log^{a_4}(2+|t|)}{(1+|t|)^{b_4}}
(\|v_1\|_{Y}+\|v_2\|_{Y}+\|v_1\|_{Y}^{1+\alpha_1}+\|v_2\|_{Y}^{1+\alpha_1}+
\|v_1\|_{Y}^{1+\alpha_2}+\|v_2\|_{Y}^{1+\alpha_2})\|v_1-v_2\|_Y\label{est:A4}
\end{eqnarray} where
\begin{equation}\label{def:ab4}
b_4=\min\{b_1,b_2(2),b_3(2)\},\qquad a_4=\max\{a_1,a_2(2),a_3(2)\},
\end{equation} and we used \eqref{est:pPsi}, \eqref{M-bound}, \eqref{est:g2ja1}, \eqref{est:g2ja2}, and \eqref{est:g2ja3}.
We are now ready to prove the Lipschitz estimate for the nonlinear operator $N,$ \eqref{est:N}. From its definition \eqref{def:N} and \eqref{def:A123}, \eqref{def:A4} we have for any $v_1,v_2\in Y,$ any $2\le p\le p_2,$ and a fixed $\sigma>1:$
\begin{eqnarray}
\lefteqn{\|N(v_1)(t)-N(v_2)(t)\|_{L^p}=\left\|\int_0^t\Omega(t,s)P_c(-iA_1-iA_2-iA_3-A_4)(\psi_E(s),v_1(s),v_2(s))ds\right\|_{L^p}}\nonumber\\
&\le &\int_0^t\|\Omega(t,s)\|_{L^2_\sigma\mapsto
L^p}\left(\|A_1(\psi_E(s),v_1(s),v_2(s))\|_{L^2_\sigma}+\|A_4(\psi_E(s),v_1(s),v_2(s))\|_{L^2_\sigma}\right)ds\nonumber\\
&+&\int_0^{|t|}\|\Omega(t,s)\|_{L^{q'}\cap L^{p'}\mapsto
L^p}\left(\|A_2(\psi_E(s),v_1(s),v_2(s))\|_{L^{q'}\cap L^{p'}}+\|A_3(\psi_E(s),v_1(s),v_2(s))\|_{L^{q'}\cap L^{p'}}\right)ds.\nonumber
\end{eqnarray} where
\begin{equation}\label{def:p'q'}
1/p'+1/p=1,\ q'=p'(p_0-2)/(p_0-p'),\ 1/q+1/q'=1.\end{equation} From Theorem~\ref{th:lin1} and estimates \eqref{est:A1}, \eqref{est:A4} we get:
\begin{eqnarray}
\lefteqn{\int_0^{|t|}\|\Omega(t,s)\|_{L^2_\sigma\mapsto
L^p}\left(\|A_1(\psi_E(s),v_1(s),v_2(s))\|_{L^2_\sigma}+\|A_4(\psi_E(s),v_1(s),v_2(s))\|_{L^2_\sigma}\right)ds}\nonumber\\
&\le &(\|v_1\|_{Y}+\|v_2\|_{Y}+\|v_1\|_{Y}^{1+\alpha_1}+\|v_2\|_{Y}^{1+\alpha_1}+
\|v_1\|_{Y}^{1+\alpha_2}+\|v_2\|_{Y}^{1+\alpha_2})\|v_1-v_2\|_Y\nonumber\\
&&\times\int_0^t\frac{C_p}{|t-s|^{1-2/p}}\left[\frac{C_\sigma \log^{a_1}(2+|s|)}{(1+|s|)^{b_1}}
+\frac{C_\sigma
\log^{a_4}(2+|s|)}{(1+|s|)^{b_4}}\right]ds\nonumber
\end{eqnarray} while from Theorem~\ref{th:lin2} and estimates \eqref{est:A2}, \eqref{est:A3} we get:
\begin{eqnarray}
\lefteqn{\int_0^{|t|}\|\Omega(t,s)\|_{L^{q'}\cap L^{p'}\mapsto
L^p}\|A_2(\psi_E(s),v_1(s),v_2(s))\|_{L^{q'}\cap L^{p'}}ds\le
(\|v_1\|_{Y}^{1+\alpha_1}+\|v_2\|_{Y}^{1+\alpha_1})\|v_1-v_2\|_Y}\nonumber\\
&&\times\int_0^t\frac{C_{p_0,p}\log^{\frac{1-2/p}{1-2/p_0}}(2+|t-s|)}{|t-s|^{1-2/p}}
\max\left\{\frac{C_{q'} \log^{a_2(q')}(2+|s|)}{(1+|s|)^{b_2(q')}},\frac{C_{p'}
\log^{a_2(p')}(2+|s|)}{(1+|s|)^{b_2(p')}}\right\}ds\nonumber
\end{eqnarray} and
\begin{eqnarray}
\lefteqn{\int_0^{|t|}\|\Omega(t,s)\|_{L^{q'}\cap L^{p'}\mapsto
L^p}\|A_3(\psi_E(s),v_1(s),v_2(s))\|_{L^{q'}\cap L^{p'}}ds \le
(\|v_1\|_{Y}^{1+\alpha_2}+\|v_2\|_{Y}^{1+\alpha_2})\|v_1-v_2\|_Y}\nonumber\\
&&\times\int_0^t\frac{C_{p_0,p}\log^{\frac{1-2/p}{1-2/p_0}}(2+|t-s|)}{|t-s|^{1-2/p}}
\max\left\{\frac{C_{q'} \log^{a_3(q')}(2+|s|)}{(1+|s|)^{b_3(q')}},\frac{C_{p'}
\log^{a_3(p')}(2+|s|)}{(1+|s|)^{b_3(p')}}\right\}ds.\nonumber
\end{eqnarray}
In Case I, i.e. $\alpha_1\ge 1,$ or $1/2<\alpha_1<1$ and $p_1\ge
p_2,$ since $\alpha_2\ge\alpha_1$ and $p_2\ge 4+2\alpha_2>4,$ we have from \eqref{def:ab1}, \eqref{def:ab2}, \eqref{def:ab3} and \eqref{def:ab4} for $r'\in\{q',p',2\}$ and $1/r+1/r'=1:$ $$b_1=2-\frac{4}{p_2}>1,\ b_2(r')=\alpha_1+\frac{2}{r}>1,\ b_3(r')=\alpha_2+\frac{2}{r}>1,\ b_4=\min\{b_1,b_2(2),b_3(2)\}>1.$$ We now use the following known convolution estimate:
\begin{equation}\label{est:conv}
\int_0^{|t|}\frac{\log^a(2+|t-s|)}{|t-s|^b}\frac{\log^c(2+|s|)}{(1+|s|)^d}ds\le
C(a,b,c,d)\frac{\log^a(2+|t|)}{(1+|t|)^b},\qquad {\rm for}\ d>1,\
b<1,\end{equation} to bound the integral terms above and obtain for all $2\le p\le p_2:$
\begin{eqnarray}
\lefteqn{\|N(v_1)(t)-N(v_2)(t)\|_{L^p}\le
C_p\frac{\log^{\frac{1-2/p}{1-2/p_0}}(2+|t|)}{(1+|t|)^{1-2/p}}}\nonumber\\
&&\times (\|v_1\|_{Y}+\|v_2\|_{Y}+\|v_1\|_{Y}^{1+\alpha_1}+\|v_2\|_{Y}^{1+\alpha_1}+
\|v_1\|_{Y}^{1+\alpha_2}+\|v_2\|_{Y}^{1+\alpha_2})\|v_1-v_2\|_Y\label{est:Np1}
\end{eqnarray} which, upon moving the time-dependent factors to the left-hand side and taking the supremum over $t\in\mathbb{R}$ for $p\in\{2,p_2\},$ leads to \eqref{est:N} with $\tilde C=\max\{C_2,C_{p_2}\}.$
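For the reader's convenience, we recall the standard splitting argument behind \eqref{est:conv} (for $|t|\ge 1$; the case $|t|\le 1$ is immediate since $b<1$). On $0\le s\le |t|/2$ we have $|t-s|\ge |t|/2,$ hence
$$\int_0^{|t|/2}\frac{\log^a(2+|t-s|)}{|t-s|^b}\frac{\log^c(2+|s|)}{(1+|s|)^d}ds\le
C\frac{\log^a(2+|t|)}{(1+|t|)^{b}}\int_0^{\infty}\frac{\log^c(2+|s|)}{(1+|s|)^d}ds,$$
with the last integral finite because $d>1,$ while on $|t|/2\le s\le |t|$ we have
$$\int_{|t|/2}^{|t|}\frac{\log^a(2+|t-s|)}{|t-s|^b}\frac{\log^c(2+|s|)}{(1+|s|)^d}ds\le
C\frac{\log^{a+c}(2+|t|)}{(1+|t|)^{d}}\,|t|^{1-b}\le C\frac{\log^a(2+|t|)}{(1+|t|)^{b}},$$
where in the last step $d>1$ was used to absorb $\log^c(2+|t|)(1+|t|)^{1-d}$ into the constant.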
In Case II, i.e. $1/2<\alpha_1<1$ and $p_1<
p_2,$ we have from \eqref{def:ab1} $b_1=2(\alpha_1-\frac{2}{p_0})>1$ because $p_0>2/(\alpha_1-1/2),$ see \eqref{def:p0}. From \eqref{def:ab2}, under the restriction $2\le p\le p_1,$ with $p',\ q',\ q$ defined by \eqref{def:p'q'}, we have either:
$$b_2(p')>b_2(q')=\alpha_1+2/q>1,$$ or
$$b_2(p')=b_2(q')=(2+\alpha_1)(\alpha_1-2/p_0)>(2+\alpha_1)/2>1.$$ Since $\alpha_2\ge \alpha_1$ implies $b_3(\cdot)\ge b_2(\cdot)$ we deduce that, under the restriction $2\le p\le p_1,$ we also have
$$b_3(p')\ge b_3(q')\ge b_2(q')>1,$$ and
$$b_4=\min\{b_1,b_2(2),b_3(2)\}>1.$$ We can again apply \eqref{est:conv} to the above integral terms and get for $2\le p\le p_1$ the estimate \eqref{est:Np1}. For $p>p_1$ one can show that $(2+\alpha_1)q'<p_1$ hence $b_2(q')=\alpha_1+2/q,$ and, in the particular case of $p=p_2,$ we get
$$b_2(q'_2)=\alpha_1+2/q_2<1,$$ where $q'_2,\ q_2$ are given by \eqref{def:p'q'}. We now have from convolution estimates:
$$\int_0^{|t|}\frac{\log^{\frac{1-2/p_2}{1-2/p_0}}(2+|t-s|)}{|t-s|^{1-2/p_2}}\frac{\log^{a_2(q'_2)}(2+|s|)}{(1+|s|)^{b_2(q'_2)}}ds
\le
C(p_2)\frac{\log^{\frac{1-2/p_2}{1-2/p_0}+a_2(q'_2)}(2+|t|)}{(1+|t|)^{\alpha_1+2/q_2-2/p_2}} \le
\tilde C(p_2)\frac{\log^{\frac{\alpha_1-2/p_0}{1-2/p_0}}(2+|t|)}{(1+|t|)^{\alpha_1-2/p_0}},$$ where we used \eqref{def:p'q'} and $p_2\le p_0$ to obtain:
$$\frac{2}{p_2}-\frac{2}{q_2}=\frac{2}{p_0}\left(\frac{1-2/p_2}{1-2/p_0}\right)\le
\frac{2}{p_0}.$$ Since $b_2(p'_2)>b_2(q'_2)$ and $b_3(p'_2)\ge b_3(q'_2)\ge
b_2(q'_2)$ we deduce
\begin{eqnarray}
\lefteqn{\|N(v_1)(t)-N(v_2)(t)\|_{L^{p_2}}\le
\tilde C_{p_2}\frac{\log^{\frac{\alpha_1-2/p_0}{1-2/p_0}}(2+|t|)}{(1+|t|)^{\alpha_1-2/p_0}}}\nonumber\\
&&\times (\|v_1\|_{Y}+\|v_2\|_{Y}+\|v_1\|_{Y}^{1+\alpha_1}+\|v_2\|_{Y}^{1+\alpha_1}+
\|v_1\|_{Y}^{1+\alpha_2}+\|v_2\|_{Y}^{1+\alpha_2})\|v_1-v_2\|_Y\nonumber
\end{eqnarray} which, combined with \eqref{est:Np1} for $p\in\{2,p_1\},$ after moving the time-dependent factors to the left-hand side and taking the supremum over $t\in\mathbb{R},$ gives \eqref{est:N} in Case II with $\tilde C=\max\{C_2,C_{p_1},\tilde C_{p_2}\}.$
This finishes the proof of Lemma~\ref{lm:lip} and of Theorem~\ref{th:main}. $\Box$
\section{Linear Estimates}\label{se:lin}
\par Consider the linear Schr\" odinger equation with a potential in two space dimensions: \[
\begin{cases}
i\frac{\partial u}{\partial t}=(-\Delta+V(x))u\\
u(0)=u_0.
\end{cases}
\] It is known that if $V$ satisfies hypothesis (H1)(i) and (ii) then the radiative part of the solution, i.e. its projection onto the continuous spectrum of $H=-\Delta +V,$ satisfies the estimates:
\begin{equation}\label{Murata}
\|e^{-iHt}P_c u_0\|_{L^2_{-\sigma}}\le C_M
\frac{1}{(1+|t|)\log^2(2+|t|)}\|u_0\|_{L^2_\sigma},\qquad t\in\mathbb{R},
\end{equation} for any $\sigma >1$ and some constant $C_M>0$ depending only on $\sigma,$ see \cite[Theorem 7.6 and Example 7.8]{mm:ae}, and
\begin{equation}\label{est:Lp}
\|e^{-iHt}P_c u_0\|_{L^p}\le \frac{C_p}{|t|^{1-2/p}}\|u_0\|_{L^{p'}}
\end{equation} for some constant $C_p>0$ depending only on $p\ge 2$ and $p'$ given by $p'^{-1}+p^{-1}=1.$ The case $p=\infty$ in (\ref{est:Lp}) is proven in \cite{ws:de2}. The conservation of the $L^2$ norm, see \cite[Corollary 4.3.3]{caz:bk}, gives the $p=2$ case:
$$\|e^{-iHt}P_c u_0\|_{L^2}=
\|u_0\|_{L^{2}}.$$ The general result (\ref{est:Lp}) follows from Riesz-Thorin interpolation.
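Explicitly, interpolating between the $p=2$ and $p=\infty$ endpoints with parameter $\theta=2/p$ gives, for any $2\le p\le\infty$ and $t\ne 0,$
$$\|e^{-iHt}P_c u_0\|_{L^p}\le \|e^{-iHt}P_c\|^{2/p}_{L^2\mapsto L^2}\ \|e^{-iHt}P_c\|^{1-2/p}_{L^1\mapsto L^\infty}\ \|u_0\|_{L^{p'}}\le
\frac{C_\infty^{1-2/p}}{|t|^{1-2/p}}\|u_0\|_{L^{p'}},$$
since $\frac1p=\frac{\theta}{2}+\frac{1-\theta}{\infty}$ and $\frac{1}{p'}=\frac{\theta}{2}+\frac{1-\theta}{1};$ here $C_\infty$ denotes the constant in the $p=\infty$ case of (\ref{est:Lp}), so that one may take $C_p=C_\infty^{1-2/p}$.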
\par We would like to extend these estimates to the linearized dynamics around the center manifold. In other words we consider the linear equation \eqref{eq:zm}, with initial data at time
$s:$\begin{eqnarray}
\frac{\partial z}{\partial
t}&=&-i(-\Delta+V) z-iP_cDg_{\psi_E(t)}R_{a(t)} z(t)\nonumber\\
z(s)&=&v\in {\cal H}_0\nonumber
\end{eqnarray} Note that this is a nonautonomous problem as the bound state $\psi_E$ around which we linearize may change with time.
\par By Duhamel's principle we have:
\begin{eqnarray}
z(t)=e^{-iH(t-s)}P_c v-i\int_s^t e^{-iH(t-\tau)} P_cDg_{\psi_E(\tau)}R_{a(\tau)} z(\tau)d\tau
\label{rel:Duhamellin}
\end{eqnarray}
\par As in (\ref{def:Omega}) we denote
\begin{equation}\label{def:Omega1}
\Omega(t,s)v\stackrel{def}{=}z(t).
\end{equation} Relying on the fact that $\psi_E(t)$ is small and localized uniformly in $t\in\mathbb{R},$ we have shown in \cite[Section 4]{kz:as2d}, for the particular case of the cubic nonlinearity $g(s)=s^3,\ s\in\mathbb{R},$ that estimates of type (\ref{Murata})-(\ref{est:Lp}) can be extended to the operator $\Omega(t,s).$ Since \eqref{est:dg} implies, for $\sigma\ge 0$ and $1\le p'\le 2:$
\begin{eqnarray}
\|Dg_{\psi_E}R_{a} z\|_{L^2_\sigma}&\le &
C\left(\|\psi_E\|_{L^\infty_{2\sigma/(1+\alpha_1)}}^{1+\alpha_1}+\|\psi_E\|_{L^\infty_{2\sigma/(1+\alpha_2)}}^{1+\alpha_2}\right)\
C_{-\sigma}\|z\|_{L^2_{-\sigma}}\label{est:dgls}\\
\|Dg_{\psi_E}R_{a} z\|_{L^{p'}}&\le &
C\left(\|\psi_E\|_{L^{(1+\alpha_1)q}_{\sigma/(1+\alpha_1)}}^{1+\alpha_1}+\|\psi_E\|_{L^{(1+\alpha_2)q}_{\sigma/(1+\alpha_2)}}^{1+\alpha_2}\right)\
C_{-\sigma}\|z\|_{L^2_{-\sigma}},\quad
\frac{1}{p'}=\frac{1}{q}+\frac{1}{2}\label{est:dglps}\\
\|Dg_{\psi_E}R_{a} z\|_{L^{p'}}&\le &
C\left(\|\psi_E\|_{L^{(1+\alpha_1)q}}^{1+\alpha_1}+\|\psi_E\|_{L^{(1+\alpha_2)q}}^{1+\alpha_2}\right)\
C_{r}\|z\|_{L^r},\quad
\frac{1}{p'}=\frac{1}{q}+\frac{1}{r}\label{est:dglp}
\end{eqnarray} (see also Lemma \ref{le:pcinv}), we can use, with obvious modifications, the arguments in \cite[Section 4]{kz:as2d} to show the following:
\begin{theorem} \label{th:lin1} Fix $\sigma >1.$ There exists $\varepsilon_1>0$ such that if $\|<x>^{4\sigma /3}\psi_E(t)\|_{H^2}<\varepsilon_1$ for all $t\in\mathbb{R},$ then there exist constants $C,\ C_p>0$ with the property that for any $t,\ s\in\mathbb{R}$ the following hold:
\begin{eqnarray}
\|\Omega(t,s)\|_{L^2_\sigma\mapsto L^2_{-\sigma}}&\le &
\frac{C}{(1+|t-s|)\log^2(2+|t-s|)},\nonumber\\
\|\Omega(t,s)\|_{L^{p'}\mapsto L^2_{-\sigma}}&\le
&\frac{C_p}{|t-s|^{1-\frac{2}{p}}},\ {\rm for\ any}\ 2\le p<\infty\ {\rm where}\ p'^{-1}+p^{-1}=1,\nonumber\\
\|\Omega(t,s)\|_{L^2_\sigma\mapsto L^p}&\le &
\frac{C_p}{|t-s|^{1-\frac{2}{p}}},\ {\rm for\ any}\ p\ge 2
\end{eqnarray} \end{theorem} and, for: \begin{equation}\label{def:T} T(t,s)=\Omega(t,s)-e^{-iH(t-s)}P_c, \end{equation} \begin{lemma}\label{le:T} Assume that
$\|<x>^{4\sigma /3}\psi_E(t)\|_{H^2}<\varepsilon_1,\ t\in\mathbb{R},$ where $\varepsilon_1$ is the one used in Theorem~\ref{th:lin1}. Then for each $1< q'\le 2$ and $2<p<\infty$ there exist the constants $C_{q'},\ C_{p,q'}>0$ such that for all $t,\ s\in\mathbb{R}$ we have: \begin{eqnarray}
\|T(t,s)\|_{L^1\cap L^{q'}\mapsto L^2_{-\sigma}}&\le &
\frac{C_{q'}}{1+|t-s|},\nonumber\\
\|T(t,s)\|_{L^1\cap L^{q'}\mapsto L^p}&\le &\frac{C_{p,q'}\log(2+|t-s|)}
{(1+|t-s|)^{1-\frac{2}{p}}}.\nonumber
\end{eqnarray} \end{lemma} Note that, according to the proofs in \cite[Section 4]{kz:as2d}, $C_{q'}\rightarrow\infty$ as $q'\rightarrow 1$ and $C_{p,q'}\rightarrow\infty$ as $q'\rightarrow 1$ or $p\rightarrow\infty.$ These blow-ups could be prevented, and an estimate of the type
\begin{equation}\label{est:Timp}
\|T(t,s)\|_{L^1\mapsto L^\infty}\le \frac{C\log(2+|t-s|)} {1+|t-s|} \end{equation} can be obtained by avoiding the singularity of
$\|e^{-iHt}\|_{L^1\mapsto L^\infty}\sim t^{-1}$ at $t=0$ via a generalized Fourier multiplier technique developed in \cite[Appendix and Section 4]{km:as3d}. We choose not to use it here because it requires stronger restrictions on the potential $V(x)$: its Fourier transform should be in $L^1,$ while its gradient should be in $L^p,$ for some $p\ge 2,$ and should converge to zero as
$|x|\rightarrow\infty.$
We now present an improved $L^2$ estimate for the family of operators $T(t,s):$ \begin{lemma}\label{le:T2} Assume that
$\|<x>^{4\sigma /3}\psi_E(t)\|_{H^2}<\varepsilon_1,\ t\in\mathbb{R},$ where $\varepsilon_1$ is the one used in Theorem~\ref{th:lin1}. Then there exists a constant $C_{2}>0$ such that for all $t,\ s\in\mathbb{R}$ we have: $$
\|T(t,s)\|_{L^2\mapsto L^2}\le C_2.$$ \end{lemma}
\par {\bf Proof:} We are going to use a Kato type smoothing estimate: \begin{equation}\label{est:kato}
\|<x>^{-\sigma}e^{-iH t}P_c f(x)\|_{L^2_t(\mathbb{R},L^2_x)}\le C_{K}\|f\|_{L^2}, \end{equation} see for example \cite{kn:RS4}. We claim that the previous estimate still holds if we replace $e^{-iH(t-s)}P_c$ by $\Omega(t,s)$, namely, there exists a constant $\tilde C_K>0$ such that for any $s\in\mathbb{R}:$ \begin{equation}\label{est:katoo}
\|<x>^{-\sigma}\Omega(\cdot,s)f\|_{L^2_t(\mathbb{R},L^2_x)}\le
\tilde C_{K} \|f\|_{L^2}. \end{equation} Indeed, from \eqref{def:Omega1} and \eqref{rel:Duhamellin}, we have $$<x>^{-\sigma}\Omega(t,s)v=<x>^{-\sigma} e^{-iH(t-s)}P_c v+\int_s^t <x>^{-\sigma} e^{-iH(t-\tau)}P_cDg_{\psi_E(\tau)}[R_{a(\tau)}\Omega(\tau,s)v]d\tau$$ and using \eqref{est:dgls}:
\begin{eqnarray}
\|\Omega(t,s)v\|_{L^2_{-\sigma}}&\le &\|e^{-iH(t-s)}P_c v\|_{L^2_{-\sigma}} +\int_s^t
\|e^{-iH(t-\tau)}P_c\|_{L^2_\sigma\mapsto L^2_{-\sigma}}\|Dg_{\psi_E(\tau)}R_{a(\tau)}\Omega(\tau,s)v\|_{L^2_\sigma}d\tau \nonumber\\ &\le
&\|e^{-iH(t-s)}v\|_{L^2_{-\sigma}}
+C\sup_{\tau\in\mathbb{R}}\left(\|\psi_E(\tau)\|_{L^\infty_{2\sigma/(1+\alpha_1)}}^{1+\alpha_1}+\|\psi_E(\tau)\|_{L^\infty_{2\sigma/(1+\alpha_2)}}^{1+\alpha_2}\right)\nonumber\\ &\times &\int_\mathbb{R}
\frac{\|\Omega(\tau,s)v\|_{L^2_{-\sigma}}}{
(1+|t-\tau|)\log^2(2+|t-\tau|)}d\tau .\nonumber
\end{eqnarray} By Young's inequality
$\|f*g\|_{L^2(\mathbb{R})}\le\|f\|_{L^1(\mathbb{R})}\|g\|_{L^2(\mathbb{R})}$ and \eqref{est:kato} we get
$$\|\Omega(\cdot,s)v\|_{L^2(\mathbb{R},L^2_{-\sigma})}\le C_K\|v\|_{L^2_x}+C\varepsilon_1
\|\Omega(\cdot,s)v\|_{L^2(\mathbb{R},L^2_{-\sigma})}$$ which, provided $\varepsilon_1$ is small enough that $C\varepsilon_1<1,$ implies \eqref{est:katoo}.
Finally we turn to the estimate in $L^2_x$ for $T(t,s):$ \begin{eqnarray}
\lefteqn{\|T(t,s)v\|_{L^2_x}^2=}\nonumber\\&=& \langle \int_s^t e^{-iH(t-\tau)}P_cDg_{\psi_E}[R_a\Omega(\tau,s)v]d\tau, \int_s^t e^{-iH(t-\tau ')} P_cDg_{\psi_E}[R_a\Omega(\tau ',s)v]d\tau '\rangle\nonumber\\ &=&\int_s^t\int_s^t d\tau d\tau' \langle Dg_{\psi_E}[R_a\Omega(\tau,s)v], e^{-iH(\tau-\tau')}P_cDg_{\psi_E}[R_a\Omega(\tau ',s)v]\rangle\nonumber\\ &\le &C\sup_{\tau\in\mathbb{R}}
\left(\|\psi_E(\tau)\|_{L^\infty_{2\sigma/(1+\alpha_1)}}^{1+\alpha_1}+\|\psi_E(\tau)\|_{L^\infty_{2\sigma/(1+\alpha_2)}}^{1+\alpha_2}\right)^2\nonumber\\ &\times &\int_s^t\int_s^t d\tau d\tau'
\underbrace{\|\Omega(\tau,s)v\|_{L^2_{-\sigma}}}_{\in L^2(\mathbb{R})}
\underbrace{\|e^{-iH(\tau-\tau')}P_c\|_{L^2_{\sigma}\mapsto L^2_{-\sigma}}}_{\in L^1(\mathbb{R})} \underbrace{\|\Omega(\tau
',s)v\|_{L^2_{-\sigma}}}_{\in L^2(\mathbb{R})}. \nonumber \end{eqnarray} Using \eqref{Murata} combined with the Young and H\"older inequalities, the integral above is bounded by
$$C_M\|\Omega(\cdot,s)v\|_{L^2(\mathbb{R},L^2_{-\sigma})}^2\le C_M \tilde C_K^2\|v\|_{L^2_x}^2,$$ where, for the last inequality, we employed \eqref{est:katoo}. Consequently, there exists a constant $C_2$ such that for any $t,\ s\in\mathbb{R}:$
$$\|T(t,s)v\|_{L^2_x}\le C_2\|v\|_{L^2_x}.$$ This finishes the proof of the Lemma. $\Box$
Now fix $2<p_0<\infty$ and let $p'_0=p_0/(p_0-1).$ By applying Riesz-Thorin interpolation to the operators $T(t,s),$ which satisfy for all $t,\ s\in\mathbb{R}:$
\begin{eqnarray}
\|T(t,s)\|_{L^2\mapsto L^2}&\le &C_2\nonumber\\
\|T(t,s)\|_{L^1\cap L^{p'_0}\mapsto L^{p_0}}&\le &\frac{C_{p_0}\log(2+|t-s|)}
{(1+|t-s|)^{1-\frac{2}{p_0}}}\nonumber
\end{eqnarray} we obtain that for any $2\le p\le p_0$ there exists a constant $C_{p_0,p}$ between $C_2$ and $C_{p_0}$ such that:
$$\|T(t,s)\|_{L^{q'}\cap L^{p'}\mapsto L^{p}}\le \frac{C_{p_0,p}\log^{\frac{1-2/p}{1-2/p_0}}(2+|t-s|)}
{(1+|t-s|)^{1-\frac{2}{p}}},\ {\rm where}\ p'=\frac{p}{p-1},\
q'=p'\frac{p_0-2}{p_0-p'}.$$ Finally, using \eqref{def:T} and the estimates for the Schr\" odinger group \eqref{est:Lp} we get:
\begin{theorem}\label{th:lin2} Fix $2<p_0<\infty$ and assume that
$\|<x>^{4\sigma /3}\psi_E(t)\|_{H^2}<\varepsilon_1,\ t\in\mathbb{R},$ where $\varepsilon_1$ is the constant obtained in Theorem~\ref{th:lin1}. Then there exist constants $C_2,\ C_{p_0,p}>0$ such that for all $2\le p\le p_0$ and $t,\ s\in\mathbb{R}$ the following estimates hold: \begin{eqnarray}
\|\Omega(t,s)\|_{L^2\mapsto L^2}&\le &C_{2};\nonumber\\
\|\Omega(t,s)\|_{L^{q'}\cap L^{p'}\mapsto L^p}&\le &
\frac{C_{p_0,p}\log^{\frac{1-2/p}{1-2/p_0}}(2+|t-s|)}{|t-s|^{1-\frac{2}{p}}}, \ {\rm where}\ p'=\frac{p}{p-1},\
q'=p'\frac{p_0-2}{p_0-p'}.\nonumber \end{eqnarray} \end{theorem}
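For the reader's convenience, we record how the exponents in Theorem~\ref{th:lin2} arise from the interpolation above. Writing $\frac1p=\frac{\theta}{2}+\frac{1-\theta}{p_0}$ for the interpolation parameter $\theta\in[0,1],$ one finds
$$1-\frac{2}{p}=(1-\theta)\left(1-\frac{2}{p_0}\right),\qquad{\rm hence}\qquad 1-\theta=\frac{1-2/p}{1-2/p_0},$$
so combining the endpoint bounds $C_2$ and $C_{p_0}\log(2+|t-s|)(1+|t-s|)^{-(1-2/p_0)}$ for $T(t,s)$ produces precisely the logarithmic power and the time decay stated above.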
Note that the estimates for the family of operators $\Omega(t,s)$ given by the above theorem are similar to the standard $L^{p'}\mapsto L^p$ estimates for Schr\"odinger operators (\ref{est:Lp}), except for the logarithmic correction and a smaller domain of definition $L^{q'}\cap L^{p'}\subset L^{p'},$ where $q'<p'$ when $p'<2.$ Had we proven \eqref{est:Timp}, we could take $p_0=\infty,$ hence $q'=p',$ in the above theorem and obtain:
$$\|\Omega(t,s)\|_{L^{p'}\mapsto L^p}\le
\frac{C_{p}\log^{1-2/p}(2+|t-s|)}{|t-s|^{1-\frac{2}{p}}} \qquad {\rm where}\ p'=\frac{p}{p-1}.$$
\noindent{\bf Acknowledgements:} E. Kirr was partially supported by NSF grants DMS-0405921, DMS-0603722 and DMS-0707800.
\end{document} |
\begin{document}
\title{Decay of the Maxwell field on the Schwarzschild manifold}
\begin{abstract} We study solutions of the decoupled Maxwell equations in the exterior region of a Schwarzschild black hole. In stationary regions, where the Schwarzschild coordinate $r$ ranges over $2M < r_1 < r < r_2$, we obtain a decay rate of $t^{-1}$ for all components of the Maxwell field. We use vector field methods and do not require a spherical harmonic decomposition.
In outgoing regions, where the Regge-Wheeler tortoise coordinate is large, ${r_*}>\epsilon t$, we obtain decay for the null components with rates of $|\phi_+| \sim |\alpha| < C r^{-5/2}$, $|\phi_0| \sim |\rho| + |\sigma| < C r^{-2} |t-{r_*}|^{-1/2}$, and $|\phi_{-1}| \sim |\underline{\alpha}| < C r^{-1} |t-{r_*}|^{-1}$. Along the event horizon and in ingoing regions, where ${r_*}<0$, and when $t+{r_*}>1$, all components (normalized with respect to an ingoing null basis) decay at a rate of $C {u_+}^{-1}$ with ${u_+}=t+{r_*}$ in the exterior region. \end{abstract}
\section{Introduction} \label{sIntro}
The subject of this paper is the study of decay of solutions to the decoupled Maxwell equations in the exterior of a Schwarzschild black hole. The Maxwell field is a $2$-form which we may write in abstract index notation as an antisymmetric $(0,2)$-tensor field on a manifold ${\mathcal{M}}$, \begin{align*} F\in&\Omega^2({\mathcal{M}}) &\text{or}&& F_{\alpha\beta}=&-F_{\beta\alpha} . \end{align*} It satisfies the Maxwell equations: \begin{align} *\text{d}*F=&0 &\text{or}&&\nabla^\alpha F_{\alpha\beta}=&0 \label{eMaxwellEquationDiv}\\ \text{d}F=&0 && &\nabla_{[\alpha} F_{\beta\gamma]}=&0 .\label{eMaxwellEquationAlt} \end{align} The exterior region of the Schwarzschild solution is a Lorentz manifold on which the metric is given in terms of coordinates $t\in\mathbb R$, $r>2M$, $(\theta,\phi)\in S^2$ by \begin{align} ds^2 =&-(1-2M/r) dt^2 +(1-2M/r)^{-1} dr^2 +r^2(d\theta^2 +\sin^2\theta d\phi^2) . \label{eSchwarzschildMetric} \end{align}
This problem comes from general relativity. In general relativity, a model of the universe consists of a space-time manifold ${\mathcal{M}}$, possibly fields describing matter, and a Lorentz (pseudo-) metric $\text{\bf{g}}$ which satisfies Einstein's equation. Gravity is described by the curvature of $\text{\bf{g}}$. The simplest and longest-known solution is Minkowski space, $\mathbb R^{1+3}$ with the flat metric $-dt^2+dx^2+dy^2+dz^2$. After this, the Schwarzschild manifold is the longest-known solution to Einstein's equation. It is the paradigmatic example of the class of black hole solutions, which play an important role in relativity. The Maxwell field describes electromagnetic radiation. In Einstein's equations, the energy-momentum tensor of the matter fields should influence the curvature. By decoupled, we mean that the electromagnetic field does not influence the Schwarzschild solution, which is taken as a fixed background manifold. We call the Schwarzschild solution the Schwarzschild manifold and use the word solution to refer to solutions of the Maxwell equations \eqref{eMaxwellEquationDiv}-\eqref{eMaxwellEquationAlt}.
Since $F$ is a tensor, there is no coordinate independent norm with which to measure it (or, at least, not all components of it). To discuss the decay of $F$, we make a choice of basis and show that the corresponding components decay. A simple choice of basis consists of the coordinate vector fields rescaled so that they have unit length ($|g(X,X)|=1$). The rescaled vectors are \begin{align*} {\hat{T}}=&(1-2M/r)^{-1/2}\partial_t ,& {\hat{R}}=&(1-2M/r)^{1/2}\partial_r ,& {\hat{\Theta}}=&r^{-1}\partial_{\theta} ,& {\hat{\Phi}}=&r^{-1}\sin(\theta)^{-1}\partial_{\phi}. \end{align*} Given a time-like vector, there is a natural decomposition of the Maxwell field into electric and magnetic components. Since the Schwarzschild manifold has a time-translation symmetry, this provides a natural choice of time-like direction, ${\hat{T}}$. The corresponding electric and magnetic components are \begin{align*} \vec{E}_X =& F_{\vecTunitX} &X\in\{{\hat{R}}, {\hat{\Theta}}, {\hat{\Phi}}\} ,\\ \vec{B}_X =& F_{YZ} & \text{$X, Y, Z$ a cyclic permutation of ${\hat{R}}, {\hat{\Theta}}, {\hat{\Phi}}$} , \\
|\vec{E}|^2=& |\vec{E}_{\hat{R}}|^2 + |\vec{E}_{\hat{\Theta}}|^2 + |\vec{E}_{\hat{\Phi}}|^2 ,\\
|\vec{B}|^2=& |\vec{B}_{\hat{R}} |^2 + |\vec{B}_{\hat{\Theta}}|^2 + |\vec{B}_{\hat{\Phi}}|^2 . \end{align*}
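As a quick consistency check, the rescaled vectors are indeed of unit length with respect to the metric \eqref{eSchwarzschildMetric}; for instance,
\begin{align*}
g({\hat{T}},{\hat{T}})=&-(1-2M/r)^{-1}(1-2M/r)=-1 ,&
g({\hat{R}},{\hat{R}})=&(1-2M/r)(1-2M/r)^{-1}=1 ,
\end{align*}
and similarly $g({\hat{\Theta}},{\hat{\Theta}})=g({\hat{\Phi}},{\hat{\Phi}})=1$.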
Now that we have a choice of components for the Maxwell field, it is possible to state the main decay result of this paper. \begin{theorem}[Decay in stationary regions] \label{tDecayInStationaryRegions} Let $2M<r_1<r_2<\infty$. There is a constant $C$ and a norm\footnote{The norms used are stated explicitly in section \ref{sStationaryDecay}. For this norm to be finite, it is sufficient that the initial data and its first eight derivatives are bounded and decay like $r^{-(5/2+\epsilon)}$ (see remark \ref{SimplifiedInitialData}). The initial data does not need to decay at the bifurcation sphere, $r\rightarrow2M$. We do not use a spherical harmonic decomposition in our analysis; however, from the structure of the Maxwell equations, spherically symmetric solutions can have no time dependence and cannot decay sufficiently rapidly for the norm $H[\MaxF](0)$ to be finite (see appendix \ref{sExclusionOfNonRadiatable}). } $H[\MaxF](0)$ depending only on $F$ and its derivatives on the hyper-surface $\{0\}\times(2M,\infty)\times S^2$ such that if $F$ is a solution to the Maxwell equations \eqref{eMaxwellEquationDiv}-\eqref{eMaxwellEquationAlt}, then for all $t\in\mathbb R$, $r\in[r_1,r_2]$, $(\theta,\phi)\in S^2$, \begin{align*}
|\vec{E}| +|\vec{B}|
\leq& C (1+|t|)^{-1}H[\MaxF](0) . \end{align*} \end{theorem}
The major advance of this work is to find decay rates which govern all components of the (decoupled) Maxwell field explicitly. The rates we obtain for stationary regions with $r\in(r_1,r_2)$ are significantly slower than the rate of $t^{-5/2}$ which can be obtained in Minkowski space using vector field methods \cite{CK} and the rate of $t^{-3}$ which was derived formally for the Schwarzschild manifold \cite{Price,NewPrice}. Outside of outgoing light-cones, i.e.\ where $t<{r_*} =r +2M\ln((r-2M)/2M) +C$, decay at the same rate as in Minkowski space has already been obtained \cite{IngleseNicolo}. In the outgoing region, we obtain similar results, which we explain below. Certain components of the Maxwell tensor satisfy a scalar wave equation. These components are the zero-weight (spinor or null) components. Previous results for wave equations were sufficiently strong to prove decay for the zero-weight component with a rate of $t^{-1}$ in stationary regions and the appropriate decay in outgoing regions \cite{BlueSterbenz}, although this application was not explicitly stated. $L^\infty_{\text{loc}}$ decay without a rate has also been explicitly obtained using very different techniques \cite{FinsterSmollerMaxwellLG}. The existence and asymptotic completeness of wave operators taking data on the initial surface $t=0$ to the surfaces at $r=2M$ and at infinity have also been shown \cite{Bachelot}.
Our method starts by using the energy-momentum tensor to generate a positive, conserved energy from the time translation symmetry and a stronger ``conformal energy'' from a vector field $K$. This follows ideas in \cite{CK} and is very closely related to the analysis of the wave equation in \cite{BlueSofferLongPaper,BlueSterbenz,DafermosRodnianski}. Before the wave estimates were known, a similar method was used \cite{IngleseNicolo}. The growth of the conformal energy is bounded by a ``trapping term'' consisting of the $\vec{E}_{\hat{R}}$ and $\vec{B}_{\hat{R}}$ components localized near the photon sphere, $r=3M$. In the geometric optics limit, electromagnetic radiation follows null geodesics, which can orbit at $r=3M$. Energy can decay arbitrarily slowly from this region, at least for the wave equation \cite{Ralston}. Thus, it should be expected that there is an obstruction to dispersion near this surface. The trapping term can be controlled because the $\vec{E}_{\hat{R}}$ and $\vec{B}_{\hat{R}}$ components each satisfy a scalar wave equation of a type that's been previously studied \cite{BlueSterbenz}. This wave equation and the terminology ``zero-weight components'' for $\vec{E}_{\hat{R}}$ and $\vec{B}_{\hat{R}}$ follow from the analysis of the Price equations \eqref{ePricei}-\eqref{ePriceiv} first appearing in \cite{Price}. We refer to this reduction to scalar wave equations as ``spin reduction''. The control on the conformal energy allows us to conclude:
\begin{lemma} \label{lSfcDecayInIntro} There is a constant $C$ and a norm $H[\MaxF](0)$ depending only on $F$ and its derivatives on the hyper-surface $\{0\}\times(2M,\infty)\times S^2$ such that if $F$ is a solution to the Maxwell equations \eqref{eMaxwellEquationDiv}-\eqref{eMaxwellEquationAlt}, then for any $2M<r_1<r_2<\infty$ and $t$ sufficiently large, on the surface\footnote{A more general surface is permitted in the statement of lemma \ref{lSurfaceEnergyBound}} $\mathcal{S}=\{t\}\times[r_1,r_2]\times S^2$, \begin{align*}
\int_{\mathcal{S}} \left(|\vec{E}|^2 +|\vec{B}|^2\right) \horifac r^2d\rs d^2\omega \leq& C t^{-2} H[\MaxF](0)^2 . \end{align*} \end{lemma}
From Sobolev estimates and integrated decay estimates, like lemma \ref{lSfcDecayInIntro}, for the Lie derivative of $F$, it is possible to prove pointwise decay estimates. In Minkowski space, the four coordinate directions generate symmetries, so that the Lie derivatives of a Maxwell field also satisfy the Maxwell equations. Although we lack a full set of symmetries, we do have three from the time-translation and angular-rotation symmetries. To control a fourth direction, we use the Maxwell equations to ``trade'' the derivatives in the directions of the three symmetries for a radial derivative. With Lie derivatives in all directions controlled, we conclude that theorem \ref{tDecayInStationaryRegions} holds.
To further explain our results and those of others, we describe the geometry of the Schwarzschild manifold and its importance. This description can be found in most introductory relativity texts (e.g.\ \cite{EllisHawking,MTW}). The Lorentz metric is most simply given in terms of coordinates $(t,r,\theta,\phi)$ by \eqref{eSchwarzschildMetric}. As $r\rightarrow\infty$, this metric approaches the flat, Minkowski metric written in spherical coordinates, $ds^2=-dt^2+dr^2+r^2(d\theta^2+\sin^2\theta d\phi^2)$. For $r>r_0>2M$, the Schwarzschild solution describes the space-time of a vacuum outside a star of radius $r_0$ and mass $M$. The restriction on $r$ can be relaxed by considering extensions of this manifold. The metric is clearly well-defined in the exterior region $t\in\mathbb R$, $r\in(2M,\infty)$, $(\theta,\phi)\in S^2$, and in the interior region $t\in\mathbb R$, $r\in(0,2M)$, $(\theta,\phi)\in S^2$. In the interior region, since $(1-2M/r)$ is negative, $r$ is a time-like coordinate, and $t$ is space-like. The maximal analytic extension of any open subset of the Schwarzschild solution is illustrated in the conformal diagram in figure \ref{figSchwMaxExtension}, in which the angular variables are suppressed. There are two exterior regions ($I$ and $III$) and two interior regions ($II$ and $IV$). By an appropriate choice of coordinates, each interior can be smoothly joined to each exterior along a null surface $r=2M$. The manifold is also smooth at the bifurcation sphere where the four regions meet. However, as $r\rightarrow0$, the curvature polynomial $R_{\alpha\beta\gamma\delta}R^{\alpha\beta\gamma\delta}$ diverges.
\begin{figure}
\caption{A conformal diagram for the maximal extension of the Schwarzschild manifold (suppressing the spherical coordinates). Thin lines represent boundary points at infinity. Thick lines represent the singularity at $r\rightarrow0$. Dotted lines represent the event horizon. Regions $I$ and $III$ are exterior regions, and regions $II$ and $IV$ are interior regions. The surfaces $\mathfrak{I}^\pm$ represent future and past null infinity. The points $i^\pm$ represent future and past time-like infinity. The points $i^0$ represent spatial infinity. }
\label{figSchwMaxExtension}
\end{figure}
The Schwarzschild manifold is a prototypical solution to Einstein's equation which has inspired many key concepts in general relativity. The asymptotic approach of the metric to the flat, Minkowski metric is known as asymptotic flatness. In the conformal compactification of each exterior region of the Schwarzschild solution, each outgoing null geodesic (with $r\rightarrow\infty$ as $t\rightarrow\infty$) ends on future null infinity $\mathfrak{I}^+$, and each ingoing null geodesic (with $r\rightarrow\infty$ as $t\rightarrow-\infty$) starts on past null infinity $\mathfrak{I}^-$. In essence, an asymptotically flat manifold is defined to be one with a future null infinity. A black hole is a region of space-time which cannot be joined by future-directed, null or time-like curves to $\mathfrak{I}^+$, and an event horizon is its boundary. The future, interior region of the Schwarzschild solution is a black hole, and the surfaces where $r=2M$ are the event horizons. In the Schwarzschild manifold, the (future) singularity at $r\rightarrow0$ is separated from $\mathfrak{I}^+$ by the event horizon. The singularity theorems state that under a broad range of conditions, future singularities must form \cite{EllisHawking}. The weak cosmic censorship conjecture asserts that under some genericity condition, which is not yet known, future singularities are always separated from $\mathfrak{I}^+$ by an event horizon. There is a three-parameter family of asymptotically flat, known, exact solutions to Einstein's equations which represent massive, rotating, charged black holes. This is the Kerr-Newman class, and the Schwarzschild solutions are the solutions with positive mass and zero angular momentum and charge. These are stationary, in the sense that they have a time-translation symmetry sufficiently close to null infinity. These solutions also have singularities, but if the angular momentum and charge are beneath a critical threshold, then the singularities are separated from the asymptotically flat regions, in the sense that a future-directed, time-like curve from a point in an exterior region will either escape to null or space-like infinity or cross the event horizon, but not both. The Kerr-Newman solutions are believed to be the only asymptotically flat, stationary solutions. Physicists believe that all black holes should approach one of the stationary, Kerr-Newman solutions. It is not yet known if a small perturbation of a Cauchy surface for one of the Kerr-Newman solutions will evolve into a solution which remains similar to one of the known solutions. This is the question of black hole stability.
Stability for Minkowski space was a major and difficult result \cite{CKNL}. Einstein's equations are a complicated system of nonlinear equations in which the geometry is dynamic. The linearization of Einstein's equations about Minkowski space forms a system called the spin 2 field equations. Obtaining decay estimates for the spin $2$ field was one step in this proof \cite{CK}. Decay estimates for the decoupled Maxwell equations were proven at the same time.
The question of stability of the Schwarzschild solution has also inspired the study of linear fields. In the linearization of Einstein's equation, certain components are determined by the solution to a simple wave equation \cite{ReggeWheeler}, and the remaining components are determined by the solution to a more complicated wave equation \cite{Zerilli}. Using spinors, Price was able to give a more unified presentation for all components of several important, physical systems, according to their spin. Any wave equation is said to have spin $0$. The Dirac system has spin $1/2$. The Maxwell field has spin $1$. For any solution of Einstein's equations in vacuum, the non-vanishing components of the curvature satisfy certain relations from the Bianchi identities and Einstein's equation. In Minkowski space, since the curvature is zero, solutions to the linearization of Einstein's equation satisfy the same relations, which are called the spin $2$ field equations. In the Schwarzschild manifold, since the curvature is non-vanishing, in the linearization of Einstein's equations, there are additional terms arising from the derivative with respect to the perturbed metric of the original Christoffel terms. Thus, Price distinguishes between the spin $2$ field equations and the linearization of Einstein's equation. Formal arguments suggest a rate of $t^{-3}$ for fields of all integer spin and for the linearization of Einstein's equations \cite{Price}. A similar, spinorial presentation of these systems has been made for the Kerr-Newman solutions \cite{Teukolsky}. In each case, certain components were found to satisfy scalar wave equations and then acted as forcing terms in the equations governing the remaining components.
Most of the subsequent analysis has been focused on the decoupled wave equation. The literature is vast, and we list only some of the results. Solutions are known to remain uniformly bounded in time \cite{KayWald}. The scattering theory, concerning the map from the initial data to the limit on $\mathfrak{I}^+$ and the event horizon, has also been studied on the Schwarzschild manifold \cite{DimockKayScattering} and on the more general Kerr-Newman solutions \cite{Hafner}. On the Schwarzschild manifold, vector-field techniques have been used to obtain decay results in three main steps \cite{BlueSterbenz,BlueSofferLongPaper,DafermosRodnianski}. First, the vector field $K$ is used to introduce a conformal energy, which is not conserved because of trapping. Second, a radial vector field is used to prove a local decay estimate to control the trapping term. In $\mathbb R^{1+3}$, the radial derivative can be used to make a somewhat similar estimate \cite{Morawetz}. In $\mathbb R^{1+3}$, estimates involving $K$ and the radial vector field are both referred to as Morawetz estimates. In this step, a spherical harmonic decomposition was used in the proofs, but this is no longer necessary \cite{DafermosRodnianskiNoSHDecomp}. Because the scalar equation governing the zero-weight components of the Maxwell field has a simple structure, in appendix \ref{sOneDWaveAnalysis}, we are able to modify the earlier method to obtain decay without using a spherical harmonic decomposition. Third, the conformal energy is used to control norms. In \cite{BlueSterbenz,DafermosRodnianski}, a strong local decay estimate is proven and additional angular derivatives are used to obtain an $L^\infty_{\text{loc}}$ decay rate of $t^{-1}$ and a similar decay rate in outgoing regions. In \cite{DafermosRodnianski}, an additional vector field, $Y$, is used to also prove decay estimates along the event horizon. These require weighted $H^4$ or $H^5$ norms of the initial data to be bounded. By an $H^k$ norm, we mean, roughly speaking, that the $k$th derivative of a solution $u$ is square integrable. In \cite{BlueSofferLongPaper}, only weighted $H^{1+\epsilon}$ norms of the initial data are needed, but a weaker local decay estimate was obtained, which led to less control on the conformal energy and a decay rate of $t^{-1/3}$ for a weighted, spatial $L^6$ norm. Using an entirely different technique, based on a representation of the propagator, $L^\infty_\text{loc}$ decay has been proven for the wave equation on subcritical Kerr-Newman solutions \cite{FinsterKamranSmollerYauWave}.
The spin $1/2$ system is the Dirac model for the electron. On the sub-critical Kerr-Newman solutions, scattering results \cite{HafnerNicolasDirac} and $L^\infty_{\text{loc}}$ decay \cite{FinsterKamranSmollerYauDirac} have also been proven.
For the linearization of Einstein's equations about the Kerr-Newman solutions, the equations found in \cite{Teukolsky} were shown to have no unstable modes \cite{Whiting}. For the linearization about the Schwarzschild solution, the simpler equations in \cite{ReggeWheeler} satisfy an integrated decay estimate \cite{BlueSofferReggeWheeler} and $L^\infty_{\text{loc}}$ decay \cite{FinsterSmollerMaxwellLG}. Although the application is not explicitly stated, decay at a rate of $t^{-1}$ follows from \cite{BlueSterbenz}.
For the full Einstein equations on a black hole background, decay results are known in the spherically symmetric case, when the Einstein equations are coupled to a scalar wave equation and the Maxwell field \cite{DafermosRodnianskiSpherical}. By Birkhoff's theorem (see \cite{EllisHawking}), the Schwarzschild manifold is the only spherically symmetric solution to the vacuum Einstein equation (treating Minkowski space as the special sub-case with $M=0$). The decay rate obtained for the scalar field is ${u_+}^{-3+\epsilon}$ along the event horizon. The ${u_+}^{-3+\epsilon}$ decay rate is known as Price's law and has important implications for the strong cosmic censorship conjecture \cite{DafermosCensorship}.
To discuss decay outside regions of fixed $r$, it is necessary to introduce components with respect to a null tetrad, a basis built from null vectors with certain properties. Physicists may know these as spinor components \cite{Price,Stewart}, and mathematicians, as the null decomposition \cite{CK}. We present one tetrad here and discuss exactly what we mean by a null tetrad in section \ref{sNotation}.
We start by introducing the Regge-Wheeler radial coordinate, ${r_*}$, defined by \begin{align*} \frac{d r}{d{r_*}}=& (1-2M/r) , & r(0)=& 3M . \end{align*} The exterior region of the Schwarzschild solution is given by $(t,{r_*},\theta,\phi)$ ranging over $\mathbb R\times\mathbb R\times S^2$. In these coordinates, the Lorentz metric becomes \begin{align*} \text{\bf{g}}=&-(1-2M/r) dt^2 +(1-2M/r) d{r_*}^2 +r^2(d\theta^2 +\sin^2\theta d\phi^2) . \end{align*} From this form of the metric, it is clear that any multiple of the vectors $\partial_t\pm\partial_{\rs}$ are null. To define our null tetrad, we use $e_{A}$ and $e_{B}$ to denote an orthonormal basis of tangents vectors to $S^2$ and $\epsilon$ to denote the antisymmetric, Levi-Civita tensor on $S^2$. The null tetrad we will use to state our results is \begin{align*} &(\partial_t+\partial_{\rs}) ,& &(1-2M/r)^{-1}(\partial_t-\partial_{\rs}) ,& &r^{-1} e_{A} , & &r^{-1} e_{B} . \end{align*} The covariant derivative of this null tetrad along ingoing, radial, null geodesics is zero. Thus, having found a natural choice of null tetrad on the initial surface $t=0$, we have extended it to the entire future of the initial surface by parallel transport along null geodesics falling into the black hole. This is useful for considering limits as $r\rightarrow 2M$. Had we extended the basis by parallel transport along outgoing null geodesics, to study the problem as $r\rightarrow\infty$, the factor of $(1-2M/r)^{-1}$ would have been on $(\partial_t+\partial_{\rs})$ instead of $(\partial_t-\partial_{\rs})$. However, since $(1-2M/r)\rightarrow1$ as $r\rightarrow\infty$, the difference between our choice of null tetrad and the natural choice is vanishingly small. Therefore, we use our choice of null tetrad throughout the future, $t\geq0$.
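For later reference, the defining equation for ${r_*}$ above can be integrated explicitly; with the normalization $r(0)=3M$, a short computation gives
\begin{align*}
{r_*}=& r-3M+2M\ln\left(\frac{r-2M}{M}\right) ,
\end{align*}
so that ${r_*}\rightarrow-\infty$ as $r\rightarrow2M$ and ${r_*}\sim r$ as $r\rightarrow\infty$.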
The decay of these spinor or null components is not simply a decay in time. This is known from the behavior in $\mathbb R^{1+3}$. In that case, the heuristic is that the bulk of solutions to the Maxwell equations travel out along the light-cone $t\sim|\vec{x}|$. In any fixed region, there is decay because the wave leaves the region. As the light-cone expands, the average value of the wave intensity drops. Moving with the wave, the intensity decays as it is spread over the increasing area of the light-cone. Thus, some of the decay occurs as a result of the wave being far from the light cone, and some occurs as a result of the light-cone being very large. Similar behavior occurs on the Schwarzschild manifold. The null coordinates \begin{align*} {u_+}=& t+{r_*} &\text{and}&& {u_-}=& t-{r_*} \end{align*} are used to measure the distance from the light-cone, and $r$ is used to measure the size of the light-cone in the outgoing direction. In the ingoing direction, the radius of the surface of the light-cone also goes like $r$, but since this approaches $2M$, the decay occurs only in the null coordinates.
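Explicitly, the null coordinates invert as
\begin{align*}
t=&\frac{1}{2}({u_+}+{u_-}) ,& {r_*}=&\frac{1}{2}({u_+}-{u_-}) ,
\end{align*}
so estimates stated in terms of $r$, ${u_+}$, and ${u_-}$ translate directly into estimates in terms of $t$ and ${r_*}$.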
\begin{theorem}[Decay outside stationary regions] \label{tNearAndFarDecay} There is a constant $C$ and a norm $H[\MaxF](0)$ depending only on $F$ and its derivatives on the hyper-surface $\{0\}\times(2M,\infty)\times S^2$ such that if $F$ is a solution to the Maxwell equations \eqref{eMaxwellEquationDiv}-\eqref{eMaxwellEquationAlt}, then for all $t\geq0$, ${r_*}>1$, $(\theta,\phi)\in S^2$, \begin{align*}
|F(\partial_t+\partial_{\rs},r^{-1}e_{A})|\leq& C r^{-3/2}{u_+}^{-1} H[\MaxF](0) ,\\
|F(\partial_t+\partial_{\rs},(1-2M/r)^{-1}(\partial_t-\partial_{\rs}))|+|F(r^{-1}e_{A},r^{-1}e_{B})\epsilon^{AB}| \leq& C r^{-2}\left(\frac{{u_+}-|{u_-}|}{{u_+}(1+|{u_-}|)}\right)^{1/2} H[\MaxF](0) ,\\
|F((1-2M/r)^{-1}(\partial_t-\partial_{\rs}),r^{-1}e_{A})| \leq& Cr^{-1}(1+|{u_-}|)^{-1} H[\MaxF](0) . \end{align*} If we restrict to ${u_-}\leq 0$, then \begin{align*}
|F((1-2M/r)^{-1}(\partial_t-\partial_{\rs}),r^{-1}e_{A})| \leq& Cr^{-1}(1+|{u_-}|)^{-3/2} H[\MaxF](0) . \end{align*} Under the same hypotheses, then for all $t\geq0$, ${r_*}<-1$, $(\theta,\phi)\in S^2$ such that ${u_+}>1$, \begin{align*}
|F(\partial_t+\partial_{\rs},r^{-1}e_{A})| \leq& C {u_+}^{-1}H[\MaxF](0) ,\\
|F(\partial_t+\partial_{\rs},(1-2M/r)^{-1}(\partial_t-\partial_{\rs}))|+|F(r^{-1}e_{A},r^{-1}e_{B})\epsilon^{AB}| \leq& C {u_+}^{-1}H[\MaxF](0) ,\\
|F((1-2M/r)^{-1}(\partial_t-\partial_{\rs}),r^{-1}e_{A})| \leq& C {u_+}^{-1}H[\MaxF](0) . \end{align*} \end{theorem}
This gives a ${u_+}^{-1}$ decay rate for all components (since, either $r>C{u_+}$ or ${u_-}>C{u_+}$ in the far region ${r_*}>1$). Outside the outgoing light-cone, where $0<t<{r_*}-1$, the decay rates are $r^{-5/2}$, $r^{-2}{u_-}^{-1/2}$, and $r^{-1}{u_-}^{-3/2}$. These are the same rates as can be obtained in Minkowski space using vector field methods, with ${u_-}=t-|\vec{x}|$ in Minkowski space. (Faster decay rates can be obtained using conformal compactification and other methods.) Approaching null infinity inside the light-cone, with $(1+\epsilon){r_*}>t>{r_*}>0$, the decay rates are $r^{-3/2}{u_+}^{-1}$, $r^{-2}{u_-}^{-1/2}$, and $r^{-1}{u_-}^{-1}$. Thus, the decay rates for the first two components are the same as in Minkowski space, but the last component decays more slowly than in Minkowski space. The slow decay for this component comes from the slow decay rate of $t^{-1}$ in stationary regions.
In addition to the Maxwell equations, one can imagine studying the spin $2$ field equations. A spin $2$ field is a $(0,4)$ tensor with the following symmetries \begin{align} W_{\beta\alpha\gamma\delta}=&-W_{\alpha\beta\gamma\delta} \label{eSpinTwoDefnFirst}\\ W_{\alpha\beta\delta\gamma}=&-W_{\alpha\beta\gamma\delta} \label{eSpinTwoASSecond}\\ W_{[\alpha\beta\gamma]\delta}=&0 \label{eFirstBianchiSpinTwo}\\ W_{\alpha\beta\gamma}{}^\alpha=& 0 \label{eSpinTwoTraceFree} , \end{align} and which satisfies the spin $2$ field equations \begin{align} \nabla^{\gamma}W_{\gamma\delta\alpha\beta}=&0 \label{eSpinTwoDivFree} \\ \nabla_{[\epsilon}W_{\gamma\delta]\alpha\beta}=& 0 .\label{eSpinTwoDefnLast} \end{align} The symmetries of a spin $2$ field are similar to the antisymmetry of a Maxwell field, and the spin $2$ field equations are similar to the Maxwell equations. If the vacuum Einstein equations are satisfied, then the Ricci curvature vanishes, and the Weyl curvature satisfies the spin $2$ field equations. In $\mathbb R^{1+3}$, the spin $2$ field equations are a good model for the linearization of Einstein's equation about the Minkowski solution, but this is not true for the linearization about other solutions. In Cartesian coordinates on $\mathbb R^{1+3}$, the Christoffel symbols and the curvature are zero. If one introduces a perturbed metric on Minkowski space and treats the Weyl tensor as a tensor field on the original space-time, then the difference between the covariant derivative of the Weyl tensor with respect to the perturbed metric and the original metric will be second order in the perturbation. Thus, ignoring second order terms, the perturbed Weyl tensor satisfies the spin $2$ field equations on the original metric. In this sense, the spin $2$ field equations are the linearization of the vacuum-Einstein equation about Minkowski space. This is the motivation for studying the spin $2$ field in \cite{CK}. When linearizing around a curved space-time, the Christoffel symbols do not vanish, and the linearized Einstein equations do not reduce to the spin $2$ field equations. More drastically, there is a Buchdahl constraint \cite{Stewart} from applying two covariant derivatives, two contractions, and the spin $2$ field equations, \begin{align*} \text{Riem}^{\gamma\delta\epsilon}{}_{(\alpha} W_{\beta)\epsilon\gamma\delta} = 0 , \end{align*} where $\text{Riem}$ is the Riemann curvature of the background. On the Schwarzschild manifold, this forces $W({\hat{T}}+{\hat{R}},{\hat{T}}-{\hat{R}},{\hat{T}}\pm{\hat{R}},r^{-1}e_{A})$ to vanish everywhere. This forces the derivative of other components to vanish, so that there is only a finite-dimensional family of spherically symmetric solutions. These are similar to the spherically symmetric, non-decaying solutions discussed in appendix \ref{sExclusionOfNonRadiatable}.
Nonetheless, one can ignore the Buchdahl constraint and study the spin $2$ field equations. The system has been studied formally as a system of transport equations \cite{Price}. Here we report that it is possible to use a method similar to the one we used for the Maxwell equations. It is well-known in the literature that the Bel-Robinson tensor can be used to define a conserved, positive-definite quantity from a time-like Killing vector in the same way that the energy-momentum tensor can for the Maxwell field. In addition to the conserved energy generated this way, one can use a quantity defined in terms of the time-translation symmetry and the vector field $K$. One can again use a method of ``spin-reduction'' to introduce a ``pseudo-Maxwell tensor'', $\tilde{F}_{\alpha\beta}=W_{\alpha\beta\gamma\delta}{\hat{T}}^\gamma{\hat{R}}^\delta$, which satisfies the Maxwell equations. One can use control of the pseudo-Maxwell tensor to control the trapping terms for the spin $2$ field, and hence to control integrated norms of the spin $2$ field. One can then use the symmetries of the Schwarzschild space-time, the field equations, and Sobolev estimates to prove $t^{-1}$ pointwise-in-time decay for the non-spherically symmetric components of the spin $2$ field. Clearly this is pointless, since the dynamics of the spin $2$ field are trivial. However, we expect that a similar analysis will apply to the genuine, linearized gravity system. The linearized gravity equations are more complicated than the spin $2$ field equations because there are terms involving the perturbed Christoffel symbols contracted against the unperturbed and nonvanishing Weyl tensor.
In section \ref{sNotation}, we introduce several sets of vector fields to provide a simpler notation for discussing the null decomposition of the Maxwell field and symmetries. The null decomposition and spinor decomposition are essentially equivalent. We estimate energies in section \ref{sEnergies}. We first review the use of the energy-momentum tensor, and then use it to define an energy and a weighted, conformal energy. The growth of the conformal energy is controlled by a trapping term which depends only on the zero-weight component. These satisfy a simple wave equation, which allows us to bound the conformal energy. In section \ref{sStationaryDecay}, we use this bound and trade Lie derivatives to prove theorem \ref{tDecayInStationaryRegions}. In section \ref{sMovingDecay}, we use the bounds and integration along null geodesics to prove theorem \ref{tNearAndFarDecay}. In appendix \ref{sExclusionOfNonRadiatable}, we show that there are no spherically symmetric components of the Maxwell field which decay sufficiently rapidly at infinity. In appendix \ref{sOneDWaveAnalysis}, we analyse the wave equation governing the zero-weight component using a simplified version of the arguments in \cite{BlueSterbenz}. This simplified version does not require a spherical harmonic decomposition.
\section{Notation} \label{sNotation} The main purpose of this section is to collect various vector fields and components of the Maxwell field, so that the reader can compare the notation used in different places in this paper and elsewhere.
We begin with some simple notation. We sometimes use vectors as indices on tensors to denote the corresponding component. We use the notation $\text{\bf{g}}$ to denote the metric, $\omega=(\theta,\phi)\in S^2$ for the angular coordinate, $\not\!\nabla$ for the angular derivative, and $\Omega$ to denote the metric on $S^2$. Except in the statement of results, we use $(t,{r_*},\theta,\phi)$ coordinates, unless otherwise specified. We use $\underline{\vecX}$ to denote the one form generated by lowering a vector field $X$ with the metric. We use the insertion operation $\insertion{X}$ which takes the $(0,m)$ tensor $A$ to the $(0,m-1)$ tensor $\insertion{X}A$ defined by $\insertion{X}A(Y_1,\ldots,Y_{m-1})=A(X,Y_1,\ldots,Y_{m-1})$. We use $C$ to denote an arbitrary constant which may change from line to line in a calculation.
We will prove estimates for $t\geq0$. Because the Schwarzschild manifold is time symmetric, similar estimates hold for $t\in\mathbb R$. In particular, estimates in stationary regions will remain the same, and in ingoing and outgoing directions, $t$ and $\partial_t$ must be replaced by $-t$ and $-\partial_t$.
\subsection{Coordinates, Bases, and Field Components} \label{ssComponents} Recall the definition of the coordinates $t$, ${r_*}$, $\theta$, $\phi$, ${u_-}$, and ${u_+}$ from the introduction.
We frequently use the coordinate vector fields \begin{align*} T=&\partial_t ,& R=&\partial_{\rs} ,& \Theta=&\partial_{\theta} ,& \Phi=&\partial_{\phi}, \end{align*} and the corresponding normalized vector fields \begin{align*} {\hat{T}}=&(1-2M/r)^{-1/2}\partial_t ,& {\hat{R}}=&(1-2M/r)^{-1/2}\partial_{\rs} ,& {\hat{\Theta}}=&r^{-1}\partial_{\theta} ,& {\hat{\Phi}}=&r^{-1}\sin(\theta)^{-1}\partial_{\phi} . \end{align*} From the definition of ${r_*}$, the definition of ${\hat{R}}$ given here is the same as the one given in the introduction.
We also use null tetrads. In the standard presentation of a null tetrad, the tangent space is complexified. A null tetrad is then a basis $\{l_\text{ex}, n_\text{ex}, m, \bar{m}\}$ in which $l_\text{ex}$ and $n_\text{ex}$ are (real) null vectors, $\bar{m}$ is the complex conjugate of $m$, $\text{\bf{g}}(l_\text{ex},n_\text{ex})=-2$, $\text{\bf{g}}(m,\bar{m})=2$, and all other inner products between the basis vectors are zero. If $X$ and $Y$ are unit vectors orthogonal to $l_\text{ex}$ and $n_\text{ex}$, a null tetrad can be defined by taking $m = X+ iY$. Because of this, we will also call a basis $\{l_\text{ex}, n_\text{ex}, X, Y\}$ a null tetrad if $\text{\bf{g}}(l_\text{ex},n_\text{ex})=-2$, $\text{\bf{g}}(X,X)=\text{\bf{g}}(Y,Y)=1$, and all other inner products are zero. We will generally ignore the distinction between the two definitions of a null tetrad.
One advantage of null tetrads is that they assign weights to certain quantities. Rescaling $l_\text{ex}$ and $n_\text{ex}$ by $\lambda$ and $\lambda^{-1}$ respectively preserves the null tetrad structure, as does rescaling $m$ by $e^{is}$ (and $\bar{m}$ by the conjugate, $e^{-is}$). If, under such a change of basis, a quantity transforms as a power of $\lambda$ or of $e^{-is}$ then the corresponding powers are the conformal and spin weights of the quantity.
We use several null tetrads. The first is the ``stationary'' tetrad: \begin{align*} \hat{l}=&{\hat{T}}+{\hat{R}} = (1-2M/r)^{-1/2}(\partial_t +\partial_{\rs}) \\ \hat{n}=&{\hat{T}}-{\hat{R}} = (1-2M/r)^{-1/2}(\partial_t -\partial_{\rs}) \\ m=& {\hat{\Theta}} +i{\hat{\Phi}} = \frac1r\partial_{\theta} +\frac{i}{r\sin\theta}\partial_{\phi} \\ \bar{m}=& {\hat{\Theta}} -i{\hat{\Phi}} =\frac1r\partial_{\theta} -\frac{i}{r\sin\theta}\partial_{\phi} . \end{align*} Price \cite{Price} uses a basis which is parallelly transported along outgoing, radial, null geodesics, $\gamma(s)=(s,s+C,\theta_0,\phi_0)$ (in the $(t,{r_*},\theta,\phi)$ coordinates). The null vectors are \begin{align*} {\tilde{l}}=& (1-2M/r)^{-1}(T +R) , \\ {\tilde{n}}=& T -R , \end{align*} and the angular basis vectors remain the same. To prove theorem \ref{tNearAndFarDecay}, in section \ref{sMovingDecay}, we use a basis adapted to ingoing, radial, null geodesics. Certain expressions are simplified by using the following coordinate-like vector fields. \begin{align*} {L}=& \partial_t+\partial_{\rs} =T +R,\\ {N}=& \partial_t-\partial_{\rs}=T -R ,\\ {M}=& \Theta +\frac{i}{\sin\theta} \Phi =\partial_{\theta} +\frac{i}{\sin\theta}\partial_{\phi} . \end{align*} Christodoulou and Klainerman \cite{CK} avoid complexifying the tangent space by using an orthonormal basis tangent to the sphere at each point. We use $e_{A}$ and $e_{B}$ to denote an orthonormal basis on $S^2$. Thus, $r^{-1}e_{A}$ and $r^{-1}e_{B}$ are unit vectors in the Schwarzschild manifold. The indices $A,B,\ldots$ are used for directions tangent to the sphere. In summary, we have three null tetrads and a coordinate null basis, \begin{align} &\{\hat{l},\hat{n},m,\bar{m}\} ,\label{eStationaryTetrad}\\ &\{{\tilde{l}},{\tilde{n}},m,\bar{m}\} ,\label{ePriceTetrad} \\ &\{\hat{l},\hat{n},r^{-1}e_{A},r^{-1}e_{B}\} ,\label{eCKTetrad} \\ &\{{L},{N},{M},{\bar{M}}\} .\label{eCoordTetrad} \end{align}
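As a point of comparison, a direct computation with the metric in the form recalled above gives, for the stationary tetrad,
\begin{align*}
\text{\bf{g}}(\hat{l},\hat{n})=& \text{\bf{g}}({\hat{T}},{\hat{T}})-\text{\bf{g}}({\hat{R}},{\hat{R}})=-2 ,&
\text{\bf{g}}(m,\bar{m})=& \text{\bf{g}}({\hat{\Theta}},{\hat{\Theta}})+\text{\bf{g}}({\hat{\Phi}},{\hat{\Phi}})=2 ,
\end{align*}
while $\text{\bf{g}}({L},{N})=-2(1-2M/r)$ and $\text{\bf{g}}({M},{\bar{M}})=2r^2$, so the coordinate basis \eqref{eCoordTetrad} is null but not normalized.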
The bases can be used to define the corresponding components of the Maxwell field. The electric and magnetic decomposition was already explained in the introduction. We now introduce a null decomposition and spinor components. These are very closely related but differ in the notation and slightly in the definition. The null decomposition consists of two scalars, $\rho$ and $\sigma$, and two $1$-forms tangent to spheres, ${\alpha}$ and ${\underline{\alpha}}$. The spinor components are three complex-valued functions. These are defined in terms of the tetrad in \eqref{eCKTetrad} and in \eqref{eStationaryTetrad} by \begin{align*} {\alpha}(e_{A}) =&F(\hat{l},e_{A}) & \phi_1 =& F(\hat{l},m) \\ {\rho} =& \frac12 F(\hat{l},\hat{n}) & \phi_0 =&\frac12 (F(\hat{l},\hat{n}) +iF(\bar{m},m))\\ {\sigma} =& \frac12 F(e_{A},e_{B})\epsilon^{AB} & \\ {\underline{\alpha}}(e_{A})=&F(\hat{n},e_{A}) ,& \phi_{-1}=&F(\hat{n},\bar{m}) . \end{align*} The spin component index in $\phi_i$ refers to both the conformal and spin weight. These components are related by \begin{align*}
\phi_1=& {\alpha}(m) & |\phi_1|^2=&|{\alpha}|^2 \\
\phi_0 =& {\rho} +i{\sigma} & |\phi_0|^2 =&|{\rho}|^2+|{\sigma}|^2 \\
\phi_{-1}=& {\underline{\alpha}}(\bar{m}) & |\phi_{-1}|^2 =&|{\underline{\alpha}}|^2 . \end{align*} The null decomposition, ${\alpha}$, ${\rho}$, ${\sigma}$, and ${\underline{\alpha}}$, more accurately represents the geometric behavior of the components. The spinor notation reveals the spin and conformal weight more easily, simplifies several expressions, and suggests connections between the spin $0$ wave equation, the spin $1$ Maxwell equation, and the spin $2$ equations. We typically write expressions in terms of the spinor components but think in terms of the null decomposition.
The spinor components in \cite{Price} are slightly different from the ones we use. Since $\phi_i$ has conformal weight $i$, replacing the null tetrad \eqref{eStationaryTetrad} by \eqref{ePriceTetrad} will take $\phi_i$ to $(1-2M/r)^{i/2}\phi_i$. These are the components initially used in \cite{Price}.
The spinor components are related to the electric and magnetic components by \begin{align*} \phi_1 =&(\vec{E}_{\hat{\Theta}}+\vec{B}_{\hat{\Phi}}) + i(\vec{E}_{\hat{\Phi}}-\vec{B}_{\hat{\Theta}})\\ \phi_0 =&\vec{E}_{\hat{R}} +i\vec{B}_{\hat{R}} \\ \phi_{-1} =&(\vec{E}_{\hat{\Theta}}-\vec{B}_{\hat{\Phi}}) - i(\vec{E}_{\hat{\Phi}}+\vec{B}_{\hat{\Theta}}) .\\
|\phi_1|^2 +2|\phi_0|^2 + |\phi_{-1}|^2
=&|{\alpha}|^2+2(|{\rho}|^2+|{\sigma}|^2)+|{\underline{\alpha}}|^2
= 2(|\vec{E}|^2+|\vec{B}|^2) . \end{align*}
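For instance, the last identity can be checked directly from the component relations above:
\begin{align*}
|\phi_1|^2+|\phi_{-1}|^2 =& (\vec{E}_{\hat{\Theta}}+\vec{B}_{\hat{\Phi}})^2+(\vec{E}_{\hat{\Phi}}-\vec{B}_{\hat{\Theta}})^2 +(\vec{E}_{\hat{\Theta}}-\vec{B}_{\hat{\Phi}})^2+(\vec{E}_{\hat{\Phi}}+\vec{B}_{\hat{\Theta}})^2 \\
=& 2\left(\vec{E}_{\hat{\Theta}}^2+\vec{E}_{\hat{\Phi}}^2+\vec{B}_{\hat{\Theta}}^2+\vec{B}_{\hat{\Phi}}^2\right) ,
\end{align*}
and adding $2|\phi_0|^2=2(\vec{E}_{\hat{R}}^2+\vec{B}_{\hat{R}}^2)$ gives $2(|\vec{E}|^2+|\vec{B}|^2)$.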
Certain calculations are simplified by using the null basis \eqref{eCoordTetrad}. We define Maxwell field components associated to this null basis by \begin{align*} \Phi_{1} =& F({L},{M}) &=&r(1-2M/r)^{1/2}\phi_1 \\ \Phi_{0} =& \frac12(F({L},{N})(1-2M/r)^{-1}r^2 +F({\bar{M}},{M}) ) &=&r^2\phi_0\\ \Phi_{-1} =& F({N},{\bar{M}}) &=&r(1-2M/r)^{1/2}\phi_{-1} . \end{align*}
\newcommand{U_+}{U_+} \newcommand{U_-}{U_-} To discuss the maximally extended Schwarzschild solution in a neighborhood of the bifurcation sphere, it is typical to introduce coordinates \begin{align*} U_+=& e^{{u_+}/4M} ,\\ U_-=& -e^{-{u_-}/4M} . \end{align*} In the exterior region, these range over $U_+\in(0,\infty)$ and $U_-\in(-\infty,0)$. The coordinates $(U_+,U_-,\theta,\phi)$ can be used in a neighborhood of the bifurcation sphere, and the bifurcation sphere corresponds to $(U_+,U_-)=(0,0)$. The surface $t=0$ corresponds to $U_+U_-=-1$. This surface extends through the bifurcation sphere to the surface $t=0$ in the other exterior region. Since \begin{align*} e^\frac{{u_+}+{u_-}}{4M}=& C r e^{\frac{r}{2M}}(1-2M/r) , \end{align*} on the initial surface $t=0$, the coordinate vector fields are \begin{align*} \frac{\partial}{\partial U_+}=& C r^{1/2} e^\frac{r}{4M} (1-2M/r)^{1/2} (T +R ), \\ \frac{\partial}{\partial U_-}=& C r^{1/2} e^\frac{r}{4M} (1-2M/r)^{1/2} (T -R ). \end{align*} Thus, on the initial data surface and near the bifurcation sphere, the coordinate vector fields $\frac{\partial}{\partial U_+}$ and $\frac{\partial}{\partial U_-}$ are related to $\hat{l}$ and $\hat{n}$ by bounded, nonvanishing functions. If $\{\frac{\partial}{\partial U_+},\frac{\partial}{\partial U_-},\Theta,\Phi\}$ are used to define a tetrad, the corresponding components of the Maxwell field are equivalent to $\phi_i$. Since these vector fields are coordinate vector fields, they commute. To restrict attention to the region near the bifurcation sphere, we will often apply cut-off functions $\chi_{<0}({u_+})$ and $\chi_{<0}({u_-})$ which are smooth, identically zero for ${u_+}>1$ and ${u_-}>1$ respectively, and identically one for ${u_+}<0$ and ${u_-}<0$ respectively. The vector fields $\chi_{<0}({u_+})\frac{\partial}{\partial U_+}, \chi_{<0}({u_-})\frac{\partial}{\partial U_-},\Theta,\Phi$ still commute.
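For reference, the constant $C$ appearing in the identity $e^{({u_+}+{u_-})/4M}= C r e^{r/2M}(1-2M/r)$ can be computed from the explicit formula for ${r_*}$ (recalled below in the proof of the trapping lemma), assuming, as the identity itself requires, that ${u_+}+{u_-}=2{r_*}$:
\begin{align*}
e^{\frac{{r_*}}{2M}} =& e^{\frac{r-3M+2M\log 2}{2M}}\,\frac{r-2M}{2M} = \frac{e^{-3/2}}{M}\, r\, e^{\frac{r}{2M}}(1-2M/r) ,
\end{align*}
so $C=e^{-3/2}/M$ with this normalization of ${r_*}$.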
\newcommand{\hat{\tilde{\mathbb{X}}}}{\hat{\tilde{\mathbb{X}}}}
\subsection{Norms and Lie derivatives} \label{ssLieDerivatives} With the goal of applying derivatives to the components of the Maxwell tensor, we introduce several collections of vector fields. Since the vector fields $\Theta$ and $\Phi$ are not smooth, we use the three rotations of $S^2$ about the coordinate axes, $\Theta_i$. We treat these as vector fields on the Schwarzschild manifold. The collections of vector fields we will use are \begin{align*} \mathbb{O} =& \{ \Theta_i \} ,\\ \hat{\mathbb{O}} =& \{ r^{-1} \Theta_i \} ,\\ \mathbb{T} =& \{ T,\Theta_i \} ,\\ \mathbb{X} =& \{ R, T,\Theta_i \} ,\\ {\hat{\mathbb{X}}} =& \{ {\hat{R}}, {\hat{T}}, r^{-1} \Theta_i \} , \\ \hat{\tilde{\mathbb{X}}} =& \{ \chi_{<0}({u_+})\frac{\partial}{\partial U_+}, \chi_{<0}({u_-})\frac{\partial}{\partial U_-},\Theta_i \} . \end{align*} Since the Schwarzschild manifold is static and spherically symmetric, $\mathbb{T}$ generates symmetries of the space-time. The normalized vectors in ${\hat{\mathbb{X}}}$ are used to define the norms of the electric and magnetic components of the Maxwell tensor. (The three $\Theta_i$'s can be used to define corresponding components of the electric and magnetic field. Taken together these give the angular components.) On the initial data hypersurface $t=0$ and near the bifurcation sphere (ie, where ${r_*}<0$), the coordinate vectors in $\hat{\tilde{\mathbb{X}}}$ can also be used to define the Maxwell field components.
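For concreteness, one convenient explicit choice of the three rotation fields (any fixed choice of axes works equally well) is
\begin{align*}
\Theta_1 =& -\sin\phi\,\partial_{\theta} -\cot\theta\cos\phi\,\partial_{\phi} ,&
\Theta_2 =& \cos\phi\,\partial_{\theta} -\cot\theta\sin\phi\,\partial_{\phi} ,&
\Theta_3 =& \partial_{\phi} ,
\end{align*}
which, unlike $\Theta$ and $\Phi$ individually, extend to smooth vector fields on all of $S^2$.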
We now recall some convenient notation for discussing collections of vectors and scalar functions from \cite{CK}. For two sets of vector fields, $\mathbb{A}_i$, the covariant and Lie derivatives are \begin{align*}
\mathcal{L}_{\mathbb{A}_1} \mathbb{A}_2 =& \{ \mathcal{L}_{X_1} X_2 | X_i\in\mathbb{A}_i \} ,\\
\nabla_{\mathbb{A}_1} \mathbb{A}_2 =& \{ \nabla_{X_1} X_2 | X_i\in\mathbb{A}_i \} . \end{align*} For two such sets and a $(0,2)$ tensor $A$, the components of $A$ with respect to the vector fields are the collection of scalar functions \begin{align*}
A(\mathbb{A}_1,\mathbb{A}_2)=& \{ A(X_1,X_2) | X_i\in\mathbb{A}_i \} . \end{align*} Similarly, for a set of vectors $\mathbb{A}$ and a collection of scalar functions $\{f_i \}$, the derivatives are defined as \begin{align*} \mathcal{L}_\mathbb{A}\{f_i\} =\nabla_\mathbb{A}\{f_i\} = \mathbb{A} \{f_i\}
=& \{ X f | X\in\mathbb{A}, f\in\{f_i\} \}. \end{align*} This definition holds since the Lie, covariant, and directional derivatives are the same operation when applied to scalar functions. For tensor fields, a similar notation can be used to generate collections of tensor fields and to consider their components. For example, \begin{align*} (\mathcal{L}_{\mathbb{A}_1} A)(\mathbb{A}_2,\mathbb{A}_3)
=& \{ (\mathcal{L}_{X_1} A)(X_2,X_3) |X_i\in\mathbb{A}_i \} . \end{align*} The same can be defined for iterated Lie or covariant derivatives.
The norm of a $1$-form or a $(0,2)$ tensor with respect to a set of vector fields is \begin{align*} \TensorNorm{\underline{\vecZ}}{\mathbb{A}}
=& \sum_{X\in\mathbb{A}} |\underline{\vecZ}(X)| , \\ \TensorNorm{A}{\mathbb{A}}
=& \sum_{X,Y\in\mathbb{A}} |A(X,Y)| . \end{align*} The $n$-derivative norms of a $(0,m)$ tensor with respect to components in $\mathbb{A}_1$ and derivatives in the $\mathbb{A}_2$ directions are defined to be \begin{align*} \TensorDNorm{A}{\mathbb{A}_1}{n}{\mathbb{A}_2}^2 =& \sum_{k=0}^{n} \TensorNorm{ \mathcal{L}_{\mathbb{A}_2}^k A} {\mathbb{A}_1}^2 \\
=& \sum_{k=0}^{n} \sum_{X_1,\ldots,X_k\in\mathbb{A}_2, Y_1,\ldots,Y_m\in\mathbb{A}_1} | (\mathcal{L}_{X_k}\ldots\mathcal{L}_{X_1}A)(Y_1,\ldots,Y_m) |^2 . \end{align*} We note that this notation applies equally well to $S^2$-tangent $1$-forms, such as ${\alpha}$ and ${\underline{\alpha}}$, as to any other tensors.
\section{Control of Energies} \label{sEnergies} \subsection{Summary of the Lagrangian method} We briefly outline the Lagrangian theory for a general field $\phi$ on a manifold, $M$. It is assumed that there is a scalar Lagrangian $L[x,\phi,\nabla\phi]$ which is used to define the action, \begin{align*} S=& \int_{M} L[x,\phi,\nabla\phi] d^4x. \end{align*} If $\phi$ is a minimizer (or, more generally, a critical point) of the action, then $\phi$ will satisfy the Euler-Lagrange equation \begin{align*} \frac{\VarDerivL}{\delta\phi}-\nabla^\alpha\frac{\VarDerivL}{\delta \nabla^\alpha\phi} =&0 . \end{align*} One can then define the energy-momentum tensor \begin{align*} \text{\bf T}_{\alpha\beta} =& \frac12 \left(\nabla_{\alpha}\phi \frac{\VarDerivL}{\delta \nabla^\beta\phi} -\text{\bf{g}}_{\alpha\beta}L \right) , \end{align*} which, by the Euler-Lagrange equation, satisfies \begin{align} \nabla^{\alpha}\text{\bf T}_{\alpha\beta}=&0 . \label{eEMSDivFree} \end{align} For any vector field $X$, the generalized momentum vector $\GenMomentum{X}$ and deformation $2$-tensor $\Deformation{X}$ are \begin{align*} \GenMomentumFormg{X}=& \insertion{X} \text{\bf T} & \GenMomentum{X}_\alpha=&\text{\bf T}_{\alpha\beta}X^{\beta} \\ \Deformation{X}(Y,Z) =& (\nabla_Y\underline{\vecX})(Z)+(\nabla_Z\underline{\vecX})(Y)& \Deformation{X}_{\alpha\beta}=& \nabla_{\alpha}X_\beta + \nabla_{\beta}X_\alpha , \end{align*} which are related by Stokes' theorem \begin{align*} &&\int_{\partial\Omega} \GenMomentum{X}_{\alpha} d\nu^{\alpha} =& \frac12 \int_\Omega \Deformation{X}_{\alpha\beta}\text{\bf T}^{\alpha\beta} d^4x . \end{align*} This is particularly useful for a Killing vector field, for which $\Deformation{X}=0$.
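As a simple illustration of these formulas (a standard example included only for orientation, not the Lagrangian used below), consider a scalar field with $L=(\nabla^{\gamma}\phi)(\nabla_{\gamma}\phi)$. Then $\frac{\VarDerivL}{\delta\phi}=0$ and $\frac{\VarDerivL}{\delta\nabla^{\alpha}\phi}=2\nabla_{\alpha}\phi$, so the Euler-Lagrange equation is the wave equation $\nabla^{\alpha}\nabla_{\alpha}\phi=0$, and
\begin{align*}
\text{\bf T}_{\alpha\beta} =& \nabla_{\alpha}\phi\nabla_{\beta}\phi -\frac12 \text{\bf{g}}_{\alpha\beta}(\nabla^{\gamma}\phi)(\nabla_{\gamma}\phi) .
\end{align*}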
For any vector field $X$, we will define the corresponding energy to be the hyper-surface integral of the generalized momentum \begin{align*} \GenEnergy{X}[F](\mathcal{S}) =\int_\mathcal{S} \GenMomentum{X}_{\alpha} d\nu^{\alpha} . \end{align*} This depends on the Maxwell field $F$ through the energy-momentum tensor. Frequently, we will be interested in $t=\text{const}$ hyper-surfaces, for which we define \begin{align*} \GenEnergy{X}[F](t) =&\intTslice{t} \GenMomentum{X}_{\alpha} d\nu^{\alpha} =\intTslice{t} \GenMomentum{X}_{\alpha} {\hat{T}}^\alpha \horifac^\frac12 r^2d\rs d^2\omega . \end{align*} When the deformation tensor vanishes, by integrating over a space-time slab, one gets a conserved quantity: \begin{align*} \GenEnergy{X}[F](t_2)-\GenEnergy{X}[F](t_1) =0 . \end{align*} In applying this integration by parts, we require decay as ${r_*}\rightarrow\infty$, but merely smoothness as ${r_*}\rightarrow-\infty$, since, in the maximal extension of the Schwarzschild manifold, the hyper-surfaces of constant $t$ all tend towards the same limiting sphere, the bifurcation sphere, in the limit $r\rightarrow2M$.
\subsection{Quantitative effect of trapping} The energy-momentum tensor for the Maxwell field is \begin{align} \text{\bf T}_{\alpha\beta}=& F_{\alpha\gamma}F_\beta{}^{\gamma} -\frac14 \text{\bf{g}}_{\alpha\beta} F^{\gamma\delta}F_{\gamma\delta} \\ =& \frac12\left( F_{\alpha\gamma}F_\beta{}^{\gamma} + (*F)_{\alpha\gamma}(*F)_\beta{}^{\gamma} \right) . \label{eMaxEMS} \end{align} It satisfies \eqref{eEMSDivFree} and is trace-free \begin{align*} \text{\bf{g}}_{\alpha\beta} \text{\bf T}^{\alpha\beta}=0 . \end{align*} Formally, one may assume that the Maxwell field is generated from a vector potential $A_\alpha$ by $F=\text{d}A$, $F_{\alpha\beta}=\nabla_\alpha A_\beta-\nabla_\beta A_\alpha$ and take the Lagrangian to be $L= (1/2) F^{\gamma\delta}F_{\gamma\delta}=2(\nabla_\gamma A_\delta)(\nabla^\gamma A^\delta)$, in which case, the Lagrangian theory for the field $A$ gives the Maxwell equations \eqref{eMaxwellEquationDiv} as the Euler-Lagrange equations\footnote{The other equation, \eqref{eMaxwellEquationAlt}, holds because $\text{d}^2=0$. } and \eqref{eMaxEMS} as the energy-momentum, which satisfies \eqref{eEMSDivFree}. Unfortunately, not all Maxwell fields can be represented in this way as an exterior derivative\footnote{The ``magnetically charged solution'', $F=q_{B} \sin(\theta) d\theta\wedge d\phi$, is not an exterior derivative. See appendix \ref{sExclusionOfNonRadiatable}. }. However, by direct computation from the Maxwell equation, it follows that the energy-momentum tensor in \eqref{eMaxEMS} satisfies \eqref{eEMSDivFree} so that Stokes' theorem can still be applied.
The energy-momentum tensor is strictly positive when evaluated on time-like vectors. We will mainly be interested in time-like vectors with no angular components. Since any time-like vector with no angular component is a linear combination of $\hat{l}$ and $\hat{n}$, to show the positivity of the stress-energy tensor, it is sufficient to compute the components in these null directions. These components are \begin{align}
\text{\bf T}(\hat{l},\hat{l})=& |\phi_1|^2 , \\
\text{\bf T}(\hat{l},\hat{n})=& |\phi_0 |^2 , \\
\text{\bf T}(\hat{n},\hat{n})=& |\phi_{-1}|^2 . \label{eStressEnergyComponents} \end{align}
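In particular, the positivity claimed above can be read off from these components: for a future-directed, time-like vector with no angular components, $X=a\hat{l}+b\hat{n}$ with $a,b>0$,
\begin{align*}
\text{\bf T}(X,X) =& a^2|\phi_1|^2 +2ab|\phi_0|^2 +b^2|\phi_{-1}|^2 ,
\end{align*}
which is non-negative and vanishes only where the Maxwell field itself vanishes.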
The Schwarzschild manifold is static, so there is a conserved energy. The energy associated to the generator of $t$-translation, $\partial_t$, is strictly positive, \begin{align*} \GenEnergy{T}[F](t)
=&\frac12 \intTslice{t} \left(|\vec{E}|^2 +|\vec{B}|^2\right) \horifac r^2d\rs d^2\omega \\
=&\frac14 \intTslice{t} \left(|{\alpha}|^2 +2|{\rho}|^2 +2|{\sigma}|^2 +|{\underline{\alpha}}|^2\right) \horifac r^2d\rs d^2\omega \\
=&\frac14 \intTslice{t} \left(|\phi_1|^2 +2|\phi_0|^2 +|\phi_{-1}|^2\right) \horifac r^2d\rs d^2\omega . \end{align*} The corresponding deformation tensor is \begin{align*} \nabla\underline{\vecT} =&-(1-2M/r)^{-1}\underline{\vecT}\otimes(-\frac{M}{r^2})\underline{\vecR} +(1-2M/r)^{-1}\underline{\vecR}\otimes(-\frac{M}{r^2})\underline{\vecT} , \\ \Deformation{T} =& 0 . \end{align*} (We will need to compute deformation tensors later, but, in this case, we could simply have argued that the deformation tensor must vanish since $\partial_t$ generates a symmetry of the metric.) From the vanishing of the deformation tensor, we have a conservation law \begin{align*} \GenEnergy{T}[F](t) =&\GenEnergy{T}[F](0) . \end{align*} This immediately gives an upper bound on the average value of the components of the Maxwell tensor in any region bounded away from the event horizon.
By applying Lie derivatives, we can get additional conservation laws. If $X$ generates a symmetry and $F$ solves the Maxwell equations, then $\mathcal{L}_X F$ will also be a solution of the Maxwell equations. For each set of symmetries and integer $k$, we have the conserved quantities \begin{align*} \GenEnergy{T}[\mathcal{L}_\mathbb{O}^kF](t)=&\GenEnergy{T}[\mathcal{L}_\mathbb{O}^kF](0) ,\\ \GenEnergy{T}[\mathcal{L}_\mathbb{T}^kF](t)=&\GenEnergy{T}[\mathcal{L}_\mathbb{T}^kF](0) . \end{align*}
We now improve these estimates and reveal the effect of trapping, by considering the conformal energy. Following earlier work \cite{CK,BlueSofferLongPaper,BlueSterbenz,DafermosRodnianski}, we let \begin{align*} K=& (t^2+{r_*}^2)\partial_t +2t{r_*}\partial_{\rs} \\ =&\frac12\left({u_+}^2{L} +{u_-}^2{N} \right). \end{align*} We will call this the conformal vector field, but it is also one of the vector fields known as the Morawetz vector field. It is an analogue of a vector field used in $\mathbb R^{1+n}$ to prove decay for the wave equation, the Maxwell equation, and the spin $2$ field. The analogue in $\mathbb R^{1+n}$ generates a positive quantity, so it is not surprising that the same holds on the Schwarzschild manifold. We define the conformal energy to be \begin{align} \GenEnergy{K}[F](t) =& \intTslice{t} \GenMomentum{K}_\alpha d\nu^{\alpha} \nonumber\\
=& \intTslice{t} \left((1/2)(t^2+{r_*}^2)(|\vec{E}|^2+|\vec{B}|^2) +2t{r_*}(\vec{E}_{\hat{\Theta}}\vec{B}_{\hat{\Phi}}-\vec{E}_{\hat{\Phi}}\vec{B}_{\hat{\Theta}})\right) \horifac r^2d\rs d^2\omega\nonumber \\ =& (1/4)\intTslice{t} \left(
(t+{r_*})^2|{\alpha}|^2 +2(t^2+{r_*}^2)(|{\rho}|^2+|{\sigma}|^2) +(t-{r_*})^2|{\underline{\alpha}}|^2\right) \horifac r^2d\rs d^2\omega \nonumber \\
=& (1/4)\intTslice{t} \left( {u_+}^2|\phi_1|^2 + ({u_+}^2+{u_-}^2)|\phi_0|^2 +{u_-}^2|\phi_{-1}|^2\right) \horifac r^2d\rs d^2\omega . \nonumber \end{align}
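For completeness, the equality of the two expressions for $K$ given above is immediate from expanding the squares (using ${u_+}^2=(t+{r_*})^2$ and ${u_-}^2=(t-{r_*})^2$):
\begin{align*}
\frac12\left({u_+}^2{L} +{u_-}^2{N} \right) =& \frac12\left({u_+}^2+{u_-}^2\right)\partial_t +\frac12\left({u_+}^2-{u_-}^2\right)\partial_{\rs} = (t^2+{r_*}^2)\partial_t +2t{r_*}\partial_{\rs} .
\end{align*}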
In the null decomposition or spinor representation, all the terms in the integrand are non-negative, and, inside the light-cone $|{r_*}|<(1-\epsilon)t$, the coefficients on the Maxwell field components grow like $t^2$. Thus, once we show that the conformal energy is bounded, there will be decay for the localized field components.
The following lemma gives an almost-conservation law for the conformal energy. It states that, to bound the conformal energy, it is sufficient to prove sufficiently strong decay in a particular region bounded away from the event horizon. There are two important observations to make from this lemma and its proof: (i) an estimate for the two field components $\vec{E}_{\hat{R}}$ and $\vec{B}_{\hat{R}}$ will control all the field components through the conformal charge, and (ii) it is sufficient to control these field components only in a region near the photon sphere $r=3M$.
\begin{lemma}[Trapping lemma] There is a positive function $\chi_{\text{trap}}$ supported in a bounded range of ${r_*}$ values such that if $F$ is a solution to the Maxwell equations \eqref{eMaxwellEquationDiv}-\eqref{eMaxwellEquationAlt}, then \begin{align} \GenEnergy{K}[F](t_2)-\GenEnergy{K}[F](t_1) \leq& \intTslab{t_1}{t_2} t\chi_{\text{trap}} \left(\vec{E}_{\hat{R}}^2 +\vec{B}_{\hat{R}}^2\right) \horifac r^2 d\rs d^2\omega dt \nonumber\\
\leq& \intTslab{t_1}{t_2} t\chi_{\text{trap}} |\phi_0|^2 \horifac r^2 d\rs d^2\omega dt . \label{eTrappingEstimate} \end{align} \begin{proof} The deformation tensor for $K$ is given by \begin{align*} \nabla\underline{\vecR}
=&r^{-1}(1-2M/r)\text{\bf{g}} - r^{-1}(1-3M/r)(-\underline{\vecTunit}\otimes\underline{\vecTunit}+\underline{\vecRunit}\otimes\underline{\vecRunit} ) ,\\ \nabla\underline{K} =&(t^2+{r_*}^2)\nabla\underline{\vecT} + 2t{r_*}\nabla\underline{\vecR} \nonumber\\ &-(1-2M/r)^{-1}\underline{\vecT}\otimes 2t\underline{\vecT} +(1-2M/r)^{-1}\underline{\vecR}\otimes 2{r_*}\underline{\vecT}\nonumber\\ &-(1-2M/r)^{-1}\underline{\vecT}\otimes 2{r_*}\underline{\vecR} +(1-2M/r)^{-1}\underline{\vecR}\otimes 2t\underline{\vecR} ,\\ \Deformation{K} =&2t{r_*} \Deformation{R} +4t (-\underline{\vecTunit}\otimes\underline{\vecTunit} +\underline{\vecRunit}\otimes\underline{\vecRunit})\nonumber\\ =&4t\frac{{r_*}}{r}(1-2M/r)\text{\bf{g}} + 4t\left(1-\frac{{r_*}}{r}(1-3M/r)\right)(-\underline{\vecTunit}\otimes\underline{\vecTunit}+\underline{\vecRunit}\otimes\underline{\vecRunit} ) . \end{align*} Because the Maxwell energy-momentum tensor is trace-free, the contraction of the first term against $\text{\bf T}$ is zero at each point. The importance of $r=3M$, where the orbiting geodesics are located, is immediate from the second term. The contraction against the energy-momentum tensor is \begin{align*} \Deformation{K}_{\alpha\beta}\text{\bf T}^{\alpha\beta} =& 4t\left(\frac{{r_*}}{r}(1-3M/r)-1\right)(\text{\bf T}_{{\hat{T}}{\hat{T}}}-\text{\bf T}_{{\hat{R}}{\hat{R}}}) , \\ =& 4t\left(\frac{{r_*}}{r}(1-3M/r)-1\right) \text{\bf T}(\hat{l},\hat{n}) .
\end{align*} From this, we have the following almost-conservation law \begin{align} \GenEnergy{K}[F](t_2)-\GenEnergy{K}[F](t_1) =& 2\intTslab{t_1}{t_2} t\left(1-\frac{{r_*}}{r}(1-3M/r)\right) \left(\vec{E}_{\hat{R}}^2 +\vec{B}_{\hat{R}}^2\right) \horifac r^2 d\rs d^2\omega dt \nonumber \\
=& 2\intTslab{t_1}{t_2} t\left(1-\frac{{r_*}}{r}(1-3M/r)\right) |\phi_0|^2 \horifac r^2 d\rs d^2\omega dt . \label{eMaxConfEstimate} \end{align} We refer to $1-\frac{{r_*}}{r}(1-3M/r)$ as the trapping term.
For $r\rightarrow 2M$, ${r_*}\rightarrow-\infty$ and $1-3M/r\rightarrow -1/2$, so $1-(1-3M/r){r_*}/r$ is negative. The explicit expression for ${r_*}$ in terms of $r$ is \begin{align*} {r_*}=& r +2M\log\left(\frac{r-2M}{2M}\right) - 3M +2M\log2 . \end{align*} Because of the logarithmic term, as $r\rightarrow\infty$, $(1-\frac{{r_*}}{r}(1-3M/r))=(r-{r_*})/r +O(1/r)$ and $r-{r_*}=-2M\log(r)+O(1)$, so the trapping term is negative for sufficiently large $r$. Since the trapping term is negative near both ends of the range of ${r_*}$, it is positive only in a compact interval.
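We note, in passing, two consequences of this normalization of ${r_*}$ that make the role of the photon sphere explicit: ${r_*}=0$ exactly at $r=3M$, and the trapping term equals $1$ there,
\begin{align*}
{r_*}\big|_{r=3M} =& 3M +2M\log\tfrac12 -3M +2M\log2 = 0 ,&
\left(1-\frac{{r_*}}{r}(1-3M/r)\right)\Big|_{r=3M} =& 1 ,
\end{align*}
so the compact set on which the trapping term is positive contains the photon sphere $r=3M$.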
We now introduce a smooth, non-negative, compactly supported function $\chi_{\text{trap}}$ which dominates the trapping term. This function is chosen to satisfy $2\left(1-(1-3M/r){r_*}/r \right)\leq\chi_{\text{trap}}$ wherever the left-hand side is positive. This gives the desired result. \end{proof} \end{lemma}
\subsection{Spin reduction} In this section, we obtain a decay result for the zero-weight component. From the previous section, we know this is enough to control energies involving all the components. It is known that the evolution of the zero-weight component can be determined from a wave equation without referring to the other components. Thus, we can reduce the problem from the Maxwell equations to a wave equation. Since physicists refer to wave equations as spin $0$ equations and the system of the Maxwell equations as a spin $1$ system, we use ``spin reduction'' to refer to this reduction.
The Maxwell equations can be written as a fairly simple system in terms of the null coordinate bases and the corresponding components. This is a result due to Price \cite{Price}, although, he uses a null tetrad, which makes the corresponding expressions look significantly different. By direct computation and application of the Maxwell equations, \begin{align} {N}\Phi_{1} =& {M} \Phi_{0} (1-2M/r) r^{-2} ,\label{ePricei}\\ {L}\Phi_{0} =& {\bar{M}}\Phi_{1} +\cot\theta \Phi_{1} ,\label{ePriceii}\\ {N}\Phi_{0} =& -{M}\Phi_{-1} -\cot\theta \Phi_{-1} ,\label{ePriceiii}\\ {L}\Phi_{-1} =& -{\bar{M}} \Phi_{0} (1-2M/r) r^{-2} . \label{ePriceiv} \end{align}
We refer to these as the ``Price equations''.
The cotangent terms appear to be singular; however, if $\Phi_{1}$ is treated as spherical $1$-forms, then the combination of the angular derivative and the cotangent term can be written simply as \begin{align} {\bar{M}}\Phi_{1} +\cot\theta \Phi_{1} =& (1-2M/r)^{-1/2}\left(\text{div}{\alpha} + i\text{curl}{\alpha} \right), \\ {M}\Phi_{-1} +\cot\theta \Phi_{-1} =& (1-2M/r)^{-1/2}\left(\text{div}{\underline{\alpha}} - i\text{curl}{\underline{\alpha}} \right), \label{eMPlusCotIsDivCurl} \end{align} where $\text{div}$ and $\text{curl}$ are the spherical divergence and curl. If we'd defined a coordinate based null decomposition $A(e_{A})=F({L},e_{A})=(1-2M/r)^{-1/2}{\alpha}(e_{A})$, then we'd have exactly ${\bar{M}}\Phi_{1} +\cot\theta \Phi_{1} = \text{div} A +i\text{curl} A$, and similarly for the other components. One important consequence of this is that the right-hand sides of \eqref{ePriceii} and \eqref{ePriceiii} are controlled by \begin{align*}
|{\bar{M}}\Phi_{1} +\cot\theta \Phi_{1}|
+|{M}\Phi_{-1} +\cot\theta \Phi_{-1}| \leq& (1-2M/r)^{-1/2} r \TensorDNorm{F}{{\hat{\mathbb{X}}}}{1}{\mathbb{O}} . \end{align*}
Another important consequence of the Price equations is that the zero weight term satisfies a wave equation. From \eqref{ePriceiii} and \eqref{ePriceiv}, \begin{align*} {L}{N} \Phi_{0} =& ({M}+\cot\theta){\bar{M}}\Phi_{0} (1-2M/r) r^{-2} ,\\ -\partial_t^2 \Phi_{0} =& -\partial_{\rs}^2\Phi_{0} +r^{-2}(1-2M/r)(-\Delta_{S^2})\Phi_{0} . \end{align*} If there were an additional $(2M/r^3)(1-2M/r) \Phi_{0}$ term on the right, then $\Phi_{0}$ would be a solution to the wave equation on the Schwarzschild manifold, $\nabla^\alpha\nabla_\alpha (r^{-1}\Phi_{0})$. Even in the absence of this term, the previous analysis of wave equations is sufficiently general to apply to a wave equation of this form \cite{BlueSterbenz}. In fact, the wave equation under consideration is simpler than the true wave equation\footnote{If $\nabla^\alpha\nabla_\alpha(r^{-1}u)=0$, then $u$ satisfies $-\partial_t^2u= -\partial_{\rs}^2u +Vu +V_L(-\Delta_{S^2})u$ with $V=2M r^{-3} (1-2M/r)$, thus, the equation governing $\Phi_{0}$ is closer to $\nabla^\alpha\nabla_\alpha(r^{-1}u)=0$ than to $\nabla^\alpha\nabla_\alpha u=0$. } $\nabla^\alpha\nabla_\alpha(r^{-1}u)=0$, and, in appendix \ref{sOneDWaveAnalysis}, we provide a stream-lined version of the method from \cite{BlueSterbenz}.
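The passage from the Price equations to this wave equation rests on the fact that, acting on scalar functions such as $\Phi_{0}$, the angular operators combine into the spherical Laplacian: in coordinates, the mixed $\partial_{\theta}\partial_{\phi}$ terms and the first-order $\partial_{\phi}$ terms cancel, leaving
\begin{align*}
({M}+\cot\theta){\bar{M}} u =& \partial_{\theta}^2 u +\cot\theta\,\partial_{\theta} u +\frac{1}{\sin^2\theta}\partial_{\phi}^2 u = \Delta_{S^2} u .
\end{align*}
Since ${L}$ commutes with ${M}+\cot\theta$, and ${M}+\cot\theta$ passes through the radial factor $(1-2M/r)r^{-2}$, applying ${L}{N}=\partial_t^2-\partial_{\rs}^2$ gives the equation displayed above.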
For solutions to a wave equation, there are estimates on the weighted space-time integral we need to control for the conformal estimate. If $u$ is a solution to \begin{align} -\partial_t^2u=& -\partial_{\rs}^2u + V_L(-\Delta_{S^2})u \label{ePriceWaveEqn} \end{align} with \begin{align*} V_L=r^{-2}(1-2M/r) , \end{align*} then the energy and conformal charge are defined in terms of their densities by \begin{align*}
e=& |\partial_t u|^2 + |\partial_{\rs} u|^2 + V_L |\not\!\nabla u|^2 ,\\
e_\mathcal{C}=& \frac14 |(t+{r_*})(\partial_t+\partial_{\rs})u|^2
+\frac14|(t-{r_*})(\partial_t-\partial_{\rs})u|^2
+\frac12 (t^2+{r_*}^2)V_L|\not\!\nabla u|^2 +e ,\\ E[u](t)=& \frac12 \intTslice{t} e d\rs d^2\omega ,\\ E_\mathcal{C}[u](t)=& \frac12 \intTslice{t} e_\mathcal{C} d\rs d^2\omega . \end{align*} The energy and conformal energy are generated from $T$ and $K$. (In fact, the conformal energy is generated by $K+T$ to provide better control at time $t$ near $0$.) Since $T$ generates a symmetry, the energy is conserved. As with the conformal energy for the Maxwell field, the conformal energy is not conserved, and the energy density near the photon sphere $r=3M$ must be controlled. The important results for this discussion are that, at any time $t>0$, $k\in\mathbb N$, and any compactly supported function $\chi$, \begin{align} E[u](t)=&E[u](0) ,\nonumber\\ E[\not\!\nabla^ku](t)=&E[\not\!\nabla^ku](0) ,\nonumber\\ E_\mathcal{C}[u](t)\leq& E_\mathcal{C}[u](0) + C E[\Delta_{S^2}^2u](0) ,\nonumber\\
\intTslabInfinity{0} \frac{|u|^2}{(1+{r_*}^2)^2} d\rs d^2\omega dt \leq& E[u](0) ,\label{eWaveLocalDecay} \\
\intTslabInfinity{0} t \chi |\not\!\nabla u|^2 d\rs d^2\omega dt \leq& C E_\mathcal{C}[u](0) +E[\Delta_{S^2}^2u](0). \nonumber \end{align} In appendix \ref{sExclusionOfNonRadiatable}, we exclude spherically symmetric harmonics, so, from dropping the angular derivatives on the left-hand side of the previous estimate, \begin{align}
\intTslabInfinity{0} t \chi_{\text{trap}}|u|^2 \horifac r^2 d\rs d^2\omega dt \leq& C\left( E_\mathcal{C}[u](0) + C E[\Delta_{S^2}^2u](0) \right).\label{eWaveEqnTrappingEstimate} \end{align}
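The step of ``dropping the angular derivatives'' is the standard Poincar\'e inequality on the sphere: once the spherically symmetric part is excluded, expanding $u=\sum u_{\ell m}Y_{\ell m}$ in spherical harmonics gives
\begin{align*}
\int_{S^2} |\not\!\nabla u|^2 d^2\omega =& \sum_{\ell\geq1}\sum_{m} \ell(\ell+1) |u_{\ell m}|^2 \geq 2 \sum_{\ell\geq1}\sum_{m} |u_{\ell m}|^2 = 2\int_{S^2} |u|^2 d^2\omega ,
\end{align*}
so the $|u|^2$ on the left-hand side of \eqref{eWaveEqnTrappingEstimate} is controlled by $|\not\!\nabla u|^2$.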
We apply this with $u=\Phi_{0}$ in the following lemma.
\begin{lemma} \label{lSurfaceEnergyBound} If $F$ is a solution to the Maxwell equations \eqref{eMaxwellEquationDiv}-\eqref{eMaxwellEquationAlt} and $n\geq0$, then \begin{align} \GenEnergy{K}[\mathcal{L}_\mathbb{T}^n F](t) \leq& C \left(\normKangN{n+1}+\normTangN{n+5}\right) , \label{eConformalEnergyBound}\\
\intTslabInfinity{0} t\chi_{\text{trap}} |\phi_0|^2 \horifac r^2 d\rs d^2\omega dt \leq& C\left( \sum_{k=0}^1\GenEnergy{K}[\mathcal{L}_\mathbb{O}^k F](0) +\sum_{k=0}^{5}\GenEnergy{T}[\mathcal{L}_\mathbb{O}^k F](0) \right) . \label{eGlobalConfEstimate} \end{align} Furthermore, if the $\hat{l}$ and $\hat{n}$ components of the normal to $\mathcal{S}$ are uniformly bounded below, then \begin{align*}
\int_\mathcal{S} \left( |\phi_1|^2 +2|\phi_0|^2 +|\phi_{-1}|^2\right) \horifac r^2d\rs d^2\omega \leq& C \max_\mathcal{S}({u_-}^{-2},{u_+}^{-2}) \left(\normKangN{1}+\normTangN{5}\right) . \end{align*} \begin{proof} Taking $\Phi_{0}=u$, the energy associated to the Maxwell field $F$ and that of the scalar wave $u$ are closely related. From the Price equations \eqref{ePricei}-\eqref{ePriceiv} and the geometric interpretation of the ${M}+\cot\theta$ terms in \eqref{eMPlusCotIsDivCurl}, \begin{align} E[u](t)=& \GenEnergy{T}[\mathcal{L}_\mathbb{O} F](t) , \label{ePrivevsTrueEnergy}\\ E[\Delta_{S^2}^2u](t)=& \GenEnergy{T}[\mathcal{L}_\mathbb{O}^5 F](t) , \label{ePrivevsTrueEnergyDeriv}\\ E_\mathcal{C}[u](t)=& \GenEnergy{K}[\mathcal{L}_\mathbb{O} F](t) \label{ePrivevsTrueConfEnergy}. \end{align} Estimate \eqref{eWaveEqnTrappingEstimate} can be written as \begin{align*}
\intTslabInfinity{0} t\chi_{\text{trap}} |\phi_0|^2 \horifac r^2d\rs d^2\omega \leq& C( \GenEnergy{K}[\mathcal{L}_\mathbb{O} F](0) +\sum_{k=0}^{5}\GenEnergy{T}[\mathcal{L}_\mathbb{O}^k F](0) ) . \end{align*} From this estimate and the trapping estimate \eqref{eTrappingEstimate}, the estimate \eqref{eConformalEnergyBound} follows. If one of the derivatives in \eqref{eConformalEnergyBound} is in the angular direction instead of the time direction, then it would not be necessary to drop the angular derivative in \eqref{eWaveEqnTrappingEstimate}, and only $n$ and $n+4$ derivatives would be needed on the $K$ and $T$ energies respectively.
Since the integral of the trapping term has been controlled over the entire exterior of the Schwarzschild manifold, we have a uniform bound on the integral of $\GenMomentum{K}$ over any hyper-surface. If the $\hat{l}$ and $\hat{n}$ components of the normal to the hyper-surface are uniformly bounded below, then the integrand is bounded below by $C({u_+}^2|\phi_1|^2 +({u_-}^2+{u_+}^2)|\phi_0|^2 +{u_-}^2|\phi_{-1}|^2)$. This provides the final estimate. \end{proof} \end{lemma}
We remark that from equations \eqref{ePrivevsTrueEnergy}-\eqref{ePrivevsTrueConfEnergy}, we could have bounded the energies $\GenEnergy{T}[\mathcal{L}_\mathbb{O} F](t)$ and $\GenEnergy{K}[\mathcal{L}_\mathbb{O} F](t)$ by immediately appealing to results for the wave equation. However, we would still need to present the energy and conformal energy for the spin $0$ wave equation and for the spin $1$ Maxwell equation, relate them, and present the Price equations. While it would have been possible for us to omit the Lagrangian theory and the trapping lemma, this would have removed the motivation for considering the energy and conformal energy.
\section{Pointwise decay in stationary regions} \label{sStationaryDecay} In this section, our goal is to prove $L^\infty$ decay in regions where $2M<r_1<r<r_2$. We refer to these as stationary regions since the range of the radial coordinate does not change in $t$. Restricting attention to a stationary region, the integrand in the conformal energy behaves like $t^2$ times the Maxwell field components squared. Since the conformal energy is bounded, the field components decay in $L^2_{\text{loc}}$ like $t^{-1}$.
Control on radial derivatives is the main thing that we need to improve this from decay in mean to pointwise decay. Sobolev estimates can be used to convert $L^2_{\text{loc}}$ decay for derivatives into $L^\infty_{\text{loc}}$ decay. For this, we need decay on the spatial derivatives of the Maxwell field. From spherical symmetry, the Lie derivative of the Maxwell field in the direction of an angular derivative, $\mathcal{L}_{\Theta_i}F$, also satisfies the Maxwell equations and has the same type of decay in mean as $F$. Since $R$ does not generate a symmetry, the Lie derivative in that direction will not solve the Maxwell equations.
To control the radial derivatives, we use the structure of the Maxwell equations. Using the staticity of the Schwarzschild manifold, we can control $t$ derivatives, $\mathcal{L}_{T}F$, in $L^2_{\text{loc}}$. In a fixed, compact range of $r$ values, the covariant derivatives of the basis vector fields are bounded by finite multiples of the basis vector fields themselves. We are working in $L^2$, where we already control all the components. Thus, we control the difference between the components of the covariant derivative of the Maxwell tensor and the covariant derivative of its components (i.e., $\nabla_\alpha F_{\beta\gamma}X^{\beta}Y^{\gamma}\sim \nabla_\alpha(F_{\beta\gamma}X^{\beta}Y^{\gamma})$).
The notation in subsection \ref{ssLieDerivatives} can be used to define a ``big-$O$'' notation to estimate the difference between two functions depending on position and a tensor field. We say a function of position and a $(0,2)$-tensor field is equal to another such function up to norm terms and in an interval, if, on any bounded interval of ${r_*}$ values, there is a constant such that, for any $(0,2)$ tensor, the difference between the two functions is bounded by a constant times the norm of the tensor \begin{align*} f=&h +\ErrorTermsOf{A} &\Longleftrightarrow&
&|f(t,{r_*},\theta,\phi,A) - h(t,{r_*},\theta,\phi,A)| \leq& C\TensorNorm{A}{\mathbb{X}} . \end{align*} Similarly, for two collections of such functions, we say \begin{align*} \{ f \} =& \{ h \} +\ErrorTermsOf{A} \end{align*} if for each $f$ there is an $h$ such that $f=h+\ErrorTermsOf{A}$ and vice versa. We say \begin{align*} \{ f \} \lesssim& \{ h \} +\ErrorTermsOf{A} \end{align*}
if, for any bounded interval in ${r_*}$, there is a constant $C$ such that each $|f|$ is bounded by $C$ times the sum of the absolute values of the $h$'s plus $\ErrorTermsOf{A}$ terms. We make similar definitions involving $\ErrorTermskOf{A}{k}$.
The big-$O$ notation used here is local to compact intervals, which allows us to ignore the difference between normalized and unnormalized vector fields, \begin{align*} \TensorNorm{A}{{\hat{\mathbb{X}}}}=&\ErrorTermsOf{A} ,\\ \TensorNorm{A}{\mathbb{X}}=&\BigO{\TensorNorm{A}{{\hat{\mathbb{X}}}}} . \end{align*}
This notation allows us to prove theorem \ref{tDecayInStationaryRegions}. As outlined at the beginning of this section, our strategy in the proof is to use the Maxwell equations to trade derivatives along the generators of symmetries for spatial derivatives and then to apply Sobolev estimates. In doing this, we use the big-$O$ notation to estimate error terms generated by converting between Lie and covariant derivatives. This allows us to improve our decay estimates from decay in mean to pointwise decay. Here, we state the estimate in terms of the spinor components and give the norms explicitly. Clearly the same result holds for $|\vec{E}|+|\vec{B}|$ or $|{\alpha}|+|{\rho}|+|{\sigma}|+|{\underline{\alpha}}|$.
\begin{theorem} \label{tDecayInStationaryRegionsInText} Let $2M<r_1<r_2<\infty$. There is a constant $C_{(r_1,r_2)}$ such that if $F$ is a solution of the Maxwell equations \eqref{eMaxwellEquationDiv}-\eqref{eMaxwellEquationAlt}, then for all $t\in\mathbb R$, $r\in[r_1,r_2]$, and $(\theta,\phi)\in S^2$, \begin{align*}
|\phi_1|+|\phi_0|+|\phi_{-1}| \leq& C_{(r_1,r_2)} t^{-1} \left(\normKangN{4} +\normTangN{8}\right)^{1/2} . \end{align*} \begin{proof} Using the big-$O$ notation, we can control the difference between the derivative of a component of the Maxwell field and the corresponding component of the Lie derivative. Since the Lie derivative of any vector field with respect to any other is a linear combination of the coordinate vector fields with smooth coefficients, \begin{align*} \mathcal{L}_\mathbb{X} (F(\mathbb{X},\mathbb{X})) =& (\mathcal{L}_\NiceBasisSetF)(\mathbb{X},\mathbb{X}) + \ErrorTermsOf{\MaxF} . \end{align*} This process can be iterated, so that \begin{align*} \mathcal{L}_{\mathbb{T}}^k(F(\mathbb{X},\mathbb{X})) =& \ErrorTermsk{k} . \end{align*} Similarly for covariant derivatives, \begin{align*} \mathbb{X} (F(\mathbb{X},\mathbb{X})) =& (\nabla_\NiceBasisSetF)(\mathbb{X},\mathbb{X}) + \ErrorTermsOf{\MaxF} . \end{align*} We note that if we had applied two symmetry-generating derivatives before making the estimate we would have \begin{align*} \mathcal{L}_\mathbb{T}\mathcal{L}_\mathbb{T}( \mathbb{X} (F(\mathbb{X},\mathbb{X})) ) =& \mathcal{L}_\mathbb{T}\mathcal{L}_\mathbb{T} ((\nabla_\NiceBasisSetF)(\mathbb{X},\mathbb{X}) ) + \ErrorTermsk{2} , \end{align*} and similarly with the Lie derivative replacing the covariant derivative in $\mathbb{X}$.
To control the radial derivative of components which have no $R$ arguments, we use \eqref{eMaxwellEquationAlt}, \begin{align*} \mathcal{L}_R \mathcal{L}_\mathbb{T}\mathcal{L}_\mathbb{T} (F(\mathbb{T},\mathbb{T})) =& \mathcal{L}_\mathbb{T}\mathcal{L}_\mathbb{T} \mathcal{L}_R(F(\mathbb{T},\mathbb{T})) \\ =& \mathcal{L}_\mathbb{T}\mathcal{L}_\mathbb{T} \nabla_R(F(\mathbb{T},\mathbb{T})) \\ =& \mathcal{L}_\mathbb{T}\mathcal{L}_\mathbb{T} ((\nabla_RF)(\mathbb{T},\mathbb{T})) + \ErrorTermsk{2} \\ =& \mathcal{L}_\mathbb{T}\mathcal{L}_\mathbb{T} ((\nabla_\SymGenF)(R,\mathbb{T})) + \ErrorTermsk{2} \\ =& \mathcal{L}_\mathbb{T}\mathcal{L}_\mathbb{T} \nabla_\mathbb{T}(F(R,\mathbb{T})) + \ErrorTermsk{2} \\ =& \mathcal{L}_\mathbb{T}\mathcal{L}_\mathbb{T} \mathcal{L}_\mathbb{T}(F(R,\mathbb{T})) + \ErrorTermsk{2} \\
=& \ErrorTermsk{3} . \end{align*} Similarly, to gain control of components with one radial argument, we apply \eqref{eMaxwellEquationDiv}, \begin{align*} \mathcal{L}_R \mathcal{L}_\mathbb{T}\mathcal{L}_\mathbb{T} (F(R,\mathbb{T})) \lesssim& \mathcal{L}_\mathbb{T}\mathcal{L}_\mathbb{T} ((\nabla_{\hat{R}}F)({\hat{R}},\mathbb{T})) + \ErrorTermsk{2} \\ \lesssim& \mathcal{L}_\mathbb{T}\mathcal{L}_\mathbb{T} ((\nabla_\SymGenF)(\mathbb{T},\mathbb{T})) + \ErrorTermsk{2} \\ =& \ErrorTermsk{3} . \end{align*} Since $F$ is antisymmetric, there is no need to control components with two $R$ arguments.
Control of triple derivative terms, of the form $\mathcal{L}_\mathbb{X}\mathcal{L}_\mathbb{T}\mathcal{L}_\mathbb{T} (F(\mathbb{X},\mathbb{X}))$, is sufficient to prove $L^\infty$ decay. From the boundedness of the conformal charge, for any interval $[r_1,r_2]$ in the exterior, there is a constant $C$, such that (with ${{r_*}}_1$ and ${{r_*}}_2$ the values of ${r_*}$ corresponding to $r=r_1$ and $r=r_2$) \begin{align*} \intTBox{t}{{{r_*}}_1}{{{r_*}}_2} t^2 \TensorNorm{F}{\mathbb{X}}^2 d\rs d^2\omega \leq& C\GenEnergy{K}[F] . \end{align*} A local, inhomogeneous, $1$-dimensional Sobolev estimate gives \begin{align*}
\int_{\{t\}\times\{{r_*}\}\times S^2} |\mathcal{L}_\mathbb{T}\mathcal{L}_\mathbb{T} (F(\mathbb{X},\mathbb{X}))|^2 d\omega
\leq& C \intTBox{t}{{{r_*}}_1}{{{r_*}}_2} |\mathcal{L}_R \mathcal{L}_\mathbb{T}\mathcal{L}_\mathbb{T} (F(\mathbb{X},\mathbb{X}))|^2 +|\mathcal{L}_\mathbb{T}\mathcal{L}_\mathbb{T} (F(\mathbb{X},\mathbb{X}))|^2 d\rs d^2\omega \\ \leq& C \intTBox{t}{{{r_*}}_1}{{{r_*}}_2} \TensorDSymNorm{F}{3}^2 d\rs d^2\omega \\ \leq& Ct^{-2} \sum_{k=0}^{3}\GenEnergy{K}[\mathcal{L}_\mathbb{T}^kF](t) .
|F(\mathbb{X},\mathbb{X})(t,{r_*},\theta,\phi) | \leq& Ct^{-1} \left(\sum_{k=0}^{3}\GenEnergy{K}[\mathcal{L}_\mathbb{T}^k F](t)\right)^\frac12 . \end{align*}
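For the reader's convenience, the two Sobolev estimates used here are standard and are stated in the following (non-sharp) form: for a function $f$ on the slice and ${r_*}\in[{{r_*}}_1,{{r_*}}_2]$,
\begin{align*}
|f({r_*},\omega)|^2 \leq& C \int_{{{r_*}}_1}^{{{r_*}}_2} \left(|\partial_{\rs} f|^2 +|f|^2\right) d{r_*}' ,&
\sup_{S^2}|f|^2 \leq& C \sum_{k=0}^{2}\int_{S^2} |\not\!\nabla^k f|^2 d^2\omega ,
\end{align*}
applied first in the radial direction and then on the spheres.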
By lemma \ref{lSurfaceEnergyBound}, the conformal energy at any time is bounded by the initial data (with extra derivatives). This gives, in any stationary region away from the event horizon, that the components decay like $t^{-1}$. \end{proof} \end{theorem}
\section{Decay outside stationary regions} \label{sMovingDecay}
In this section, we prove decay for the field components outside of stationary regions. In Minkowski space $\mathbb R^{1+3}$, it is typical to obtain decay estimates in the regions $|\vec{x}|<(1-\epsilon)t$ and $|\vec{x}|>(1-\epsilon)t$. Because boosts are not symmetries of the Schwarzschild solution, decay in a stationary region is different from decay along outgoing curves ${r_*}\sim(1-\epsilon) t$. Similarly, since there is no reflection symmetry ${r_*}\rightarrow-{r_*}$, the decay rates in the regions ${r_*}>0$ and ${r_*}<0$ are different. Thus, we obtain decay in stationary regions, outgoing regions, and ingoing regions.
Most of the decay estimates in this section are proven by considering the energy on ingoing or outgoing null hyper-surfaces. We will use $\NullInSfc{{u_+}}$ and $\NullOutSfc{{u_-}}$ to refer to ingoing and outgoing null hyper-surfaces on which ${u_+}$ and ${u_-}$ are constant respectively. We will restrict these to the future $t\geq0$. To make estimates on $\NullOutSfc{{u_-}}$, we can introduce a parameter $t_1$ and an approximate surface which extends along the hyper-surface $t=0$ from the bifurcation sphere (${r_*}\rightarrow-\infty)$ to the intersection of $t=0$ with $\NullOutSfc{{u_-}}$, extends along $\NullOutSfc{{u_-}}$ until $t=t_1$, and finally continues onto space-like infinity along $t=t_1$. Since the deformation tensor for $T$ is zero, the surface integral of the generalized momentum $\GenMomentum{T}$ along this surface will be the same as the integral along $t=0$. Similarly, since estimate \eqref{eGlobalConfEstimate} says that the integral over the entire exterior region of the positive part of the $K$ deformation tensor is bounded, the surface integral of the generalized momentum $\GenMomentum{K}$ over the approximating surface is bounded by the initial data. Dropping the positive contribution from integrating along $t=0$ and $t=t_1$ and taking the limit as $t_1\rightarrow\infty$, \begin{align*}
\int_\NullOutSfc{\uin} \GenMomentum{K}_\alpha d\nu^\alpha \leq& C\left(\normKangN{1}+\normTangN{5}\right) . \end{align*} A similar argument can be made for $\NullInSfc{{u_+}}$. Since $\NullOutSfc{{u_-}}$ and $\NullInSfc{{u_+}}$ are null surfaces, we cannot apply the last part of lemma \ref{lSurfaceEnergyBound}.
These integrals can be expanded in terms of the Maxwell field components. \begin{align*}
\int_\NullInSfc{\uout} \left({u_+}^2|\phi_0|^2 +{u_-}^2|\phi_{-1}|^2\right) (1-2M/r) r^2d\uin d^2\omega =& \int_\NullInSfc{\uout} \GenMomentum{K}_\alpha d\nu^\alpha,\\
\int_\NullOutSfc{\uin} \left({u_-}^2|\phi_0|^2 +{u_+}^2|\phi_1|^2\right) (1-2M/r) r^2d\uout d^2\omega =& \int_\NullOutSfc{\uin} \GenMomentum{K}_\alpha d\nu^\alpha . \end{align*}
To obtain estimates for derivatives tangential to this surface, we can convert to the coordinate based components and apply angular derivatives and the Price equations \eqref{ePricei}-\eqref{ePriceiv} to get \begin{align}
\int_\NullInSfc{\uout} \left({u_+}^2|{N}\Phi_{1}|^2(1-2M/r)^{-1} r^{2} +{u_-}^2|{N}\Phi_{0}|^2\right) d\uin d^2\omega \leq& C(\normKangN{4}+\normTangN{8}) ,\label{eLabelForAngledConformalIn}\\
\int_\NullOutSfc{\uin} \left({u_-}^2|{L}\Phi_{-1}|^2(1-2M/r)^{-1} r^{2} +{u_+}^2|{L}\Phi_{0}|^2\right) d\uout d^2\omega \leq& C(\normKangN{4}+\normTangN{8}) . \label{eLabelForAngledConformalOut} \end{align}
We now prove decay in outgoing regions.
\begin{lemma}[Decay for ${r_*}>1$] \label{lFarDecay} There is a constant $C$ such that if $F$ is a solution of the Maxwell equations \eqref{eMaxwellEquationDiv}-\eqref{eMaxwellEquationAlt}, then for all $t\geq0$, ${r_*}>1$, $(\theta,\phi)\in S^2$, \begin{align*}
|\phi_1|\leq& C r^{-3/2} |{u_+}|^{-1} \left( \normKangN{4} +\normTangN{8} +\sup_{\{0\}\times\Reals^+\times S^2}\sum_i(r^{5/2}\MyFi)^2 \right)^{1/2} , \\
|\phi_0|\leq& C r^{-2} \left(\frac{{u_+}-|{u_-}|}{{u_+}(1+|{u_-}|)}\right)^{1/2} \left( \normKangN{4} +\normTangN{8} +\sup_{\{0\}\times\Reals^+\times S^2}\sum_i(r^{5/2}\MyFi)^2 \right)^{1/2}, \\
|\phi_{-1}|\leq& C r^{-1} (1+|{u_-}|)^{-1} \left( \normKangN{4} +\normTangN{8} +\sup_{\{0\}\times\Reals^+\times S^2}\sum_i(r^{5/2}\MyFi)^2 \right)^{1/2} . \end{align*} For $t<{r_*}$, \begin{align*}
|\phi_{-1}|\leq& C r^{-1} (1+|{u_-}|)^{-3/2} \left( \normKangN{4} +\normTangN{8} +\sup_{\{0\}\times\Reals^+\times S^2}\sum_i(r^{5/2}\MyFi)^2 \right)^{1/2} . \end{align*} \begin{proof} At any point in the far region, ${r_*}>1$, we will integrate along a radial, null ray to prove decay. The bounds on the conformal charge give decay for integrals along the null rays. The final end point will either be at ${r_*}=0$, where we already have decay, or at $t=0$, where we have decay by assumption. In this way, each component of the Maxwell field will be written as the sum of two terms, both of which decay. The typical null rays which we use are illustrated in figure \ref{FigOuterNullRays}.
There are a number of simplifications in the outgoing region. We can ignore factors of $(1-2M/r)$, since the ratio between $1$ and $(1-2M/r)$ is bounded above and bounded below by a strictly positive number. Since ${r_*}>1$, we can ignore ratios of ${r_*}/r$. There is the ordering ${u_+}\geq{r_*}\geq{u_-}$. On outgoing null rays, on which ${u_-}$ is constant, the change in ${u_+}$ is twice the change in ${r_*}$, and similarly, on ingoing radial, null rays the change in ${u_-}$ is twice the change in ${r_*}$.
\begin{figure}
\caption{Null rays in the outer region, ${r_*}>0$. The angular variables have been suppressed. The null rays go from a point either to the initial hypersurface $t=0$ or to the stationary region ${r_*}=0$. }
\label{FigOuterNullRays}
\end{figure}
The simplest application of our method is for the zero-weight component. First, we prove an estimate inside the light-cone, for $t>{r_*}$. We use a radial, null geodesic from $(t,{r_*},\theta,\phi)$ to $(t+{r_*},0,\theta,\phi)$ parametrized by ${u_-}$, \begin{align*}
|\Phi_{0}(t,{r_*},\theta,\phi)|
\leq& \int_{c_1} |{N}\Phi_{0}| d{u_-} +|\Phi_{0}(t+{r_*},0,\theta,\phi)|\\ \leq& \left(\int_{c_1} {u_-}^{-2} d{u_-}\right)^\frac12
\left(\int_{c_1} {u_-}^2 |{N}\Phi_{0}|^2 d{u_-}\right)^\frac12
+ |\Phi_{0}(t+{r_*},0,\theta,\phi)|\\
\leq& \left(\frac1{{u_-}}-\frac1{{u_+}}\right)^{1/2}\left(\int_{c_1} {u_-}^2 |{N}\Phi_{0}|^2 d{u_-}\right)^\frac12 + |\Phi_{0}(t+{r_*},0,\theta,\phi)| . \end{align*} The end point decays at a rate of $t'^{-1}$ evaluated at $t'=t+{r_*}$. We now integrate over the angular variables too (and apply Cauchy-Schwarz, so that the integral in $d\omega$ is inside the square root). The integral in the first term is bounded by the conformal charge as given in \eqref{eLabelForAngledConformalIn}. The second angular derivatives of $F$ satisfy the same estimate, and we can use them to control the value of the component through a Sobolev estimate. Hence, \begin{align*}
|\Phi_{0}(t,{r_*},\theta,\phi)|
\leq& C\left(\int_{\{t\}\times\{{r_*}\}\times S^2} \sum_{k=0}^2 |\mathcal{L}_\mathbb{T}^k\Phi_{0}|^2 d\omega\right)^\frac12 \\ \leq& \left(\frac{{u_+}-{u_-}}{{u_+}{u_-}}\right)^{1/2} \left(\normKangN{4} +\normTangN{8}\right)^{1/2} ,\\
|\phi_0(t,{r_*},\theta,\phi)| \leq& \left(\frac{{u_+}-{u_-}}{{u_+}{u_-}} \right)^{1/2} r^{-2} \left(\normKangN{4} +\normTangN{8}\right)^{1/2}. \end{align*}
Outside the light-cone, where $t<{r_*}$, we integrate over the curve $c_4$, and the end point value is replaced by $|\Phi_{0}(0,t+{r_*},\theta,\phi)|$, which decays like ${r_*}'^{-1/2}=(t+{r_*})^{-1/2}$ (since $\phi_0$ decays like ${r_*}'^{-5/2}$). Thus, we have \begin{align*}
|\phi_0(t,{r_*},\theta,\phi)| \leq& \left(\frac{{u_+}-|{u_-}|}{{u_+}|{u_-}|}\right)^{1/2} r^{-2} \left(\normKangN{4} +\normTangN{8} + \sup_{\{0\}\times\Reals^+\times S^2}\sum_i(r^{5/2}\MyFi)^2 \right)^{1/2}.
In the region where $|{u_-}|<1$, instead of using $\GenEnergy{K}$, we could have used $\GenEnergy{T}$, which does not have a vanishing factor of ${u_-}^2$. Thus, we may replace $({u_+}-|{u_-}|)/{u_+}|{u_-}|$ by $({u_+}-|{u_-}|)/{u_+}(1+|{u_-}|)$.
Now, we prove decay for $\phi_1$ by again integrating along ingoing, radial, null geodesics. From any given point, we integrate along $c_4$ to the endpoint where $t=0$, \begin{align*}
|\Phi_{1}(t,{r_*},\theta,\phi)|
\leq& \int_{c_4} |{N}\Phi_{1} | d{u_-} +|\Phi_{1}(0,t+{r_*},\theta,\phi)|\\
\leq& \left(\int_{c_4} r^{-2}d{u_-} \right)^\frac12 \left(\int_{c_4} {u_+}^2 |{N}\Phi_{1}|^2 r^2 d{u_-}\right)^\frac12 {u_+}^{-1}+|\Phi_{1}(0,t+{r_*},\theta,\phi)| \\
\leq& \left(\int_{c_4} {u_+}^2 |{N}\Phi_{1}|^2 r^2 d{u_-}\right)^\frac12 {u_+}^{-1}r^{-1/2}+|\Phi_{1}(0,t+{r_*},\theta,\phi)| . \end{align*} The endpoint will be bounded by ${r_*}'^{-3/2}=(t+{r_*})^{-3/2}$. We now integrate in the angular variables, differentiate in the angular directions, and apply a spherical Sobolev estimate to get \begin{align*}
|\Phi_{1}(t,{r_*},\theta,\phi)| \leq& C {u_+}^{-1}r^{-1/2} \left(\normKangN{4}+\normTangN{8}\right)^{\frac12} \\ &+C{u_+}^{-3/2}\left(\sup_{\{0\}\times\Reals^+\times S^2}\sum_i(r^{5/2}\MyFi)^2\right)^{\frac12} ,\\
|\phi_1|\leq& C {u_+}^{-1}r^{-3/2} \left(\normKangN{4}+\normTangN{8}+\sup_{\{0\}\times\Reals^+\times S^2}\sum_i(r^{5/2}\MyFi)^2 \right)^{\frac12} . \end{align*}
Finally, for $\phi_{-1}$, we integrate along outgoing, radial, null rays on which ${u_-}$ is constant. Inside the light-cone $t>{r_*}$, we take the curve $c_2$ from $(t,{r_*},\theta,\phi)$ to $(t-{r_*},0,\theta,\phi)$. The estimate is \begin{align*}
|\Phi_{-1}(t,{r_*},\theta,\phi)|
\leq& \int_{c_2} |{L}\Phi_{-1} | d{u_+} +|\Phi_{-1}(t-{r_*},0,\theta,\phi)|\\
\leq& (\int_{c_2} r^{-2}d{u_+} )^\frac12 (\int_{c_2} {u_-}^2 |{L}\Phi_{-1}|^2 r^2 d{u_+})^\frac12 {u_-}^{-1}+|\Phi_{-1}(t-{r_*},0,\theta,\phi)|\\
\leq& C {u_-}^{-1} (\int_{c_2} {u_-}^2 |{L}\Phi_{-1}|^2 r^2 d{u_+})^\frac12 +|\Phi_{-1}(t-{r_*},0,\theta,\phi)| . \end{align*} In the stationary region, the decay rate is also $(t')^{-1}=(t-{r_*})^{-1}$, so the decay rate is \begin{align*}
|\Phi_{-1}(t,{r_*},\theta,\phi)| \leq& C |{u_-}|^{-1} \left(\normKangN{4}+\normTangN{8}\right)^\frac12 , \\
|\phi_{-1}|\leq& C |{u_-}|^{-1} r^{-1} \left(\normKangN{4}+\normTangN{8}\right)^\frac12 . \end{align*} For $t<{r_*}$, a similar argument can be made by integrating along $c_3$, with the value at the other end point being $\Phi_{-1}(0,{r_*}-t,\theta,\phi)$, where we have faster decay, \begin{align*}
|\Phi_{-1}(t,{r_*},\theta,\phi)| \leq& C |{u_-}|^{-3/2} \left(\normKangN{4}+\normTangN{8}\right)^\frac12 +C|{u_-}|^{-3/2} \left(\sup_{\{0\}\times\Reals^+\times S^2}\sum_i(r^{5/2}\MyFi)^2\right)^\frac12, \\
|\phi_{-1}|\leq& C |{u_-}|^{-3/2} r^{-1} \left(\normKangN{4}+\normTangN{8} +\sup_{\{0\}\times\Reals^+\times S^2}\sum_i(r^{5/2}\MyFi)^2\right)^\frac12 . \end{align*}
Again, in the region $|{u_-}|<1$, we can use $\GenEnergy{T}$ instead of $\GenEnergy{K}$ to get a better bound when ${u_-}$ vanishes. \end{proof} \end{lemma}
We now turn to proving decay in the ``near'' region, ${r_*}<0$. Since for any fixed interval $2M<r_1<r<r_2$, we can apply the results from section \ref{sStationaryDecay}, the main purpose of the following lemma is to prove estimates which are uniform in $r$ so that they can be extended to the event horizon. Note that ${u_+}$ extends smoothly to the event horizon and is an affine parameter for tangential geodesics.
Since the vector fields $T$ and $K$ vanish on the bifurcation sphere, the boundedness of the associated energy allows rapid divergence of the (normalized) energy density near there. Not surprisingly, this is not sufficient to control the Maxwell field. In light of this, it is somewhat surprising that the energies associated with $T$ and $K$ are sufficient to prove decay for the correctly normalized components of the Maxwell field tensor corresponding to $\phi_1$.
As explained in the introduction, the correctly normalized basis for stating results on or near the event horizon is \begin{align*} \partial_t+\partial_{\rs}, & &(1-2M/r)^{-1}(\partial_t-\partial_{\rs}), & &r^{-1} e_{A}, & &r^{-1} e_{B} . \end{align*} We can equally well replace $r^{-1}e_{A}$ and $r^{-1}e_{B}$ by $m$ and $\bar{m}$ or by ${\hat{\Theta}}$ or ${\hat{\Phi}}$. For large ${u_+}$, this is the ``correctly normalized'' basis, because it is the result of parallelly transporting the original, normalized basis on the initial data surface, $t=0$, along ingoing null geodesics to reach the event horizon.
The method used in the previous lemma gives decay rates of ${u_+}^{-1}$, ${u_+}^{-1/2}$, and ${u_+}^{-1}$ for $\Phi_{1}$, $\Phi_{0}$, and $\Phi_{-1}$ respectively. The functions $\Phi_{1}$ and $\Phi_{0}$ are correctly normalized (except for bounded factors) as $r\rightarrow2M$, but $(1-2M/r)^{-1}\Phi_{-1}$ is the correctly normalized component in this region. For $\Phi_{0}$, we prove a different preliminary decay rate and then use the divergence theorem to obtain a rate of ${u_+}^{-1}$. For the correctly normalized, negative-weight component, we use the ${u_+}^{-1}$ decay for $\Phi_{0}$, a transport equation, and an integrating factor to get ${u_+}^{-1}$ decay. We note that the vector field $(1-2M/r)^{-1}(T-R)$, which is a smoothed version of the vector field $Y$ in \cite{DafermosRodnianski}, can be used to prove boundedness for this component without using a transport equation.
\begin{lemma}[Decay for ${r_*}<0$.] \label{lNearDecay} There is a constant $C$ such that if $F$ is a solution of the Maxwell equations \eqref{eMaxwellEquationDiv}-\eqref{eMaxwellEquationAlt}, then for all $t\geq0$, ${r_*}<0$, $(\theta,\phi)\in S^2$ such that ${u_+}>1$, \begin{align*}
|F(\partial_t+\partial_{\rs}, {\hat{\Theta}})| +|F(\partial_t+\partial_{\rs},{\hat{\Phi}})| \leq& C{u_+}^{-1} \left(\normKangN{4}+\normTangN{8}\right)^\frac12, \\
|\phi_0(t,{r_*})|
\leq& C|F(\partial_t+\partial_{\rs},(1-2M/r)^{-1}(\partial_t-\partial_{\rs}))| +|F({\hat{\Theta}},{\hat{\Phi}})| \\ \leq& C{u_+}^{-1}\left(\normKangN{4}+\normTangN{8} +\normTunitBifSphN{3}\right)^\frac12 , \\ |F((1-2M/r)^{-1}(\partial_t-\partial_{\rs}),{\hat{\Theta}} +i{\hat{\Phi}})| \leq& C {u_+}^{-1} \left(\normKangN{4} + \normTangN{8} + \normTunitBifSphN{3} \right)^{1/2} . \end{align*} \begin{proof} The first part of this proof is similar to that of lemma \ref{lFarDecay}. The main difference is that we must track factors of $(1-2M/r)$ carefully, but we may ignore factors of $r$ since it is bounded above and below by positive constants. When tracking factors of $(1-2M/r)$, we use $(1-2M/r)\phi_i(t',{r_*}',\theta',\phi')$ to denote the value of $(1-2M/r)\phi_i$ at $(t',{r_*}',\theta',\phi')$ even if an unprimed set of coordinates is in use simultaneously. Since we are only considering ${u_+}>1$, the ingoing, radial, null rays from any point will hit the stationary region ${r_*}=0$. This is illustrated in figure \ref{FigNearDecaypm}.
\begin{figure}
\caption{Null rays in the inner region ${r_*}<0$. The angular variables have been suppressed. The curve $c_5$ goes from a point in the stationary region ${r_*}=0$ to an arbitrary point in the region $t>0$, ${r_*}<0$, ${u_+}>1$ along an ingoing, null, radial geodesic. }
\label{FigNearDecaypm}
\end{figure}
Integrating along an ingoing, radial, null geodesic, $c_5$, from $(t,{r_*},\theta,\phi)$ to $(t+{r_*},0,\theta,\phi)$, in the same way as in the proof of lemma \ref{lFarDecay}, we have \begin{align*}
|\Phi_{1}(t,{r_*},\theta,\phi)|
\leq& \int_{c_5} |{N}\Phi_{1}| d{u_-} +|\Phi_{1}(t+{r_*},0,\theta,\phi)|\\ \leq& \left(\int_{c_5} (1-2M/r) d{u_-}\right)^\frac12
\left(\int_{c_5} |{N}\Phi_{1}|^2 {u_+}^2 r^2 (1-2M/r)^{-1} d{u_-}\right)^\frac12 {u_+}^{-1} +|\Phi_{1}(t+{r_*},0,\theta,\phi)| .
\end{align*} The integral of $(1-2M/r)$ with respect to $d{u_-}$ is, up to a factor of $2$, the same as the integral of $(1-2M/r)$ with respect to $d{r_*}$. By a change of variables, this is the integral of $1$ with respect to $dr$. Thus, the contribution from the first integral in the first term is bounded by a constant. After integrating in the angular variables, the second integral is bounded by the conformal energy of the angular derivatives of $F$. The second term is the value of the component in the stationary region, so it decays like $(t+{r_*})^{-1}$. Applying the angular derivative and Sobolev estimate argument from the previous lemma, \begin{align*}
|\Phi_{1}|
=|F(\partial_t+\partial_{\rs}, {\hat{\Theta}}+i {\hat{\Phi}})| \leq& C {u_+}^{-1} \left(\normKangN{4}+\normTangN{8}\right)^\frac12 . \end{align*} This proves the first result.
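For concreteness, the boundedness of the first integral asserted above follows from a short change of variables (an aside; here we only use that ${u_\pm}$ differ from $t\pm{r_*}$ by additive constants, so that along the ingoing ray $c_5$ we have $|d{u_-}|=2|d{r_*}|$, and that $dr=(1-2M/r)d{r_*}$):
\begin{align*}
\int_{c_5} (1-2M/r) d{u_-}
= 2\int_{{r_*}}^{0} (1-2M/r({r_*}')) d{r_*}'
= 2\int_{r({r_*})}^{r(0)} dr'
= 2\left(r(0)-r({r_*})\right)
\leq 2 r(0) ,
\end{align*}
and $r(0)$, the value of $r$ at ${r_*}=0$, is a fixed constant.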
For the zero weight component, we first prove an intermediate result for $(1-2M/r)^{1/2}\phi_0$. From integrating along a surface of constant $t$, we have \begin{align*}
(1-2M/r)^{1/2}|{u_+}|^2 \phi_0^2(t,{r_*},\theta,\phi)
=& -\int_{\{t\}\times[{r_*},0]\times\{\theta\}\times\{\phi\}} \partial_{\rs}\left((1-2M/r)^{1/2}|{u_+}|^2 \phi_0^2\right) d{r_*}'\\ & + (1-2M/r)^{1/2}{u_+}^2 \phi_0^2(t+{r_*},0,\theta,\phi). \end{align*} The integrand can be estimated by dropping negative terms and applying the Cauchy-Schwarz inequality as \begin{align*}
-\partial_{\rs}\left((1-2M/r)^{1/2}{u_+}^2|\phi_0|^2\right)
=& -\frac12 (1-2M/r)^{1/2}\frac{2M}{r^2} {u_+}^2 |\phi_0|^2
- 2(1-2M/r)^{1/2}{u_+} |\phi_0|^2 \\ &- 2(1-2M/r)^{1/2} {u_+}^2 \Re(\phi_0 \partial_{\rs}\phi_0) \\
\leq& C\left( {u_+}^2 (1-2M/r) |\phi_0|^2 + {u_+}^2 |\partial_{\rs}\phi_0|^2 \right) . \end{align*} Thus, for ${u_+}>1$, after applying the standard Sobolev estimate argument, the integral is controlled by the conformal energy, \begin{align*}
-\int_{\{t\}\times[{r_*},0]\times\{\theta\}\times\{\phi\}} \partial_{\rs}\left((1-2M/r)^{1/2}|{u_+}|^2 \phi_0^2\right) d{r_*}' \leq&C\left( \normKangN{4} + \normTangN{8} \right). \end{align*} The end point can be controlled by the stationary decay result, \begin{align*}
(1-2M/r)^{1/2}{u_+}^2|\Phi_{0}(t+{r_*},0,\theta,\phi)|^2 \leq& C \left(\normKangN{4} +\normTangN{8} \right), \end{align*} so that \begin{align*} |\phi_0(t,{r_*},\theta,\phi)| \leq& C(1-2M/r)^{-1/4} {u_+}^{-1} \left( \normKangN{4} + \normTangN{8} \right)^{1/2}. \end{align*}
\begin{figure}\caption{The two-dimensional region ${\Omega_{(t,\rs)}}$ over which the divergence theorem is applied. The angular variables have been suppressed.}\label{FigDivThmRegion}
\end{figure}
This estimate can now be improved. For a given point $(t,{r_*},\theta,\phi)$, consider the two-dimensional surface \begin{align*} {\Omega_{(t,\rs)}}=&\{(t',{r_*}',\theta,\phi) : t'\geq0,t'+{r_*}'\leq t+{r_*}, t-{r_*}\leq t'-{r_*}'\leq t+{r_*} \} . \end{align*} This is illustrated in figure \ref{FigDivThmRegion}. Applying the (two-dimensional) divergence theorem with the vector field $({N}\Phi_{0}){N}$, we have \begin{align*} \Phi_{0}(t,{r_*},\theta,\phi) -\Phi_{0}(t+{r_*},0,\theta,\phi) +\int_{\{0\}\times[{r_*}-t,-t-{r_*}]\times\{\theta\}\times\{\phi\}} {N}\Phi_{0} d{r_*} =& -2 \int_{\Omega_{(t,\rs)}} {L}{N}\Phi_{0} d{r_*} dt . \end{align*} From the wave equation \eqref{ePriceWaveEqn} for $\Phi_{0}$, we have \begin{align*} \Phi_{0}(t,{r_*},\theta,\phi) =& \Phi_{0}(t+{r_*},0,\theta,\phi) +\int_{\{0\}\times[{r_*}-t,-t-{r_*}]\times\{\theta\}\times\{\phi\}} {N}\Phi_{0} d{r_*}\\ &-2 \int_{\Omega_{(t,\rs)}} r^{-2}(1-2M/r)(-\Delta_{S^2})\Phi_{0} d{r_*} dt . \end{align*}
We now estimate the terms on the right. The first is bounded by the stationary decay result. The second is an integral over the initial data surface $t=0$, so it can be controlled by integrals of the initial data. We have \begin{align*}
|\int_{[{r_*}-t,-t-{r_*}]} {N}\phi_0 d{r_*}|
\leq& \left(\int_{[-\infty,-t-{r_*}]} (1-2M/r)^{1/2} d{r_*}\right)^{1/2} \left(\int_{[-\infty,-t-{r_*}]} (1-2M/r)^{-1/2}|{N}\phi_0|^2 d{r_*}\right)^{1/2} . \end{align*} The first integral is bounded by \begin{align*} \int_{[-\infty,-t-{r_*}]} (1-2M/r)^{1/2} d{r_*} \leq& C(1-2M/r)^{1/2} \leq C e^{(-t-{r_*})/4M} \leq C {u_+}^{-2} . \end{align*} Since $(1-2M/r)^{-1/2}{N}$ and $\partial/\partialU_-$ differ only by smooth functions of $r$, the second can be estimated by \begin{align*}
\int_{[-\infty,-t-{r_*}]} (1-2M/r)^{-1/2}|{N}\phi_0|^2 d{r_*}
\leq& C \int_{[-\infty,-t-{r_*}]} |\frac{\partial}{\partialU_-} \phi_0|^2 (1-2M/r)^{1/2} d{r_*} . \end{align*} Since the stationary tetrad and the one based on the $U_+, U_-, \theta, \phi$ coordinate system differ only by smooth functions of $r$, \begin{align*}
\int_{[-\infty,-t-{r_*}]} (1-2M/r)^{-1/2}|{N}\phi_0|^2 d{r_*}
\leq& C \int_{[-\infty,-t-{r_*}]} |\frac{\partial}{\partialU_-}\left(\frac12F(\hat{l},\hat{n})+F({\hat{\Theta}},{\hat{\Phi}})\right) |^2 (1-2M/r)^{1/2}d{r_*} \\
\leq& C \int_{[-\infty,-t-{r_*}]} |\frac{\partial}{\partialU_-}\left(\frac12F(\frac{\partial}{\partialU_+},\frac{\partial}{\partialU_-})+F(\Theta,\frac{1}{\sin\theta}\Phi)\right) |^2 (1-2M/r)^{1/2} d{r_*} \\
&+C \int_{[-\infty,-t-{r_*}]} |\frac12F(\frac{\partial}{\partialU_+},\frac{\partial}{\partialU_-})+F(\Theta,\frac{1}{\sin\theta}\Phi) |^2 (1-2M/r)^{1/2} d{r_*} . \end{align*} Since coordinate vector fields commute, \begin{align*}
\int_{[-\infty,-t-{r_*}]} (1-2M/r)^{-1/2}|{N}\phi_0|^2 d{r_*}
\leq& C \int_{[-\infty,-t-{r_*}]} \sum_{k=0}^1 |\mathcal{L}_{\frac{\partial}{\partialU_-}}^{k} F|_{\hat{\tilde{\mathbb{X}}}}^2 (1-2M/r)^{1/2} d{r_*} . \end{align*} The same argument could have been applied to the second angular derivatives of $F$, which could have been used in a Sobolev estimate. This would have led to \begin{align*}
\int_{[-\infty,-t-{r_*}]} (1-2M/r)^{-1/2}|{N}\phi_0|^2 d{r_*} \leq& C \normTunitBifSphN{3} . \end{align*} Thus, the integral along the initial time slice is bounded by \begin{align*}
|\int_{[{r_*}-t,-t-{r_*}]} {N}\phi_0 d{r_*}| \leq& C {u_+}^{-1} \left( \normTunitBifSphN{3} \right)^{1/2} . \end{align*}
Finally, we estimate the integral over ${\Omega_{(t,\rs)}}$ by breaking it into two parts, ${{\DivThmRegion}_A}={\Omega_{(t,\rs)}}\cap \{t>2|{r_*}|\}$ and ${{\DivThmRegion}_B}={\Omega_{(t,\rs)}}\cap \{t\leq2|{r_*}|\}$. In ${\Omega_{(t,\rs)}}$, \begin{align*}
|\int_{\Omega_{(t,\rs)}}& r^{-2}(1-2M/r)(-\Delta_{S^2})\Phi_{0} d{r_*} dt| \\ \leq& \sup_{{{\DivThmRegion}_A}}\left|(1-2M/r)^{1/4} (-\Delta_{S^2})\Phi_{0}\right| \int_{{\DivThmRegion}_A} r^{-2}(1-2M/r)^{3/4} d{r_*} dt \\
&+ \left( \int_{{\DivThmRegion}_B} r^{-2}(1-2M/r) d{r_*} dt\right)^{1/2} \left(\int_{{\DivThmRegion}_B} r^{-2}(1-2M/r)|\Delta_{S^2}\Phi_{0}|^2 d{r_*} dt \right)^{1/2} . \end{align*} On the first line of the right-hand side, the supremum term decays like ${u_+}^{-1}$ by the intermediate result, and the integral term is uniformly bounded by the exponential decay of $(1-2M/r)$ with respect to ${r_*}$. In the second line, the second integral is bounded by estimate \eqref{eWaveLocalDecay}, and the first integral is bounded by $(1-2M/r)$ evaluated at the point $(t',{r_*}')=((2/3){u_+},-(1/3){u_+})$, and hence decays faster than ${u_+}^{-1}$. Combining all these results gives \begin{align*} |\phi_0| \leq& C {u_+}^{-1} \left(\normKangN{4} +\normTangN{8} + \normTunitBifSphN{3} \right)^{1/2} . \end{align*}
We begin our analysis of $\Phi_{-1}$ with an intermediate decay result, using the same sort of simple argument as was used for $\Phi_{1}$. Integrating along an outgoing, radial, null ray, we have \begin{align*} |\Phi_{-1}(t,{r_*},\theta,\phi)|
\leq& \int |{L}\Phi_{-1}| d{u_+} + |\Phi_{-1}(t-{r_*},0,\theta,\phi)| \\
\leq& {u_-}^{-1} \left(\int (1-2M/r) d{u_+}\right)^{1/2} \left( \int |{L}\Phi_{-1}|^2 {u_-}^2 (1-2M/r)^{-1} d{u_+}\right)^{1/2} + |\Phi_{-1}(t-{r_*},0,\theta,\phi)| \\ \leq& C {u_-}^{-1} \left(\normKangN{4} + \normTangN{8}\right)^{1/2} . \end{align*} Since we are working in the inner region, ${r_*}\leq0$, there is the estimate ${u_+}\leq{u_-}$, and \begin{align*} |\Phi_{-1}(t,{r_*},\theta,\phi)| \leq& C {u_+}^{-1} \left(\normKangN{4} + \normTangN{8}\right)^{1/2} . \end{align*}
To obtain stronger estimates, we will need to integrate along outgoing null geodesics starting near the initial data surface. We work with the $U_+$ and $U_-$ coordinates to control the correctly normalised, negative-weight component near the bifurcation sphere. If bounded initial data is posed on the surface $U_+U_-=-1$, which corresponds to the union of the $t=0$ surfaces in the two exterior regions, then, at least in some small neighborhood of the bifurcation sphere, the components of the Maxwell field with respect to the smooth $(U_+,U_-,\theta,\phi)$ coordinate system must remain bounded by a multiple of their initial value. This is essentially a Cauchy stability result, as was used for the wave equation \cite{DafermosRodnianski}. Thus, in some sufficiently small neighborhood of the bifurcation sphere, in the exterior, the outgoing component is bounded by \begin{align*}
4M e^{{u_-}/4M} |\Phi_{-1}|
\leq&C\left(|F(\frac{\partial}{\partial U_-}, \Theta)|
+|F(\frac{\partial}{\partial U_-}, \Phi)|\right) \nonumber\\ \leq& C\left(\normTunitBifSphN{3}\right)^{1/2} . \end{align*} In particular, we can pick a ${{u_+}}_0\ll0$ such that on the hypersurface ${u_+}={{u_+}}_0$ where $t\geq0$, \begin{align}
(1-2M/r)^{-1} |\Phi_{-1}| \leq& C\left(\normTunitBifSphN{3}\right)^{1/2} . \label{ePrelimBndOnMyFmNearHorizon} \end{align}
We now use the decay for $\Phi_{0}$ and $\Phi_{-1}$ to prove a stronger estimate. From the Price equation \eqref{ePriceiv}, we have \begin{align*} {L}\left( e^{{u_+}/4M}(1-2M/r)^{-1}\Phi_{-1}\right) =& \left(\frac{1}{2M}-\frac{2M}{r^2}\right)\left( e^{{u_+}/4M}(1-2M/r)^{-1}\Phi_{-1}\right) + e^{{u_+}/4M} r^{-2} \Phi_{0} . \end{align*} Since $1/2M -2M/r^2$ vanishes linearly at $r=2M$, it is bounded by $C(1-2M/r)$. Integrating along an outgoing geodesic starting on ${u_+}={{u_+}}_0$ at $(t_0,{{r_*}}_0,\theta,\phi)$ and going to $(t,{r_*},\theta,\phi)$, we have \begin{align*}
|e^{{u_+}/4M}(1-2M/r)^{-1}\Phi_{-1}(t,{r_*},\theta,\phi)|
\leq& C\int_{c_6} e^{{u_+}/4M} |\Phi_{-1}+\Phi_{0}| d{u_+} + |e^{{{u_+}}_0/4M}(1-2M/r)^{-1}\Phi_{-1}(t_0,{r_*}_0,\theta,\phi)| . \end{align*} The geodesic along which we integrate is illustrated in figure \ref{FigWtm}. We break the integral into two pieces, with one going from ${{u_+}}_0$ to ${u_+}/2$ and the other going from ${u_+}/2$ to ${u_+}$. From the boundedness and decay for $\Phi_{0}$ and $\Phi_{-1}$, the integral is bounded by \begin{align*}
\int_{c_6} e^{{u_+}/4M} |\Phi_{-1}+\Phi_{0}| d{u_+} \leq& C (e^{{u_+}/8M} + {u_+}^{-1} e^{{u_+}/4M}) \left(\normKangN{4}+\normTangN{8} +\normTunitBifSphN{3}\right)^{1/2} . \end{align*} By estimate \eqref{ePrelimBndOnMyFmNearHorizon}, the end point, $(1-2M/r)^{-1}\Phi_{-1}(t_0,{{r_*}}_0,\theta,\phi)$ is bounded. Thus, \begin{align*}
|(1-2M/r)^{-1}\Phi_{-1}(t,{r_*},\theta,\phi)| \leq&C (e^{-{u_+}/8M} + {u_+}^{-1} ) \left(\normKangN{4}+\normTangN{8} +\normTunitBifSphN{3}\right)^{1/2} \\ &+ C e^{(-{u_+}+{{u_+}}_0)/4M} \left(\normKangN{4}+\normTangN{8} +\normTunitBifSphN{3}\right)^{1/2} \\ \leq& C {u_+}^{-1}\left(\normKangN{4}+\normTangN{8} +\normTunitBifSphN{3}\right)^{1/2} . \end{align*} This provides the desired decay.
\begin{figure}\caption{The outgoing, radial, null geodesic $c_6$, starting on the hypersurface ${u_+}={{u_+}}_0$ and ending at the point $(t,{r_*},\theta,\phi)$. The angular variables have been suppressed.}\label{FigWtm}
\end{figure} \end{proof} \end{lemma}
Combining the two lemmas in this section, we have theorem \ref{tNearAndFarDecay}.
\begin{remark} \label{SimplifiedInitialData} Finally, we provide a simpler sufficient condition for the initial data to have finite norm.
The energies $\GenEnergy{T}[F]$ and $\GenEnergy{K}[F]$ are the integrals of the field components squared, $\phi_i^2$, against the weights $(1-2M/r) r^2$ and ${r_*}^2 (1-2M/r) r^2$, respectively. Thus, if the field components are bounded everywhere and decay at least as fast as $r^{-(5/2+\epsilon)}$ (for $\epsilon>0$) as $r\rightarrow\infty$, these energies will be finite. Hence, if the Maxwell field and its first eight derivatives decay at this rate, then \begin{align*} \normKangN{4} + \normTangN{8} \end{align*} will be bounded, and the result of theorem \ref{tDecayInStationaryRegionsInText} will apply.
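As a rough check (an aside; we ignore the logarithmic difference between ${r_*}$ and $r$ at large $r$), if $\phi_i=O(r^{-(5/2+\epsilon)})$, then for large $r$ the integrand of the more strongly weighted energy satisfies
\begin{align*}
{r_*}^2 (1-2M/r) r^2 \phi_i^2 \leq C r^{4} r^{-(5+2\epsilon)} = C r^{-1-2\epsilon} ,
\end{align*}
which is integrable with respect to $d{r_*}$ near infinity; the weight in $\GenEnergy{T}[F]$ is smaller, so the same decay rate suffices for it as well.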
Similarly, if the field components decay at least as fast as $r^{-(5/2+\epsilon)}$, then $\sup_{\{0\}\times\Reals^+\times S^2}\sum_i(r^{5/2}\MyFi)^2$ will be trivially bounded, so that the results of lemma \ref{lFarDecay} will give decay in the far region ${r_*}>1$.
The energy $\GenEnergy{{\hat{T}}}$ is the integral of the field components squared, $\phi_i^2$, against the measure $(1-2M/r)^{1/2}r^2 d{r_*}$. Thus, if the components are bounded as ${r_*}\rightarrow-\infty$, then this energy will be finite. Since, for ${r_*}<0$, the vector fields in $\hat{\tilde{\mathbb{X}}}$ are coordinate vector fields that extend to a neighbourhood of the bifurcation sphere, they are smooth vector fields. Thus, if $F$ and its first three derivatives with respect to this collection of smooth vector fields have finite components, then \begin{align*} \normTunitBifSphN{3} \end{align*} will be finite, and the results of lemma \ref{lNearDecay} will apply.
This verifies the footnote to theorem \ref{tDecayInStationaryRegions} that boundedness and $r^{-(5/2+\epsilon)}$ decay for $F$ and its first eight derivatives on the initial data surface $t=0$ is sufficient to prove the decay results in this paper. The same remark applies to the result in theorem \ref{tNearAndFarDecay}. \end{remark}
\appendix \section{Exclusion of the non-radiatable mode of the Maxwell field} \label{sExclusionOfNonRadiatable} In this section, we show that if the Maxwell field has finite conformal charge, it has no spherically symmetric part and explain why this is a physically reasonable assumption. The absence of dynamic, spherically symmetric components is well-known in the literature \cite{Price}. In $\mathbb R^{1+3}$, since the electric and magnetic fields are divergence free, the spherically symmetric component of the Maxwell field is always zero. While there are spherically symmetric solutions on the Schwarzschild manifold, we show that they have no dynamics, since these solutions are constant in $t$. There is a two-parameter family of such solutions described by the central electric and magnetic charge. These solutions do not vanish on the event horizon and decay like $1/r^2$ at infinity, so they fail to be in the finite conformal energy class we consider.
The Maxwell field $F$ can be written as \begin{align*} F(t,{r_*},\theta,\phi) =& \rho(t,{r_*},\theta,\phi) (1-2M/r) \text{d} t\wedge \text{d} {r_*}\\ &+ r(1-2M/r)^{1/2}\text{d} t\wedge\omega_0(t,{r_*},\theta,\phi)\\ &+ r(1-2M/r)^{1/2}\text{d} {r_*}\wedge\omega_1(t,{r_*},\theta,\phi)\\ &+\sigma(t,{r_*},\theta,\phi) r^2 \Omega ,\\ *F(t,{r_*},\theta,\phi) =& -\sigma (1-2M/r) \text{d} t\wedge \text{d} {r_*}\\ &- r(1-2M/r)^{1/2}\text{d} t\wedge(*_{S^2})\omega_1\\ &+ r(1-2M/r)^{1/2}\text{d} {r_*}\wedge(*_{S^2})\omega_0\\ &+ \rho r^2 \Omega , \end{align*} with $\rho$ and $\sigma$ scalar functions, with $(*_{S^2})$ the Hodge dual on $S^2$, with $\omega_0$ and $\omega_1$ in $\Omega^1(S^2)$ for each value of $t$ and ${r_*}$, and with $\Omega$ the standard volume form on $S^2$. We have used a stationary, instead of null, decomposition, so $\omega_0$ and $\omega_1$ appear instead of ${\alpha}$ and ${\underline{\alpha}}$. The scalars ${\rho}$ and ${\sigma}$ are the standard ones from the null decomposition.
We first remind the reader that there is no $\omega\in\Omega^1(S^2)$ with $\Extd_{S^2}\omega= (*_{S^2}) C_0$ for a nonzero constant $C_0$. If there were one, we could write this condition in coordinates: \begin{align*} \omega=&\omega_\theta \text{d}\theta +\omega_\phi\text{d}\phi ,\\ \Extd_{S^2} \omega=&(*_{S^2}) C_0 ,\\ \omega_{\theta,\phi} - \omega_{\phi,\theta} =& C_0 \sin(\theta) . \end{align*} Let $f(\theta)=\int_{\phi\in S^1} \omega_\phi \text{d} \phi$. Since $\omega$ is smooth, $f$ is continuous on $[0,\pi]$ and vanishing at $0$ and $\pi$ (since the integral is over a single point in $S^2$ in these cases). From the coordinate expression above and $\int_{S^1}\omega_{\theta,\phi}\text{d}\phi=0$, we have $f'(\theta)=\int_{S^1}\omega_{\phi,\theta}\text{d}\phi=-2\pi C_0 \sin(\theta)$, and $f(\pi)-f(0)=-\int_{0}^{\pi} 2\pi C_0 \sin(\theta) \text{d} \theta =-4\pi C_0 \neq 0$. Thus, the condition that $f$ vanishes at $0$ and $\pi$ forces $C_0=0$, a contradiction.
The Maxwell equations are (taking the components orthogonal to various $1$-forms)
\begin{align*} \text{Equation }&\text{ Orthogonal $1$-form}\\ \text{d} F=0: &\text{d} t:& 0=&r(1-2M/r)^{1/2}\text{d}{r_*}\wedge \Extd_{S^2}\omega_1 + (\partial_{\rs}(\sigma r^2)) \text{d}{r_*}\wedge\Omega \\ &\text{d} {r_*}:& 0=&r(1-2M/r)^{1/2}\text{d} t\wedge \Extd_{S^2}\omega_0 + (\partial_t(\sigma r^2)) \text{d} t\wedge\Omega \\ \text{d} *F=0: &\text{d} t:& 0=&r(1-2M/r)^{1/2}\text{d}{r_*}\wedge \Extd_{S^2}(*_{S^2})\omega_0 + (\partial_{\rs}(\rho r^2)) \text{d}{r_*}\wedge\Omega \\ &\text{d} {r_*}:& 0=&r(1-2M/r)^{1/2}\text{d} t\wedge \Extd_{S^2}(*_{S^2})\omega_1 + (\partial_t(\rho r^2)) \text{d} t\wedge\Omega \end{align*} Since $\text{d} t$ and $\text{d}{r_*}$ are spherically symmetric, these can be projected onto the $l=0$ spherical harmonic (equivalently, we can contract on the $S^2$ volume). Since there is no $l=0$ component for the $1$-forms $\omega_0$, $\omega_1$, $(*_{S^2})\omega_0$ and $(*_{S^2})\omega_1$, we find, \begin{align*} \partial_{\rs}(\sigma_{l=0} r^2)=& 0\\ \partial_t(\sigma_{l=0} r^2)=&0 \\ \partial_{\rs}(\rho_{l=0} r^2)=&0 \\ \partial_t(\rho_{l=0} r^2)=&0 . \end{align*} It follows that the $l=0$ components are given by integration constants $q_E$ and $q_B$, \begin{align*} \rho_{l=0}=& \frac{q_{E}}{r^2} \\ \sigma_{l=0}=& \frac{q_{B}}{r^2} . \end{align*} Thus, there is no dynamics in the $l=0$ mode, since the $t$ derivative is always zero. These solutions do not decay sufficiently rapidly to have finite conformal energy.
The exclusion of these spherically symmetric solutions is physically reasonable. Physically, the solutions represent a perturbation of the Schwarzschild black hole to a charged Reissner-Nordstrom solution, not an external perturbation by radiation. Price refers to these spherically symmetric solutions as the ``non-radiatable'' modes, since the solutions in this two parameter family are static. Since the Maxwell equations are linear and commute with angular derivatives, the spherically symmetric component does not couple to the other components, so it will not affect the rest of our analysis to eliminate the spherically symmetric components. In analogy with the theory of solitons, we might think of the Reissner-Nordstrom solutions as a manifold in the space of solutions to the Maxwell-Einstein system. In this case, the decoupled Maxwell equations with $l>0$ correspond to linearized perturbations from this manifold, whereas perturbations with $l=0$ correspond to linearized perturbations along the manifold of stationary solutions.
\section{Analysis of the wave equation} \label{sOneDWaveAnalysis} We now prove decay estimates for solutions to the wave equation \eqref{ePriceWaveEqn}. For this equation, there is also an energy and conformal energy, which we use in our analysis. As with the Maxwell field, we must control the trapping of $u$ near the photon sphere to control the growth of the conformal charge. We do this with a local decay estimate, employing light-cone localization to obtain an estimate of the full strength we require. The arguments and results of this section are a slight modification of those in \cite{BlueSterbenz}; in this case, the situation is simpler.
For the wave equation on the Schwarzschild manifold, previous analysis \cite{BlueSofferLongPaper,BlueSterbenz,DafermosRodnianski} has required a decomposition onto spherical harmonics. On each spherical harmonic, the wave equation can be treated as a one-dimensional wave equation with an effective potential. The main estimate uses a vector field, $\gamma$, which points away from the maximum of the effective potential. In the case of the geometrically defined wave equation, the locations of these maxima depend on the spherical harmonic parameter, and $\gamma$ has been modified to fit each spherical harmonic. The equation \eqref{ePriceWaveEqn} is simpler, with the maximum of the effective potential always at ${r_*}=0$. Thus, a very minor modification of the previous analysis allows us to make the estimate without using a spherical harmonic decomposition.
Since the potential $V_L= \frac{1}{r^2}(1-2M/r)$ is real-valued, we may analyse the real and imaginary parts, which each satisfy \eqref{ePriceWaveEqn}, separately. Thus, we may assume our solutions are real-valued.
We use the method of multipliers to analyse \eqref{ePriceWaveEqn}. Although it may be possible to introduce a Lagrangian formulation and an energy-momentum tensor, we do not do so because this is not the geometrically defined wave equation $\nabla^\alpha\nabla_\alpha(r^{-1}u)=0$, so the Lagrangian for this system would be quite artificial, because there would be a risk of confusion between the energy-momentum for $u$ and that of the full Maxwell field, and because some of the energies would require correction terms. The essence of the method is to choose a ``multiplier'' (a differential operator), apply it to the function $u$, multiply by the equation, and integrate by parts. The most useful differential operators are typically those given by the vector fields from the Lagrangian method.
We begin by recalling the energy and conformal energy, which were defined in section \ref{sEnergies}. Conservation of energy follows simply from the method of multipliers with the multiplier $T=\partial_t$. Multiplying \eqref{ePriceWaveEqn} by $\partial_tu$, we find \begin{align} 0 =& (\partial_tu)(-\partial_t^2 u +\partial_{\rs}^2u +\frac{1}{r^2}(1-2M/r)\Delta_{S^2}u) \nonumber \\
=& -\frac12 \partial_t( |\partial_tu|^2 +|\partial_{\rs}u|^2 +V_L|\not\!\nablau|^2 ) +\partial_{\rs}(\partial_tu\partial_{\rs}u) +\not\!\nabla\cdot(V_L\partial_tu\not\!\nablau) \label{eEnergyDivForm}. \end{align}
Integrating over a space-time slab gives conservation of energy: \begin{align*} E[u](t)-E[u](0)=&0 . \end{align*} Using the method of multipliers with $K$ gives \begin{align} E_\mathcal{C}[u](t_2)-E_\mathcal{C}[u](t_1)
=&\intTslab{t_1}{t_2} 2t(2V_L+{r_*} V_L')|\not\!\nablau|^2 d\rs d^2\omega dt \nonumber\\
\leq&\intTslab{t_1}{t_2} t\chi_{\text{trap}}|\not\!\nablau|^2 d\rs d^2\omega dt .\label{ePriceConformalIdentity} \end{align} This is similar to the estimate for the Maxwell equations. For this analysis, it is useful to introduce an energy localized inside the light cone. We let \begin{align*} E_{\min}[u](t)=& \intTLCslice{t} e d\rs d^2\omega . \end{align*}
We need a variety of Hardy estimates.
\begin{lemma}
Suppose $t\geq1$, $\chi_H$ is a non-negative function which is positive on some open subset of $\{|{r_*}|<t\}$, and $\alpha>0$. If $f:\mathbb R\times S^2\rightarrow\mathbb R$ is a smooth function, and $u:[t_1,t_2]\times\mathbb R\times S^2\rightarrow\mathbb R$ is smooth with $t\in[t_1,t_2]$ and $u(t)=f$, then \begin{align*}
\intTLCslice{t} \frac{|f|^2}{(1+{r_*}^{2})} d\rs d^2\omega \leq CE_{\min}[u](t) , \\
\intTLCtighterslice{t} \frac{|f|^2}{(1+|{r_*}|)^{\alpha+2}} d\rs d^2\omega
\leq C \intTLCtighterslice{t} \frac{|\partial_{\rs} f|^{2}}{(1+|{r_*}|)^{\alpha}} + \chi_H|f|^2 d\rs d^2\omega . \end{align*} \begin{proof} We start working with $\alpha\geq0$. When ${{r_*}}_1>0$, \begin{align*}
\frac{|f({{r_*}}_1)|^2}{(1+{{r_*}}_1)^{\alpha+1}} - |f(0)|^2
=& \int_0^{{{r_*}}_1} \partial_{\rs} \frac{|f|^2}{(1+{r_*})^{\alpha+1}} d{r_*} \\
=&\int_0^{{{r_*}}_1} \frac{2f\partial_{\rs} f}{(1+{r_*})^{\alpha+1} } -(\alpha+1)\frac{|f|^2}{(1+{r_*})^{\alpha+2} } d{r_*}\\
\leq& \frac{\alpha+1}{2}\int_0^{{{r_*}}_1} \frac{|f|^2}{(1+{r_*})^{\alpha+2}} d{r_*}
+ \frac{2}{\alpha+1} \int_0^{{{r_*}}_1}\frac{|\partial_{\rs} f|^2}{(1+{r_*})^{\alpha}} d{r_*} \\
&-(\alpha+1) \int_0^{{{r_*}}_1} \frac{|f|^2}{(1+{r_*})^{\alpha+2}} d{r_*}\\
\int_0^{{{r_*}}_1} \frac{|f|^2}{(1+{r_*})^{\alpha+2}} d{r_*}
\leq& \frac{4}{(\alpha+1)^2} \int_0^{{{r_*}}_1}\frac{|\partial_{\rs} f|^2}{(1+{r_*})^{\alpha}} d{r_*}
+\frac{2}{\alpha+1} |f(0)|^2 . \end{align*}
Since, for any exponent $\beta\geq0$, $(1+{r_*})^{\beta}$ is equivalent to $(1+{r_*}^2)^{\beta/2}$ on $[0,\infty)$, the powers of $(1+{r_*})$ can be replaced by $(1+{r_*}^2)^{1/2}$. By symmetry, the same result holds on $(-{{r_*}}_1,0]$. Since $(1+{r_*}^2)^{-\beta}$ is uniformly equivalent to $(1+({r_*}-{{r_*}}_0)^2)^{-\beta}$ for ${{r_*}}_0$ in a finite interval, the $|f(0)|^2$ term can be replaced by $|f({{r_*}}_0)|^2$ in any fixed interval.
For $\alpha=0$, we take ${{r_*}}_1=(3/4)t$. By integrating the estimate over ${{r_*}}_0$ with ${{r_*}}_0$ in $(1/2,3/4)$, where $V_L$ is strictly positive, and then integrating over the angular variables, we find \begin{align*}
\intTLCslice{t} \frac{|f|^2}{(1+{r_*}^{2})} d\rs d^2\omega \leq CE_{\min}[u](t) . \end{align*}
Similarly, taking ${{r_*}}_1=(1/2)t$ and $\alpha>0$, for any non-negative function, $\chi_H$, which is positive in some open set inside $|{r_*}|\leq t$, \begin{align*}
\intTLCtighterslice{t} \frac{|f|^2}{(1+{r_*})^{\alpha+2}} d\rs d^2\omega
\leq C \intTLCtighterslice{t} \frac{|\partial_{\rs} f|^2}{(1+|{r_*}|)^{\alpha}} + \chi_H|f|^2 d\rs d^2\omega \end{align*} \end{proof} \end{lemma}
We now prove a local decay estimate to control the trapping terms. To do this, we use a radial multiplier $\gamma$ in terms of a weight $g$, \begin{align*} \gamma= g \partial_{\rs} + (\partial_{\rs} g)/2 . \end{align*} Assuming that the weight $g$ is a function of the $t$ and ${r_*}$ variables only, we have, \begin{align*} -\partial_t(2\dot{u}\gamma u) =& -\partial_t(2\dot{u}g \partial_{\rs} u +\dot{u}(\partial_{\rs}g) u) \\ =&-\partial_{\rs}\left(g (\partial_{\rs} u)^2 +(\partial_{\rs} u)(\partial_{\rs} g)u -V_L g (\not\!\nabla u)^2 +g(\dot{u})^2 \right) -\not\!\nabla\cdot(V_L(\not\!\nabla u)g u) \\
&+2(\partial_{\rs}g)(\partial_{\rs} u)^2 -\frac{(\partial_{\rs}^3g) u^2}{2} -(\partial_{\rs} V_L)g|\not\!\nabla u|^2\\ &-2\dot{u}\dot{g}(\partial_{\rs} u)-\dot{u}(\partial_{\rs}\dot{g})u . \end{align*} We use the notation \begin{align*} E_{\LDmult}[u](t)=\intTslice{t} \dot{u}(\LDmultu) d\rs d^2\omega . \end{align*} Integrating over a space-time slab, \begin{align} -2E_{\LDmult}[u](t_2)+2E_{\LDmult}[u](t_1)
= \intTslab{t_1}{t_2} \big(2(\partial_{\rs}g)(\partial_{\rs} u)^2 -\frac{(\partial_{\rs}^3g) u^2}{2} -(\partial_{\rs} V_L)g|\not\!\nabla u|^2&\nonumber\\ -2\dot{u}\dot{g}(\partial_{\rs} u)-\dot{u}(\partial_{\rs}\dot{g})u\big) &d\rs d^2\omega dt .\label{eGeneralLDFormula} \end{align}
Taking $\chi_{[-3/4,3/4]}$ to be smooth, non-negative, compactly supported in $[-3/4,3/4]$, and identically $1$ on $[-1/2,1/2]$, $b$ to be a sufficiently small parameter to be chosen later, and $\sigma\in(1,2]$, we set \begin{align*} \chi_{\text{LC}}=& \chi_{[-3/4,3/4]}(\frac{{r_*}}{t}) ,\\
\tilde{g}=& \int_0^{{r_*}} \frac{1}{(1+b|y|)^\sigma} dy ,\\ g(t,{r_*})=& t \tilde{g}\chi_{\text{LC}} . \end{align*} We now expand and group the terms on the right of \eqref{eGeneralLDFormula}, \begin{align} -2E_{\LDmult}[u](t_2)+2E_{\LDmult}[u](t_1)
=& \intTslab{t_1}{t_2} 2t\chi_{\text{LC}}(\partial_{\rs}\tilde{g})(\partial_{\rs} u)^2 -t\chi_{\text{LC}}(\partial_{\rs} V_L)\tilde{g}|\not\!\nabla u|^2d\rs d^2\omega dt \label{eLDTermsi}\\ &-\intTslab{t_1}{t_2} t\chi_{\text{LC}}\frac{(\partial_{\rs}^3\tilde{g}) u^2}{2}d\rs d^2\omega dt \label{eLDTermsii}\\ &+\intTslab{t_1}{t_2} 2t\tilde{g}(\partial_{\rs}\chi_{\text{LC}})(\partial_{\rs} u)^2d\rs d^2\omega dt \label{eLDTermsiii}\\ &-\intTslab{t_1}{t_2} t( 3(\partial_{\rs}\chi_{\text{LC}})(\partial_{\rs}^2\tilde{g}) +3(\partial_{\rs}^2\chi_{\text{LC}})(\partial_{\rs}\tilde{g}) +(\partial_{\rs}^3\chi_{\text{LC}})\tilde{g})\frac{u^2}{2}d\rs d^2\omega dt \label{eLDTermsiv}\\ &-\intTslab{t_1}{t_2} 2\dot{u}\dot{g}(\partial_{\rs} u)+\dot{u}(\partial_{\rs}\dot{g})u d\rs d^2\omega dt .\label{eLDTermsv} \end{align} The terms on the right in lines \eqref{eLDTermsiii}-\eqref{eLDTermsv} can be estimated by the local energy. (In these calculations, remember that $t^{-1}<{r_*}^{-1}$ and inverse powers of $t$ arise from differentiating $\chi_{\text{LC}}$.) \begin{align*} \intTslice{t}& 2t\tilde{g}(\partial_{\rs}\chi_{\text{LC}})(\partial_{\rs} u)^2 d\rs d^2\omega\\ <& C \intTLCslice{t} (\partial_{\rs} u)^2d\rs d^2\omega < C E_{\min} ,\\
\intTslice{t}& |t( 3(\partial_{\rs}\chi_{\text{LC}})(\partial_{\rs}^2\tilde{g}) +3(\partial_{\rs}^2\chi_{\text{LC}})(\partial_{\rs}\tilde{g}) +(\partial_{\rs}^3\chi_{\text{LC}})\tilde{g})\frac{u^2}{2}| d\rs d^2\omega \\
<& C \intTLCslice{t} \frac{1}{1+|{r_*}|^{2}} |u|^2 d\rs d^2\omega < CE_{\min} ,\\
\intTslice{t}& |2\dot{u}\dot{g}(\partial_{\rs} u)+\dot{u}(\partial_{\rs}\dot{g})u| d\rs d^2\omega\\
<& C \intTLCslice{t} |\dot{u}|^2 +|\partial_{\rs} u|^2 +\frac{1}{1+|{r_*}|^{2}} |u|^2 d\rs d^2\omega\\ <&C E_{\min} . \end{align*} The left-hand side can be estimated similarly by \begin{align*} E_{\LDmult}(t)
<& tC \intTLCslice{t} |\dot{u}|^2 +|\partial_{\rs}u|^2 +\frac{1}{1+|{r_*}|^{2}} |u|^2 d\rs d^2\omega \\ <&C t E_{\min} . \end{align*} The two terms on the right appearing in line \eqref{eLDTermsi} are clearly positive, since $\tilde{g}$ is increasing and was chosen to go from negative to positive at the same value of ${r_*}$ as $-V_L'$. To control the term in \eqref{eLDTermsii} by the terms in \eqref{eLDTermsi}, we note that \begin{align*}
\partial_{\rs}^2\tilde{g}=& -\LDexpb\text{sgn}({r_*})(1+b|{r_*}|)^{-(\sigma+1)} ,\\ \partial_{\rs}^3\tilde{g}
=& \sigma(\sigma+1)b^2 (1+b|{r_*}|)^{-(\sigma+2)} -\LDexpb \delta({r_*}) ,\\
\intTLCslice{t} -t\chi_{\text{LC}} \frac{\partial_{\rs}^3\tilde{g}}{2} |u|^2 d\rs d^2\omega
\geq& -t \intTLCslice{t} \chi_{\text{LC}} \frac{\sigma(\sigma+1)b^2}{2} (1+b|{r_*}|)^{-(\sigma+2)} |u|^2 d\rs d^2\omega . \end{align*}
We divide the range of integration into two pieces and use the estimate $t/|{r_*}| <C$ when $|{r_*}|>t/2$. From this, \begin{align*}
\intTslice{t} -t\chi_{\text{LC}} \frac{\partial_{\rs}^3\tilde{g}}{2} |u|^2 d\rs d^2\omega
\geq& -t \frac{\sigma(\sigma+1)b^2}{2} \intTLCtighterslice {t}(1+b|{r_*}|)^{-(\sigma+2)} |u|^2 d\rs d^2\omega \\
& - \frac{\sigma(\sigma+1)b^2}{2} \intTLCintermediateslice{t} (1+b|{r_*}|)^{-\sigma+1} |u|^2 d\rs d^2\omega . \end{align*} Applying the Hardy estimates, we find \begin{align*}
\intTslice{t} &-t\chi_{\text{LC}} \frac{\partial_{\rs}^3\tilde{g}}{2} |u|^2 d\rs d^2\omega \\
\geq& -t \frac{C\sigma(\sigma+1)b^2}{2} \intTLCtighterslice{t} \chi_{\text{LC}}\left( (\partial_{\rs}\tilde{g})(\partial_{\rs} u)^2 -(\partial_{\rs} V_L)\tilde{g}|\not\!\nabla u|^2 \right) d\rs d^2\omega \\ &- CE_{\min} . \end{align*} Taking $b$ sufficiently small, we can dominate the integrand by half the terms in line \eqref{eLDTermsi}. Thus, \begin{align*}
\frac12 \intTslab{t_1}{t_2} t\chi_{\text{LC}}\left(2(\partial_{\rs}\tilde{g})(\partial_{\rs} u)^2 -(\partial_{\rs} V_L)\tilde{g}|\not\!\nabla u|^2 \right)d\rs d^2\omega dt
\leq& -2E_{\LDmult}|_{t_1}^{t_2} + C \int_{t_1}^{t_2} E_{\min}(t) dt . \end{align*} The same estimate holds for $\mathcal{L}_{\Theta_i} u$, so that summing over the components, we have \begin{align*}
\frac12 \intTslab{t_1}{t_2} t\chi_{\text{LC}}\left(2(\partial_{\rs}\tilde{g})(\partial_{\rs} \not\!\nablau)^2 -(\partial_{\rs} V_L)\tilde{g}|\Delta_{S^2} u|^2\right) d\rs d^2\omega dt
\leq& -2E_{\LDmult}[\not\!\nablau]|_{t_1}^{t_2} + C \int_{t_1}^{t_2} E_{\min}[\not\!\nablau](t) dt . \end{align*} The left-hand side controls the trapping term by the Hardy estimate. Since the trapping term controls the growth of the conformal charge, \begin{align*} E_\mathcal{C}[u](t_2)
\leq& E_\mathcal{C}[u](0)+ |-2E_{\LDmult}[\not\!\nablau]|_{0}^{t_2}| + C \int_{0}^{t_2} E_{\min}[\not\!\nablau](t) dt ,\\ \leq& C E_\mathcal{C}[u](0) +\sup_{t\in[0,t_2]}(tE_{\min}[\not\!\nablau](t)) + C \int_{0}^{t_2} E_{\min}[\not\!\nablau](t) dt . \end{align*} Since $E[\not\!\nablau]$ is conserved, there is an immediate linear bound on the conformal charge. By applying the Cauchy-Schwarz estimate and integration by parts both twice, we can make the estimate \begin{align*} E_{\min}[\not\!\nablau](t) \leq& E[\Delta_{S^2}^2u](t)^{1/4} \left(\frac{E_\mathcal{C}[u](t)}{t^2}\right)^{3/4} . \end{align*} This allows us to make a self-improving estimate. From the linear bound, the conformal energy cannot grow faster than $t^{1/4}$, and feeding the $t^{1/4}$ bound back into the estimate implies a uniform bound. Thus, \begin{align*} E_\mathcal{C}[u](t)\leq& C (E_\mathcal{C}[u](0) + E[\Delta_{S^2}^2u](0)) , \\
\intTslabInfinity{0} t\chi |\not\!\nablau|^2 d\rs d^2\omega dt \leq& C (E_\mathcal{C}[u](0) + E[\Delta_{S^2}^2u](0)) . \end{align*}
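To spell out the bootstrap arithmetic behind the last step (a sketch; constants are absorbed into $C$, and small times are handled directly by the conserved energy): from the linear bound $E_\mathcal{C}[u](t)\leq Ct$ and the interpolation estimate above,
\begin{align*}
E_{\min}[\not\!\nablau](t) \leq C t^{-3/4} ,
\qquad
\sup_{s\in[0,t]}\left(sE_{\min}[\not\!\nablau](s)\right) + \int_0^t E_{\min}[\not\!\nablau](s) ds \leq C t^{1/4} ,
\end{align*}
so that $E_\mathcal{C}[u](t)\leq Ct^{1/4}$. Repeating the argument with this improved bound gives $E_{\min}[\not\!\nablau](t)\leq Ct^{-21/16}$, for which both terms on the left of the second estimate are uniformly bounded in $t$, and the uniform bound on $E_\mathcal{C}[u]$ follows.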
Applying the same argument with the factors of $t$ and $\chi_{\text{LC}}$ dropped, so that $g(t,{r_*})=\tilde{g}({r_*})$, we find \begin{align*}
\intTslabInfinity{0} \frac{1}{(1+|{r_*}|^4)} |u|^2 d\rs d^2\omega dt \leq& C E[u]. \end{align*}
\noindent{\bf \Large Acknowledgement}
This project was started when the author visited UC San Diego. The author would like to thank J. Sterbenz for his hospitality and for contributing several valuable ideas.
\end{document}
\begin{document}
\title{Absolutely Maximally Entangled States: Existence and Applications}
\begin{abstract} We investigate absolutely maximally entangled (AME) states, which are multipartite quantum states that are maximally entangled with respect to any possible bipartition. These strong entanglement properties make them a powerful resource for a variety of quantum information protocols. In this paper, we show the existence of AME states for any number of parties, given that the dimension of the involved systems is chosen appropriately. We prove the equivalence of AME states shared between an even number of parties and pure state threshold quantum secret sharing (QSS) schemes, and prove necessary and sufficient entanglement properties for a wider class of ramp QSS schemes. We further show how AME states can be used as a valuable resource for open-destination teleportation protocols and to what extent entanglement swapping generalizes to AME states. \end{abstract}
\section{Introduction} Entanglement has been a hot topic since the beginning of quantum mechanics and fueled a lot of discussions, among them most notably the Einstein-Podolsky-Rosen (EPR) paradox \cite{Einstein1935}, which finally led Bell to come up with a method of actually measuring entanglement \cite{Bell1964}. It was not until the advent of quantum information, however, that entanglement was recognized as a useful resource. Almost all applications in quantum information make either explicit or implicit use of entanglement, which makes it crucial to gain as much insight as possible. \cite{Horodecki2009}
While the entanglement of bipartite states is already very well understood \cite{Bennett1996a, Nielsen1999, Vidal1999}, the road to its generalization to more than two parties is paved with many obstacles. Therefore we often have to restrict ourselves to special cases when analyzing multipartite entanglement. A prominent choice is to consider states that extremize a certain measure of entanglement. In this paper we want to do that by focusing on \emph{absolutely maximally entangled (AME)} states, which are defined as states that are maximally entangled for any possible bipartition. \cite{Helwig2012, Gisin1998, Gour2010}
\begin{definition} \label{def:AME}
An absolutely maximally entangled state is a pure state, shared among $n$ parties $P=\{1,\ldots,n\}$, each having a system of dimension $d$. Hence $\ket{\Phi} \in \ensuremath{\mathcal{H}} _1 \otimes \cdots \otimes \ensuremath{\mathcal{H}} _n$, where $\ensuremath{\mathcal{H}} _i \cong \ensuremath{\mathbb{C}} ^d$, with the following equivalent properties:
\begin{enumerate}[(i)]
\item $\ket{\Phi}$ is maximally entangled for any possible bipartition. This means that for any bipartition of $P$ into disjoint sets $A$ and $B$ with $A\cup B = P$ and, without loss of generality, $m=|B|\leq |A|=n-m$, the state $\ket{\Phi}$ can be written in the form
\begin{equation}
\label{eq:defAMEstate}
\ket\Phi =
\frac{1}{\sqrt{d^m}}\sum_{k\in \ensuremath{\mathbb{Z}} _d^{m}}
\ket{k_1}_{B_1}\cdots \ket{k_{m}}_{B_{m}}
\ket{\phi(k)}_A,
\end{equation}
with $\braket{\phi(k)|\phi(k')} = \delta_{kk'}$.
\item The reduced density matrix of every subset of parties $A\subset P$ with $|A| = \floor{\frac{n}{2}}$ is totally mixed, $\rho_A = d^{-\floor{\frac{n}{2}}} \mathbbm{1}_{d^{\floor{\frac{n}{2}}}}$.
\item The reduced density matrix of every subset of parties $A\subset P$ with $|A|\leq \frac{n}{2}$ is totally mixed.
\item The von Neumann entropy of every subset of parties $A\subset P$ with $|A| = \floor{\frac{n}{2}}$ is maximal, $S(A) = \floor{\frac{n}{2}} \log d$.
\item The von Neumann entropy of every subset of parties $A\subset P$ with $|A| \leq \frac{n}{2}$ is maximal, $S(A) = |A| \log d$. \label{def:AMEentropy}
\end{enumerate}
These are all necessary and sufficient conditions for a state to be absolutely maximally entangled. We denote such a state as an \AME{n,d} state. \end{definition}
The simplest examples of AME states occur for low dimensional systems shared among few parties. Starting with qubits, the most obvious one is an EPR pair, which is maximally entangled for its only possible bipartition. For three qubits shared among three parties, we can recognize the GHZ state as an AME state. It is maximally entangled, with 1 ebit of entanglement with respect to every bipartition. For four qubits, there is no obvious candidate, and in fact it has been shown that for four qubits no AME state exists \cite{Gour2010}. We can still find an absolutely maximally entangled state for four parties, however, by increasing the dimensions of the involved systems. An \AME{4,3} state for four \emph{qutrits} shared among four parties exists, and it is given by \cite{Helwig2012} \begin{gather}
\label{eq:popescuestate}
\begin{split}
\ket{\Phi} &= \frac{1}{\sqrt{9}} \sum_{i,j=0}^2 \ket{i} \ket{j} \ket{i+j} \ket{i+2j}.
\end{split} \end{gather} This is the first indicator that the search for AME states gets more promising as we increase the dimensions of the systems.
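As a quick numerical illustration (an aside, not needed for the rest of the paper), the defining property of the state in Equation~\eqref{eq:popescuestate} can be checked directly: every two-party reduced density matrix is maximally mixed. The following short script (a sketch; the variable names are ours) performs this check.
\begin{verbatim}
# Check numerically that |Phi> = (1/3) sum_{i,j} |i, j, i+j, i+2j> (mod 3)
# is an AME(4,3) state: all two-party marginals equal identity / 9.
import itertools
import numpy as np

d, n = 3, 4
psi = np.zeros([d] * n, dtype=complex)
for i, j in itertools.product(range(d), repeat=2):
    psi[i, j, (i + j) % d, (i + 2 * j) % d] = 1 / 3   # amplitude 1/sqrt(9)

for A in itertools.combinations(range(n), 2):          # all 2-vs-2 bipartitions
    B = tuple(k for k in range(n) if k not in A)
    M = np.transpose(psi, A + B).reshape(d**2, d**2)   # rows: parties in A
    rho_A = M @ M.conj().T                             # reduced state on A
    assert np.allclose(rho_A, np.eye(d**2) / d**2), A

print("Every two-party marginal is maximally mixed.")
\end{verbatim}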
Completing the characterization of AME states for qubits, it is known that AME states exist for 5 and 6 qubits. Explicit forms for them are given in Ref.~\cite{Helwig2012}, and it turns out that they are closely related to the five-qubit error correction code. For 7 qubits, it is still not known if an AME state exists, whereas for $\geq 8$ qubits, it has been shown that no AME states can exist \cite{Gour2010, Rains1999}.
In Ref.~\cite{Helwig2012}, we showed how AME states can be used for parallel teleportation protocols. In these protocols, the parties are divided into a set of senders and a set of receivers. One of the two sets is given the ability to perform joint quantum operations, while players in the other set can only perform local quantum operations. Under these conditions, a parallel teleportation of multiple quantum states is possible if the set that performs joint quantum operations is larger than the other set. A closer look at these teleportation scenarios then led to the observation that any AME state shared by an even number of parties can be used to construct a threshold quantum secret sharing (QSS) scheme \cite{Cleve1999, Gottesman2000, Imai2005}. The opposite direction was also shown, with one additional condition imposed on the QSS scheme, namely that the shared state that encodes the secret is already an AME state.
In this paper, we will give an information-theoretic proof of this equivalence of AME states and threshold QSS schemes, which shows that the additional condition is not required. We will rather see that it is satisfied for all threshold QSS schemes. We will further give a recipe for constructing AME states from classical codes that satisfy the Singleton bound \cite{Singleton1964}. This construction can be used to produce AME states for a wide class of parameters, and it even proves that AME states exist for any number of parties for an appropriate system dimension, a result that could also be deduced from the equivalence of AME states and QSS schemes and a known construction for threshold QSS schemes \cite{Cleve1999}. We will then show more applications for AME states. The first is the construction of a wider class of QSS schemes, the \emph{ramp} QSS schemes, of which threshold QSS schemes are a special case. The next one is the utilization of AME states as resources for open-destination teleportation protocols \cite{Zhao2004}. Finally, we investigate to what extent entanglement can be swapped between two AME states.
This paper is structured as follows. In Section~\ref{section:mds}, we show how AME states can be constructed from classical codes, which also shows the existence of AME states for any number of parties. In Section~\ref{section:AME-QSS}, we establish an equivalence between even party AME states and threshold QSS schemes, using an information-theoretic approach to QSS schemes. Section~\ref{section:muitisecret} shows how to share multiple secrets using AME states. In Section~\ref{section:openteleport}, we show that AME states can be used for open-destination teleportation. After that, swapping of AME states is investigated in Section~\ref{section:swapping}.
\section{Constructing AME States from Classical MDS Codes} \label{section:mds} There is a subclass of AME($n$,$d$) states that can be constructed from optimal classical error correction codes. A classical code \ensuremath{\mathcal{C}} consists of $M$ codewords of length $n$ over an alphabet $\Sigma$ of size $d$. For our purposes, the alphabet is going to be $\Sigma=\ensuremath{\mathbb{Z}} _d$ and thus $\ensuremath{\mathcal{C}} \subset \ensuremath{\mathbb{Z}} _d^n$. The \emph{Hamming distance} between two codewords is defined as the number of positions in which they differ, and the minimal distance $\delta$ of the code \ensuremath{\mathcal{C}} as the minimal Hamming distance between any two codewords. For a given length $n$ and minimal distance $\delta$, the number of codewords $M$ in the code is bounded by the \emph{Singleton bound} \cite{Singleton1964, MacWilliams1977} \begin{equation}
\label{eq:Singleton}
M \leq d^{n-\delta+1}. \end{equation} Codes that satisfy the Singleton bound are referred to as maximum-distance separable (MDS) codes. They can be used to construct AME states:
\begin{subtheorem}
\label{theorem:MDS}
From a classical MDS code $\ensuremath{\mathcal{C}} \subset \ensuremath{\mathbb{Z}} _d^{2m}$ of length $2m$ and minimal distance $\delta = m+1$ over an alphabet $\ensuremath{\mathbb{Z}} _d$, an \AME{2m,d} state can be constructed as
\begin{align}
\label{eq:MDSstate}
\ket{\textrm{AME}}
&=\frac{1}{\sqrt{d^{m}}}\sum_{c\in \ensuremath{\mathcal{C}} } \ket{c}\\
&= \frac{1}{\sqrt{d^{m}}}\sum_{c\in \ensuremath{\mathcal{C}} }
\ket{c_1}_1 \cdots \ket{c_m}_m \ket{c_{m+1}}_{m+1} \cdots \ket{c_{2m}}_{2m}.
\end{align} \end{subtheorem} \begin{proof}
The code \ensuremath{\mathcal{C}} satisfies the Singleton bound, which means the sum contains a total of $M=d^{2m-\delta+1}=d^m$ terms. Furthermore, any two of these terms differ in at least one of the first $m$ kets because the code has minimal distance $\delta = m+1$. Hence the sum contains each possible combination of the first $m$ basis kets exactly once. Moreover, for any two different terms, the last $m$ kets must also differ in at least one ket and are thus orthogonal. This means the state has the form of Equation~\eqref{eq:defAMEstate} with respect to the bipartition into the first $m$ and last $m$ parties. The same argument works for any other bipartition into two sets of size $m$, hence the state is absolutely maximally entangled. \end{proof}
An analogous argument shows that a similar construction for an odd number of parties results in an AME state. \begin{subtheorem}
\label{theorem:MDSodd}
From a classical MDS code $\ensuremath{\mathcal{C}} \subset \ensuremath{\mathbb{Z}} _d^{2m+1}$ of length $2m+1$ and minimal distance $\delta = m+2$ over an alphabet $\ensuremath{\mathbb{Z}} _d$, an \AME{2m+1,d} state can be constructed as
\begin{align}
\label{eq:MDSstateodd}
\ket{\textrm{AME}}
&=\frac{1}{\sqrt{d^{m}}}\sum_{c\in \ensuremath{\mathcal{C}} } \ket{c}\\
&= \frac{1}{\sqrt{d^{m}}}\sum_{c\in \ensuremath{\mathcal{C}} }
\ket{c_1}_1 \cdots \ket{c_{m+1}}_{m+1} \ket{c_{m+2}}_{m+2} \cdots \ket{c_{2m+1}}_{2m+1}.
\end{align} \end{subtheorem} \begin{proof}
The code contains $M=d^{m}$ codewords. Any two of the corresponding terms differ in at least one of the first $m+1$ kets and in at least one of the last $m$ kets. Thus, with the same argument as above, this is an AME state. \end{proof}
Trivial states of that form are $d$-dimensional EPR states, which are represented by the code with codewords $00, 11, \ldots, (d-1)(d-1)$. This code has $n=2$, $\delta = 2$, $M=d^1$. For $n=3$, we can find the GHZ states for arbitrary dimensions, which can be constructed from the code $000, 111, \ldots, (d-1)(d-1)(d-1)$, which has $\delta = 3$ and $M=d^1$. As already mentioned in the introduction, for $n=4$ no AME state exists for $d=2$, however for $d=3$ the \AME{4,3} state given in Equation~\eqref{eq:popescuestate} can also be constructed from an MDS code, the $[4,2,3]_3$ ternary Hamming code.
A wide class of MDS codes is given by the Reed-Solomon codes and their generalizations \cite{Reed1960, MacWilliams1977, Seroussi1986}, which give MDS codes for $n=d-1$, $n=d$, and $n=d+1$, for $d=p^x$ being a positive power of a prime number $p$. From the Reed-Solomon codes, MDS codes can also be constructed for $n<d-1$ \cite{Singleton1964}. This shows that AME states exist for any number of parties if the system dimensions are chosen appropriately.
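To make the construction concrete, the following small script (a sketch, not part of the original text; it assumes a prime dimension so that arithmetic modulo $p$ is a field) builds the evaluation code underlying a Reed-Solomon-type $[2m,m,m+1]$ MDS code and checks that the Singleton bound holds with equality, here for $p=5$ and $m=2$; by Theorem~\ref{theorem:MDS}, superposing the codewords as in Equation~\eqref{eq:MDSstate} then yields an \AME{4,5} state.
\begin{verbatim}
# Build a [4,2,3]_5 MDS code by evaluating all polynomials of degree < m
# at 2m distinct points of Z_5, then check |C| = 5^m and minimum distance m+1.
import itertools

p, m = 5, 2
points = range(2 * m)                  # 2m distinct evaluation points in Z_p

def codeword(coeffs):
    return tuple(sum(c * x**k for k, c in enumerate(coeffs)) % p
                 for x in points)

code = [codeword(c) for c in itertools.product(range(p), repeat=m)]

def hamming(a, b):
    return sum(x != y for x, y in zip(a, b))

dmin = min(hamming(a, b) for a, b in itertools.combinations(code, 2))
assert len(code) == p**m and dmin == m + 1
print(len(code), "codewords, minimum distance", dmin)
\end{verbatim}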
At this point we would like to mention that after posting a preliminary version of our last paper on this subject \cite{Helwig2012}, it was brought to our attention by Gerardo Adesso that the results of this section had already been discovered by Ashish Thapliyal and coworkers, and were presented at a conference in 2003 \cite{Thapliyal2003}, but remained unpublished.
\section{Equivalence of AME states and QSS schemes} \label{section:AME-QSS} In Ref.~\cite{Helwig2012}, we showed that \AME{2m,d} states, i.e., AME states shared between an even number of parties, are equivalent to pure state threshold quantum secret sharing (QSS) schemes that have AME states as basis states and whose share and secret dimensions are equal to $d$. Here we will give an information-theoretic proof of this equivalence, which shows that the requirement that the basis states of the QSS scheme are AME states is redundant, as it follows from this proof that these states are always absolutely maximally entangled. Before stating the theorem and the proof, we give a short motivation for why AME states and QSS schemes are related.
Consider an \AME{2m,d} state shared among an even number of parties. If we take any bipartition into two sets of parties $A$ and $B$, each of size $m$, a $d^m$ dimensional state can be teleported from one set to the other due to the maximal entanglement between $A$ and $B$. Moreover, we have shown in Ref.~\cite{Helwig2012} that the teleportation can be performed in such a way that each party in the sending set $B$ performs a local teleportation operation on their qudit, while the parties in the receiving set $A$ perform a joint quantum operation to recover all $m$ teleported qudits. This is depicted in Figure~\ref{fig:teleport} for the case of $m=4$. This also works if only one party in $B$, which we call the \emph{dealer} $D$, performs the teleportation operation, while the others do nothing. Then the teleported $d$-dimensional state can still be recovered by the players in set $A$. Furthermore, this also works for any other bipartition into sets $A'$ and $B'$ of size $m$, with $D\in B'$, without changing the teleportation operation $D$ has to perform, but now the parties in $A'$ can recover the teleported state (see Figure~\ref{fig:ameqss}). This means that any set with $m$ parties can recover the state. Moreover, the no-cloning theorem guarantees that the complement of a set that can recover the state has no information about the state. Hence all sets with fewer than $m$ parties cannot gain any information about the state. These, however, are exactly the requirements for a threshold QSS scheme; therefore, we have constructed a \QSS{m}{2m-1} from the \AME{2m,d} state. To formally show this, and moreover that it also works in the opposite direction, meaning that a \QSS{m}{2m-1} is always related to an \AME{2m,d} state, we will use the information-theoretic description of QSS schemes as introduced in Ref.~\cite{Imai2005}.
\begin{figure}
\caption{(Color online) Parties in $B$ (green) perform local teleportation operations, parties in $A$ (red) can recover teleported states by performing a joint quantum operation}
\label{fig:teleport}
\end{figure}
\begin{figure}
\caption{(Color online) After $D$ (blue) performs her teleportation operation, any set of $m$ parties (red), $A$, $A'$, $A''$ etc., can recover the teleported state. Any set of parties with $m-1$ or less parties (any set consisting only of green parties) cannot gain any information about the teleported state.}
\label{fig:ameqss}
\end{figure}
Let us quickly review the framework for a pure state \QSS{m}{2m-1} \cite{Cleve1999}. A secret $S$ is distributed among the players $P=\{1,\ldots,2m-1\}$ such that any set $A\subseteq P$ with $|A| \geq m$ can recover the secret, while any set $B \subset P$ with $|B| < m$ cannot gain any information about the secret. We further only consider the case where the dimension $d$ of the secret is the same as the dimension of each player's share.
The secret is assumed to lie in the Hilbert space $\ensuremath{\mathcal{H}} _S \cong \ensuremath{\mathbb{C}} ^d$, and the share of party $i$ in $\ensuremath{\mathcal{H}} _i \cong \ensuremath{\mathbb{C}} ^d$. The encoding is described by an isometry \begin{equation}
\label{eq:encoding}
U_S: \ensuremath{\mathcal{H}} _S \rightarrow \ensuremath{\mathcal{H}} _1 \otimes \dots \otimes \ensuremath{\mathcal{H}} _{2m-1}. \end{equation} The secret $S$ is chosen randomly and thus is described by $\rho_S = 1/d \sum_i \ket{i}\bra{i}$. We consider its purification by introducing a reference system $R$ such that $\ket{RS} = 1/\sqrt{d} \sum_i \ket{i}\ket{i} \in \ensuremath{\mathcal{H}} _R \otimes \ensuremath{\mathcal{H}} _S$. Let $\rho_{RA}$ denote the combined state of the reference system and a set of players $A\subseteq P$ after $U_S$ has been applied to the secret. Then the players $A$ can recover the secret, if there exists a completely positive map $T_A: \ensuremath{\mathcal{H}} _A \rightarrow \ensuremath{\mathcal{H}} _S$ such that \cite{Imai2005, Schumacher1996} \begin{equation}
\label{eq:RA_recover}
\mathbbm{1}_R \otimes T_A (\rho_{RA}) = \ket{RS}\bra{RS}. \end{equation} This can be stated in terms of the mutual information \begin{equation}
\label{eq:mutualinfo}
I(X:Y) = S(X) + S(Y) - S(X,Y) \end{equation} as follows: \begin{definition}
An isometry $U_S: \ensuremath{\mathcal{H}} _S \rightarrow \ensuremath{\mathcal{H}} _1 \otimes \dots \otimes \ensuremath{\mathcal{H}} _{2m-1}$ creates a \QSS{m}{2m-1} if and only if, after applying to the system $S$ of the purification $\ket{RS}$, the mutual information between $R$ and an authorized (unauthorized) set of players $A$ ($B$) satisfies \begin{align}
\label{eq:QSSconditionA}
I(R:A) &= I(R:S) = 2S(S) && \text{if } |A| \geq m\\
\label{eq:QSSconditionB}
I(R:B) &= 0 && \text{if } |B| < m. \end{align} \end{definition} Here $S$ is the von Neumann entropy. Because $S(i)\geq S(S)$ \cite{Imai2005}, while $S(i)\leq\log d=S(S)$ (each share has dimension $d$ and the secret is uniformly distributed), we have \begin{equation}
\label{eq:secretentropy}
S(S) = S(R) = S(i) = \log d. \end{equation} From \cref{eq:mutualinfo,eq:QSSconditionA,eq:QSSconditionB} it immediately follows that \begin{align}
\label{eq:entropyRA}
S(R,A) &= S(A) - S(R) \quad \text{if } |A| \geq m\\
\label{eq:entropyRB}
S(R,B) &= S(B) + S(R) \quad \text{if } |B| < m. \end{align}
\begin{theorem}
\label{theorem:AME-QSS} For a state $\ket{\Phi}$ the following two properties are equivalent: \begin{enumerate}[(i)]
\item $\ket{\Phi}$ is an \AME{2m,d} state.
\item $\ket{\Phi}$ is the purification of a \QSS{m}{2m-1}, whose share and secret dimensions are $d$. \end{enumerate} \end{theorem} \begin{proof} $(i) \rightarrow (ii)$: We need to show that for an \AME{2m,d} state \cref{eq:QSSconditionA,eq:QSSconditionB} are satisfied, where $R$ can be any of the $2m$ parties. This follows directly from the definition of the mutual information, \cref{eq:mutualinfo}, and \cref{def:AME} (\ref{def:AMEentropy}).
$(ii)\rightarrow (i)$:
Consider an unauthorized set of players $B$, with $|B| = m-1$. Then the set $B \cup i$ is authorized for any additional player $i \notin B$, and from Equation~\eqref{eq:entropyRA} we have \begin{equation}
S(B,i,R) = S(B,i) - S(R) \end{equation} On the other hand, using the Araki-Lieb inequality \cite{Nielsen2000} $S(X,Y) \geq S(X) - S(Y)$ and Equation~\eqref{eq:entropyRB} gives \begin{equation}
S(B,i,R) \geq S(B,R) - S(i) = S(B) + S(R) - S(i). \end{equation} Combining the last two equations and using $S(S)=S(R)=S(i)$ shows \begin{equation}
S(B,i) \geq S(B) + S(i), \end{equation} where equality must hold due to the subadditivity of the entropy $S(X,Y) \leq S(X) + S(Y)$. This means that the entropy increases maximally when adding one player's share to $m-1$ shares. The strong subadditivity of the entropy \cite{Nielsen2000} \begin{equation}
S(X,Y) - S(Y) \geq S(X,Y,Z) - S(Y,Z) \end{equation} states that adding one system $X$ to a system $Y$ increases the entropy at least by as much as adding the system $X$ to a larger system $Y \cup Z$ that contains $Y$. So in our case, adding one share to less than $m-1$ shares increases the entropy by at least $S(i)$, and since this is the maximum, it increases the entropy exactly by $S(i)$. Hence, starting out with a set of no shares, and repeatedly adding one share to the set until the set contains any $m$ shares and is authorized, shows that any set of $m$ shares has entropy $m S(i)$. This shows that the entropy is maximal for any subset of $m$ parties and thus $\ket{\Phi}$ is an \AME{2m,d} state. \end{proof}
\begin{corollary} The encoded state $U_S \ket{S}$ of a specific secret $\ket{S}$ with a $((m,2m-1))$ threshold QSS protocol with share and secret dimension $d$ is an \AME{2m-1,d} state. \end{corollary}
\section{Sharing multiple secrets} \label{section:muitisecret} In the previous section, we outlined how an AME state can be used to construct a QSS scheme. The role of the dealer is assigned to one of the parties and he performs a teleportation operation on his qudit, which encodes the teleported qudit onto the qudits of the remaining parties such that the criteria for a QSS scheme are met. While Theorem~\ref{theorem:AME-QSS} shows the equivalence of AME states and QSS schemes, the actual protocol for the encoding and decoding operations has been presented in Ref.~\cite{Helwig2012}. Note that in the described scenario, the role of the dealer can be assigned to any player. Thus one may ask what happens if more than one of the players assumes the role of the dealer. The answer is that, given an \AME{2m, d} state, up to $m$ players are able to independently encode one qudit each onto the qudits of the remaining players in a way that results in a QSS scheme with a more general access structure.
For a secret sharing scheme with a general access structure, each set of players falls into one of three categories \cite{Iwamoto2005, Gheorghiu2012}. \begin{enumerate}
\item \emph{Authorized}: A set of players is authorized, if it can recover the secret
\item \emph{Forbidden}: A set of players is called a forbidden set, if the players cannot gain any information about the encoded secret
\item \emph{Intermediate}: A set of players is classified as an intermediate set, if they cannot recover the secret, but may be able to gain part of the information. This means that the reduced density matrix of that set of players depends on the encoded secret, but not enough to recover the secret. \end{enumerate}
A special kind of access structure is a $(m,L,n)$ \emph{ramp secret sharing scheme} \cite{Blakley1984}. Here $n$ is the total number of players, $m$ is the number of players needed to recover the secret, and $L$ is the number of shares that have to be removed from a minimal authorized set to destroy all information about the secret. In terms of the above defined set categories that means that any set of $m$ or more players is authorized, any set of $m-L$ or less players is forbidden, and any set consisting of more than $m-L$, but less than $m$ players is an intermediate set. This is the access structure we get from an \AME{2m,d} state if more than one party assumes the role of the dealer.
\begin{theorem}
\label{theorem:multisecrets}
Given an \AME{2m,d} state, a QSS scheme with secret dimension $d^L$ and a $(m,L,2m-L)$ ramp access structure can be constructed for all $1\leq L \leq m$. \end{theorem} \begin{proof}
The encoding of the secret is done by assigning the role of dealer to $L$ of the $2m$ players. For simplicity we choose them to be the first $L$ players. Each of them performs a Bell measurement on their respective qudit of the AME state and one qudit of the secret. The Bell measurement is described by the general $d$-dim Bell states $\ket{\Psi_{kl}}$ and the unitaries $U_{kl}$ that transform among them \cite{Bennett1993} \begin{align}
\label{eq:Bell}
\ket{\Psi_{qp}} &= \frac{1}{\sqrt{d}}\sum_j e^{2\pi ijq/d} \ket{j}\ket{j+p}\\
\label{eq:BellU}
U_{qp} &= \sum_j e^{2\pi ijq/d} \ket{j}\bra{j+p}, \end{align} where the kets are understood to be mod $d$. For a secret $\ket{s}$ and outcomes $(q_1,p_1) \ldots (q_L,p_L)$ for the Bell measurement of the dealers, the initial \AME{2m,d} state is transformed to \begin{equation}
\label{eq:PhiS}
\ket{\Phi_S} = \frac{1}{\sqrt{d^{m-L}}}
\sum_{k\in \ensuremath{\mathbb{Z}} _d^m} s_{\vv{q}\vv{p},k_1\cdots k_L}
\ket{k_{L+1}}_{B_1} \cdots \ket{k_m}_{B_{m-L}}
\ket{\phi(k)}_{A}. \end{equation} Here \begin{equation}
\label{eq:s}
s_{\vv{q}\vv{p},k_1\cdots k_L} =
\braket{k_1\cdots k_L|U_{q_1p_1}^{\dagger}\otimes \cdots \otimes U_{q_Lp_L}^{\dagger}|s}, \end{equation} and the partition of the remaining $2m-L$ parties into two sets $A$ and $B$ of size $m$ and $m-L$, respectively, is arbitrary. After obtaining their measurement outcomes, the dealers broadcast their results to all of the remaining players. This concludes the encoding process.
To show that any set of $m$ or more players is authorized, it suffices to show that set $A$ in Equation~\ref{eq:PhiS} can recover the secret. They can do so by applying the unitary operation \begin{align}
U = ( U_{q_1p_1} \otimes \cdots \otimes U_{q_Lp_L} \otimes \mathbbm{1} ) V
\intertext{with}
\label{eq:sortV}
V = \sum_{k\in \ensuremath{\mathbb{Z}} _d^m} \ket{k_1}\cdots\ket{k_m} \bra{\phi(k)}, \end{align} to their system. This changes the state to \begin{equation}
U \ket{\Phi_S} = \frac{1}{\sqrt{d^{m-L}}}
\sum_{(k_{L+1},\ldots,k_m)\in \ensuremath{\mathbb{Z}} _d^{m-L}}
\ket{k_{L+1}}_{B_1} \cdots \ket{k_m}_{B_{m-L}}
\ket{s}_{A'}
\ket{k_{L+1}}_{A_{L+1}} \cdots \ket{k_m}_{A_m} \end{equation} where $A' = \{ A_1, \ldots, A_L \}$. Thus the players in set $A$ have the secret in their possession. It immediately follows from the no-cloning theorem that $B$, and thus any set of size $m-L$ or less, cannot have any information about the secret since all information is located in the complement set. Alternatively, this also follows from the observation that the reduced density matrix of $B$ is always completely mixed, independent of the secret.
The last thing left to show is that all sets with more than $m-L$ but fewer than $m$ players are indeed intermediate sets. To see this, consider the case $L=1$, where a set $C$ of $m-1$ players is not authorized to recover the secret. If one more player in the complement of $C$ assumes the role of the dealer, the scheme changes to $L=2$. This operation does not change the fact that $C$ cannot recover the first secret, and thus $C$ is still not authorized for $L=2$. This argument can be continued to any other $1 < L \leq m$ by adding more dealers. Hence a set of $m-1$ (or fewer) players is not authorized to recover the secret for all values of $1\leq L \leq m$. That a set of more than $m-L$ players is not forbidden follows from the fact that information cannot be lost, and thus the complement of a forbidden set has to be authorized. However, we just argued that the complement of a set of more than $m-L$ players is not authorized (since it consists of fewer than $m$ players). Hence any set with more than $m-L$ and fewer than $m$ players is an intermediate set. \end{proof}
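The algebra of the generalized Bell states and unitaries in Equations~\eqref{eq:Bell} and \eqref{eq:BellU} is easy to check numerically. The following NumPy sketch is purely illustrative and not part of the argument; it verifies, for the arbitrarily chosen dimension $d=3$, that the $d^2$ Bell states form an orthonormal basis, that each $U_{qp}$ is unitary, and that $\mathbbm{1}\otimes U_{qp}$ maps any Bell state to another Bell state up to a phase.
\begin{verbatim}
import numpy as np

d = 3  # any qudit dimension will do

def bell_state(q, p):
    # |Psi_qp> = 1/sqrt(d) sum_k exp(2 pi i k q / d) |k>|k+p>, kets mod d
    psi = np.zeros(d * d, dtype=complex)
    for k in range(d):
        psi[k * d + (k + p) % d] = np.exp(2j * np.pi * k * q / d)
    return psi / np.sqrt(d)

def U(q, p):
    # U_qp = sum_k exp(2 pi i k q / d) |k><k+p|
    u = np.zeros((d, d), dtype=complex)
    for k in range(d):
        u[k, (k + p) % d] = np.exp(2j * np.pi * k * q / d)
    return u

basis = [bell_state(q, p) for q in range(d) for p in range(d)]

# the d^2 Bell states are orthonormal
gram = np.array([[np.vdot(v, w) for w in basis] for v in basis])
assert np.allclose(gram, np.eye(d * d))

# each U_qp is unitary and permutes the Bell basis up to a phase
for q in range(d):
    for p in range(d):
        u = U(q, p)
        assert np.allclose(u @ u.conj().T, np.eye(d))
        out = np.kron(np.eye(d), u) @ bell_state(1, 1)
        overlaps = np.abs(np.array([np.vdot(v, out) for v in basis]))
        assert np.isclose(overlaps.max(), 1.0)
\end{verbatim}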
A closer look at the proof shows that it is actually not necessary for the initial state to be maximally entangled with respect to every bipartition, but only with respect to those bipartitions for which all dealers are in the same set. In fact, we can generalize the proof of Theorem~\ref{theorem:AME-QSS} to the case of ramp QSS and show that this is a necessary and sufficient condition for the construction of $(m,L,2m-L)$ ramp QSS schemes.
\begin{theorem}
\label{EntQECC:theorem:AME-ramp} For a state $\ket{\Phi} \in \ensuremath{\mathcal{H}} _P \otimes \ensuremath{\mathcal{H}} _R$, shared between $2m-L$ players $P$, each holding a qudit, and $L$ reference qudits, the following two properties are equivalent: \begin{enumerate}[(i)]
\item $\ket{\Phi}$ is maximally entangled for any bipartition for which the $L$ reference qudits are in the same set.
\item $\ket{\Phi}$ is the purification of an $(m,L,2m-L)$ ramp QSS scheme. The encoded secret of the ramp QSS scheme has dimension $d^L$, and each share has dimension $d$. \end{enumerate} \end{theorem}
The proof is a straightforward generalization of the proof of Theorem~\ref{theorem:AME-QSS} and is provided in Appendix~\ref{appendix:ramp}.
\section{Open-destination teleportation} \label{section:openteleport} Given a state with as much entanglement as an AME state, it is natural to look for ways of using these resources in teleportation protocols. In Ref.~\cite{Helwig2012} we already showed how AME states can be used in two different teleportation scenarios that require either the sending or the receiving parties to perform joint quantum operations, while the other end only needs local quantum operations.
Another teleportation scenario that uses genuine multipartite entanglement, and has already been demonstrated experimentally \cite{Zhao2004}, is open-destination teleportation. In this scenario, a genuinely multipartite entangled state is shared between $n$ parties, each in possession of one qudit. One of the parties, the dealer, performs a teleportation operation on her qudit and an ancillary qudit in the state $\ket{\Phi}$. After this teleportation operation, the final destination of $\ket{\Phi}$ is still undecided, hence the name open-destination teleportation. The destination is decided upon in the next step, where a subset $A$ of the remaining parties $P$ performs a joint quantum operation on their qudits such that a player in $P\backslash A$ ends up with the state $\ket{\Phi}$ -- up to local operations that depend on the measurement outcomes of the dealer and of the parties in $A$. Here we want to show that open-destination teleportation can also be performed with AME states.
Assume that an \AME{n,d} state has been distributed among $n$ parties. One of the $n$ parties is assigned the role of the dealer. She performs a Bell measurement on her qudit and the secret $\ket{S} = \sum a_i \ket{i}$. This transforms the state to \begin{equation}
\ket{S} \ket{\Phi} \rightarrow
\ket{\Phi_S} = \frac{1}{\sqrt{d^{m-1}}}
\sum_{(k,i)\in \ensuremath{\mathbb{Z}} _d^m} a_{p q,i}
\ket{k_1}_{B_1} \cdots \ket{k_{m-1}}_{B_{m-1}}
\ket{\phi(k,i)}_A, \end{equation} where $pq$ labels the outcome of the Bell measurement and has to be made public. The remaining $n-1$ parties that share the resulting state have been divided into two sets $A$ and $B$ of size $\ceil{n/2}$ and $m-1 = \floor{n/2}-1$, respectively. Now, after the teleportation operation has been completed, the parties in set $A$ may choose one party $B_i \in B$ as the final destination for the state $\ket{S}$. Then, after performing the joint unitary operation of Equation~\eqref{eq:sortV} followed by a Bell measurement on qudits $A_i$ and $A_m$ with outcome $rs$, the party $B_i$ ends up with the state $\ket{\Phi}_{B_i} = U^\dagger_{rs} U^\dagger_{pq} \ket{S}$, which can be easily transformed to $\ket{S}$ if the measurement results $pq$ and $rs$ are known.
Note that with the parallel teleportation protocol introduced in Ref.~\cite{Helwig2012}, one of the parties in $A$ can also be chosen to receive the state $\ket{S}$. Thus, after the dealer's teleportation operation is completed, any set of size greater than or equal to $\ceil{n/2}$ can choose any of the remaining $n-1$ parties as the final destination of the teleportation.
\section{Swapping of AME states} \label{section:swapping}
Entanglement swapping \cite{Yurke1992} is a very useful tool for the application of entanglement in communication. By making a Bell measurement on Bob's side, two entangled states shared between Alice and Bob, and Bob and Charlie, respectively, can be transformed into an entangled state shared by Alice and Charlie. Employing this procedure in quantum repeaters \cite{Dur1999} allows entangled states to be used for long distance communications. In this section, we show to what extent a generalization of the entanglement swapping protocol can be constructed to allow swapping of entanglement between absolutely maximally entangled states shared between different parties.
Assume that parties $\{1, 2, \ldots, 2n\}$ share an \AME{2n,d} state, \begin{eqnarray} \ket{\Phi}_{1,\ldots, 2n}&=&\sum \ket{i_1\cdots i_n}_{1,\ldots, n}\ket{\phi(i_1, \ldots, i_n)}_{n+1,\ldots, 2n}\\
&=&\sum \ket{i_1 \cdots i_n}_{1,\ldots, n}U\ket{i_1\cdots i_n}_{n+1, \ldots, 2n}, \end{eqnarray} where $U$ is a unitary transformation with $U\ket{i_1\cdots i_n}=\ket{\phi(i_1, \ldots, i_n)}$.
Suppose parties $\{n+1, \ldots, 3n\}$ also share an \AME{2n,d} state \begin{eqnarray} \ket{\Phi}_{n+1,\ldots, 3n}
&=&\sum \ket{i_1 \cdots i_n}_{n+1, \ldots, 2n}U\ket{i_1\cdots i_n}_{2n+1,\ldots, 3n}. \end{eqnarray} Now each of the parties $\{n+1, \ldots, 2n\}$ performs a Bell measurement on their qudits from both AME states. Without loss of generality, we can assume the measurement result is $(q,p)=(0,0)$ (see Equation~\eqref{eq:Bell} for the notation), since other measurement outcomes produce the same state up to local transformations. Then the state shared by the parties $\{1, \ldots, n, 2n+1, \ldots, 3n\}$ becomes \begin{equation} \ket{\Phi}_{1, \ldots, n, 2n+1, \ldots, 3n}=\sum \ket{i_1\cdots i_n}_{1,\ldots, n}U^2\ket{i_1\cdots i_n}_{2n+1, \ldots, 3n} \end{equation} Consecutive applications of the above procedure gives the following lemma: \begin{lemma} \label{lemma:swapping} Suppose each group of parties $\{1, \ldots, 2n\}$, $\{n+1, \ldots, 3n\}$, $\cdots$, $\{mn+1, \ldots, (m+1)n\}$ shares an \AME{2n,d} state, \begin{equation} \ket{\Phi}=\sum \ket{i_1 \cdots i_n}U\ket{i_1\cdots i_n}. \end{equation} Then, if each of the parties $\{n+1, n+2, \ldots, mn\}$ performs a Bell measurement on their two qudits, the resulting state shared by the parties $\{1,\ldots, n, mn+1, \ldots, (m+1)n\}$ is locally equivalent to \begin{equation} \label{eq:swappinglemma} \ket{\Phi}_{1,\ldots, n, mn+1, \ldots, (m+1)n}=\sum \ket{i_1\cdots i_n}_{1, \ldots, n}U^m\ket{i_1\cdots i_n}_{mn+1, \ldots, (m+1)n} \end{equation} \end{lemma} \begin{proof}[Proof by induction] The case for $m=2$ is demonstrated in the above discussion already. If the lemma holds for $m$, for $m+1$ the two remaining states, after the parties $\{n+1, n+2, \ldots, mn\}$ performed their Bell measurements, are \begin{multline} \ket{\Phi}_{1,\ldots, n, mn+1, \ldots, (m+1)n}=\\ \sum \ket{i_1\cdots i_n}_{1, \ldots, n}U^m\ket{i_1\cdots i_n}_{mn+1, \ldots, (m+1)n} \end{multline} and \begin{multline} \ket{\Phi}_{mn+1, \ldots, (m+1)n,(m+1)n+1, \ldots, (m+2)n}=\\ \sum \ket{i_1\cdots i_n}_{mn+1, \ldots, (m+1)n}U\ket{i_1\cdots i_n}_{(m+1)n+1, \ldots, (m+2)n}. \end{multline} After the parties $\{mn+1, \ldots, (m+1)n\}$ all perform a Bell measurement, the state shared by $\{1, \ldots, n, (m+1)n+1, \ldots, (m+2)n\}$ becomes \begin{multline} \ket{\Phi}_{1,\ldots, n, (m+1)n+1, \ldots, (m+2)n}=\\ \sum \ket{i_1\cdots i_n}_{1, \ldots, n}U^{m+1}\ket{i_1\cdots i_n}_{(m+1)n+1, \ldots, (m+2)n}. \end{multline} \end{proof} The state in Equation~\eqref{eq:swappinglemma} is generally not an AME state, however, depending on the exact form of the unitary $U$, the resulting state can be absolutely maximally entangled again for certain $m$, as expressed in the following corollary. \begin{corollary}[Swapping of AME States] \label{corollary:swapping} Suppose each set of parties $\{1, \ldots, 2n\}$, $\{n+1, \ldots, 3n\}$, $\cdots$, $\{mn+1, \ldots, (m+1)n\}$ shares an \AME{2n,d} state, \begin{equation} \ket{\Phi}=\sum \ket{i_1 \cdots i_n}U\ket{i_1\cdots i_n}. \end{equation} If $U^m$ is locally unitary equivalent to $U$ up to some permutation of parties, then, by making a Bell measurement on each of the parties $(n+1, \ldots, mn)$, parties $(1, \ldots, n, mn+1, \ldots, (m+1)n)$ will share an \AME{2n,d} state. \end{corollary}
In the following we will show an example of AME swapping, the swapping of an \AME{4,3} state. As an application we will show that, unlike the EPR state, the \AME{4,3} state \begin{equation} \begin{split} \label{eq:swapping43state} \ket{\Phi} &= \ket{0000}+ \ket{0111}+ \ket{0222}\\
&+ \ket{1012}+ \ket{1120} + \ket{1201}\\
&+ \ket{2021}+ \ket{2102} + \ket{2210} \end{split} \end{equation} needs two steps of Bell measurements for the swapping to reproduce an \AME{4,3} state.
\begin{example} Assume we have three \AME{4,3} states, shared by the players $\{A, B, C, D\}$, $\{C, D, E, F\}$, and $\{E, F, G, H\}$, respectively. After $C$, $D$, $E$, and $F$ all perform a Bell measurement on their two qutrits, the parties $\{A, B, G, H\}$ will share an \AME{4,3} state. This is illustrated in Figure~\ref{fig:swapping}. \end{example}
\begin{figure}
\caption{Entanglement swapping between three \AME{4,3} states results in a new \AME{4,3} state between previously unentangled parties. Dotted circles indicate where Bell measurements have to be performed.}
\label{fig:swapping}
\end{figure}
\begin{proof}[Calculation] From Equation~\eqref{eq:swapping43state} we can see that the unitary $U$ of Lemma~\ref{lemma:swapping} is given by \begin{equation} \begin{split} U\ket{00}=\ket{00} \qquad U\ket{01}=\ket{11} \qquad U\ket{02}=\ket{22}\\ U\ket{10}=\ket{12} \qquad U\ket{11}=\ket{20} \qquad U\ket{12}=\ket{01}\\ U\ket{20}=\ket{21} \qquad U\ket{21}=\ket{02} \qquad U\ket{22}=\ket{10} \end{split} \end{equation} Applying this unitary twice gives \begin{equation} \begin{split} U^2\ket{00}=\ket{00} \qquad U^2\ket{01}=\ket{20} \qquad U^2\ket{02}=\ket{10}\\ U^2\ket{10}=\ket{01} \qquad U^2\ket{11}=\ket{21} \qquad U^2\ket{12}=\ket{11}\\ U^2\ket{20}=\ket{02} \qquad U^2\ket{21}=\ket{22} \qquad U^2\ket{22}=\ket{12} \end{split} \end{equation} It is easily seen that by permuting the two parties and then applying a unitary that exchanges $\ket{1}$ and $\ket{2}$ on the second party, $U^2$ becomes the identity. Thus $U^3$ is locally unitary equivalent to $U$ up to a permutation of parties, and together with Corollary~\ref{corollary:swapping} it follows that after the Bell measurements the resulting state is locally unitary equivalent to an \AME{4,3} state, up to a permutation of the parties $G$ and $H$. Since the \AME{4,3} state is permutation symmetric, in the sense that permuting any two parties again yields an \AME{4,3} state, the parties $A, B, G, H$ indeed share the swapped \AME{4,3} state. \renewcommand{\qedsymbol}{} \end{proof}
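These relations can also be checked mechanically. The following NumPy sketch is a purely illustrative verification aid and not part of the argument: it builds $U$ from Equation~\eqref{eq:swapping43state}, confirms that swapping the two parties and exchanging $\ket{1},\ket{2}$ on the second qutrit undoes $U^2$, and checks that the state $\sum_i\ket{i}\,U^m\ket{i}$ (up to normalization and the local corrections coming from the Bell measurement outcomes) is absolutely maximally entangled for $m=1$ and $m=3$ but not for $m=2$.
\begin{verbatim}
import numpy as np
from itertools import combinations

d = 3
# U read off Eq. (eq:swapping43state): U|ab> = |phi(a,b)>
table = {(0,0):(0,0), (0,1):(1,1), (0,2):(2,2),
         (1,0):(1,2), (1,1):(2,0), (1,2):(0,1),
         (2,0):(2,1), (2,1):(0,2), (2,2):(1,0)}

def two_qutrit_perm(mapping):
    m = np.zeros((d*d, d*d))
    for (a, b), (c, e) in mapping.items():
        m[c*d + e, a*d + b] = 1
    return m

U     = two_qutrit_perm(table)
SWAP  = two_qutrit_perm({(a, b): (b, a) for a in range(d) for b in range(d)})
sigma = np.array([[1,0,0],[0,0,1],[0,1,0]])   # exchanges |1> and |2>
local = np.kron(np.eye(d), sigma)             # sigma on the second qutrit only

# Swapping the parties and exchanging |1>,|2> on the second one undoes U^2,
# hence U^3 equals U composed with these operations.
assert np.allclose(local @ SWAP @ U @ U, np.eye(d*d))
assert np.allclose(np.linalg.matrix_power(U, 3), U @ SWAP @ local)

def swapped_state(m):
    # 1/d * sum_i |i> (U^m |i>): the swapped four-qutrit state, up to local
    # corrections from the Bell measurement outcomes
    Um = np.linalg.matrix_power(U, m)
    psi = sum(np.kron(np.eye(d*d)[:, i], Um[:, i]) for i in range(d*d))
    return psi / d

def is_ame(psi):
    # every two-qutrit marginal of a four-qutrit AME state is maximally mixed
    t = psi.reshape(d, d, d, d)
    for pair in combinations(range(4), 2):
        rest = tuple(k for k in range(4) if k not in pair)
        mat = np.transpose(t, pair + rest).reshape(d*d, d*d)
        rho = mat @ mat.conj().T
        if not np.allclose(rho, np.eye(d*d) / (d*d)):
            return False
    return True

assert is_ame(swapped_state(1))        # the original AME(4,3) state
assert not is_ame(swapped_state(2))    # one swapping step does not give AME
assert is_ame(swapped_state(3))        # two swapping steps do
\end{verbatim}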
\begin{remark} In the above example, we required party $C$ to act as the third party of the first AME state and the first party of the second AME state. This is actually not necessary. Since the \AME{4,3} state is permutationally invariant, we only need $C$ to possess one qudit of each AME state, and the same holds for $D$. In fact, most of the AME states we found are permutationally invariant, and in these cases no restriction on which specific qudits the parties control is needed. \end{remark}
\section{Conclusion} In this paper, we have shown the existence of AME states for a wide range of parameters; in particular, the derivation of AME states from classical MDS codes proves that AME states exist for any number of parties if the system dimension is chosen large enough. We have proven an equivalence between AME states and threshold quantum secret sharing schemes. By extending the idea of how threshold QSS schemes follow from the entanglement properties of AME states, we have shown that a wider class, namely ramp QSS schemes, can be constructed from AME states. The entanglement requirements for constructing ramp QSS schemes are over-satisfied by AME states, and we have proven the necessary and sufficient entanglement conditions for the construction of ramp QSS schemes.
Two more applications of AME states have been given in the form of open-destination teleportation and entanglement swapping of AME states. The quantum secret sharing and teleportation scenarios that have been presented here and in Ref.~\cite{Helwig2012} indicate that AME states can be used for a wide variety of quantum information protocols that involve the displacement of quantum states.
\section{Entanglement in Ramp QSS Schemes} \label{appendix:ramp} Here, we generalize the methods used in Theorem~\ref{theorem:AME-QSS} to prove the equivalence of AME states and threshold QSS schemes to the case of $(m,L,2m-L)$ ramp QSS schemes for arbitrary $L$. The generalization is straightforward: the secret dimension is now $d^L$ instead of $d$, which also changes the dimension of the reference system to $d^L$. We define an isometry $U_S$ that encodes the $d^L$-dimensional secret $S$ into a state shared by the $2m-L$ players, each holding a $d$-dimensional system, \begin{equation}
\label{EntQECC:eq:rampencoding}
U_S: \ensuremath{\mathcal{H}} _S \rightarrow \ensuremath{\mathcal{H}} _1 \otimes \dots \otimes \ensuremath{\mathcal{H}} _{2m-L}, \end{equation} where $\ensuremath{\mathcal{H}} _i \cong \ensuremath{\mathbb{C}} ^d$ and $\ensuremath{\mathcal{H}} _S \cong {\ensuremath{\mathbb{C}} ^{d^L}}$.
We further introduce a reference system $\ensuremath{\mathcal{H}} _R \cong \ensuremath{\mathcal{H}} _S$ and consider the state $\ket{\Phi}$ that is generated by applying the encoding operation to $\ensuremath{\mathcal{H}} _S$ for a maximally entangled state $\ket{RS} = 1/\sqrt{d^L} \sum_i \ket{i}\ket{i} \in \ensuremath{\mathcal{H}} _R \otimes \ensuremath{\mathcal{H}} _S$, i.e., $\ket{\Phi} = \mathbbm{1}_R \otimes U_S \ket{RS}$. A set of players $A \subset P$ shares the state $\rho_{RA} = \Tr_{P\backslash A} \ket{\Phi}\bra{\Phi}$ with the reference system. $A$ is authorized if there exists a completely positive map $T_A: \ensuremath{\mathcal{H}} _A \rightarrow \ensuremath{\mathcal{H}} _S$ such that \cite{Imai2005, Schumacher1996} \begin{equation}
\label{EntQECC:eq:rampRA_recover}
\mathbbm{1}_R \otimes T_A (\rho_{RA}) = \ket{RS}\bra{RS}. \end{equation}
The mutual information between an authorized set (i.e., $|A| \geq m$) and the reference system must satisfy \begin{equation}
\label{EntQECC:eq:rampQSSconditionA}
I(R:A) = I(R:S) = 2S(S) \quad \text{if } |A| \geq m, \end{equation} and for a forbidden set, we must have \begin{equation}
\label{EntQECC:eq:rampQSSconditionB}
I(R:B) = 0 \quad \text{if } |B| \leq m-L. \end{equation} $U_S$ defines an $(m,L,2m-L)$ ramp QSS scheme if and only if these two equations are satisfied.
Since any set of players $C \subset P$ with $|C| = L$ can change some forbidden set into an authorized set, we have $S(C) \geq S(S)$ \cite{Imai2005} for all sets of $L$ players. Because $S(S)$ is maximal and equal to $S(R)$, while $S(C)$ cannot exceed $L\log d$ for $L$ qudits, it follows that \begin{equation}
\label{EntQECC:eq:rampsecretentropy}
S(S) = S(R) = S(C) = L \log d. \end{equation} Equations~\eqref{EntQECC:eq:rampQSSconditionA} and \eqref{EntQECC:eq:rampQSSconditionB} can be rewritten to give \begin{align}
\label{EntQECC:eq:entropyRA}
S(R,A) &= S(A) - S(R) \quad \text{if } |A| \geq m\\
\label{EntQECC:eq:entropyRB}
S(R,B) &= S(B) + S(R) \quad \text{if } |B| \leq m-L. \end{align} This summarizes the modifications to the setup preceding Theorem~\ref{theorem:AME-QSS}, whose analogue for ramp QSS schemes we may now state and prove. For this we regard the reference system of dimension $d^L$ as consisting of $L$ systems, each of dimension $d$, so that $\ket{\Phi}$ is a state shared between $2m$ parties, namely the $2m-L$ players that share the secret and $L$ reference parties, each possessing a qudit.
\newtheorem*{thm:ramp}{Theorem~\ref{EntQECC:theorem:AME-ramp}} \begin{thm:ramp} For a state $\ket{\Phi} \in \ensuremath{\mathcal{H}} _P \otimes \ensuremath{\mathcal{H}} _R$, shared between $2m-L$ players $P$, each holding a qudit, and $L$ reference qudits, the following two properties are equivalent: \begin{enumerate}[(i)]
\item $\ket{\Phi}$ is maximally entangled for any bipartition for which the $L$ reference qudits are in the same set.
\item $\ket{\Phi}$ is the purification of an $(m,L,2m-L)$ ramp QSS scheme. The encoded secret of the ramp QSS scheme has dimension $d^L$, and each share has dimension $d$. \end{enumerate} \end{thm:ramp} \begin{proof} $(i) \rightarrow (ii)$:
In the equations for the mutual information, all occurring sets, $A$, $B$, $R$, $A \cup R$ and $B \cup R$, are maximally entangled with the rest because for all of them all reference qudits are in the same set of the bipartition. Hence we have $S(A) = (2m-|A|) \log d$, $S(B) = |B| \log d$, $S(R) = S(S) = L \log d$, $S(A,R) = (2m-|A|-L) \log d$ and $S(B,R) = (|B| + L) \log d$. Plugging these into Equations~\eqref{EntQECC:eq:rampQSSconditionA} and \eqref{EntQECC:eq:rampQSSconditionB} while using the definition of the mutual information (Equation~\ref{eq:mutualinfo}) confirms that both are satisfied.
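Explicitly, with $I(X{:}Y)=S(X)+S(Y)-S(X,Y)$ this reads \begin{align*} I(R:A) &= L\log d + (2m-|A|)\log d - (2m-|A|-L)\log d = 2L\log d = 2S(S),\\ I(R:B) &= L\log d + |B|\log d - (|B|+L)\log d = 0, \end{align*} which are exactly Equations~\eqref{EntQECC:eq:rampQSSconditionA} and \eqref{EntQECC:eq:rampQSSconditionB}.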
$(ii)\rightarrow (i)$:
Consider an unauthorized set of players $B$, with $|B| = m-L$. Then the set $B \cup C$ is authorized for any additional set $C$ with $|C| = L$ and $C \cap B = \emptyset$. From Equation~\eqref{EntQECC:eq:entropyRA} we have \begin{equation}
S(B,C,R) = S(B,C) - S(R) \end{equation} On the other hand, using the Araki-Lieb inequality \cite{Nielsen2000} $S(X,Y) \geq S(X) - S(Y)$ and Equation~\eqref{EntQECC:eq:entropyRB} gives \begin{equation}
S(B,C,R) \geq S(B,R) - S(C) = S(B) + S(R) - S(C). \end{equation} Combining the last two equations and using $S(S)=S(R)=S(C)$ shows \begin{equation}
S(B,C) \geq S(B) + S(C), \end{equation} where equality must hold due to the subadditivity of the entropy $S(X,Y) \leq S(X) + S(Y)$. This means that the entropy increases maximally when adding $L$ shares to $m-L$ shares. The strong subadditivity of the entropy \cite{Nielsen2000} \begin{equation}
S(X,Y) - S(Y) \geq S(X,Y,Z) - S(Y,Z) \end{equation} states that adding system $X$ to system $Y$ increases the entropy by at least as much as adding system $X$ to a larger system $Y \cup Z$ that contains $Y$. So in our case, adding $L$ shares to fewer than $m-L$ shares increases the entropy by at least $S(C)$, and since this is the maximum, it increases the entropy by exactly $S(C)$. Moving the shares over one by one from $C$ to a set of $m-L$ or fewer shares must increase the entropy maximally with each share for it to be maximally increased when all shares are added. Hence adding one share to a set that contains fewer than $m$ shares increases the entropy maximally. Hence, starting out with a set of no shares, and repeatedly adding one share to the set until the set contains any $m$ shares and is authorized, shows that any set of $m$ shares has entropy $m \log d$. This shows that the entropy is maximal for any subset of $m$ players, i.e., $\ket{\Phi}$ is maximally entangled for any bipartition into $m$ players $A$ and its complement $(P\backslash A) \cup R$, which contains all $L$ reference qudits, and thus is maximally entangled for any bipartition where all reference qudits are in the same set. \end{proof}
\end{document}
\begin{document}
\begin{abstract} It was shown by A. Beauville that if the canonical map $\varphi_{|K_M|}$ of a complex smooth projective surface $M$ is generically finite, then $\deg(\varphi_{|K_M|})\leq 36$. The first example of a surface with canonical degree 36 was found by the second author. In this article, we show that for any surface which is a degree four Galois \'etale cover of a fake projective plane $X$ with the largest possible automorphism group ${\rm Aut}(X)=C_7:C_3$ (the unique non-abelian group of order 21), the base locus of the canonical map is finite, and we verify that 35 of these surfaces have maximal canonical degree 36. We also classify all smooth degree four Galois \'etale covers of fake projective planes, which give possible candidates for surfaces of canonical degree $36$. Finally, we also confirm in this paper the optimal upper bound of the canonical degree of smooth threefolds of general type with sufficiently large geometric genus, related to earlier work of C. Hacon and J.-X. Cai.\end{abstract}
\maketitle
\section{Introduction}
Let $M$ be a smooth complex projective minimal surface of general type with $p_g(M)\neq0$. Assume that the canonical map, $$\varphi=\varphi_{|K_M|}:M\dashrightarrow W:=\overline{\varphi(M)}\subseteq\mathbb{P}^{p_g(M)-1}$$ is generically finite onto its image. We are interested in the \emph{canonical degree} of $M$, the degree of $\varphi$. If $\varphi$ is not generically finite, we simply say that $M$ has canonical degree zero. The following proposition was proved in \cite{B}, cf. \cite{Y1}. We include the proof here for completeness.
\begin{proposition}\label{Bea} Let $M$ be a minimal surface of general type whose canonical map $\varphi=\varphi_{|K_M|}$ is generically finite. Then $\deg \varphi\leq 36$. Moreover, $\deg\varphi=36$ if and only if $M$ is a smooth ball quotient $\mathbb{B}_\mathbb{C}^2/\Sigma$ with $p_g(M)=3$, $q(M)=0$, and $|K_M|$ is base point free. \end{proposition}
\begin{proof} Let $P$ be the mobile part of $|K_M|$. Let $S\rightarrow M$ be a resolution of $P$ and let $P_S$ be the induced base point free linear system defining $S\rightarrow W=\overline{\varphi(M)}$. Then \begin{align*} \deg\varphi\cdot(p_g-2)\leq\deg\varphi\cdot\deg W
= P_S^2\leq P^2\leq K_M^2
\leq9\chi(\mathcal{O}_M)\leq9(1+p_g). \end{align*} The first inequality is the degree bound for a non-degenerate surface in $\mathbb{P}^n$ given in \cite{B}, while the fourth inequality is the Bogomolov-Miyaoka-Yau inequality. Hence as $p_g\geq3$, we have \begin{align*} \deg\varphi\leq 9(\frac{1+p_g}{p_g-2})\leq36. \end{align*}
Moreover, since $9\,\frac{1+p_g}{p_g-2}<36$ once $p_g\geq4$, we can have $\deg\varphi=36$ only when $p_g(M)=3$, $q(M)=0$, and $P_S^2=P^2=K_M^2$. This is only possible when $|K_M|$ is base point free. In such a case,
$K_M^2=36=9\chi(\mathcal{O}_M)$ and hence $M$ is a smooth ball quotient $\mathbb{B}_\mathbb{C}^2/\Sigma$ by results of Aubin and Yau, cf. \cite{B} or \cite{BHPV}. \end{proof}
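In the extremal case the invariants are thus completely determined: $\chi(\mathcal{O}_M)=1+p_g(M)=4$, $K_M^2=9\chi(\mathcal{O}_M)=36$, the image $W$ has degree $p_g-2=1$ in $\mathbb{P}^{p_g-1}=\mathbb{P}^2$, so $W=\mathbb{P}^2$, and $\varphi$ is a morphism of degree $36$ onto $\mathbb{P}^2$.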
\noindent{\bf Notation.} Throughout this paper, we do not distinguish line bundles from divisors. The linear equivalence and numerical equivalence of divisors are written respectively as $D_1\sim D_2$ and $D_1\equiv D_2$. The cyclic group of order $n$ is denoted by $C_n$. The group $C_7:C_3$ is the unique non-abelian group of order 21. The projective space of dimension $n$ over $\mathbb{C}$ is denoted by $\mathbb{P}^n$. A finite field of order $n$ is denoted by $F_n$.
\vskip 0.2 cm
From Proposition \ref{Bea}, it is an interesting problem to know the geometric realization of possible canonical degrees and many surfaces with canonical degree at most $16$ have been constructed, see \cite{P} or \cite{DG} for more references. However, the first example of a surface with maximal canonical degree 36 was constructed only recently by \cite{Y1} as a suitably chosen $C_2\times C_2$-Galois cover of a special fake projective plane $X$. The fake projective plane $X$ in \cite{Y1} has ${\rm Aut}(X)=C_7:C_3$, and by \cite{LY} it satisfies $h^0(X,2L_X)=0$ for every ample generator $L_X$ of ${\rm NS}(X)$. The choice of the lattice for the ball quotient $M$ is explicitly described in \cite{Y1} via the classifying data of \cite{PY} and \cite{CS}.
Here are the main goals of this paper. The first goal is to construct more examples of surfaces with maximal canonical degree. This is given as Theorem \ref{main} below. Then we examine the corresponding question in complex dimension 3, given as Corollary \ref{3fold} below. A second goal is to identify all potential examples of surfaces of canonical degree 36 constructed as a degree four Galois \'etale cover of a fake projective plane. We prove that for these Galois covers the canonical maps have at worst discrete base locus whenever the underlying fake projective plane has the largest possible automorphism group $C_7:C_3$. This is given as Theorem \ref{iso} and Proposition \ref{all}. For the presentation of this paper, we start with Theorem \ref{iso} hoping that it would give the reader a more comprehensible overall picture.
We remark that our proof of Theorem \ref{main} is essentially independent of Theorem \ref{iso} and Proposition \ref{all}. A reader who is interested only in new surfaces of canonical degree $36$ may briefly go over statements in earlier sections and proceed directly to Section \ref{new} of the paper.
Recall that a fake projective plane is a ball quotient $X=\mathbb{B}_\mathbb{C}^2/\Pi$ for some lattice $\Pi\subseteq{\rm PU}(2,1)$, where $\Pi$ is constructed as a subgroup of a maximal arithmetic lattice $\overline \Gamma$. An unramified cover $M$ of $X$ is given by $\mathbb{B}^2_\mathbb{C}/\Sigma$ for a normal subgroup $\Sigma\lhd\Pi$ of finite index. For the sequence of Galois covers $$M:=B_{\mathbb{C}}^2/\Sigma\stackrel{p}\rightarrow X=B_{\mathbb{C}}^2/\Pi\stackrel{q}\rightarrow B_{\mathbb{C}}^2/\overline \Gamma$$
corresponding to the normal subgroups $\Sigma\lhd\Pi\lhd\overline \Gamma$, one has the covering group ${\rm Gal}(M/X)=\Pi/\Sigma$ and ${\rm Aut}(X)=\overline \Gamma/\Pi$. We focus on the case when $|{\rm Gal}(M/X)|=4$ and ${\rm Aut}(X)=C_7:C_3$. Our first theorem identifies potential examples of surfaces of canonical degree 36.
\begin{theorem}\label{iso} Let $M\rightarrow X$ be a degree four Galois \'etale cover over a fake projective plane $X$ with ${\rm Aut}(X)=C_7:C_3$. Then $q(M)=0$ and the base locus of the linear system $|K_M|$ is discrete. \end{theorem}
A degree four Galois \'etale cover $M\rightarrow X$ over a fake projective plane $X$ is determined by a quotient of $H_1(X,\mathbb{Z})$ of order four, to be explained in details in Lemma \ref{cover} of Section \ref{pre}. The degree of this cover is dictated by the possible existence of a surface of maximal canonical degree, i.e., $K_M^2/K_X^2=4$. There are many degree four covers of fake projective planes. For future reference, we classify all such surfaces. In the table below, only lattices of fake projective planes giving rise to Galois \'etale covers of degree four are listed, which is the case if there is a normal subgroup of index four in the lattice $\Pi$ corresponding to a given fake projective plane $X=\mathbb{B}^2_\mathbb{C}/\Pi$. This list of the fake projective planes follows the conventions in \cite{PY} and \cite{CS}. In the following table, we have \begin{enumerate}
\item column 1: $k$ is a totally real number field, $\ell$ is a totally imaginary extension of $k$, and $\mathcal T$ represents a finite number of places relevant to the classification. These are notations used to classify fake projective planes defined in \cite{PY};
\item column 2: the naming used in \cite{CS} for the classes of maximal arithmetic lattices containing fake projective planes, corresponding to $\overline{\Gamma}$ in the notation of \cite{PY}, where $a$ and $p$ are data from the first column;
\item column 3: the naming of the individual fake projective planes in each class used in \cite{CS};
\item column 4: $\mbox{Aut}(X)$ is the automorphism group of a fake projective plane $X$;
\item column 5: the first homology class of a fake projective plane $X$;
\item column 6: $N_0$ is the number of degree $4$ coverings of $X$, which is the number of subgroups of index four of the lattice $\Pi$;
\item column 7: $N_1$ denotes the number of normal coverings among the degree $4$ coverings above. \end{enumerate} All the examples in the last column satisfy $H_1(M,\mathbb{Q})=0$, which implies $q(M)=0$ since $b_1(M)=2q(M)$.
\vskip 0.2 cm
\begin{adjustbox}{center, rotate=0, nofloat, caption=[Table 1]}
$\begin{array}{|c|c|c|c|c|c|c|} \hline (k,\ell,\mathcal{T})&\mbox{class}&X&{\rm Aut}(X)&H_1(X,\mathbb{Z})&N_0&N_1\\ \hline\hline (\mathbb{Q},\mathbb{Q}(\sqrt{-1}),\{5\})&(a=1,p=5,\emptyset)&(a=1,p=5,\emptyset, D_3)&C_3&C_2\times C_4\times C_{31}&4&3\\ \cline{2-7} &(a=1,p=5,\{2\})&(a=1,p=5, \{2\},D_3)&C_3&C_4\times C_{31}&4&1\\ \cline{1-7} (\mathbb{Q},\mathbb{Q}(\sqrt{-1}),\{2,5\})&(a=1,p=5,\{2I\})&(a=1,p=5,\{2I\})&\{1\}&C_2\times C_3\times C_4^2&47&19\\ \hline (\mathbb{Q},\mathbb{Q}(\sqrt{-2}),\{3\})&(a=2,p=3,\emptyset)&(a=2,p=3,\emptyset, D_3)&C_3&C_2^2\times C_{13}&4&1\\ \cline{2-7} &(a=2,p=3,\{2\})&(a=2,p=3, \{2\},D_3))&C_3&C_2^2\times C_{13}&4&1\\ \cline{1-7} (\mathbb{Q},\mathbb{Q}(\sqrt{-2}),\{2,3\})&(a=2,p=3,\{2I\})&(a=2,p=3,\{2I\})&\{1\}&C_2^4\times C_3&83&35\\ \hline (\mathbb{Q},\mathbb{Q}(\sqrt{-7}),\{2\})&(a=7,p=2,\emptyset) &(a=7,p=2,\emptyset, D_3 2_7)&C_7:C_3&C_2^4&91&35\\ \cline{3-7} &&(a=7,p=2,\emptyset,7_{21})&\{1\}&C_2^2\times C_3\times C_7&3&1\\ \cline{2-7} &(a=7,p=2,\{7\})&(a=7,p=2,\{7\},D_3 2_7)&C_7:C_3&C_2^3&7&7\\ \cline{3-7} &&(a=7,p=2,\{7\},D_3 7'_7)&C_3&C_2^2\times C_7&2&1\\ \cline{3-7} &&(a=7,p=2,\{7\},7_{21})&\{1\}&C_2^3\times C_3&19&7\\ \hline (\mathbb{Q},\mathbb{Q}(\sqrt{-7}),\{2,3\})&(a=7,p=2,\{3\})&(a=7,p=2,\{3\},D_3)&C_3&C_2\times C_4\times C_7&4&3\\ \cline{3-7} &&(a=7,p=2,\{3\},3_3)&\{1\}&C_2^2\times C_3\times C_4&19&11\\ \cline{2-7} &(a=7,p=2,\{3,7\})&(a=7,p=2,\{3,7\},D_3)&C_3&C_4\times C_7&2&1\\ \cline{3-7} &&(a=7,p=2,\{3,7\},3_3)&\{1\}&C_2\times C_3\times C_4&7&3\\ \hline (\mathbb{Q},\mathbb{Q}(\sqrt{-7}),\{2,5\})&(a=7,p=2,\{5\})&(a=7,p=2,\{5\})&\{1\}&C_2^2\times C_9&3&1\\ \cline{1-7} (\mathbb{Q},\mathbb{Q}(\sqrt{-15}),\{2\})&(a=15,p=2,\emptyset)&(a=15,p=2,\emptyset,D_3)&C_3&C_2^2\times C_7&2&1\\ \cline{3-7} &&(a=15,p=2,\emptyset,3_3)&\{1\}&C_2^3\times C_9&11&7\\ \cline{2-7} &(a=15,p=2,\{3\})&(a=15,p=2,\{3\},3_3)&C_3&C_2^3\times C_3&19&7\\ \cline{2-7} &(a=15,p=2,\{5\})&(a=15,p=2,\{5\},3_3)&\{1\}&C_2^2\times C_9&3&1\\ \cline{2-7} &(a=15,p=2,\{3,5\})&(a=15,p=2,\{3,5\},3_3)&C_3&C_2^2\times C_3&1&1\\ \hline (\mathcal{C}_{18},\{v_3\})&(\mathcal{C}_{18},p=3,\emptyset)&(\mathcal{C}_{18},p=3,\emptyset,d_3 D_3)&C_3\times C_3&C_2^2\times C_{13}&1&1\\ \hline (\mathcal{C}_{20},\{v_2\})&(\mathcal{C}_{20},\{v_2\},\emptyset)&(\mathcal{C}_{20},\{v_2\},\emptyset,D_3 2_7)&C_7:C_3&C_2^6&651&651\\ \cline{2-7} &(\mathcal{C}_{20},\{v_2\},\{3+\})&(\mathcal{C}_{20},\{v_2\},\{3+\},D_3)&C_3&C_4\times C_7&2&1\\ \cline{3-7} &&(\mathcal{C}_{20},\{v_2\},\{3+\},\{3+\}_3)&\{1\}&C_2\times C_3\times C_4&7&3\\ \cline{2-7} &(\mathcal{C}_{20},\{v_2\},\{3-\})&(\mathcal{C}_{20},\{v_2\},\{3-\},D_3)&C_3&C_4\times C_7&2&1\\ \cline{3-7} &&(\mathcal{C}_{20},\{v_2\},\{3-\},\{3-\}_3)&\{1\}&C_2\times C_3\times C_4&7&3 \\ \hline \end{array}$ \end{adjustbox} \vskip 0.2cm \begin{center} {\sc Table 1} \end{center}
\begin{proposition}\label{all} There are altogether $835$ lattices which give rise to $1670$ non-biholomorphic smooth minimal surfaces as degree four Galois \'etale covers of fake projective planes with $q(M)=0$. \end{proposition}
From Table 1, there are 35 degree four Galois \'etale covers of the fake projective plane $(a=7, p=2,\emptyset, D_32_7)$, which all have Galois group $C_2\times C_2$. Generalizing the result of \cite{Y1}, we show that these \'etale covers all have canonical degree 36.
\begin{theorem}\label{main} The $35$ degree four Galois \'etale covers of the fake projective plane $(a=7, p=2,\emptyset, D_32_7)$, all with Galois group $C_2\times C_2$, are minimal surfaces of general type with canonical degree $36$. \end{theorem}
Our result has implications for the optimal canonical degree of smooth threefolds of general type with large geometric genus. We refer the reader to Section \ref{sec3fold} for more details.
\begin{corollary}\label{3fold} There exist many examples of smooth minimal threefolds of general type $Y$ with the degree of the canonical map $\deg(\Phi_{|K_Y|})=72$. In fact, there exist such threefolds with $p_g(Y)=3g$ and $K_Y^3=72(g-1)$ for each $g\geqslant 2$.
\end{corollary}
The surface studied in \cite{Y1} has Picard number one, which is a deep result in automorphic forms from \cite{Ro}, \cite{BR}, and is used in \cite{Y1} to simplify the geometric arguments. For a general degree four \'etale cover of a fake projective plane, it is not clear whether the Picard number equals one. Compared to the result in \cite{Y1}, one technical improvement in the present article is to show that any surface as in Theorem \ref{iso} possesses a generically finite canonical map. Continuing from this, mobility of the canonical system is proved, but by a different argument from \cite{Y1}. In fact, we can show that any degree four \'etale cover of a fake projective plane with ${\rm Aut}(X)=C_7:C_3$ has generically finite canonical map and at worst discrete base locus. To get rid of the finite number of base points, we need more detailed information about the canonical sections as given in \cite{Y1}, see in particular the corrigendum there. By carefully analyzing the method used in \cite{Y1}, we come up with new examples of surfaces with maximal canonical degree by considering new degree four Galois \'etale covers of the same fake projective plane $X$ used in \cite{Y1}. These new \'etale covers correspond to various $C_2\times C_2$ quotient groups of $H_1(X,\mathbb{Z})=C_2^4$. In such cases, we are able to write down the relevant global sections explicitly with the help of Magma and finish the proof of base point freeness. This last step is where we have to restrict further the type of lattice $\Sigma$ associated to $M$.
To find which \'etale cover works for our scheme, as a first step we list all normal subgroups of index four in a lattice associated to a fake projective plane. All fake projective planes supporting such a subgroup are listed in the third column of Table 1 above. Now for each of the listed surfaces, we exhaust all possible normal subgroups of index four. The procedure of finding such a surface as well as verification of necessary conditions stated in Theorem \ref{iso} and Proposition \ref{all} is similar to that in \cite{Y1}. In \cite{Y1}, the choice of the $C_2\times C_2$ Galois \'etale cover is very specific and has to come from killing the $2$-torsion invariant line bundles under a Sylow $3$-subgroup of the automorphism group $C_7:C_3$. In this paper, we obtain more examples by overcoming this technical hurdle, namely, we consider all possible $C_2\times C_2$ Galois \'etale covers of the fake projective plane in \cite{Y1}.
The explicit computation is accomplished by using Magma. The proof of Theorem \ref{main} generalizes the argument of \cite{Y1}.
Here is the organization of this paper. We first prepare some preliminary results related to our construction in Section \ref{pre}. The proofs of Theorems \ref{iso} and \ref{main} are given in Sections \ref{seciso} and \ref{new}, respectively. Finally, we study the corresponding problem in dimension three in Section \ref{sec3fold}.
\section{Preliminary discussions and idea of proofs}\label{pre} Let $X=\mathbb{B}^2_\mathbb{C}/\Pi$ be a fake projective plane with $\pi_1(X)=\Pi$. By definition, the first Betti number of $X$ is trivial. According to \cite{PY}, there is always a nontrivial torsion element in $H_1(X,\mathbb{Z})$. The torsion group $H_1(X,\mathbb{Z})$ is available from \cite{CS}.
\begin{lemma}\label{cover} A fake projective plane $X$ possesses a degree four Galois \'etale cover if and only if there is a quotient group of order four of $H_1(X,\mathbb{Z})$. \end{lemma} \begin{proof} We know that $H_1(X,\mathbb{Z})$ is a direct sum of finite cyclic abelian groups as the first Betti number of $X$ is trivial. If $Q$ is a quotient group of order four of $H_1(X,\mathbb{Z})$, then there is a homomorphism $$\rho:\Pi\rightarrow\Pi/[\Pi,\Pi]=H_1(X,\mathbb{Z})\rightarrow Q.$$ The kernel of $\rho$ gives rise to a normal subgroup $\Sigma$ of index four in $\Pi$, with $Q$ as the deck transformation group of the covering map $M=\mathbb{B}^2_\mathbb{C}/\Sigma\rightarrow X=\mathbb{B}^2_\mathbb{C}/\Pi$.
On the other hand, if there is a normal subgroup $\Sigma$ of index four in $\Pi$, it leads to a homomorphism $\sigma:\Pi\rightarrow \Pi/\Sigma$. As a group of order four is always abelian, $\sigma$ factors through a homomorphism $\Pi/[\Pi,\Pi]\rightarrow\Pi/\Sigma$. We conclude that $\Pi/\Sigma$ lives as a quotient group of order four of $\Pi/[\Pi,\Pi]=H_1(X,\mathbb{Z})$. \end{proof}
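For example, for the fake projective plane relevant to Theorem \ref{main} one has $H_1(X,\mathbb{Z})=C_2^4$. Every quotient of order four is then isomorphic to $C_2\times C_2$, and such quotients correspond to the subgroups of index four of $C_2^4$, i.e., to the $2$-dimensional subspaces of $F_2^4$, of which there are $$\frac{(2^4-1)(2^4-2)}{(2^2-1)(2^2-2)}=35.$$ This matches the entry $N_1=35$ in Table 1 and the number of covers in Theorem \ref{main}.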
We consider an \'etale cover $\pi:M\rightarrow X$ corresponding to a subgroup $\pi_1(M)\leq\Pi$ of index four. In particular, the finite group $\mathcal{G}=\Pi/\pi_1(M)$ is either $C_2\times C_2$ or $C_4$.
\begin{lemma}\label{gg} Let $M$ be a smooth projective surface and assume that there is an \'etale cover $\pi:M\rightarrow X$ of degree four over a fake projective plane $X$. Suppose that $q(M)=0$, then $p_g(M)=3$. \end{lemma} \begin{proof} Since $\pi:M\rightarrow X$ is \'etale and $p_g(X)=q(X)=0$, $\chi(\mathcal{O}_M)=4\chi(\mathcal{O}_X)=4$. It follows that $p_g(M)=3$ if $q(M)=0$. \end{proof}
Suppose now a surface $M$ is constructed as in Lemma \ref{gg}. We study
the canonical map $\varphi=\varphi_{|K_M|}:M\dashrightarrow\mathbb{P}^2$. We will assume that $\pi:M\rightarrow X$ is a \emph{Galois cover}, i.e., $\Sigma:=\pi_1(M)\leq\Pi$ is normal.
Note that then $|K_M|$ is invariant under the Galois group $\mathcal{G}:={\rm Gal}(M/X)=\Pi/\pi_1(M)$.
Let us relate the canonical sections from Lemma \ref{gg} to divisors on $X$. It is known from the Universal Coefficient Theorem that torsion classes in $H_1(X,\mathbb{Z})$ give rise to torsion line bundles on $X$, cf. Lemma 4 of \cite{LY}. Denote by $\mathcal{L}_\chi$ the invertible sheaf on $X$ corresponding to the torsion line bundle given by a character $\chi$; for the trivial character we write $\mathcal{L}_1=\mathcal{O}_X$. The push forward of the structure sheaf of $M$ splits into eigen-sheaves $$ \pi_*\mathcal{O}_M=\bigoplus_{\chi:\mathcal{G}\rightarrow \mathbb{C}^*}\mathcal{L}_\chi. $$ Denote by $\omega_M$ the dualizing sheaf of the surface $M$. Then $$\pi_*\omega_M=\bigoplus_{\chi:\mathcal{G}\rightarrow \mathbb{C}^*}\omega_X\otimes \mathcal{L}_\chi.$$ It follows from the degeneration of the Leray spectral sequence that \begin{equation} H^i(M,\omega_M)=\bigoplus_{\chi:\mathcal{G}\rightarrow \mathbb{C}^*}H^i(X,\omega_X\otimes \mathcal{L}_\chi) \end{equation} for all $i$. Hence the vanishing of $q(M)$ implies that $H^1(X,\omega_X\otimes \mathcal{L}_\chi)=0$ for all $\chi:\mathcal{G}\rightarrow \mathbb{C}^*$. By Serre Duality, $h^2(X,\omega_X\otimes\mathcal{L}_\chi)=h^0(X,\mathcal{L}_\chi^{-1})$, which is either $1$ or $0$ depending on whether $\chi$ is trivial or not. From the Riemann-Roch formula and the fact that $X$ is a fake projective plane, it follows that $h^0(X,\omega_X\otimes\mathcal{L}_\chi)=1$ for each $\chi\neq 1$; these give the three linearly independent sections in Lemma \ref{gg}. Denote by $D_1, D_2, D_3$ the corresponding curves on $X$. It follows that $H^0(M,K_M)$ is generated by $\pi^*(D_i)$, $i=1,2,3$, noting that $\pi^* \mathcal{L}_\chi\cong\mathcal{O}_M$.
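Spelled out, since $\mathcal{L}_\chi$ is numerically trivial and $\chi(\mathcal{O}_X)=1$ for a fake projective plane, the Riemann-Roch formula together with the vanishing of $h^1$ and $h^2$ noted above gives, for $\chi\neq1$, $$h^0(X,\omega_X\otimes\mathcal{L}_\chi)=\chi(X,\omega_X\otimes\mathcal{L}_\chi)=\chi(\mathcal{O}_X)+\tfrac{1}{2}(K_X+\mathcal{L}_\chi)\cdot\mathcal{L}_\chi=1.$$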
\begin{lemma}\label{bpf23} Assume that $q(M)=0$ and let $D_1, D_2, D_3$ be divisors obtained as above. Assume that $D_1\cap D_2\cap D_3=\emptyset$. Then $H^0(M,K_M)$ is base point free and the canonical degree of $M$ is $36$. \end{lemma}
\begin{proof} Let $x$ be a point in the base point set of $|K_M|$. Since $|K_M|$ is invariant under the Galois group $\mathcal{G}$, $\pi(x)\in D_1\cap D_2\cap D_3$, which is empty. It follows from Proposition \ref{Bea} that the canonical degree of $M$ is $36$. \end{proof}
The last lemma will be utilized in Section \ref{new} to give a proof of Theorem \ref{main}. The presentation here is a simplification of the original one, thanks to a suggestion of the referee.
\section{General constraints on base point set}\label{seciso}
The goal of this section is to give a proof of Theorem \ref{iso}, which gives constraints on the base point set of $|K_M|$ without knowledge on an explicit description of fake projective plane $X$. Here as $\rho(X)=1$, we always denote by $L_X$ an ample generator of ${\rm Pic}(X)$. Also recall that for a fake projective plane $X$, we have $p_g(X)=q(X)=0$ and $L_X^2=1$ by definition. We begin with the following simple observations.
\begin{lemma}\label{gen} Let $X$ be a fake projective plane and let $L_X$ be an ample generator of ${\rm Pic}(X)$. Then $h^0(X,L)\leq1$ for any line bundle $L\equiv L_X$ and $h^0(X,L')\leq 2$ for any line
bundle $L'\equiv 2L_X$.
\end{lemma} \begin{proof} If $L''$ is a line bundle with $L''\equiv 4L_X$, then by the Riemann-Roch formula and Kodaira vanishing (note that $L''-K_X\equiv L_X$ is ample) we have $h^0(X,L'')=3$. But if $L\equiv L_X$ and $H^0(X,L)$ has two linearly independent sections $x$ and $y$, then
$\{x^4,x^3y,x^2y^2,xy^3,y^4\}$ are five linearly independent sections of $H^0(X,L^{\otimes 4})$, which is absurd. The second statement is proved similarly.
\end{proof}
\begin{lemma}\label{Sch} If $C$ is an irreducible and reduced curve on a fake projective plane $X$ with $C\equiv L_X$, then $C$ is smooth of genus 3. \end{lemma} \noindent{\bf Proof.} Given an irreducible and reduced curve $C$, we denote by $C^\nu$ the normalization of $C$ and $\nu:C^\nu\rightarrow C$ the normalization morphism. The $\mathcal{O}_C$ sheaf $\delta:=\nu_*\mathcal{O}_{C^\nu}/\mathcal{O}_C$ is the cokernel of the natural map $\mathcal{O}_C\rightarrow\nu_*\mathcal{O}_{C^\nu}$ and satisfies $$g(C^\nu)=p_a(C^\nu)=p_a(C)-h^0(C,\delta).$$
We first remark that $g(C^\nu)\geq2$ as $X$ is hyperbolic. The Ahlfors-Schwarz Lemma applied to the composition map induced by the normalization $\nu':C^\nu\xrightarrow{\nu}C\hookrightarrow X$ (cf. \cite{CCL}) for the manifolds equipped with Poincar\'e metrics implies that the K\"ahler forms satisfy $\nu'^*\omega_X\leq\omega_{C^\nu}$, with equality if and only if it is a holomorphic isometry leading to totally geodesic $C$. Since there is no totally geodesic curve on a fake projective plane from the proof of \cite[Lemma 6]{LY}, the inequality is strict. Hence for $C\equiv kL_X$ with $k\geq1$, integrating over $C^\nu$, we get $$2k= \frac{2}{3}(K_X\cdot C)<\deg(K_{C^\nu})=2g(C^\nu)-2=k(k+3)-2h^0(C,\delta),$$ where we used the fact that the Ricci curvature is $\frac{3}{2}$ of the holomorphic sectional curvature for the Poincar\'e metric on $X$ and the adjunction $p_a(C)=\frac{1}{2}C\cdot(K_X+C)$. Hence $k = 1$ implies that $h^0(C,\delta) = 0$ and $C$ is smooth with $g(C)=3$. \qed
\begin{lemma}\label{inv} Let $X$ be a fake projective plane with a nontrivial automorphism group and let $C$ be an effective divisor such that $C\equiv L_X$. For any nontrivial subgroup $H\leq{\rm Aut}(X)$ with $H\cong C_3$ or $C_7$, $h^*C\neq C$ for any $h\in H-\{e\}$.
\end{lemma} \begin{proof} Clearly $C$ must be reduced and irreducible as $\rho(X)=1$. From Lemma \ref{Sch}, $C$ is smooth of genus three. Suppose now $h^*C=C$ for all $h\in H$. From \cite[Lemma 6]{LY}, $H$ must act non-trivially on $C$. Note that $H$ can only be $C_3$ or $C_7$ from the list of \cite{CS}.
If $H\cong C_7$, then there exists an $H$-fixed point on $C$, as by the Hurwitz formula there is no \'etale cover of degree 7 from a smooth genus three curve. By \cite[Lemma 7]{LY}, for $x=\dim_\mathbb{C} H^1(C,\mathcal{O}_C)^{\rm inv}$ we have the equation $$n=2-2\cdot3+\frac{2\cdot7}{7-1}(3-x)\ \Rightarrow\ 3n+7x=9. $$ The only solution is $(n,x)=(3,0)$ and $C/C_7\subseteq X/C_7$ is a smooth rational curve. But then there is a non-constant lifted map from $\mathbb{P}^1$ to the universal cover $\mathbb{B}^2_\mathbb{C}$ of $X/C_7$, which contradicts Liouville's theorem.
If $H\cong C_3$, then there exists an $H$-fixed point on $C$, as by the Hurwitz formula there is no \'etale cover of degree 3 from a smooth genus three curve. By the same argument as above, we see that $(n,x)=(5,0)$ or $(2,1)$. In either case, there is a non-constant lifted map from $\mathbb{P}^1$ or $\mathbb{C}$ to $\mathbb{B}^2_\mathbb{C}$, which again contradicts Liouville's theorem.
\end{proof}
\begin{lemma}\label{propgenfin} Let $X$ be a fake projective plane with ${\rm Aut}(X)=C_7:C_3$. Suppose that there is a Galois \'etale cover $\pi:M\rightarrow X$ of degree four and $q(M)=0$, then the canonical map $\varphi:M\dashrightarrow\mathbb{P}^2$ is generically finite. \end{lemma}
\begin{proof} From Lemma \ref{gg}, we know that $p_g(M)=3$ and hence the canonical map maps $M$ to $\mathbb{P}^2$. Write $|K_M|=P+F$, where $P$ is the mobile part and $F$ is the fixed divisor. By construction, we have $\varphi=\varphi_{|K_M|}=\varphi_{P}:M\dashrightarrow\mathbb{P}^2$. We will abuse the notation: $P$ will be the mobile linear system or a general member in it.
Assume that $\overline{\varphi(M)}=C\subseteq\mathbb{P}^2$ is a curve. We will derive a contradiction.
First of all, we claim that $P$ is not base point free, or equivalently $P^2\neq0$. Assume now $P^2=0$. We consider $\mathcal{G}={\rm Gal}(M/X)$. Since $g^*K_M=K_M$ for any $g\in\mathcal{G}$, we have that $g^*F=F$ for each $g\in\mathcal{G}$. Indeed, $g^*P$ is a mobile sub-linear system of $|K_M|$ and hence $g^*F\geq F$ as Weil divisors. Hence as $\pi$ is Galois, $F=\pi^*F_X$ for an effective divisor $F_X$ on $X$. Moreover, if ${\rm NS}(X)=\<L_X\rangle$ for an ample divisor $L_X$, then $K_X\equiv3L_X$, $F_X\equiv lL_X$ for some $0\leq l\leq 3$, and $P\equiv\pi^*(3-l)L_X$. Now, $P^2=0$ implies that $l=3$ and hence $P\equiv0$. This is a contradiction as a non-zero effective divisor cannot be numerically trivial.
Since $\varphi:M\dashrightarrow C\subseteq\mathbb{P}^2$ is not a morphism, we take a composition of finitely many smooth blow-ups $\rho:\widehat{M}\rightarrow M$ to resolve $P$ and let $\psi:\widehat{M}\rightarrow C\subseteq\mathbb{P}^2$ be the induced morphism. We have the following diagram after taking the Stein factorization of $\psi:\widehat{M}\rightarrow C$: \begin{center} \begin{tikzpicture} \node (m) at (0,2) {$\widehat{M}$}; \node (M) at (0,0) {$M$}; \node (C) at (2,0) {$C$}; \node (in) at (2.5,0) {$\subseteq$}; \node (P) at (3,0) {$\mathbb{P}^2$}; \node (CC) at (2,2) {$\tilde{C}$};
\path[->] (m) edge node[left]{$\rho$}(M); \path[->] (m) edge node[above]{$\beta$} (CC); \path[dashed,->] (M) edge node[below]{$\varphi$} (C); \path[->] (CC) edge node[right]{$\alpha$} (C); \path[->] (m) edge node[above]{$\psi$} (C); \end{tikzpicture} \end{center}
If $\rho^*P=\widehat{P}+\widehat{F}$, where $\widehat{P}=\psi^*|\mathcal{O}_C(1)|$ is base point free, $\widehat{F}\geq0$ is the fixed divisor, and $\psi=\psi_{\widehat{P}}$, then $\widehat{F}$ is a non-trivial effective $\rho$-exceptional divisor with $\beta(\widehat{F})=\tilde{C}$. In particular, $\tilde{C}\cong\mathbb{P}^1$ as all the irreducible components of $\widehat{F}$ are rational. Since $\alpha:\tilde{C}\rightarrow C$ is defined by $\alpha^*|\mathcal{O}_C(1)|\subseteq|\mathcal{O}_{\mathbb{P}^1}(d)|$ for some $d\geq1$ and hence an element in $\widehat{P}$ is given by $\beta^*H$ for some $H\in|\mathcal{O}_{\mathbb{P}^1}(d)|$, we have $\widehat{P}\supseteq\beta^*|\mathcal{O}_{\mathbb{P}^1}(d)|$. In particular, we get
$$\widehat{P}=\psi^*|\mathcal{O}_C(1)|=\beta^*\alpha^*|\mathcal{O}_C(1)|=\beta^*|\mathcal{O}_{\mathbb{P}^1}(d)|.$$ As $\dim \widehat{P}=p_g(M)=3$, we get $d=2$ and $C\subseteq\mathbb{P}^2$ being irreducible and non-degenerate is a smooth conic in $\mathbb{P}^2$.
Let $\widehat{M}_{c}$ be a general fibre of $\widehat{M}\rightarrow\tilde{C}$ and $D:=\rho_*(\widehat{M}_{c})\equiv P/2$ be the corresponding prime divisor on $M$. Recall that $\pi:M\rightarrow X$ is Galois, $K_M=\pi^*K_X\equiv\pi^*(3L_X)$ and $P\equiv\pi^*(lL_X)$ for some $1\leq l\leq 3$ as $P^2\neq0$ (so that here $l$ denotes the numerical class of $P$), where ${\rm NS}(X)=\langle L_X\rangle$ and $L_X^2=1$. It follows from the genus formula $$(K_M+D)\cdot D=2g_a(D)-2\in2\mathbb{Z}$$ that $l=2$ is the only possibility. Hence $P\equiv\pi^*(2L_X)$, $F=\pi^*F_X\equiv\pi^*L_X$, and $D\equiv\pi^*L_X$. Note that if $h^0(X,2L_X)=0$ for any ample generator $L_X$ on $X$, then we arrive at the required contradiction as $2F_X\neq0$. This is exactly the argument in \cite{Y1}, where the vanishing holds for $X$ a very special fake projective plane as discussed in the introduction. Below we provide a more elementary argument.
It is easy to see that $\mathcal{G}$ acts on $C\cong\mathbb{P}^1$ holomorphically and induces an action on $\tilde{C}$. We claim that there is always a fixed point on $\tilde{C}=\mathbb{P}^1$. If $\mathcal{G}$ acts trivially, then every point is a fixed point.\footnote{In fact, this case is absurd. If $\mathcal{G}$ acts trivially on $C$, then $\mathcal{G}$ also acts trivially on $\tilde{C}\cong\mathbb{P}^1$. Any fibre of $\beta:\widehat{M}\rightarrow\tilde{C}$, corresponding to a section of $H^0(\mathbb{P}^1,\mathcal{O}_{\mathbb{P}^1}(1))$, is $\mathcal{G}$-fixed and descends to a $\mathcal{G}$-invariant section $D\equiv\pi^*L_X$ on $M$, which then descends to a section $D_X\equiv L_X$ on $X$. For any two such sections $D$ and $D'$ on $M$, $D\sim D'$ implies that $D_X\equiv D'_X\equiv L_X$ where $\pi^*D_X=D$ and $\pi^*(D_X')=D'$. Since $X$ has only finitely many nontrivial torsion line bundles but $H^0(\mathbb{P}^1,\mathcal{O}_{\mathbb{P}^1}(1))$ is infinite, we can find a line bundle $L=L_X+T_X$ for some torsion line bundle $T_X$ on $X$ with $\dim|L|\geq1$. This contradicts Lemma \ref{gen}.} Otherwise, $\mathcal{G}$ has two fixed points on $\tilde{C}$ from the Lefschetz fixed point formula. In particular, the fiber $\widehat{M}_c$ over a fixed point $c$ is $\mathcal{G}$-invariant and descends to an effective divisor $G^X\equiv L_X$ on $X$.\footnote{Up to here everything works for all fake projective planes with a nontrivial automorphism group.}
Suppose now that ${\rm Aut}(X)=C_7:C_3$. Note that in this case a non-trivial torsion element is always a 2-torsion. In particular, for any $\sigma\in{\rm Aut}(X)$, $\sigma^*G^X\sim G^X+T_\sigma$ for some 2-torsion $T_\sigma$ and $$\sigma^*(2G^X)=2\sigma^*(G^X)\sim 2G^X+2T_\sigma=2G^X.$$
On the other hand, for any non-trivial element $\sigma\in{\rm Aut}(X)$, $G^X\neq\sigma^*G^X$ by Lemma \ref{inv}. The curves $G^X$ and $\sigma^*G^X$ intersect at a unique point $Q_\sigma$ as $G^X\cdot(\sigma^*G^X)=L_X^2=1$. We claim that there are three linearly independent sections of the form $2\sigma^*G^X$ in $|2G^X|$, which then contradicts Lemma \ref{gen}.
We fix one non-trivial $\sigma$ and consider $Q:=Q_\sigma$. Note that then $2G^X$ intersects with $\sigma^*(2G^X)$ only at $Q$ with multiplicity four. By the result of \cite{PY}, the isotropic group at $Q$ cannot be the whole ${\rm Aut}(X)$. Hence there exists a nontrivial element $\tau\in{\rm Aut}(X)$, $\tau\neq\sigma$, such that $\tau^*Q\neq Q$. In particular, $\tau^*(2G^X)$ only intersects with $\tau^*\sigma^*(2G^X)$ at $\tau^*Q$ with multiplicity four. Since elements in the pencil $\langle\mu\cdot2G^X+\lambda\cdot2\sigma^*G^X\rangle$ must pass through $Q$ with multiplicity four, one of $\tau^*(2G^X)$ and $\tau^*\sigma^*(2G^X)$ is not in $\<2G^X,2\sigma^*G^X\rangle$ or otherwise $\tau^*Q=Q$. Hence $h^0(X,2G^X)>2$ and we have a contradiction to Lemma \ref{gen}.
Hence we conclude that $\dim\overline{\varphi(M)}\neq1$. Since $\varphi(M)\subseteq\mathbb{P}^2$ has to be positive dimensional, we conclude that $\varphi:M\dashrightarrow\mathbb{P}^2$ must be dominant and hence generically finite. \end{proof}
\begin{lemma}\label{codim1} Let $M\rightarrow X$ be a Galois \'etale cover of degree four of a fake projective plane $X$ with ${\rm Aut}(X)=C_7:C_3$. If $q(M)=0$, then the canonical linear system $|K_M|$ is mobile, i.e., there is no codimension one base locus. \end{lemma} \begin{proof} We follow the same notation as in the proof of Lemma \ref{propgenfin}: ${\rm NS}(X)=\<L_X\rangle$ for an ample divisor $L_X$, $K_X\equiv3L_X$, $F_X\equiv lL_X$ for some $0\leq l\leq 3$, and $P\equiv\pi^*(3-l)L_X$. We claim that $l=0$.
Since $\dim P=p_g(M)-1=2>0$, $P$ contains a nontrivial effective divisor and hence $l\neq 3$.
If $l=1$, then we consider the action of ${\rm Aut}(X)=C_7:C_3$ on $F_X=L_X+T$, where $T$ is a 2-torsion. Then the same argument as in the proof of Lemma \ref{propgenfin} produces a line bundle $\mathcal{L}\equiv 2L_X$ with $h^0(X,\mathcal{L})>2$, but this violates Lemma \ref{gen}.
If $l=2$, then we consider the same argument as above on $P_X\equiv L_X$.
Here is an alternate argument. In the above setting, if $H^0(X,2L_X)=0$ for $L_X$ any ample generator of ${\rm Pic}(X)$, then $|K_M|=P$ is mobile. Indeed, the assumption also implies that $H^0(X,L_X)=0$ for any ample generator of ${\rm Pic}(X).$ Hence for $F=\pi^*F_X$ with $F_X\equiv lL_X$, $l=0$ is the only possibility and $F=0$. The hypothesis holds for any fake projective plane with an automorphism group of order 21 by a result of \cite{LY}. \end{proof}
{\it Proof of Theorem \ref{iso}.} First of all, from Magma, all Galois coverings of index $4$ of a fake projective plane can be listed, as is done in the proof of Proposition \ref{all} below. Furthermore, Magma tells us that the abelianizations of the lattices associated to such coverings are all finite, so that $H_1(M,\mathbb{Q})=0$. Hence $q(M)=0$ for our examples. Theorem \ref{iso} now follows from Lemma \ref{codim1}.\qed
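The enumeration itself is a routine computation with finitely presented groups. The following Magma-style fragment is only a schematic sketch of the kind of computation used: the presentation of $\Pi$ must be taken from the data accompanying \cite{CS} and is not reproduced here, and the intrinsic names should be checked against the Magma handbook.
\begin{verbatim}
// Pi := Group< ... | ... >;  -- finite presentation of the lattice, from [CS]
subs := LowIndexSubgroups(Pi, 4);
for H in subs do
    if Index(Pi, H) eq 4 and IsNormal(Pi, H) then
        // the abelian invariants of H give H_1 of the cover M,
        // and in particular show whether q(M) = 0
        print Index(Pi, H), AbelianQuotientInvariants(H);
    end if;
end for;
\end{verbatim}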
\begin{proof}[of Proposition \ref{all}] We simply apply the procedure of construction as in \cite{Y1} to each of the fake projective planes listed in column 3 of Table 1. We first need to enumerate all possible surfaces arising as degree four Galois \'etale covers associated to the fake projective planes listed. It turns out that the number of normal subgroups of index four of the lattice $\Pi$ associated to a fake projective plane in the table is recorded in the column $N_1$ of Table 1. This can be seen by considering quotient groups of order $4$ of $H_1(X,\mathbb{Z})$ as in Lemma \ref{cover}, or by listing normal subgroups of index four of $\Pi$ with Magma.
Now we claim that the different sub-lattices of index $4$ of $\Pi$ in Table 1 give rise to pairwise non-isometric complex hyperbolic forms with respect to the Killing metrics on the locally symmetric spaces. For this purpose, assume that $\Lambda_1$ and $\Lambda_2$ are two groups obtained from the above procedure and that $B_{\mathbb{C}}^2/\Lambda_1$ is isometric to $B_{\mathbb{C}}^2/\Lambda_2$. By construction, $\Lambda_1$ and $\Lambda_2$ are normal subgroups of index 4 in two lattices $\Pi_1$ and $\Pi_2$ corresponding to the fundamental groups of fake projective planes. Let $\overline \Gamma_1$ and $\overline \Gamma_2$ be the corresponding maximal arithmetic groups in the respective classes. As $B_{\mathbb{C}}^2/\Lambda_1$ and $B_{\mathbb{C}}^2/\Lambda_2$ are isometric, $\Lambda_1$ is conjugate to $\Lambda_2$ as discrete subgroups of the same algebraic group $G$ with $G\otimes \mathbb{R}\cong PU(2,1)$. Hence the two corresponding maximal lattices satisfy $\overline \Gamma_1\cong \overline \Gamma_2$, and similarly $\Pi_1\cong\Pi_2$. It follows that they have to come from the same row of Table 1 and hence correspond to the same subgroup of index $4$ in the same lattice associated to some fake projective plane. Hence there are altogether $835$ non-isometric complex two-ball quotients obtained in this way, by summing over the column $N_1$ in Table 1.
Now each locally symmetric space $M=B_{\mathbb{C}}^2/\Lambda$ obtained as above gives rise to a pair of complex structures $J_1$ and $J_2$, which are conjugate to each other. These two complex structures give rise to two non-biholomorphic complex surfaces $S_1=(M,J_1)$ and $S_2=(M,J_2)$. Indeed, if they were biholomorphic, the corresponding four-fold quotients $S_1/[\Pi,\Lambda]$ and $S_2/[\Pi,\Lambda]$ would be biholomorphic fake projective planes. This contradicts the results in \cite{KK}, see also the Addendum of \cite{PY}, that conjugate complex structures on a fake projective plane give rise to two different complex surfaces.
In general, let $(M_1, J_1)$ and $(M_2,J_2)$ be two complex ball quotients obtained by taking degree 4 \'etale covers of some possibly different fake projective planes. If $(M_1, J_1)$ and $(M_2,J_2)$ are biholomorphic, they are isometric with respect to the corresponding Bergman (Killing) metrics. Hence, by the earlier argument, $M_1$ is isometric to $M_2$ and we may regard $M_1=M_2$. The argument of the last paragraph then implies that $J_1=J_2$. In conclusion, the $1670$ complex surfaces obtained from the pairs of conjugate complex structures on the $835$ underlying locally symmetric spaces are pairwise distinct. This concludes the proof of Proposition \ref{all}. \end{proof}
\section{New examples of surfaces with maximal canonical degree}\label{new}
Our goal in this section is to prove Theorem \ref{main}. The surface studied in \cite{Y1} and here is constructed from the fake projective plane $X$ given in \cite[Section 5.9]{PY}, which lies in the class $(a=7, p=2)$ and is denoted by $(a=7, p=2,\emptyset, D_32_7)$ in the notation of \cite{CS}.
\begin{proof}[of Theorem \ref{main}] We consider a Galois $C_2\times C_2$--\'etale cover $\pi:M\rightarrow X$ of the fake projective plane $X$ in the class $(a=7, p=2,\emptyset, D_32_7)$. From the Magma computation, the irregularity $q(M)=0$; cf. Proposition \ref{all}. Hence by Lemma \ref{bpf23}, it suffices for us to prove that the canonical system $|K_M|$ is base point free. From the discussion in Section \ref{pre}, there are non-trivial 2-torsion elements $\tau_i\in{\rm Pic}^0(X)$ for $i=1,2,3$, corresponding to characters of $\mathcal{G}={\rm Gal}(M/X)=C_2\times C_2$, such that $H^0(X,K_X+\tau_i)=\<t_i\rangle$ and $H^0(M,K_M)=\langle\pi^*t_i|\ i=1,2,3\rangle$.
For the convenience of the reader, we recall the key steps of the argument in \cite{Y1}. For simplicity, we denote by $G$ the automorphism group ${\rm Aut}(X)=C_7:C_3$, which has the presentation
$G=\langle a,b|a^7=b^3=1, bab^{-1}=a^2\rangle.$ The group $G$ contains a normal Sylow 7-subgroup $G_7=\<a\rangle$ and seven conjugate Sylow $3$-subgroups, one of which is $G_3:=\<b\rangle$. We know from the Riemann-Roch formula that $h^0(X,2K_X)=10$. In terms of the explicit basis of $H^0(X,2K_X)$ given by \cite{BK}, the action of $G$ is given by \begin{eqnarray} &&a(u_0:u_1:u_2:u_3:u_4:u_5:u_6:u_7:u_8:u_9)\nonumber \\ &=&(u_0:\zeta_7^6u_1:\zeta_7^5u_2:\zeta_7^3u_3:\zeta_7u_4:\zeta_7^2u_5:\zeta_7^4u_6:\zeta_7u_7:\zeta_7^2u_8:\zeta_7^4u_9)\\ &&b(u_0:u_1:u_2:u_3:u_4:u_5:u_6:u_7:u_8:u_9) \nonumber \\ &=&(u_0:u_2:u_3:u_1:u_5:u_6:u_4:u_8:u_9:u_7). \end{eqnarray}
From the Corrigendum of \cite{Y1}, under the action of $G_7$, the set $S:=\cup_{\Sigma\in C_2^4-\{1\}} H^0(X,K_X+\Sigma)$ consists of 3 orbits, where we recall that a $p$-torsion element $\Sigma\in H_1(X,\mathbb{Z})=C_2^4$ corresponds to a $p$-torsion element $\Sigma\in{\rm Pic}^0(X)$ by the universal coefficient theorem (see \cite[Lemma 4]{LY}).
\begin{enumerate}
\item $\langle\widetilde{t}_0\rangle=H^0(X,K_X+\Sigma_0)$, where $\Sigma_0$ is $G$-invariant, corresponding to an element in $H_1(X/G,\mathbb{Z})^\times$, and $\widetilde{t}_0^2=u_0$.
\item Two disjoint $G_7$-orbits $\<a\rangle\widetilde{t}_1$ and $\<a\rangle\widetilde{t}_2$, where the $\widetilde{t}_i$'s are $G_3$-invariant, corresponding to elements in $H_1(X/G_3,\mathbb{Z})^\times-\{\Sigma_0\}$.
\end{enumerate} Let $v_0=u_0$, $v_1=u_1+u_2+u_3$, $v_2=u_4+u_5+u_6$, and $v_3=u_7+u_8+u_9$. From \cite{Y1}, one finds that \begin{equation}\label{4.3}\begin{cases} \widetilde{t}_0^2=v_0,\\ \widetilde{t}_1^2=v_0+\frac12(1+\sqrt{-7})v_1,\\ \widetilde{t}_2^2=v_0+(-5+\sqrt{-7})v_1+4(1-\sqrt{-7})v_2-4v_3\end{cases} \end{equation} with the help of the elementary command \verb'IsDomain' in Magma. It is proved there that $\bigcap_{i=0}^2Z_{\widetilde{t}_i^2}=\emptyset$; this was verified in the Corrigendum of \cite{Y1} by checking, via the command \verb'HilbertPolynomial' in Magma, that the intersection is empty on $X$ modulo $p=23$. We remark that the same example was also studied later in \cite{Ri}, where the author independently verified with more sophisticated techniques in Magma that the sections obtained from the above procedure do give rise to sections in $H^0(M,K_M)$.
Now under the action of $G_7$, the explicit sections $\widetilde{t}_0$ and $a^j\widetilde{t}_i$, for $i=1,2$ and $0\leq j\leq 6$, precisely give the effective sections of $S:=\cup_{\Sigma\in C_2^4-\{1\}} H^0(X,K_X+\Sigma)$. We will prove that $\bigcap_{i=1}^{3}Z_{t^2_i}=\emptyset$ by considering all possible choices of $\{t_1,t_2,t_3\}\subseteq S=\langle\widetilde{t}_0\rangle\cup\<a\rangle\widetilde{t}_1\cup\<a\rangle\widetilde{t}_2$ and checking with Magma whether these sections have a common zero.
Conjugating by an element of $G_7$, we may assume that $t_1$ belongs to $\{\widetilde{t}_0,\widetilde{t}_1,\widetilde{t}_2\}$. Suppose $t_1=\widetilde{t}_0$, where $\widetilde{t}_0$ is invariant as a set under $G$; then, conjugating by an element of $G_7$, we may assume that $t_2=\widetilde{t}_1$. But by construction $\tau_3=\tau_1\cdot\tau_2$ is determined by $\tau_1=\sigma_0$ and $\tau_2$, which gives $\widetilde{t}_2\in H^0(X,K_X+\tau_0\cdot\tau_1)=H^0(X,K_X+\tau_2)$. In particular, this case was already checked in \cite{Y1}, as $\bigcap_{i=1}^{3}Z_{t^2_i}= Z_{v_0}\cap Z_{\widetilde{t}_1^2}\cap Z_{\widetilde{t}^2_2}=\emptyset$, and we are done.
Consider now the case where none of the $t_i$ is $\widetilde{t}_0$. In this scenario, each $t_i$ belongs to the $G_7$-orbit of $\widetilde{t}_1$ or of $\widetilde{t}_2$. Again we use the fact that effective divisors $D_i$ have a common point if and only if the divisors $2D_i$ do. Hence it suffices for us to prove the following claim. \begin{lemma}\label{check} Let $i, j\in \{1,\dots,6\}$. Then \begin{enumerate}[$(a)$]
\item $Z_{\widetilde{t}_1}\cap Z_{a^i\widetilde{t}_1}\cap Z_{a^j\widetilde{t}_2}=\emptyset$ for $1\leq i,j\leq6;$
\item $Z_{\widetilde{t}_2}\cap Z_{a^i\widetilde{t}_1}\cap Z_{a^j\widetilde{t}_2}=\emptyset$ for $1\leq i,j\leq6;$
\item $Z_{\widetilde{t}_1}\cap Z_{a^i\widetilde{t}_1}\cap Z_{a^j\widetilde{t}_1}=\emptyset$ for $1\leq i<j\leq6;$
\item $Z_{\widetilde{t}_2}\cap Z_{a^i\widetilde{t}_2}\cap Z_{a^j\widetilde{t}_2}=\emptyset$ for $1\leq i<j\leq6.$ \end{enumerate} \end{lemma}
\begin{proof} In terms of the basis chosen, with the action of $G_7$ given in equation (4.1) and the explicit sections listed in (4.3), statement $(a)$ of Lemma \ref{check} holds if the following sections have no common zero: \begin{eqnarray*} &\{&u_0+\frac12(1+\sqrt{-7})(u_1+u_2+u_3),\\ &&u_0+\frac12(1+\sqrt{-7})(\zeta_7^{-i}u_1+\zeta_7^{-2i}u_2+\zeta_7^{-4i}u_3),\\ &&u_0+(-5+\sqrt{-7})(\zeta_7^{-j}u_1+\zeta_7^{-2j}u_2+\zeta_7^{-4j}u_3)+4(1-\sqrt{-7})(\zeta_7^{j}u_4 +\zeta_7^{2j}u_5+\zeta_7^{4j}u_6)\\&&-4(\zeta_7^{j}u_7+\zeta_7^{2j}u_8+\zeta_7^{4j}u_9)\}. \end{eqnarray*}
Instead of using the command \verb'HilbertPolynomial' over the cyclotomic field $\mathbb{Q}(\zeta_7)$ on $X$, we specialize to the finite field $F_{29}$, where $16$ is a primitive $7$-th root of unity and $14$ serves as $\sqrt{-7}$. Computing over the finite field $F_{29}$, we verify with Magma that the above three polynomials have no common zero on $X$ for any $i, j\in \{1,\dots,6\}$. This implies that the original equations have no common zero over the number field $\mathbb{Q}(\zeta_7)$. A similar argument applies to $(b)$, $(c)$, and $(d)$ of Lemma \ref{check}. \end{proof}
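As a quick sanity check of this specialization (a hypothetical snippet, not part of the original computation), one can verify directly in Magma that the chosen elements behave as claimed.
\begin{verbatim}
// Check that 16 is a primitive 7-th root of unity in F_29
// and that 14 plays the role of sqrt(-7).
F := GF(29);
Order(F!16);          // returns 7
F!14^2 eq F!(-7);     // returns true
\end{verbatim}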
We remark that Lemma \ref{check} is actually stronger than what is needed for our purpose. For example, consider case $(a)$. It is enough to check $Z_{\widetilde{t}_1}\cap Z_{a^i\widetilde{t}_1}\cap Z_{a^j\widetilde{t}_2}=\emptyset$ for one pair $(i,j)$ corresponding to the elements of $\mathcal{G}-\{1\}=\{\tau_1,\tau_2,\tau_3\}.$ However, since we are checking by Magma, the extra computation does not make any essential difference in computing time. A similar remark applies to the cases $(b)$, $(c)$, and $(d)$ as well.
Theorem \ref{main} follows immediately from Lemma \ref{check}. \end{proof}
\section{Remark on maximal canonical degree of threefolds}\label{sec3fold}
Theorem \ref{main} has an implication for the canonical degree bound of threefolds. The purpose of this section is to explain the literature in this direction and its relation to Theorem \ref{main}. From this point on, let $Y$ be a Gorenstein minimal complex projective threefold of general type with locally factorial terminal singularities. Suppose that the linear system $|K_Y|$ defines a generically finite map $\Phi=\Phi_{|K_Y|}:Y\dashrightarrow\mathbb{P}^{p_g(Y)-1}.$ M. Chen asked in \cite{Ch} whether there is an upper bound for $\deg(\Phi)$. A positive answer was provided in \cite{Hac} with $\deg(\Phi)\leq576.$ This was later improved in \cite{DG2} to $\deg(\Phi)\leq360$ (with equality if and only if $p_g(Y)=4$, $q(Y)=2$, $\chi(\omega_Y)=5$, $K_Y^3=360$, and $|K_Y|$ is base point free). In \cite{C}, it is shown that $\deg(\Phi)\leq 72$ if the geometric genus satisfies $p_g(Y)>10541$.
As a corollary of Theorem \ref{main} and the above discussion, we conclude that the canonical degree 72 can be achieved as stated in Corollary \ref{3fold}.
\begin{proof}[of Corollary \ref{3fold}] Equipped with Theorem \ref{main}, the corollary follows essentially from an observation of \cite[Section 3]{C}.
Let $C$ be a smooth hyperelliptic curve of genus $g\geq2$; then the canonical map $\varphi_{|K_C|}:C\rightarrow\mathbb{P}^{g-1}$ is the composition of the double cover $C\rightarrow\mathbb{P}^1$ with the $(g-1)$-Veronese embedding $\mathbb{P}^1\hookrightarrow\mathbb{P}^{g-1}$. In particular, $\deg(\varphi_{|K_C|})=2$; cf. \cite{Har}. Let $M$ be a surface attaining the optimal degree bound $\deg(\varphi_{|K_M|})=36$ as in Theorem \ref{main}; then $\varphi=\varphi_{|K_M|}:M\rightarrow\mathbb{P}^2$ is a generically finite morphism of degree $\deg(\varphi)=K_M^2=36$.
Now let $Y=M\times C$. Then $Y$ is a smooth projective threefold of general type with $p_g(Y)=3g$, and $\Phi=\Phi_{|K_Y|}:Y\rightarrow\mathbb{P}^{3g-1}$ is a morphism. From our construction, it follows that $\Phi$ is generically finite and $$\deg{\Phi}\cdot\deg W=K_Y^3=3K_M^2\cdot K_C=3\cdot36\cdot(2g-2),$$
where $W=\Phi(Y)$ is the image of the composition $Y\rightarrow\mathbb{P}^2\times\mathbb{P}^{g-1}\hookrightarrow\mathbb{P}^{3g-1}$ defined by $|K_Y|$ and ${\mathcal{O}_{\mathbb{P}^2\times\mathbb{P}^{g-1}}(1,1)}$. Hence $\deg W=3(g-1)$ and $\deg(\Phi)=3\cdot36\cdot(2g-2)/\bigl(3(g-1)\bigr)=72.$ \end{proof}
\noindent{\bf Acknowledgements.}\label{ackref} It is a pleasure for the second author to thank Donald Cartwright for his help with Magma commands. The authors would like to express their appreciation to the referee for very helpful comments and suggestions on the paper. This work was partially done during the first author's visits to the Research Institute for Mathematical Sciences in Kyoto, the National Center for Theoretical Sciences and National Taiwan University in Taiwan, and the second author's visit to the Institute of Mathematics of the University of Hong Kong. The authors thank these institutes for their warm hospitality.
\end{document} |
\begin{document}
\title[Gaussian estimates]{Gaussian estimates for fundamental solutions of second order parabolic systems with time-independent coefficients}
\author{Seick Kim} \address{Mathematics Department, University of Missouri, Columbia, Missouri 65211}
\email{[email protected]}
\subjclass[2000]{Primary 35A08, 35B45; Secondary 35K40}
\keywords{Gaussian estimates, a priori estimates, parabolic system}
\begin{abstract} Auscher, McIntosh and Tchamitchian studied the heat kernels of second order elliptic operators in divergence form with complex bounded measurable coefficients on $\mathbb R^n$. In particular, in the case when $n=2$ they obtained Gaussian upper bound estimates for the heat kernel without imposing further assumptions on the coefficients. We study the fundamental solutions of systems of second order parabolic equations in divergence form with bounded, measurable, time-independent coefficients, and extend their results to such systems. \end{abstract}
\maketitle
\section{Introduction} \label{sec:I}
In 1967, Aronson \cite{Aronson} proved Gaussian upper and lower bounds for the fundamental solutions of parabolic equations in divergence form with bounded measurable coefficients. To establish the Gaussian lower bound Aronson made use of the Harnack inequality for nonnegative solutions which was proved by Moser in 1964 (see \cite{Moser}). Related to Moser's parabolic Harnack inequality, we should mention Nash's earlier paper \cite{Nash} where the H\"{o}lder continuity of weak solutions to parabolic equations in divergence form was established. In 1985, Fabes and Stroock \cite{FS} showed that the idea of Nash could be used to establish a Gaussian upper and lower bound on the fundamental solution. They showed that actually such Gaussian estimates could be used to prove Moser's Harnack inequality. We note that Aronson also obtained Gaussian upper bound estimates of the fundamental solution without using Moser's Harnack inequality.
In \cite{Auscher}, Auscher proposed a new proof of Aronson's Gaussian upper bound estimates for the fundamental solution of second order parabolic equations with time-independent coefficients. His method relies crucially on the assumption that the coefficients are time-independent and thus it does not exactly reproduce Aronson's result, which is valid even for the time-dependent coefficients case. However, his method is interesting in the sense that it carries over to equations with complex coefficients provided that the complex coefficients are a small perturbation of real coefficients. Along with this direction, Auscher, McIntosh and Tchamitchian also showed that the heat kernel of second order elliptic operators in divergence form with complex bounded measurable coefficients in the two dimensional space has a Gaussian upper bound (see \cite{AMT} and also \cite{AT}).
We would like to point out that a parabolic equation with complex coefficients is, in fact, a special case of a system of parabolic equations. From this point of view, Hofmann and the author showed that the fundamental solution of a parabolic system has an upper Gaussian bound if the system is a small perturbation of a diagonal system, which, in particular, generalized the result of Auscher mentioned above to the time-dependent coefficients case (see \cite{HK}). However, the above mentioned result of Auscher, McIntosh and Tchamitchian regarding the heat kernel of two dimensional elliptic operators with complex coefficients does not follow directly from our result.
One of the main goals of this article is to provide a proof that weak solutions of the parabolic system of divergence type with time-independent coefficients associated to an elliptic system in two dimensions enjoy the parabolic local boundedness property, and to show that its fundamental solution has a Gaussian upper bound. More generally, we show that if weak solutions of an elliptic system satisfy H\"{o}lder estimates at every scale, then weak solutions of the corresponding parabolic system with time-independent coefficients also satisfy similar parabolic H\"{o}lder estimates, from which, in particular, the parabolic local boundedness property follows easily. Also, such an argument allows one to derive H\"{o}lder continuity estimates for weak solutions of parabolic equations with time-independent coefficients directly from De Giorgi's theorem \cite{DG57} on elliptic equations, bypassing Moser's parabolic Harnack inequality. In fact, this is what Auscher really proved in the setting of complex coefficient equations by using a functional calculus method (see \cite{Auscher} and also \cite{AQ}, \cite{AT}). Even in those complex coefficient settings, we believe that our approach is much more straightforward and thus appeals to a wider readership.
Finally, we would like to point out that in this article, we are mainly interested in global estimates and that we do not attempt to treat, for example, the systems with lower order terms, etc. However, let us also mention that, with some extra technical details, our methods carry over to those cases as well as to the systems of higher order; see e.g. \cite{AQ}, \cite{AT} for the details, and also Remark~\ref{rmk:local}.
The remaining sections are organized in the following way. In Section~\ref{sec:N} we give notations, definitions, and some known facts. We state the main results in Section~\ref{sec:M} and give the proofs in Section~\ref{sec:P}.
\section{Notation and definitions} \label{sec:N}
\subsection{Geometric notation}
\begin{enumerate} \item $\mathbb R^n=\text{$n$-dimensional real Euclidean space.}$ \item $x=(x_1,\cdots,x_n)$ is an arbitrary point of $\mathbb R^{n}$. \item $X=(x,t)$ denotes an arbitrary point in $\mathbb R^{n+1}$, where $x\in\mathbb R^n$ and $t\in\mathbb R$. \item $B_r(x)=\set{y\in\mathbb R^n:\abs{y-x}<r}$ is an open ball in $\mathbb R^n$ with center $x$ and radius $r>0$. We sometimes drop the reference point $x$ and write $B_r$ for $B_r(x)$ if there is no danger of confusion. \item $Q_r(X)=\set{(y,s)\in\mathbb R^{n+1}: \abs{y-x}<r\text{ and } t-r^2<s<t}$. We sometimes drop the reference point $X$ and write $Q_r$ for $Q_r(X)$. \item $Q^{*}_r(X)=\set{(y,s)\in\mathbb R^{n+1}: \abs{y-x}<r\text{ and } t<s<t+r^2}$. \item $Q_{r,s}(X)=\set{(y,s)\in Q_r(X)}$; i.e., $Q_{r,s}(X)=B_r(x)\times\set{s}$ if $s\in(t-r^2,t)$ and $Q_{r,s}(X)=\emptyset$ otherwise. We sometimes drop the reference point $X$ and write $Q_{r,s}$ for $Q_{r,s}(X)$. \item For a cylinder $Q=\Omega\times (a,b)\subset \mathbb R^{n+1}$, $\partial_P Q$ denotes its parabolic boundary, namely, $\partial_P Q=\partial\Omega\times (a,b)\cup \overline{\Omega}\times\set{a}$, where $\partial\Omega$ is the usual topological boundary of $\Omega\subset\mathbb R^n$ and $\overline\Omega$ is its closure. \end{enumerate}
\subsection{Notation for functions and their derivatives}
\begin{enumerate} \item For a mapping from $\Omega\subset\mathbb R^n$ to $\mathbb R^N$, we write $\vec{f}(x)=(f^1(x),\ldots,f^N(x))^T$ as a column vector. \item $\overline{f}_{Q}=\frac{1}{\abs{Q}}\int_{Q}f$, where $\abs{Q}$ denotes the volume of $Q$. \item $u_t=\partial u/\partial t$. \item $D_{x_i} u= D_i u= u_{x_i}=\partial u/\partial x_i$. \item $D u=(u_{x_1},\ldots,u_{x_n})^T$ is the spatial gradient of $u=u(x,t)$. \item For $\vec{f}=(f^1,\ldots,f^N)^T$, $D\vec{f}=(Df^1,\ldots,Df^N)$; that is $D\vec{f}$ is the $n\times N$ matrix whose $i$-th column is $Df^i$. \end{enumerate}
\subsection{Function spaces}
\begin{enumerate} \item For $\Omega\subset\mathbb R^n$ and $p\ge 1$, $L^p(\Omega)$ denotes the space of functions with the following norms: \begin{equation*} \norm{u}_{L^p(\Omega)}=\left(\int_\Omega\abs{u(x)}^p\,dx\right)^{1/p}\quad\text{and}\quad \norm{u}_{L^\infty(\Omega)}=\esssup_\Omega\abs{u}. \end{equation*} \item $C^{\mu}(\Omega)$ denotes the space of functions that are H\"{o}lder continuous with the exponent $\mu\in (0,1]$, and \begin{equation*} [u]_{C^{\mu}(\Omega)} =\sup_{x\neq x'\in \Omega}\frac{\abs{u(x)-u(x')}}{\abs{x-x'}^\mu}<\infty. \end{equation*} \item The Morrey space $M^{2,\mu}(\Omega)$ is the set of all functions $u\in L^2(\Omega)$ such that \begin{equation*} \norm{u}_{M^{2,\mu}(\Omega)}=\sup_{B_\rho(x)\subset \Omega} \left(\rho^{-\mu}\int_{B_\rho(x)}\abs{u}^2\right)^{1/2}<\infty. \end{equation*} \item $C^{\mu}_{P}(Q)$ denotes the space of functions defined on $Q\subset\mathbb R^{n+1}$ such that \begin{equation*} [u]_{C^{\mu}_{P}(Q)} =\sup_{X\neq X'\in Q}\frac{\abs{u(X)-u(X')}}{d_P(X,X')^\mu}<\infty, \end{equation*} where $d_P(X,X')=\max\left(\abs{x-x'},\sqrt{\abs{t-t'}}\right)$.
\end{enumerate}
\subsection{Elliptic and parabolic systems and their adjoints}
\begin{definition} We say that the coefficients $A^{\alpha\beta}_{ij}(x)$ satisfy the uniform ellipticity condition if there exist numbers $\nu_0, M_0>0$ such that for all $x\in\mathbb R^n$ we have \begin{equation} \label{eqn:para} \ip{\vec{A}^{\alpha\beta}(x)\vec{\xi}_\beta,\vec{\xi}_\alpha}\ge \nu_0\abs{\vec{\xi}}^2\quad\text{and } \abs{\ip{\vec{A}^{\alpha\beta}(x)\vec{\xi}_\beta,\vec{\eta}_\alpha}}\le M_0\abs{\vec{\xi}}\abs{\vec{\eta}}, \end{equation} where we used the following notation. \begin{enumerate} \item For $\alpha,\beta=1,\ldots,n$, $\vec{A}^{\alpha\beta}(x)$ are $N\times N$ matrices with $(i,j)$-entries $A^{\alpha\beta}_{ij}(x)$. \item $\vec{\xi}_\alpha=(\xi_\alpha^1,\cdots,\xi_\alpha^N)^T$ and $\abs{\vec{\xi}}^2=\sum\limits_{\alpha=1}^n\sum\limits_{i=1}^N\abs{\xi_\alpha^i}^2$. \item $\ip{\vec{A}^{\alpha\beta}(x)\vec{\xi}_\beta,\vec{\eta}_\alpha} =\sum\limits_{\alpha,\beta=1}^n \sum\limits_{i,j=1}^N A_{ij}^{\alpha\beta}(x)\xi_\beta^j \eta_\alpha^i$. \end{enumerate} We emphasize that we do not assume that the coefficients are symmetric. \end{definition}
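For instance (a standard example included only for orientation), the constant coefficients $A^{\alpha\beta}_{ij}(x)=\delta_{\alpha\beta}\delta_{ij}$, for which the system decouples into $N$ copies of the Laplace operator, satisfy \eqref{eqn:para} with $\nu_0=M_0=1$: in this case $\ip{\vec{A}^{\alpha\beta}(x)\vec{\xi}_\beta,\vec{\xi}_\alpha}=\abs{\vec{\xi}}^2$, and the upper bound is just the Cauchy-Schwarz inequality.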
\begin{definition} We say that a system of $N$ equations on $\mathbb R^n$ \begin{equation*} \sum_{j=1}^N\sum_{\alpha,\beta=1}^n D_{x_\alpha}(A^{\alpha\beta}_{ij}(x) D_{x_\beta}u^j)=0\qquad (i=1,\ldots,N) \end{equation*} is elliptic if the coefficients satisfy the uniform ellipticity condition. We often write the above system in a vector form \begin{equation} \label{eqn:E-01} L\vec{u}:=\sum_{\alpha,\beta=1}^n D_\alpha(\vec{A}^{\alpha\beta}(x) D_\beta\vec{u})=0, \quad\vec{u}=(u^1\ldots,u^N)^T. \end{equation} The adjoint system of \eqref{eqn:E-01} is given by \begin{equation} \label{eqn:E-02} L^{*}\vec{u}:=\sum_{\alpha,\beta=1}^n D_\alpha\left((\vec{A}^{\alpha\beta}){}^{*}(x) D_\beta\vec{u}\right)=0, \end{equation} where $(\vec{A}^{\alpha\beta}){}^{*}=(\vec{A}^{\beta\alpha})^T$, the transpose of $\vec{A}^{\beta\alpha}$. \end{definition}
\begin{definition} We say that a system of $N$ equations on $\mathbb R^{n+1}$ \begin{equation*} u^i_t-\sum_{j=1}^N\sum_{\alpha,\beta=1}^n D_{x_\alpha}(A^{\alpha\beta}_{ij}(x) D_{x_\beta}u^j)=0\qquad (i=1,\ldots,N) \end{equation*} is parabolic if the (time-independent) coefficients satisfy the uniform ellipticity condition. We often write the above system in a vector form \begin{equation} \label{eqn:P-01} \vec{u}_t-L\vec{u} :=\vec{u}_t-\sum_{\alpha,\beta=1}^n D_\alpha(\vec{A}^{\alpha\beta}(x) D_\beta\vec{u})=0. \end{equation} The adjoint system of \eqref{eqn:P-01} is given by \begin{equation} \label{eqn:P-02} \vec{u}_t+L^{*}\vec{u} :=\vec{u}_t+\sum_{\alpha,\beta=1}^n D_\alpha\left((\vec{A}^{\alpha\beta}){}^{*}(x) D_\beta\vec{u}\right)=0, \end{equation} where $(\vec{A}^{\alpha\beta}){}^{*}=(\vec{A}^{\beta\alpha})^T$, the transpose of $\vec{A}^{\beta\alpha}$. \end{definition}
\subsection{Weak solutions}
In this article, the term ``weak solution'' is used in a rather abusive way. To avoid unnecessary technicalities, we may assume that all the coefficients involved are smooth so that all weak solutions are indeed classical solutions. However, this extra smoothness assumption will not be used quantitatively in our estimates. This is why we shall make clear the dependence of constants.
\begin{enumerate} \item We say that $\vec{u}$ is a weak solution of \eqref{eqn:E-01} in $\Omega\subset\mathbb R^n$ if $\vec{u}$ is a (classical) solution of \eqref{eqn:E-01} in $\Omega$ and $\vec{u}, D\vec{u}\in L^2(\Omega)$. \item We say that $\vec{u}$ is a weak solution of \eqref{eqn:P-01} in a cylinder $Q=\Omega\times (a,b)\subset\mathbb R^{n+1}$ if $\vec{u}$ is a (classical) solution of \eqref{eqn:P-01} in $Q$ and $\vec{u}, D\vec{u}\in L^2(Q)$, $\vec{u}(\cdot,t)\in L^2(\Omega)$ for all $a\le t\le b$, and $\sup_{a\le t\le b} \norm{\vec{u}(\cdot,t)}_{L^2(\Omega)}<\infty$. \end{enumerate}
\subsection{Fundamental solution}
By a fundamental solution (or fundamental matrix) $\vec{\Gamma}(x,t;y)$ of the parabolic system \eqref{eqn:P-01} we mean an $N\times N$ matrix of functions defined for $t>0$ which, as a function of $(x,t)$, is a solution of \eqref{eqn:P-01} (i.e., each column is a solution of \eqref{eqn:P-01}), and is such that \begin{eqnarray} \lim_{t\downarrow 0}\int_{\mathbb R^n}\vec{\Gamma}(x,t;y)\vec{f}(y)\,dy =\vec{f}(x) \end{eqnarray} for any bounded continuous function $\vec{f}=(f^1,\ldots,f^N)^T$, where $\vec{\Gamma}(x,t;y)\vec{f}(y)$ denotes the usual matrix multiplication.
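To fix ideas, we note the model case (which is classical and not used quantitatively below): if $\vec{A}^{\alpha\beta}(x)=\delta_{\alpha\beta}$ times the $N\times N$ identity matrix, then \eqref{eqn:P-01} is a system of $N$ decoupled heat equations and
\begin{equation*}
\vec{\Gamma}(x,t;y)=(4\pi t)^{-n/2}\exp\left(-\frac{\abs{x-y}^2}{4t}\right)\,\mathrm{Id}_{N\times N},
\end{equation*}
which clearly satisfies a Gaussian upper bound with $C_0=(4\pi)^{-n/2}$ and $k_0=1/4$. The point of the results below is to obtain bounds of this type when the coefficients are merely bounded and measurable.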
\subsection{Notation for estimates}
We employ the letter $C$ to denote a universal constant usually depending on the dimension and ellipticity constants. It should be understood that $C$ may vary from line to line. We sometimes write $C=C(\alpha,\beta,\ldots)$ to emphasize the dependence on the prescribed quantities $\alpha,\beta,\ldots$.
\subsection{Some preliminary results and known facts}
\begin{lemma}[Energy estimates] \label{lem:P-03} Let $\vec{u}$ be a weak solution of \eqref{eqn:P-01} in $Q_R=Q_R(X)$. Then for $0<r<R$, we have \begin{equation*} \sup_{t-r^2\le s\le t} \int_{Q_{r,s}}\abs{\vec{u}(\cdot,s)}^2+ \int_{Q_r}\abs{D\vec{u}}^2\le \frac{C}{(R-r)^2} \int_{Q_R}\abs{\vec{u}}^2. \end{equation*} \end{lemma} \begin{proof} See e.g., \cite[Lemma 2.1, p. 139]{LSU}. \end{proof}
\begin{lemma}[Parabolic Poincar\'{e} inequality] \label{lem:P-02} Let $\vec{u}$ be a weak solution of \eqref{eqn:P-01} in $Q_R=Q_R(X)$. Then there is some constant $C=C(n,M_0)$ such that \begin{equation*} \int_{Q_R}\abs{\vec{u}-\overline{\vec{u}}_{Q_R}}^2\le C R^2 \int_{Q_R}\abs{D\vec{u}}^2. \end{equation*} \end{lemma} \begin{proof} See e.g., \cite[Lemma 3]{Struwe}. \end{proof}
\begin{lemma} \label{lem:P-04} Let $Q_{2R}=Q_{2R}(X_0)$ be a cylinder in $\mathbb R^{n+1}$. Suppose $\vec{u}\in L^2(Q_{2R})$ and there are positive constants $\mu\le 1$ and $M$ such that for any $X\in Q_R$ and any $r\in (0,R)$ we have \begin{equation*} \int_{Q_r(X)}\abs{\vec{u}-\overline{\vec{u}}_{Q_{r}(X)}}^2\le M^2r^{n+2+2\mu}. \end{equation*} Then $\vec{u}$ is H\"{o}lder continuous in $Q_R$ with the exponent $\mu$ and $[\vec{u}]_{C^{\mu}_P(Q_R)}\le C(n,\mu)M$. \end{lemma} \begin{proof} See e.g., \cite[Lemma 4.3, p. 50]{L}. \end{proof}
\begin{definition}[Local boundedness property] We say that the system \eqref{eqn:P-01} satisfies the local boundedness property for weak solutions if there is a constant $M$ such that all weak solutions $\vec{u}$ of \eqref{eqn:P-01} in $Q_{2r}(X)$ satisfy the estimates \begin{equation*} \sup_{Q_r(X)}\abs{\vec{u}}\le M\left(\frac{1}{\abs{Q_{2r}}} \int_{Q_{2r}(X)}\abs{\vec{u}}^2 \right)^{1/2}. \end{equation*} Similarly, we say that the adjoint system \eqref{eqn:P-02} satisfies the local boundedness property if the corresponding estimates hold for weak solutions $\vec{u}$ of \eqref{eqn:P-02} in $Q_{2r}^{*}(X)$. \end{definition}
\begin{theorem}[Theorem~1.1, \cite{HK}] \label{thm:P-01} Assume that the system \eqref{eqn:P-01} and its adjoint system \eqref{eqn:P-02} satisfy the local boundedness property for weak solutions. Then the fundamental solution of the system \eqref{eqn:P-01} has an upper bound \begin{equation} \abs{\vec{\Gamma}(x,t;y)}_{op}\le C_0 t^{-{n/2}}\exp\left(-\frac{k_0\abs{x-y}^2}{t}\right), \end{equation} where $\abs{\vec{\Gamma}(x,t;y)}_{op}$ denotes the operator norm of the fundamental matrix $\vec{\Gamma}(x,t;y)$. Here, $C_0=C_0(n,\nu_0,M_0,M)$ and $k_0=k_0(\nu_0,M_0)$. \end{theorem}
\section{Main results} \label{sec:M}
\begin{definition} We say that an elliptic system \eqref{eqn:E-01} satisfies the H\"{o}lder estimates for weak solutions at every scale if there exist constants $\mu_0>0$ and $H_0$ such that all weak solutions $\vec{u}$ of the system in $B_{2r}=B_{2r}(x_0)$ satisfy the following estimates \begin{equation} \label{eqn:M-04} [\vec{u}]_{C^{\mu_0}(B_r)}\le H_0 r^{-(n/2+\mu_0)} \norm{\vec{u}}_{L^2(B_{2r})}. \end{equation} Similarly, we say that a parabolic system \eqref{eqn:P-01} satisfies H\"{o}lder estimates for weak solutions at every scale if there exist constants $\mu_1>0$ and $H_1$ such that all weak solutions $\vec{u}$ of the system in $Q_{2r}=Q_{2r}(X_0)$ satisfy the following estimates \begin{equation} \label{eqn:M-05} [\vec{u}]_{C^{\mu_1}_P(Q_r)}\le H_1 r^{-(n/2+1+\mu_1)} \norm{\vec{u}}_{L^2(Q_{2r})}. \end{equation} \end{definition}
\begin{remark} Elliptic systems with constant coefficients satisfy the above property, and in that case, the ellipticity condition \eqref{eqn:para} can be weakened and replaced by the Legendre-Hadamard condition. De Giorgi's theorem \cite{DG57} states that the property is satisfied if $N=1$. The property is also satisfied if $n=2$ and it is due to Morrey (see Corollary~\ref{thm:M-04}). Some other examples include, for instance, a certain three dimensional elliptic system which was studied by Kang and the author in \cite{KK}. \end{remark}
We shall prove the following main results in this paper: \begin{theorem} \label{thm:M-02} If an elliptic system \eqref{eqn:E-01} satisfies the H\"{o}lder estimates for weak solutions at every scale, then the corresponding parabolic system \eqref{eqn:P-01} with time-independent coefficients also satisfies the H\"{o}lder estimates for weak solutions at every scale. \end{theorem}
\begin{theorem} \label{thm:M-03} Suppose that the elliptic system \eqref{eqn:E-01} and its adjoint system \eqref{eqn:E-02} defined on $\mathbb R^n$ both satisfy the H\"{o}lder estimates for weak solutions at every scale with constants $\mu_0, H_0$. Let $\vec{\Gamma}(x,t;y)$ be the fundamental solution of the parabolic system \eqref{eqn:P-01} with the time-independent coefficients associated to the elliptic system \eqref{eqn:E-01}. Then $\vec{\Gamma}(x,t;y)$ has an upper bound \begin{equation} \label{bound} \abs{\vec{\Gamma}(x,t;y)}_{op}\le C_0 t^{-n/2}\exp\left(-\frac{k_0\abs{x-y}^2}{t}\right), \end{equation} where $C_0=C_0(n,\nu_0,M_0,\mu_0,H_0)$ and $k_0=k_0(\nu_0,M_0)$. Here, $\abs{\vec{\Gamma}(x,t;y)}_{op}$ denotes the operator norm of fundamental matrix $\vec{\Gamma}(x,t;y)$. \end{theorem}
\begin{remark} \label{rmk:local} We would like to point out that \eqref{bound} is a global estimate. Especially, the bound \eqref{bound} holds for all time $t>0$. Suppose that the elliptic system \eqref{eqn:E-01} and its adjoint system \eqref{eqn:E-02} enjoy the H\"{o}lder estimates for weak solutions up to a fixed scale $R_0$; that is, there is a number $R_0>0$ such that if $\vec{u}$ is a weak solution of either \eqref{eqn:E-01} or \eqref{eqn:E-02} in $B_r=B_r(x)$ with $0<r\le R_0$, then $\vec{u}$ is H\"{o}lder continuous and satisfies \begin{equation*} [\vec{u}]_{C^{\mu_0}(B_r)}\le H_0 r^{-(n/2+\mu_0)} \norm{\vec{u}}_{L^2(B_{2r})}. \end{equation*} Then, the statement regarding the bound \eqref{bound} for the fundamental solution should be localized as follows: For any given $T>0$, there are constants $k_0=k_0(\nu_0,M_0)$ and $C_0=C_0(n,\nu_0,M_0,\mu_0,H_0,R_0,T)$ such that \eqref{bound} holds for $0<t\le T$. \end{remark}
\begin{corollary} \label{thm:M-04} Let $\vec{\Gamma}(x,t;y)$ be the fundamental solution of the parabolic system \eqref{eqn:P-01} with time-independent coefficients associated to an elliptic system \eqref{eqn:E-01} defined on $\mathbb R^2$. Then $\vec{\Gamma}(x,t;y)$ has an upper bound \eqref{bound} with the constants $C_0, k_0$ depending only on the ellipticity constants $\nu_0, M_0$. \end{corollary} \begin{proof} First, let us recall the well known theorem of Morrey which states that any two dimensional elliptic system \eqref{eqn:E-01} with bounded measurable coefficients satisfies the H\"{o}lder estimates for weak solutions at every scale, with the constants $\mu_0, H_0$ depending only on the ellipticity constants (see \cite[pp. 143--148]{Morrey}). Next, note that the ellipticity constants $\nu_0, M_0$ in \eqref{eqn:para} remain unchanged for $\tilde{A}{}^{\alpha\beta}_{ij}(x)=A^{\beta\alpha}_{ji}(x)$. Therefore, the corollary is an immediate consequence of Theorem~\ref{thm:M-03}. \end{proof}
\begin{remark} In fact, the converse of Theorem~\ref{thm:P-01} is also true (see \cite[Theorem~1.2]{HK}). Therefore, in order to extend the above corollary to the parabolic system with time-dependent coefficients, one needs to show that the system satisfies the local boundedness property for weak solutions. Unfortunately, we do not know whether it is true or not if the coefficients are allowed to depend on the time variable. If $n\ge 3$, it is not true in general, even for the time-independent coefficients case since there is a famous counter-example due to De Giorgi (see \cite{DG68}). \end{remark}
\section{Proof of Main Results} \label{sec:P}
\subsection{Some technical lemmas and proofs} \begin{lemma} \label{lem:M-01} If $\vec{u}$ is a weak solution of the parabolic system with time-independent coefficients \eqref{eqn:P-01} in $Q_{R}=Q_{R}(X_0)$, then $\vec{u}_t\in L^2(Q_{r})$ for $r<R$ and satisfies the estimates \begin{equation} \norm{\vec{u}_t}_{L^2(Q_{r})} \le C(R-r)^{-1} \norm{D\vec{u}}_{L^2(Q_{R})}. \end{equation} In particular, if $\vec{u}$ is a weak solution of \eqref{eqn:P-01} in $Q_{2r}$, then the above estimates together with the energy estimates yield \begin{equation} \label{eqn:M-00} \norm{\vec{u}_t}_{L^2(Q_{r})} \le Cr^{-2} \norm{\vec{u}}_{L^2(Q_{2r})}. \end{equation} \end{lemma}
\begin{proof} We first note that if the coefficients are symmetric (i.e., $A_{ij}^{\alpha\beta}=A_{ji}^{\beta\alpha}$), then this is a well known result; a proof for such a case is found, for example, in \cite[pp. 172--181]{LSU} or in \cite[pp. 360--364]{Evans}. However, the standard proof does not carry over to the non-symmetric coefficients case, and for that reason we provide a self-contained proof here.
Fix positive numbers $\sigma,\tau$ such that $\sigma<\tau\le R$. Let $\zeta$ be a smooth cut-off function such that $\zeta\equiv 1$ in $Q_\sigma$, vanishes near $\partial_P Q_{\tau}$, and satisfies
\begin{equation*} \label{eqn:K-01} 0\le \zeta \le 1\quad\text{and}\quad \abs{\zeta_t}+\abs{D\zeta}^2 \le C(\tau-\sigma)^{-2}. \end{equation*} Note that on each slice $Q_{\tau,s}$, we have \begin{equation*} \begin{split} 0&=\int_{Q_{\tau,s}}\left(\vec{u}_t-D_{\alpha}(\vec{A}^{\alpha\beta}D_\beta \vec{u})\right)\cdot\zeta^2\vec{u}_t\\ &=\int_{Q_{\tau,s}}\zeta^2\abs{\vec{u}_t}^2+\int_{Q_{\tau,s}}\zeta^2 \ip{\vec{A}^{\alpha\beta}D_\beta\vec{u},D_\alpha\vec{u}_t}+ \int_{Q_{\tau,s}} 2\zeta\ip{\vec{A}^{\alpha\beta}D_\beta\vec{u},D_\alpha\zeta\vec{u}_t}. \end{split} \end{equation*} Therefore, we find by using the Cauchy-Schwarz inequality that \begin{equation*} \begin{split} \int_{Q_{\tau,s}}\zeta^2\abs{\vec{u}_t}^2 &\le C\int_{Q_{\tau,s}}\zeta^2\abs{D\vec{u}}\abs{D\vec{u}_t} +C\int_{Q_{\tau,s}}\zeta\abs{D\vec{u}}\abs{D\zeta}\abs{\vec{u}_t}\\ &\le \frac{\epsilon}{2}\int_{Q_{\tau,s}}\zeta^2\abs{D\vec{u}_t}^2+ \frac{C}{\epsilon}\int_{Q_{\tau,s}}\zeta^2\abs{D\vec{u}}^2+ C\int_{Q_{\tau,s}}\abs{D\zeta}^2\abs{D\vec{u}}^2 \\ &+\frac{1}{2}\int_{Q_{\tau,s}}\zeta^2\abs{\vec{u}_t}^2. \end{split} \end{equation*} Thus we have \begin{equation} \label{eqn:K-02} \qquad \int_{Q_\tau}\zeta^2\abs{\vec{u}_t}^2 \le \epsilon\int_{Q_\tau}\zeta^2\abs{D\vec{u}_t}^2+ \frac{C}{\epsilon}\int_{Q_\tau}\zeta^2\abs{D\vec{u}}^2+ C\int_{Q_\tau}\abs{D\zeta}^2\abs{D\vec{u}}^2. \end{equation} Since $\vec{u}_t$ also satisfies \eqref{eqn:P-01}, the energy estimates
yield \begin{equation} \label{eqn:K-03} \int_{Q_{\tau}}\zeta^2\abs{D\vec{u}_t}^2 \le \frac{C_0}{(\tau-\sigma)^2}\int_{Q_{\tau}}\abs{\vec{u}_t}^2. \end{equation} This is the part where we exploit the assumption that the coefficients are time-independent. Combining \eqref{eqn:K-02} and \eqref{eqn:K-03}, we have \begin{equation*} \int_{Q_{\sigma}}\abs{\vec{u}_t}^2 \le \frac{C_0\epsilon}{(\tau-\sigma)^2}\int_{Q_{\tau}}\abs{\vec{u}_t}^2+ \frac{C}{\epsilon}\int_{Q_\tau}\abs{D\vec{u}}^2+ \frac{C}{(\tau-\sigma)^2}\int_{Q_{\tau}}\abs{D\vec{u}}^2. \end{equation*} If we set $\epsilon=(\tau-\sigma)^2/2C_0$, we finally obtain \begin{equation*} \int_{Q_{\sigma}}\abs{\vec{u}_t}^2 \le \frac{1}{2}\int_{Q_{\tau}}\abs{\vec{u}_t}^2+ \frac{C}{(\tau-\sigma)^2}\int_{Q_{\tau}}\abs{D\vec{u}}^2. \end{equation*} Here, we emphasize that $C$ is a constant independent of $\sigma,\tau$. Then by a standard iteration argument (see e.g. \cite[Lemma~{3.1}, pp. 161]{Giaq83}), we have \begin{equation} \label{eqn:K-05} \int_{Q_{r}}\abs{\vec{u}_t}^2 \le \frac{C}{(R-r)^2}\int_{Q_{R}}\abs{D\vec{u}}^2 \quad\text{for } 0<r<R. \end{equation} The proof is complete. \end{proof}
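\begin{remark} For the reader's convenience, we recall the iteration lemma used in the last step of the above proof, in the form we need it (see \cite[Lemma~{3.1}, pp. 161]{Giaq83}): if $f$ is a nonnegative bounded function on $[r,R]$ and there are constants $A\ge 0$ and $0\le\theta<1$ such that \begin{equation*} f(\sigma)\le \theta f(\tau)+\frac{A}{(\tau-\sigma)^2}\quad\text{whenever } r\le\sigma<\tau\le R, \end{equation*} then $f(r)\le C(\theta)\,A/(R-r)^2$. \end{remark}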
\begin{lemma} \label{lem:M-02} If $\vec{u}$ is a weak solution of the parabolic system with time-independent coefficients \eqref{eqn:P-01} in $Q_{2r}=Q_{2r}(X_0)$, then $D\vec{u}(\cdot,s), \vec{u}_t(\cdot,s)\in L^2(Q_{r,s})$ for all $s\in[t_0-r^2,t_0]$, and satisfy the following estimates uniformly in $s\in[t_0-r^2,t_0]$. \begin{eqnarray} \label{eqn:M-11} \norm{D\vec{u}(\cdot,s)}_{L^2(Q_{r,s})} \le C r^{-2} \norm{\vec{u}}_{L^2(Q_{2r})}, \\ \label{eqn:M-12} \norm{\vec{u}_t(\cdot,s)}_{L^2(Q_{r,s})} \le C r^{-3} \norm{\vec{u}}_{L^2(Q_{2r})}. \end{eqnarray} \end{lemma}
\begin{proof} By the energy estimates applied to $\vec{u}_t$ we obtain \begin{equation} \label{eqn:M-13} \sup_{t_0-r^2\le s\le t_0}\int_{Q_{r,s}}\abs{\vec{u}_t(\cdot,s)}^2 \le \frac{C}{r^2}\int_{Q_{3r/2}} \abs{\vec{u}_t}^2. \end{equation} On the other hand, the estimates \eqref{eqn:K-05} and the energy estimates (this time, applied to $\vec{u}$ itself) yield \begin{eqnarray} \label{eqn:K-12} \int_{Q_{3r/2}} \abs{\vec{u}_t}^2 \le \frac{C}{r^2} \int_{Q_{7r/4}} \abs{D\vec{u}}^2\le \frac{C}{r^4} \int_{Q_{2r}}\abs{\vec{u}}^2. \end{eqnarray} Combining \eqref{eqn:M-13} and \eqref{eqn:K-12} together, we have the estimates \eqref{eqn:M-12}.
Next, assume that $\vec{u}$ is a weak solution of \eqref{eqn:P-01} in $Q_{4r}=Q_{4r}(X_0)$. Let $\zeta$ be a smooth cut-off function such that $\zeta\equiv 1$ in $Q_r$, vanishes near $\partial_P Q_{2r}$, and satisfies \begin{equation} \label{eqn:K-30} 0\le \zeta \le 1\quad\text{and}\quad \abs{\zeta_t}+\abs{D\zeta}^2 \le Cr^{-2}. \end{equation} Note that on each slice $Q_{2r,s}$, we have \begin{equation*} \begin{split} 0&=\int_{Q_{2r,s}}\left(\vec{u}_t-D_{\alpha}(\vec{A}^{\alpha\beta}D_\beta \vec{u})\right)\cdot\zeta^2\vec{u}\\ &=\int_{Q_{2r,s}}\zeta^2\vec{u}_t\cdot\vec{u}+\int_{Q_{2r,s}}\zeta^2 \ip{\vec{A}^{\alpha\beta}D_\beta\vec{u},D_\alpha\vec{u}}+ 2\zeta\ip{\vec{A}^{\alpha\beta}D_\beta\vec{u},D_\alpha\zeta\vec{u}}. \end{split} \end{equation*} Using the ellipticity condition and the Cauchy-Schwarz inequality, we find \begin{equation*} \begin{split} \nu_0\int_{Q_{2r,s}}\zeta^2\abs{D\vec{u}}^2 &\le \int_{Q_{2r,s}}\zeta^2\abs{\vec{u}_t}\abs{\vec{u}} +C\int_{Q_{2r,s}}\zeta\abs{D\vec{u}}\abs{D\zeta}\abs{\vec{u}}\\ &\le \frac{\epsilon\nu_0}{2}\int_{Q_{2r,s}}\zeta^2\abs{\vec{u}_t}^2+ \frac{C}{\epsilon}\int_{Q_{2r,s}}\zeta^2\abs{\vec{u}}^2+ C\int_{Q_{2r,s}}\abs{D\zeta}^2\abs{\vec{u}}^2 \\ &+\frac{\nu_0}{2}\int_{Q_{2r,s}}\zeta^2\abs{D\vec{u}}^2. \end{split} \end{equation*} Then by \eqref{eqn:K-30}, \eqref{eqn:M-12}, and the energy estimates, for all $s\in[t_0-r^2,t_0]$, we have \begin{equation} \label{eqn:M-99} \begin{split} \int_{Q_{r,s}}\abs{D\vec{u}}^2 &\le \epsilon\int_{Q_{2r,s}}\abs{\vec{u}_t}^2+ \frac{C}{\epsilon}\int_{Q_{2r,s}}\abs{\vec{u}}^2+ \frac{C}{r^2}\int_{Q_{2r,s}}\abs{\vec{u}}^2 \\ &\le \frac{C\epsilon}{r^6}\int_{Q_{4r}}\abs{\vec{u}}^2+ \frac{C}{\epsilon r^2}\int_{Q_{4r}}\abs{\vec{u}}^2+ \frac{C}{r^4}\int_{Q_{4r}}\abs{\vec{u}}^2. \end{split} \end{equation} If we set $\epsilon=r^2$, then the above estimates \eqref{eqn:M-99} now become \begin{equation*} \int_{Q_{r,s}}\abs{D\vec{u}}^2 \le \frac{C}{r^4}\int_{Q_{4r}}\abs{\vec{u}}^2, \end{equation*} from which the estimates \eqref{eqn:M-11} follows by a well known covering argument. \end{proof}
\begin{lemma} \label{lem:M-03} Assume that the elliptic system \eqref{eqn:E-01} satisfies the H\"{o}lder estimates for weak solutions at every scale with constants $\mu_0, H_0$. Let $\vec{u}$ be a weak solution of the inhomogeneous elliptic system \begin{equation} D_{\alpha}(\vec{A}^{\alpha\beta}(x) D_{\beta}\vec{u}) =\vec{f}\quad\text{in}\quad B_{2}=B_{2}(x_0), \end{equation} where $\vec{f}$ belongs to the Morrey space $M^{2,\lambda}(B_{2})$ with $\lambda\ge 0$. Then, for any $\gamma\ge 0$ with $\gamma<\gamma_0=\min(\lambda+4,n+2\mu_0)$ (we may take $\gamma=\gamma_0$ if $\gamma_0<n$) there exists $C=C(n,\nu_0,M_0,\mu_0,H_0,\lambda,\gamma)$ such that $\vec{u}$ satisfies the following local estimates \begin{equation} \label{eqn:M-08} \int_{B_r(x)}\abs{D\vec{u}}^2 \le C \left( r^{\gamma-2} \int_{B_2}\abs{D\vec{u}}^2+ r^{\gamma-2} \norm{\vec{f}}_{M^{2,\lambda}(B_2)}^2 \right) \end{equation} uniformly for all $x\in B_1=B_1(x_0)$ and $0<r\le 1$. Moreover, if $\gamma<n$, then $\vec{u}$ belongs to the Morrey space $M^{2,\gamma}(B_{1})$ and \begin{equation} \label{eqn:M-10} \norm{\vec{u}}_{M^{2,\gamma}(B_{1})}\le C\left(\norm{\vec{u}}_{L^2(B_{2})}+ \norm{D\vec{u}}_{L^{2}(B_{2})}+ \norm{\vec{f}}_{M^{2,\lambda}(B_2)}\right). \end{equation} \end{lemma} \begin{proof} First, we note that the property \eqref{eqn:M-04} implies that for all $0<\rho<r$ and $x\in\mathbb R^n$, we have \begin{equation*} \int_{B_\rho(x)}\abs{D\vec{u}}^2\le C\cdot H_0 \left(\frac{\rho}{r}\right)^{n-2+2\mu_0} \int_{B_r(x)}\abs{D\vec{u}}^2. \end{equation*} In the light of the above observation, the estimates \eqref{eqn:M-08} is quite standard and is found, for example, in \cite[Chapter~3]{Giaq83}. Then, by Poincar\'{e} inequality we have \begin{equation} \label{eqn:M-07} \int_{B_r(x)}\abs{\vec{u}-\overline{\vec{u}}_{B_r(x)}}^2 \le C r^{\gamma} \left( \norm{D\vec{u}}_{L^2(B_2)}^2+ \norm{\vec{f}}_{M^{2,\lambda}(B_2)}^2\right) \end{equation} uniformly for all $x\in B_1=B_1(0)$ and $0<r\le 1$. It is well known that if $\gamma<n$, then the estimates \eqref{eqn:M-07} yield \eqref{eqn:M-10} (see e.g. \cite[Chapter~3]{Giaq83}). \end{proof}
\subsection{Proof of Theorem~\ref{thm:M-02}}
Let $\vec{u}$ be a weak solution of \eqref{eqn:P-01} in a cylinder $Q_4=Q_4(0)$. We rewrite \eqref{eqn:P-01} as $L\vec{u}=\vec{u}_t$. By Lemma~\ref{lem:M-02}, we find that $\vec{u}_t(\cdot,s)$ is in $L^2(Q_{2,s})$ and satisfies \begin{displaymath} \norm{\vec{u}_t(\cdot,s)}_{L^2(Q_{2,s})}\le C \norm{\vec{u}}_{L^2(Q_4)} \quad\text{for all }-4\le s\le 0. \end{displaymath} Therefore, we may apply Lemma~\ref{lem:M-03} with $\vec{f}=\vec{u}_t$ and $\lambda=0$, and then apply Lemma~\ref{lem:M-02} to find that for all $x\in B_{1}(0)$ and $0<r\le 1$, we have \begin{equation} \label{eqn:X-11} \begin{split} \int_{B_{r}(x)}\abs{D\vec{u}(\cdot,s)}^2 &\le C r^{\gamma-2} \left( \norm{D\vec{u}(\cdot,s)}_{L^2(Q_{2,s})}^2+ \norm{\vec{u}_t(\cdot,s)}_{L^{2}(Q_{2,s})}^2\right)\\ &\le C r^{\gamma-2} \norm{\vec{u}}_{L^{2}(Q_{4})}^2 \quad\text{uniformly in }s\in[-4,0] \end{split} \end{equation} for all $\gamma<\min(4,n+2\mu_0)$.
By Lemma~\ref{lem:P-02} and then by \eqref{eqn:X-11} we find that for all $X=(x,t)\in Q_1$ and $r\le 1$ \begin{equation} \label{eqn:X-12} \begin{split} \int_{Q_r(X)}\abs{\vec{u}-\overline{\vec{u}}_{Q_r(X)}}^2 &\le C r^2\int_{t-r^2}^t\int_{B_{r}(x)} \abs{D\vec{u}(y,s)}^2\,dy\,ds \\ &\le C r^{2+\gamma} \norm{\vec{u}}_{L^{2}(Q_{4})}^2. \end{split} \end{equation} Note that if $n\le 3$, then we may write $\gamma=n+2\mu$ for some $\mu>0$. In that case, \eqref{eqn:X-12} now reads \begin{equation} \int_{Q_r(X)}\abs{\vec{u}-\overline{\vec{u}}_{Q_r(X)}}^2 \le C r^{n+2+2\mu} \norm{\vec{u}}_{L^{2}(Q_{4})}^2 \end{equation} for all $X\in Q_1$ and $r\le 1$. Therefore, if $n\le 3$, then Lemma~\ref{lem:P-04} yields the estimates \begin{equation} \label{eqn:X-13} [\vec{u}]_{C^{\mu}_P(Q_{1/2})} \le C \norm{\vec{u}}_{L^{2}(Q_{4})}. \end{equation} We have thus shown that in the case when $n\le 3$, any weak solution $\vec{u}$ of \eqref{eqn:P-01} in a cylinder $Q_4=Q_4(0)$ satisfies the above a priori estimates \eqref{eqn:X-13} provided that the associated elliptic system satisfies the H\"{o}lder estimates for weak solutions at every scale. The general case is recovered as follows. For given $X_0=(x_0,t_0)$ and $r>0$, let us consider the new system \begin{equation} \label{eqn:scale} \vec{u}_t-\tilde{L}\vec{u} :=\vec{u}_t-\sum_{\alpha,\beta=1}^n D_\alpha(\tilde{\vec{A}}{}^{\alpha\beta}(x) D_\beta\vec{u})=0, \end{equation} where $\tilde{\vec{A}}{}^{\alpha\beta}(x)=\vec{A}^{\alpha\beta}(x_0+rx)$. Note that the associated elliptic system $\tilde{L}\vec{u}=0$ also satisfies the H\"{o}lder estimates for weak solutions at every scale. Moreover, the ellipticity constants $\nu_0, M_0$ remain the same for the new coefficients $\tilde{\vec{A}}{}^{\alpha\beta}$. Let $\vec{u}$ be a weak solution of \eqref{eqn:P-01} in $Q_{4r}(X_0)$. Then $\tilde{\vec{u}}(X)=\tilde{\vec{u}}(x,t):=\vec{u}(x_0+rx,t_0+r^2t)$ is a weak solution of \eqref{eqn:scale} in $Q_{4}(0)$ and thus $\tilde{\vec{u}}$ satisfies the estimates \eqref{eqn:X-13}. By rescaling back to $Q_{4r}(X_0)$, the estimates \eqref{eqn:X-13} become \begin{equation} \label{eqn:X-16} [\vec{u}]_{C^{\mu}_P(Q_{r/2})} \le C r^{-(n/2+1+\mu)}\norm{\vec{u}}_{L^{2}(Q_{4r})}. \end{equation} Thus, when $n\le 3$, the theorem now follows from a well known covering argument.
In the case when $n\ge 4$, we invoke a bootstrap argument. For the sake of simplicity, let us momentarily assume that $4\le n \le 7$. Let $\vec{u}$ be a weak solution of \eqref{eqn:P-01} in $Q_{8}=Q_{8}(0)$. Let us fix $X_0=(x_0,t_0)\in Q_2(0)$ and observe that $\vec{u}_t$ also satisfies the system \eqref{eqn:P-01} in $Q_{4}(X_0)$. Thus, by a similar argument that led to \eqref{eqn:X-11}, we find that for all $x\in B_{1}(x_0)$ and $0<r\le 1$ we have \begin{equation} \int_{B_{r}(x)}\abs{D\vec{u}_t(\cdot,s)}^2 \le C r^{\gamma-2} \norm{\vec{u}_t}_{L^{2}(Q_{4}(X_0))}^2 \quad\text{uniformly in }s\in [t_0-4,t_0], \end{equation} for all $\gamma<4$ (we may take $\gamma=4$ if $n>4$). Then, by \eqref{eqn:M-10} in Lemma~\ref{lem:M-03}, Lemma~\ref{lem:M-01}, and Lemma~\ref{lem:M-02} we conclude that \begin{equation} \label{eqn:X-14} \norm{\vec{u}_t(\cdot,s)}_{M^{2,\gamma}(B_{1}(x_0))} \le C \norm{\vec{u}}_{L^{2}(Q_{8}(0))} \quad\text{for all }s\in [t_0-4,t_0]. \end{equation} Since the above estimates \eqref{eqn:X-14} hold for all $X_0=(x_0,t_0)\in Q_{2}(0)$, we find that, in particular, $\vec{u}_t(\cdot, s)$ belongs to $M^{2,\gamma}(B_{2}(0))$ for all $-4\le s \le 0$, and satisfies \begin{equation} \label{eqn:X-15} \norm{\vec{u}_t(\cdot,s)}_{M^{2,\gamma}(B_{2}(0))} \le C \norm{\vec{u}}_{L^{2}(Q_{8}(0))} \quad\text{for all }s\in [-4,0], \end{equation} where we also used \eqref{eqn:M-12} of Lemma~\ref{lem:M-02}.
The above estimates \eqref{eqn:X-15} for $\vec{u}_t$ now allow us to invoke Lemma~\ref{lem:M-03} with $\vec{f}=\vec{u}_t$ and $\lambda=\gamma$. Then, by Lemma~\ref{lem:M-03} and Lemma~\ref{lem:M-02}, we find that for all $x\in B_{1}(0)$ and $0<r\le 1$, we have \begin{equation*} \int_{B_{r}(x)}\abs{D\vec{u}(\cdot,s)}^2 \le C r^{\overline{\gamma}-2} \norm{\vec{u}}_{L^{2}(Q_{8}(0))}^2 \quad\text{uniformly in }s\in [-4,0] \end{equation*} for all $\overline{\gamma}<\min(\gamma+4,n+2\mu_0)$. Since we assume that $n\le 7$, we may write $\overline{\gamma}=n+2\overline{\mu}$ for some $\overline{\mu}>0$. By exactly the same argument as in the case $n\le 3$, we derive the estimates \begin{equation*} [\vec{u}]_{C^{\mu}_P(Q_{1/2})} \le C \norm{\vec{u}}_{L^{2}(Q_{8})}, \end{equation*} and the theorem follows as before.
Finally, if $n\ge 8$, we repeat the above process; if $\vec{u}$ is a weak solution of \eqref{eqn:P-01} in $Q_{16}(0)$, then $\vec{u}_t(\cdot,s)$ is in $M^{2,\gamma}(B_{1}(0))$ for all $\gamma<8$ and so on. The process cannot go on indefinitely and it stops in $k=[n/4]+1$ steps. The proof is complete. \hfil\qed
\subsection{Proof of Theorem~\ref{thm:M-03}}
The proof is based on Theorem~\ref{thm:P-01}, the proof of which, in turn, is found in \cite{HK}. By Theorem~\ref{thm:P-01}, we only need to establish the local boundedness property for weak solutions of the parabolic system \eqref{eqn:P-01} and for those of its adjoint system \eqref{eqn:P-02}.
From the hypothesis that the elliptic system \eqref{eqn:E-01} satisfies the H\"{o}lder estimates for weak solutions at every scale, we find, by Theorem~\ref{thm:M-02}, that the parabolic system \eqref{eqn:P-01} with the associated time-independent coefficients also satisfies the H\"{o}lder estimates for weak solutions at every scale; that is, there exist some constants $\mu>0$ and $C$, depending on the prescribed quantities, such that if $\vec{u}$ is a weak solution of \eqref{eqn:P-01} in $Q_{4r}(X)$, then it satisfies the estimates \begin{equation*} [\vec{u}]_{C^{\mu}_P(Q_{2r})}\le C r^{-(n/2+1+\mu)} \norm{\vec{u}}_{L^2(Q_{4r})}. \end{equation*} Let us fix $Y\in Q_r=Q_{r}(X)$. Then, for all $Z\in Q_{r}(Y)\subset Q_{2r}(X)$, we have \begin{equation} \label{eqn:final} \abs{\vec{u}(Y)} \le \abs{\vec{u}(Z)}+ d_P(Y,Z)^{\mu}\cdot [\vec{u}]_{C^{\mu}_P(Q_{2r})} \le \abs{\vec{u}(Z)}+ C r^{-(n/2+1)} \norm{\vec{u}}_{L^2(Q_{4r})}. \end{equation} By averaging \eqref{eqn:final} over $Q_{r}(Y)$ with respect to $Z$, we derive (note $\abs{Q_r}=Cr^{n+2}$) \begin{equation*} \abs{\vec{u}(Y)} \le C r^{-(n+2)} \norm{\vec{u}}_{L^1(Q_r(Y))} + C r^{-(n/2+1)} \norm{\vec{u}}_{L^2(Q_{4r})}. \end{equation*} Since $Y\in Q_r$ is arbitrary, we find, by H\"{o}lder's inequality, that $\vec{u}$ satisfies \begin{equation*} \norm{\vec{u}}_{L^\infty(Q_r)} \le C r^{-(n/2+1)} \norm{\vec{u}}_{L^2(Q_{4r})} \end{equation*} for some constant $C=C(n,\nu_0,M_0,\mu_0,H_0)$.
To finish the proof, we also need to show that if $\vec{u}$ is a weak solution of the adjoint system \eqref{eqn:P-02} in $Q^{*}_{4r}=Q^{*}_{4r}(X)$, then it satisfies the local boundedness property \begin{equation} \label{last} \norm{\vec{u}}_{L^\infty(Q^{*}_r)} \le C r^{-(n/2+1)} \norm{\vec{u}}_{L^2(Q^{*}_{4r})}. \end{equation} The verification of \eqref{last} requires only a slight modification of the previous arguments (mostly, one needs to replace $Q_r$ by $Q_r^{*}$ and so on), but it is rather routine and we skip the details. \hfil\qed
\end{document} |
\begin{document}
\title{Online Ramsey numbers: Long versus short cycles}
\begin{abstract}
The online Ramsey game is played between Builder and Painter on an infinite board $K_{\mathbb N}$.
In every round Builder selects an edge, then Painter colors it red or blue.
Both know target graphs $H_1$ and $H_2$.
Builder aims to create either a red copy of $H_1$ or a blue copy of $H_2$ in $K_{\mathbb N}$ as soon as possible, and Painter tries to prevent it.
The online Ramsey number $\tilde{r}(H_1,H_2)$ is the minimum number of rounds in which Builder can force a win.
We study $\tilde{r}(C_k,C_n)$ where $k$ is fixed and $n$ is large.
We show that $\tilde{r}(C_k,C_n)=2n+\mathcal O(k)$ if $k$ is even, while $\tilde{r}(C_k,C_n)\le 3n+o(n)$ if $k$ is odd. \end{abstract}
\section{Introduction}
The classic Ramsey number $r(H_1,H_2)$ of graphs $H_1,H_2$ is the smallest number of vertices~$n$ such that every $2$-edge colored complete graph on $n$ vertices contains a monochromatic copy of $H_i$ in the $i$-th color, for some $i\in \{1,2\}$. This presentation naturally leads to a variant where one aims to minimize the number of edges. Introduced by Erd\H os, Faudree, Rousseau, and Schelp~\cite{Erdos1978TheSRN}, the size-Ramsey number $\hat{r}(H_1,H_2)$ of a pair of graphs $H_1,H_2$ is the smallest integer $m$ such that there exists an $m$-edge graph $G$ with the property that every $2$-edge-coloring of $G$ results in a monochromatic copy of $H_i$ in the $i$-th color.
We study the game variant of the size-Ramsey number called online Ramsey number (or online size-Ramsey number). A Ramsey game, introduced by Beck \cite{Beck1993AchievementsGA}, is played between Builder and Painter on an infinite board $K_{\mathbb N}$, i.e.,~a complete graph on the vertex set $\mathbb N$. In each round, Builder selects an edge and Painter colors it either red or blue. Given two finite graphs $H_1$ and $H_2$, Builder aims to create either a red copy of $H_1$ or a blue copy of $H_2$ in $K_{\mathbb N}$ as soon as possible, while Painter tries to prevent it. The online Ramsey number $\tilde{r}(H_1,H_2)$ is the smallest number of rounds in which Builder can ensure the appearance of a red copy of $H_1$ or a blue copy of $H_2$ as a subgraph, provided both players play optimally.
Although the online Ramsey number is upper bounded by the size-Ramsey number (that is always finite), this bound is often far from optimal. Probably the most challenging problem in the online Ramsey game theory is to determine $\tilde{r}(K_n,K_m)$ asymptotically. Though exponential bounds on $\tilde{r}(K_n,K_n)$ are known \cite{Conlon2009OnlineRN,CFGH}, they differ in the power base, similarly as in the case of the corresponding Ramsey numbers. Conlon, Fox, Grinshpun, and He \cite{CFGH} studied the off-diagonal case of $\tilde{r}(K_n,K_m)$ and, among more general results, proved that $\tilde{r}(K_3,K_n)$ is of order $n^3$ up to a polylogarithmic factor.
We focus on the off-diagonal online Ramsey numbers for cycles. Let us mention first a few results involving cycles in the (off-line) Ramsey theory.
The Ramsey number for a pair of cycles of length $n$ was determined by Bondy and Erd\H os \cite{Bondy1973cyclesRN}. The work of Rosta \cite{Rosta1973cyclesI,Rosta1973cyclesII} and Faudree and Schelp \cite{Schelp1974cycles} completed the picture by determining the Ramsey number for different combinations of cycle lengths $(C_k,C_n)$. Their result implies that if $k$ is small and $n$ is big, then $r(C_k,C_n)=n+k/2-1$ for even $k$, while for odd $k$ we have $r(C_k,C_n)=2n-1$.
The size-Ramsey number for two cycles seems much harder to determine. Haxell, Kohayakawa, and {\L}uczak~\cite{HKL} proved that $\hat{r}(C_k,C_n)=\mathcal O(n)$ provided $n \ge k \gg \log n$. Their result is in fact stronger since they considered the induced copies of $C_k$ and $C_n$. The bounds on $\hat{r}(C_k,C_n)$ in terms of multiplicative constants were later improved several times; recently Javadi, Khoeini, Omidi, and Pokrovskiy \cite{JKOP} showed that if $n$ is large enough and $n\ge k\ge \log n + 15$, then $\hat{r}(C_k, C_n)\le 113484\cdot 10^6 n$. More recently, Bednarska-Bzd\k{e}ga and {\L}uczak \cite{BL} proved that $\hat{r}(C_k, C_n)\le A\cdot n$ for an absolute constant $A$, when $k$ does not depend on $n$.
As for the online Ramsey number of two cycles, Bla\v{z}ej, Dvo\v{r}\'{a}k, and Valla \cite{Blazej2019b} showed how Builder can obtain a monochromatic cycle of length $n$ within at most $72n-3$ rounds (and an induced monochromatic cycle in $735n-27$ rounds).
From the above it is clear that $\tilde{r}(C_k, C_n) = \mathcal O(n)$. We aim to determine the precise multiplicative constant of $n$ within $\tilde{r}(C_k, C_n)$ provided $k \ge 3$ is fixed.
For even $k$ we determine $\tilde{r}(C_k, C_n)$ up to an additive term depending on $k$, while for odd $k$ we find a linear upper bound not far from optimal. The main results of our paper are as follows.
\begin{theorem}\label{thm:even}
$\tilde{r}(C_k,C_n) \le 2n + 20k$ for $n\ge 3k$ and even $k\ge 4$. \end{theorem}
\begin{theorem}\label{thm:odd}
$\tilde{r}(C_k,C_n) \le 3n + \log_2 n + 50 k$ for $n\ge 8k$ and odd $k\ge 3$. \end{theorem}
The multiplicative constant $2$ in \Cref{thm:even} is optimal in view of a result by Cyman, Dzido, Lapinskas, and Lo \cite{CDLL}, who proved that for every connected graph $H$ Painter has a strategy such that within $|V(H)|+|E(H)|-1$ rounds neither a red cycle nor a blue copy of $H$ is created. Hence $\tilde{r}(C_k,C_n)\ge 2n-1$ for every $k\ge 3$. The best known lower bound in the case of odd $k$ was proved by Adamski and Bednarska-Bzd\k{e}ga \cite{lowerbound2021}. They showed that for every connected graph $H$ Builder needs at least $\varphi |V(H)| + |E(H)|-2\varphi+1$ rounds (where $\varphi \approx 1.618$ denotes the golden ratio) to force Painter to create a red odd cycle or a blue copy of $H$. In particular, $\tilde{r}(C_k,C_n)\ge (\varphi+1)n-2\varphi+1> 2.6n-3$ for any $n$ and odd $k$.
To summarize, the above lower bounds and \Cref{thm:even,thm:odd} imply that \[
2n-1\le\tilde{r}(C_k,C_n) \le 2n + 20 k \text{\quad for even $k$ and $n\ge 3k$ \quad and} \] \[
2.6n-3<\tilde{r}(C_k,C_n) \le 3n + \log_2 n + 50 k \text{\quad for odd $k$ and $n\ge 8k$.} \]
The upper bound on $\tilde{r}(C_{2k},C_n)$ proves a conjecture posed in \cite{ABC4} that $\tilde{r}(C_{2k},P_n)=2n+o(n)$ for every fixed $k\ge 2$. The exact value of $\tilde{r}(C_{2k},P_n)$ remains open for every $k\ge 3$. The only exact general result is $\tilde{r}(C_{4},P_n)=2n-2$ for $n\ge 8$; the lower bound follows from the bound by Cyman, Dzido, Lapinskas, and Lo, while the upper bound comes from \cite{ABC4}.
For odd $k$ we have the following upper bound, slightly better than an immediate consequence of \Cref{thm:odd}.
\begin{theorem}\label{thm:oddpath}
$\tilde{r}(C_k,P_n) \le 3n + 50k$ for every $n$ and odd $k\ge 3$. \end{theorem}
Thus, in view of the lower bound by Adamski and Bednarska-Bzd\k{e}ga, for odd $k$ we have \[
2.6n-4\le \tilde{r}(C_k,P_n)\le 3n + 50k. \] For $k=3$ a better upper bound $3n-4$ holds for every $n\ge 3$ \cite{lowerbound2021}.
Our paper is organized as follows. We present the proofs of the two main results, \Cref{thm:even,thm:odd}, in \Cref{sec:even_buidler_strategy,sec:odd_buidler_strategy}, respectively. In both sections we use a general tool for shortening long cycles; this tool is developed in \Cref{sec:approx}. We do not focus on optimizing the multiplicative constant of $k$ in \Cref{thm:even,thm:odd,thm:oddpath}, as even minor improvements lead to arguments that are significantly more complex.
\section{Preliminaries}
A graph $H$ is a pair consisting of a vertex set $V(H)$ and an edge set $E(H)$. Let $v(H)$ and $e(H)$ denote the number of its vertices and edges, respectively. By $P_m$ we mean a path on $m\ge 1$ vertices. The \emph{line forest} $L^{(t)}_m$ is a graph on $m\ge 1$ vertices with $t\ge 1$ components where every component is a path. We denote by ${\mathcal L}^{(\le t)}_m$ the family of all line forests on $m$ vertices with at most $t$ components.
We say a graph is colored if every edge is colored blue or red. A graph is red if all its edges are red; analogously we define a blue graph.
Let ${\mathcal G}_1$ and ${\mathcal G}_2$ be nonempty families of finite graphs. The online Ramsey game $\tilde R({\mathcal G}_1, {\mathcal G}_2)$ is played between Builder and Painter on an infinite board $K_{\mathbb N}$, i.e., an infinite complete graph. In every round, Builder chooses a previously unselected edge of $K_{\mathbb N}$ and Painter colors it red or blue. The game ends if after Painter's move there is a red copy of a graph from ${\mathcal G}_1$ or a blue copy of a graph from ${\mathcal G}_2$. Builder tries to finish the game as soon as possible, while Painter aims to delay Builder's win for as long as possible. Let $\tilde{r}({\mathcal G}_1, {\mathcal G}_2)$ be the minimum number of rounds in the game $\tilde R({\mathcal G}_1, {\mathcal G}_2)$, provided both players play optimally. If ${\mathcal G}_i$ consists of one graph $G_i$, we simply write $\tilde{r}(G_1, G_2)$ and $\tilde R(G_1, G_2)$.
Given a colored graph $H$, we also consider a version of the game $\tilde R({\mathcal G}_1, {\mathcal G}_2)$ where the initial board $K_{\mathbb N}$ already contains $H$. We denote this game by $\tilde R_H({\mathcal G}_1, {\mathcal G}_2)$ and the minimum number of rounds Builder needs to achieve his goal in $\tilde R_H({\mathcal G}_1, {\mathcal G}_2)$ is denoted by $\tilde{r}_H({\mathcal G}_1,{\mathcal G}_2)$. Clearly $\tilde{r}_H({\mathcal G}_1,{\mathcal G}_2)\le \tilde{r}({\mathcal G}_1,{\mathcal G}_2)$ and $\tilde R({\mathcal G}_1, {\mathcal G}_2)$ is equivalent to $\tilde R_H({\mathcal G}_1, {\mathcal G}_2)$ with an empty graph $H$. Any game $\tilde R_H({\mathcal G}_1, {\mathcal G}_2)$ will be called shortly a Ramsey game.
Given a partially played Ramsey game, by the \emph{host graph} we mean the colored graph induced by the set of all red and blue edges already drawn on the board. Any vertex that is not incident to any colored edge is called a \emph{free vertex}, and an edge incident to only free vertices is called a \emph{free edge}.
While considering a partially played Ramsey game, we say that Builder can \emph{force a colored graph} $F$ within $t$ rounds, if Builder has a strategy such that after at most $t$ more rounds the host graph contains a copy of $F$.
Throughout the paper we use an idea by Grytczuk, Kierstead, and Pra{\l}at \cite{Grytczuk2008} of forcing two colored paths in a Ramsey game.
\begin{lemma}[\cite{Grytczuk2008}, Theorem~2.3]\label{lem:gryt}
Let $k \ge 1$.
In every Ramsey game within $2k - 1$ rounds Builder can force Painter to create two vertex disjoint monochromatic paths: the red one and the blue one, the sum of whose lengths is equal to $k$. \end{lemma}
The strategy of Builder in the proof of the above lemma is quite simple and can be applied to extending a blue path that is already present in the host graph, provided Painter avoids a red path of a given length. Thus we obtain the following corollary.
\begin{corollary}\label{cor:gryt2} Let $k,m \ge 1$ and let $H$ be a blue path with $0\le e(H)< k$. Assume that in a Ramsey game the host graph contains $H$. Then Builder has a strategy such that either after $2(k-e(H)+m-1)-1$ rounds the host graph contains a blue path of length $k$ or for some $j<k$ after $2(j-e(H)+m)-1$ rounds the host graph contains two vertex disjoint monochromatic paths: the red one of length $m$ and the blue one of length $j$. \end{corollary}
All our proofs are based on the following, quite obvious, observation which we use implicitly.
\begin{lemma}[Triangle inequality for Ramsey games]\label{lem:triangleinequality} Let $F$, $G$, and $H$ be (uncolored) graphs, $F_b$ be the blue copy of $F$ and $C$ be any colored graph. Then \[
\tilde{r}_C(G,H)\le\tilde{r}_C(G,F)+\tilde{r}_{F_b}(G,H). \] In particular, if $C$ is an empty graph, then \[
\tilde{r}(G,H)\le\tilde{r}(G,F)+\tilde{r}_{F_b}(G,H). \] \end{lemma}
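For instance, with $G=C_k$, $F=P_{n+k/2}$, and $H=C_n$, the second inequality splits the task of forcing a blue $C_n$ into first forcing a long blue path and then closing and shortening it into a cycle; this is, in essence, how \Cref{thm:even} is assembled in \Cref{sec:even_buidler_strategy}.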
\section{Approximate cycles}\label{sec:approx}
The key realization of Builder's strategy in $\tilde R(C_k,C_n)$ presented in further sections will be forcing a long blue cycle, sometimes much longer than $n$. Therefore, we need a tool to shorten it, provided that Painter never creates a red $C_k$. The goal of this section is to prove the following theorem.
\begin{theorem}\label{thm:shortenall}
Let $n>3k-3$, $0\le h\le 2n$, and let $H$ be the blue cycle of length $n+h$.
Then
\[
\tilde{r}_{H}(C_k,C_n)\le 3k+10.
\]
\end{theorem}
We start with the case $h=1$. \begin{lemma}\label{lem:shortenbluecyclebyone}
Let $n>3k-3$ and $H$ be a blue cycle of length $n+1$.
Then
\[
\tilde{r}_{H}(C_k,C_n)\le k+2.
\]
\end{lemma}
\begin{proof}
Let $v_0, v_1, \dots, v_n$ denote the vertices of the blue cycle $H$.
Let $v_{n+1+x}=v_x$ for every $x$.
We begin with three easy observations that hold for $q<n$ and any $p$.
\siamver{\begin{enumerate}}{\begin{enumerate}[(1)]}
\item\label{short0}
If a blue edge $v_p v_{p+2}$ is added to $H$, then a blue $C_n$ appears in the host graph.
\item\label{short1}
If we add two blue edges $v_p v_{p+q}$ and $v_{p+2} v_{p+q+1}$ to the cycle $H$, then again we have a blue $C_n$.
See \Cref{fig:observation_short_2}.
\item\label{short2}
If a red edge $v_p v_{p+2k-2}$ is added to $H$, then Builder can finish the game within at most $k-1$ rounds by selecting all edges of the path $v_p v_{p+2}v_{p+4}\dots v_{p+2k-2}$.
\end{enumerate}
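To see (\ref{short0}) and (\ref{short1}), note that in both situations the blue chords bypass exactly one vertex of the blue $(n+1)$-cycle: in (\ref{short0}) the chord $v_pv_{p+2}$ skips $v_{p+1}$, while in (\ref{short1}) the two chords together with the cycle segments $v_{p+2}v_{p+3}\dots v_{p+q}$ and $v_{p+q+1}v_{p+q+2}\dots v_p$ form a blue cycle omitting only $v_{p+1}$.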
\begin{figure}
\caption{
Example of Observation (\ref{short1}) for $q=14$.
}
\label{fig:observation_short_2}
\end{figure}
\begin{figure}
\caption{
Two cases of Builder's strategy in \Cref{lem:shortenbluecyclebyone} for $k=5$.
\textbf{Left:} Painter colors both $v_0v_{2k-1}$ and $v_{k-1}v_{3k-4}$ red.
\textbf{Right:} Painter colors $v_0v_{2k-1}$ blue.
In both cases Builder chooses the dotted edges.
If all of them are red, then red $C_k$ appears.
Otherwise there will be a blue $C_n$.
}
\label{fig:shortenings}
\end{figure}
Now we divide the problem based on the parity of $k$.
\begin{itemize}
\item $k$ is odd.
Builder begins the game $\tilde R_{H} (C_k,C_n)$ by selecting two edges $v_0 v_{2k-1}$ and $v_{k-1} v_{3k-4}$.
\begin{itemize}
\item
If Painter colors both edges red, Builder can finish the game by selecting all edges of the paths $v_0 v_2 v_4 \dots v_{k-1}$ and $v_{2k-1} v_{2k+1} v_{2k+3} \dots v_{3k-4}$.
If any of these edges is blue, then there is a blue $C_n$; otherwise there is a red $C_k$.
\item
If Painter colors $v_0 v_{2k-1}$ blue, then Builder chooses $v_2 v_{2k}$.
If Painter colors it blue, the game ends immediately by (\ref{short1}).
If $v_2 v_{2k}$ is red, Builder can end the game within $k-1$ rounds using (\ref{short2}).
\item
In the remaining case, Painter colors $v_{k-1} v_{3k-4}$ blue.
Then Builder chooses $v_{k-3} v_{3k-5}$ and again, using (\ref{short1}) or (\ref{short2}), he can finish the game within at most $k-1$ rounds.
\end{itemize}
\item $k$ is even.
The proof is very similar to the odd case. Builder begins by selecting edges $v_0 v_{2k-3}$ and $v_{k-2} v_{3k-5}$.
\begin{itemize}
\item
If Painter colors both edges red, then Builder selects all edges of the paths $v_0 v_2 v_4 \dots v_{k-2}$ and $v_{2k-3} v_{2k-1} \dots v_{3k-5}$.
If any of these edges is colored blue, then there is a blue $C_n$; otherwise there is a red $C_k$ in the host graph.
\item
If $v_0 v_{2k-3}$ is blue, then Builder chooses $v_1 v_{2k-1}$. If Painter colors it blue, the game ends immediately by (\ref{short1}).
If $v_1 v_{2k-1}$ is red, Builder can end the game within $k-1$ rounds using (\ref{short2}).
\item
In the remaining case, Painter colors $v_{k-2} v_{3k-5}$ blue.
Then Builder chooses $v_{k-1} v_{3k-3}$ and again, using (\ref{short1}) or (\ref{short2}), he can finish the game within at most $k-1$ rounds.
\siamver{}{\qedhere}
\end{itemize}
\end{itemize}
In each case Builder selects at most $2+1+(k-1)=k+2$ edges, as claimed. \end{proof}
The following lemma will be useful in the proof of \Cref{thm:shortenall} for $h\le k$.
\begin{lemma}\label{lem:shortenbluecyclebyOk}
Let $h\ge 1$, $k\ge 3$, $n>2k$ and let $H$ be a blue cycle on $n+h$ vertices.
Then
\[
\tilde{r}_{H}(C_k,\{C_n,C_{n+1}\})\le h+k+4.
\]
\end{lemma} \begin{proof}
For any integers $p> 2q\ge 0$ we define the colored graph $C(p,q)$ to be the union of a blue cycle $v_0v_1\ldots v_{p-1}$ of length $p$ and a red path $v_0 v_2 v_4 \dots v_{2q}$ of length $q$.
We begin with two easy observations.
\siamver{\begin{enumerate}}{\begin{enumerate}[(1)]}
\item\label{Cpq}
If $p > 2q+2$ and the host graph is $C(p,q)$, then Builder can force $C(p-1,q)$ or $C(p,q+1)$ within $1$ round by selecting edge $v_{2q}v_{2q+2}$.
\item\label{Cpq2}
If $p > 2k+2\ell$, $0\le \ell\le k-2$ and the host graph is $C(p,k+\ell)$, then Builder can force $C(p-2-4\ell,k-2-\ell)$ or a red $C_k$ within two rounds by selecting edges $v_0 v_{2k-2}$ and $v_{2\ell+2} v_{2k+2\ell}$.
This achieves his goal, since coloring both of the edges blue shortens the initial blue cycle by $4\ell+2$ vertices, while the initial red path is shortened by $2\ell+2$ (i.e. the shorter blue cycle has a red path of length $k+\ell-(2\ell+2)=k-\ell-2$ on it); see \Cref{fig:approximate_cycle_cases}.
\end{enumerate}
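Indeed, in observation (\ref{Cpq2}), if one of the two selected edges is red, it closes a red cycle with $k$ vertices of the red path ($v_0,v_2,\dots,v_{2k-2}$ or $v_{2\ell+2},v_{2\ell+4},\dots,v_{2k+2\ell}$, respectively), while if both are blue, the new blue cycle omits exactly the $4\ell+2$ vertices $v_1,\dots,v_{2\ell+1}$ and $v_{2k-1},\dots,v_{2k+2\ell-1}$, and the red path $v_{2\ell+2}v_{2\ell+4}\dots v_{2k-2}$ of length $k-\ell-2$ lies on it.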
\begin{figure}
\caption{
Two cases that appear in observation (\ref{Cpq2}) for $k=8$ and $\ell=0$.
The outlined edges constitute the final structure.
\textbf{Top:}
One of the two edges is red.
\textbf{Bottom:}
Both of the edges are blue.
}
\label{fig:approximate_cycle_cases}
\end{figure}
We will show the strategy for Builder in $\tilde R_H(C_k,\{C_n,C_{n+1}\})$, which repeatedly uses the above observations.
First, let us define a potential function of the host graph.
For $p\ge n$ and $0\le q\le k+1$ we define $f(p,q)=p-n+k-q$ and call it the potential of a colored graph $C(p,q)$.
Given a colored graph $G$ containing a blue cycle on at least $n$ vertices, by the potential $f(G)$ of $G$ we mean the smallest potential among all subgraphs $C(p,q)$ contained in $G$, where $p\ge n$ and $0\le q\le k+1$.
The game starts with the host graph $H=C(n+h,0)$, so $f(H)=h+k$.
We assume that Painter never creates a red $C_k$.
Builder's strategy is focused on decreasing the potential after (almost) every round, based on observations (\ref{Cpq}) and (\ref{Cpq2}).
More precisely, let $C(p,q)$ be the structure with the smallest potential in the host graph after some round of the game.
As long as $p\ge n+2$, Builder plays in the following way.
\siamver{\begin{enumerate}}{\begin{enumerate}[(a)]}
\item
If $q=k+1$ and $p\ge n+6$, then Builder selects two edges based on (\ref{Cpq2}) applied with $\ell=1$.
Then the host graph contains a copy of $C(p-6,q-4)$, so the potential decreases by 2, from $f(p,k+1)$ to $f(p-6,k-3)$.
\item
If either $q=k$ and $p\ge n+6$, or $q<k$, then Builder selects an edge based on (\ref{Cpq}), so that a copy of $C(p-1,q)$ or $C(p,q+1)$ is created within one round.
In both cases the potential of the host graph decreases by 1.
\item\label{ending}
If $q=k$ and $n+2\le p\le n+5$, then Builder uses (\ref{Cpq2}) applied with $\ell=0$.
After two rounds he gets a copy of $C(p-2,k-2)$, so the potential of the host graph does not change.
\end{enumerate}
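For concreteness, the potential changes can be checked directly from the definition of $f$: if $q=k+1$, then $f(p,k+1)=p-n-1$ while $f(p-6,k-3)=p-n-3$, and if $q=k$ and $n+2\le p\le n+5$, then $f(p,k)=p-n=f(p-2,k-2)$.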
The game stops when the host graph $H'$ contains $C(p,q)$ with $p=n$ or $p=n+1$.
Observe that at this moment $q\le k$, so the potential $f(H')$ is non-negative.
Note also that case (\ref{ending}), in which the potential does not change, occurs at most twice during the game, so apart from at most $4$ such rounds the potential decreases on average by at least one per round.
In view of the starting potential $f(H)=h+k$, we conclude that Builder achieves his goal within at most $h+k+4$ rounds.
\end{proof}
Though \Cref{lem:shortenbluecyclebyOk} holds for every $h$, the upper bound it gives depends on $n$ whenever $h$ grows with $n$. The next lemma allows us to shorten very long cycles more efficiently.
\begin{lemma}\label{lem:shortenbluecycle}
Let $k\ge 3$, $k\le h\le 2n$ and let $H$ be a blue cycle of length $n+h$.
Then
\[
\tilde{r}_{H}(C_k,\{C_{n+r},C_{n+r+1},C_{n+r+2}\})\le k+1,
\]
where $r=0$ if $h$ is even and $r=1$ if $h$ is odd.
\end{lemma}
\begin{proof}
Let $p=n+h$ and $q=\lfloor h/2\rfloor$. We have $q\ge \lfloor k/2\rfloor$, $p\ge 3q$ and we need to show that
\[
\tilde{r}_{H}(C_k,\{C_{p-2q},C_{p-2q+1},C_{p-2q+2}\})\le k+1.
\]
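Note that $p-2q=n+h-2\lfloor h/2\rfloor=n+r$, so this is exactly the assertion of the lemma.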
Denote the consecutive vertices of $H$ by $v_0,v_1, \dots, v_{p-1}$.
We present Builder's strategy in two stages.
The first stage depends on the parity of $k$.
\begin{itemize}
\item $k$ is even.
Builder selects edge $v_0v_{2q}$.
If Painter colors it blue, then there is a blue $C_{p-2q+1}$ and the game ends.
Otherwise, we proceed to the second stage.
\item $k$ is odd.
Builder chooses edges $v_0 v_{q}$ and $v_{q} v_{2q}$.
If Painter colors both edges blue, then we have a blue cycle $C_{p-2q+2}$ and the game ends.
Therefore, we can assume that $v_{q} v_{2q}$ is red.
Then Builder chooses $v_{2q}v_{3q}$.
If after Painter's answer both edges $v_0 v_{q}$ and $v_{2q} v_{3q}$ are blue, then we obtain a blue $C_{p-2q+2}$ and finish the game.
If any of these edges is red, it is adjacent to the red edge $v_{q}v_{2q}$.
Because of the symmetry, we can assume that the pair of red edges is $v_{0}v_{q}$ and $v_{q}v_{2q}$ and the game proceeds to the second stage.
\end{itemize}
In the second stage, the vertices $v_0$ and $v_{2q}$ of the blue cycle $H$ are connected with a red path of length $1$ or $2$ (depending on the parity of $k$).
Now Builder selects all edges of a path $P_{k}$ (if $k$ is even) or $P_{k-1}$ (if $k$ is odd) from $v_0$ to $v_{2q}$, on the edge set
\begin{equation*}
\begin{gathered}
\bigcup_{i=0}^{\lfloor k/2\rfloor -2} v_i v_{i+2q+1}
\quad\cup\quad \bigcup_{i=1}^{\lfloor k/2\rfloor -1} v_i v_{i+2q-1}
\quad\cup\ v_{\lfloor k/2\rfloor -1}v_{2q+\lfloor k/2\rfloor -1},
\end{gathered}
\end{equation*}
see \Cref{fig:forced_odd_red_cycle_or_join2}.
Every edge $v_x v_y$ of this path satisfies $2q-1 \le |x-y| \le 2q+1$.
\begin{figure}
\caption{
The path from $v_0$ to $v_{2q}$ in \Cref{lem:shortenbluecycle} for $k=19$ (outlined), edges $v_0 v_q$ and $v_q v_{2q}$ (thin red) and the blue cycle $C_p$.
}
\label{fig:forced_odd_red_cycle_or_join2}
\end{figure}
If Painter avoids a red cycle of length $k$, then one of these edges must be blue.
Such a blue edge cuts off $2q$, $2q-1$ or $2q-2$ vertices from the cycle $H$ so we obtain a blue cycle on $p-2q$, $p-2q+1$ or $p-2q+2$ vertices in the host graph.
Finally, notice that Builder selects at most $1+(k-1)$ edges in the game for even $k$, while for odd $k$ the game lasts at most $3+(k-2)$ rounds. \end{proof}
\begin{proof}[Proof of \Cref{thm:shortenall}] If $h\le k$, then the assertion follows from \Cref{lem:shortenbluecyclebyOk} and \Cref{lem:shortenbluecyclebyone}. Further assume that $k\le h\le 2n$. We apply \Cref{lem:shortenbluecycle}. It shows that, starting from a blue cycle on $n+h$ vertices, Builder can force either a red $C_k$ or one of the blue cycles $C_{n},C_{n+1},C_{n+2},C_{n+3}$ ($C_{n},C_{n+1}$ or $C_{n+2}$ if $h$ is even; $C_{n+1},C_{n+2}$ or $C_{n+3}$ if $h$ is odd) within $k+1$ rounds. Then we apply \Cref{lem:shortenbluecyclebyOk} and \Cref{lem:shortenbluecyclebyone} and the assertion of \Cref{thm:shortenall} follows. In the worst-case scenario, Builder needs $k+1$ rounds to get from the blue $C_{n+h}$ to a blue cycle $C'$ of length at most $n+3$, then $k+7$ rounds to get from $C'$ to $C_{n+1}$, and finally $k+2$ rounds to get a blue $C_n$ from $C_{n+1}$. \end{proof}
\section{Short even cycle vs long cycle}\label{sec:even_buidler_strategy}
In this section, we present Builder's strategy for $\tilde{r}(C_k,C_n)$ where $k$ is even. Supporting lemmas will also be used extensively in the odd cycle case in \Cref{sec:odd_buidler_strategy}. First, let us give an overview of the strategy in \Cref{alg:even_case}.
\begin{algorithm}[ht]
\caption{Builder's strategy for $\tilde{r}(C_k,C_n)$ for an even $k$}\label{alg:even_case}
\begin{algorithmic}
\State Build a red $P_{2k-4}$ or a blue $P_{n+\mathcal O(1)}$ with \Cref{cor:gryt2}.
\If{we have a red $P_{2k-4}$}
\State Build $k$ blue paths of total length of $n+\mathcal O(k)$ with \Cref{lem:icicle}.
\State Join $k$ paths into two with \Cref{lem:join_to_2_paths}.
\State Join two paths into one with \Cref{lem:join_2_paths2}.
\EndIf
\State Join the blue path into an approximate cycle with \Cref{lem:join_2_paths2}.
\State Shorten the approximate cycle to $C_n$ using \Cref{thm:shortenall}.
\end{algorithmic} \end{algorithm}
A big part of the strategy is spent on forcing a long blue path efficiently using a threat of a red $C_k$. For this purpose, we now introduce an icicle path, see \Cref{fig:icicle_path}. \begin{definition}
An \emph{icicle path} $I\!P_{\ell,n}$ is a graph on $n$ vertices that consists of a red path $P_\ell$ on $\ell$ vertices, called its \emph{spine}, and $\ell$ vertex disjoint blue paths (possibly with no edges), called \emph{icicles}, each having one endpoint in the red path. \end{definition}
\begin{figure}
\caption{
Example of an icicle path $I\!P_{14,\nodes}$, i.e., in total there are $\nodes$ vertices and its red path contains $14$ vertices.
}
\label{fig:icicle_path}
\end{figure}
\begin{lemma}\label{lem:icicle}
Let $1\le m\le n$ and $k\geq 3$.
If $B$ is a blue path on $m$ vertices, then
\[
\tilde{r}_{B}\big(C_k, {\mathcal L}^{(\le k)}_n\big) \le 2n-2m.
\] \end{lemma}
\begin{proof}
Having a blue path $P_m$, Builder will play so that after every round of the game all blue edges of the host graph are contained in a vertex disjoint union of a blue path $P_j$, let us call it a special path, and an icicle path $I\!P_{a,t}$.
We define the score of such a host graph, which changes after every round, as
\[
S(I\!P_{a,t},P_j) = t+j+\text{number of blue edges of the host graph}.
\]
At the start of the game we have an icicle path $I\!P_{1,m}$ (the blue path $P_m$) and a free vertex as a special path.
We claim that Builder can play so that the icicle path has at most $k$ icicles all the time.
Furthermore, if the icicle path has $k-1$ or $k$ icicles, then the special path is trivial; moreover if the icicle path has $k$ icicles, then its last icicle is trivial.
We also claim that Builder can increase the score of the host graph by one in every round provided no red $C_k$ is created.
Suppose that our statement is true after round $r\ge 0$, the host graph has an icicle path $I\!P_{a,t}$ and a blue path $P_j$.
Denote vertices of the spine of the icicle path by $v_1,v_2,\dots,v_a$ and let $b_i$ be the number of vertices of the blue icicle with an endpoint $v_i$.
The score now is
\[
S(I\!P_{a,t},P_j) = t+j+\Big(j-1+\sum_{i=1}^a (b_i-1)\Big) = 2t+2j-a-1.
\]
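Here we use that $\sum_{i=1}^a b_i=t$ and that the special path on $j$ vertices contributes $j-1$ blue edges.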
We claim that Builder can increase the score by one in round $r+1$ by proceeding with the following strategy.
Denote one endpoint of the special path $P_j$ by $u$.
We consider three cases, depending on the length of the spine.
\begin{enumerate}
\item\label{case:new_path}
If $a \leq k-2$, then Builder selects $v_a u$.
If Painter colors it red, then the spine is extended by $u$, the special path becomes the icicle attached at $u$, and the icicle path grows to $I\!P_{a+1,t+j}$; we take any free vertex as a new (trivial) special path, and the score is increased by $1$.
Observe that if the spine of the new icicle path has $k-1$ vertices, then the new special path is trivial as required.
If Painter colors $v_a u$ blue, then we forget about the red edge $v_{a-1}v_a$, the path $P_j$ is extended by the blue icicle with the endpoint $v_a$ and we obtain a blue path on $j+b_a$ vertices, while the new icicle path is $I\!P_{a-1,t-b_a}$.
The new blue edge increases the score by $1$.
\item
If $a=k-1$, then Builder selects $v_a u$.
Recall that by the inductive assumption the special path is trivial.
If Painter colors $v_a u$ red, then $v_a u$ increases the icicle path to $I\!P_{k,t+1}$, we take any free vertex as a new (empty) special path, and the score is increased by $1$.
The last blue icicle of $I\!P_{k,t+1}$ is trivial and the new special path is trivial as required.
If Painter colors $v_a u$ blue, then we forget about the red edge $v_{a-1}v_a$, the path $P_j$ is extended by the blue icicle with the endpoint $v_a$ and we obtain a blue path on $j+b_a$ vertices, while the new icicle path is $I\!P_{a-1,t-b_a}$.
The new blue edge increases the score by $1$.
\item
If $a = k$, then Builder selects the edge $v_k v_1$.
By the inductive assumption the special path is trivial and the icicle with the endpoint $v_k$ is trivial.
If Painter colors $v_k v_1$ red, then $C_k$ is created and Builder wins.
If Painter colors it blue, then we forget about the red edge $v_1v_2$, the icicle with the endpoint $v_1$ gets moved to $v_k$ and thereby we obtain a new icicle path $I\!P_{k-1,t}$ with one blue edge more than in $I\!P_{k,t}$.
The score is increased by $1$, the special path remains trivial.
\end{enumerate}
In all cases the statement is true after round $r+1$.
The starting position, with a blue path $P_m$ (viewed as the icicle path $I\!P_{1,m}$) and a trivial special path $u$, has score $S(I\!P_{1,m},u)=m+1+(m-1)=2m$.
As long as there is no red $C_k$, the score increases after every round so after $2n-2m$ rounds it reaches $2n$.
Then we have an icicle path $I\!P_{a,t}$ and a special path $P_j$ such that they contain all blue edges of the host graph and $a\le k$.
Thus $2n=S(I\!P_{a,t},P_j) = 2(t+j)-a-1$ and hence $t+j=(2n+a+1)/2\ge n+1$.
We infer that $I\!P_{a,t}\cup P_j$ contains at most $k$ vertex disjoint blue paths with at least $n$ vertices in total (in the case $a=k$ we ignore the special path, which is trivial). \end{proof}
\begin{observation}\label{obs:avoid_ck}
If there is a red path $P_{k-1}$ with endpoints $v_1$ and $v_{k-1}$, and Builder selects the edges $v_1x$ and $v_{k-1}x$ for some vertex $x \not\in V(P_{k-1})$, then Painter must color at least one of these edges blue if he wants to avoid a red $C_k$, since these two edges together with the red path close a cycle on $k$ vertices. \end{observation}
In the next lemma we show that, having a red path on $k+t-4$ vertices, Builder can force Painter to connect $t$ disjoint paths into two paths. The parity of $k$ is irrelevant.
\begin{lemma}\label{lem:join_to_2_paths}
Suppose that $k\geq 3$, $t\ge 2$ and $H$ is a colored graph consisting of two vertex disjoint graphs: a blue $L^{(t)}_m$ and a red path on $k+t-4$ vertices.
Then
\[
\tilde{r}_{H}\big(C_k, {\mathcal L}^{(\le 2)}_{m+t-2}\big) \le 5(t-2).
\] \end{lemma} \begin{proof}
Let $v_1,\dots,v_{k+t-4}$ denote vertices of the red path.
Notice that \Cref{obs:avoid_ck} can be used between every pair of vertices $v_i$ and $v_{i+k-2}$, for $i\le t-2$.
Starting with $t$ blue paths on $m$ vertices in total, Builder repeats $t-2$ times the following procedure which joins two blue paths.
Take three blue paths $P$, $P'$, $P''$ on $b_1$, $b_2$, $b_3$ vertices, respectively, and let $x_1$, $x_2$, $x_3$ be one of their endpoints, respectively.
If this is the $i$-th iteration of this procedure, then Builder selects edges $x_1v_i$, $x_2v_i$, and $x_3v_i$.
If Painter colors two of them blue, then two of the blue paths are joined through $v_i$ and Builder has achieved the goal of this iteration.
Otherwise, there are two red edges, say $x_1v_i$ and $x_2v_i$.
Builder selects $x_1v_{i+k-2}$ and $x_2v_{i+k-2}$.
Both edges must be blue by \Cref{obs:avoid_ck} so there is a blue path on $b_1+b_2+1$ vertices that joins $P$ and $P'$ through either $v_i$ or $v_{i+k-2}$.
Every five rounds Builder either forces a red $C_k$ or he merges two of the blue paths through an additional vertex.
Thus within $5(t-2)$ rounds Builder obtains either a red $C_k$ or two blue paths with $m+t-2$ vertices in total. \end{proof}
\begin{lemma}\label{lem:join_2_paths}
Let $k\geq 4$ be an even integer.
Suppose that there are two blue vertex disjoint paths $P$ and $Q$ both on $k/2$ vertices and $p,q$ are one of their endpoints, respectively.
Then within $k$ rounds Builder can force a red $C_k$ or a blue path $R$ on at least $k/2$ vertices of $P\cup Q$, with its endpoints $p$ and $q$. \end{lemma} \begin{proof}
Let $u_1,\dots,u_{k/2}$ denote the vertices in $P$ and let $v_1,\dots,v_{k/2}$ denote the vertices in $Q$.
Builder selects edge $u_1v_1$, all edges $u_iv_{k/2-i+1}$ for $i \in [k/2]$, as well as $u_iv_{k/2-i+2}$ for each $2\le i \le k/2$; see \Cref{fig:cycle_knot}.
These edges constitute a cycle on $k$ vertices so if Painter colors all the edges red, then Builder obtains a red $C_k$; otherwise, the blue paths are joined by a blue edge.
The blue edge connects the paths in such a way that the resulting blue path $R$ with endpoints $u_{k/2}$ and $v_{k/2}$ skips at most $k/2$ vertices.
Hence, the final path $R$ contains at least $k/2$ vertices.
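More precisely, a blue edge $u_1v_1$ leaves all $k$ vertices on $R$, a blue edge $u_iv_{k/2-i+1}$ leaves $(k/2-i+1)+i=k/2+1$ of them, and a blue edge $u_iv_{k/2-i+2}$ leaves $(k/2-i+1)+(i-1)=k/2$ of them.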
\begin{figure}
\caption{
Cycle created to join two partial paths for $\frac k2=5$.
A possible resulting path $R$ that would be created if $u_4v_3$ was blue is outlined.
}
\label{fig:cycle_knot}
\end{figure} \end{proof}
Typically, we use \Cref{lem:join_2_paths} either to join two long blue paths or to join the endpoints of a single path into a cycle, losing at most $k/2$ vertices in the process. In most cases the following simpler version of the lemma is sufficient for our needs.
\begin{lemma}\label{lem:join_2_paths2}
Let $k\geq 4$ be an even integer and let $H$ be a blue line forest on $m>k/2$ vertices, with two components.
Then $\tilde{r}_H(C_k,P_{m-k/2})\le k$. \end{lemma}
\begin{proof}
Let $H_1$ and $H_2$ be the blue components of $H$.
If one of the paths $H_1$, $H_2$ has fewer than $k/2$ vertices, then the other one contains a blue path on $m-k/2$ vertices and Builder wins the game without making a single move.
Thus assume that $v(H_1),v(H_2)\ge k/2$.
Let $P$, $Q$ be the blue paths induced on the last $k/2$ vertices of $H_1$ and $H_2$, respectively.
We apply \Cref{lem:join_2_paths} to $P$, $Q$ and obtain either a red $C_k$ or a blue path on at least $v(H_1)+v(H_2)-k/2=m-k/2$ vertices within $k$ rounds. \end{proof}
\begin{theorem}\label{thm:cyclepath}
If $k\geq 4$ is even and $n\in{\mathbb N}$, then $\tilde{r}(C_k,P_n) \le 2n+11k$. \end{theorem}
\begin{proof}
First, Builder applies a strategy from \Cref{cor:gryt2} until a red path $P_{2k-4}$ or a blue path $P_n$ is created.
If we got blue $P_n$, the game ends after at most $2(n-1+(2k-6))-1=2n+4k-15$ rounds.
Otherwise, for some $m<n$ after at most $2(2k-5+m-1)-1=2m+4k-13$ rounds there are vertex disjoint paths: a red path $P_{2k-4}$ and a blue path $P_m$ in the host graph.
Further we assume that the latter case holds.
We also assume that Painter never creates a red $C_k$.
In the next stage of the game Builder forces a blue line forest $L^{(t)}_{n+k/2}$ with $t\le k$, disjoint from the red $P_{2k-4}$.
In view of \Cref{lem:icicle}, this stage lasts at most $\tilde{r}_{P_m}(C_k, {\mathcal L}^{(\le k)}_{n+k/2}) \le 2(n+k/2)-2m=2n-2m+k$ rounds.
Now we have a blue $L^{(t)}_{n+k/2}$ and the red path $P_{2k-4}$.
\Cref{lem:join_to_2_paths} ensures that within the next $5(t-2)$ rounds Builder can force Painter to create a blue line forest $L$ on $t-2+n+k/2\ge n+k/2$ vertices, with at most two components.
Finally, it follows from \Cref{lem:join_2_paths2} that within the next $\tilde{r}_L(C_k,P_n)\le k$ rounds Builder can force a blue path on $n$ vertices.
In the worst case scenario, the game $\tilde R(C_k,P_n)$ lasts not longer than
\[
2m+4k-13+2n-2m+k+5(k-2)+k=2n+11k-23<2n+11k
\]
rounds. \end{proof}
\begin{proof}[Proof of \Cref{thm:even}]
Let $k\ge 4$ be an even integer and $n\ge 3k$.
We assume that during the game Painter never creates a red $C_k$.
In the first stage of the game Builder forces a blue path $P$ on $n+k/2$ vertices.
Based on \Cref{thm:cyclepath} he can achieve this goal within $2n+k+11k=2n+12k$ rounds.
Then, since $n>k/2$, we apply \Cref{lem:join_2_paths} to the ending segments of $P$ and conclude that Builder can force a blue cycle on at least $n$ vertices of $P$, within $k$ rounds.
The blue cycle has $n+h$ vertices with $0\le h\le k/2$ and finally we use \Cref{thm:shortenall} to shorten it into a cycle $C_n$ within $3k+10$ rounds.
The total number of rounds is at most $2n+12k+k+3k+10=2n+16k+10< 2n+20k$. \end{proof}
\section{Short odd cycle vs long cycle}\label{sec:odd_buidler_strategy}
Compared to the even case, the odd case is not easily captured by concise pseudo-code. Hence, along with the formal description we present a diagram of the strategy in \Cref{fig:stagesCases} that shows how the more complex parts of the algorithm fit together.
Builder's strategy in $\tilde R(C_k,C_n)$ is based on wish triangles. This structure may appear during the construction and helps with achieving the goal.
\begin{definition}\label{def:wish_triangle}
Let a \emph{wish triangle} be a colored $C_3$ with either three red edges or one red and two blue edges; see \Cref{fig:wish_triangles}. \end{definition}
\begin{figure}
\caption{The wish triangles}
\label{fig:wish_triangles}
\end{figure}
Before presenting Builder's strategy, we prove several auxiliary lemmata. The first lemma of this section is similar to \Cref{lem:join_2_paths}; however, since $k$ is odd, we cannot use the threat of a red $C_k$ in the same way. We show that a construction similar to the one in the proof of \Cref{lem:join_2_paths} works as long as we have a red path on three vertices connected to the blue paths in advance.
\begin{lemma}\label{lem:red_gluepath}
Let $k\ge 3$ be an odd integer.
Suppose that a colored graph $H$ contains two blue vertex disjoint paths $P$ and $Q$ both on $\lfloor k/2 \rfloor$ vertices and a vertex $x\notin V(P\cup Q)$.
Let $p,p'$ and $q,q'$ be endpoints of $P$ and $Q$, respectively.
Suppose that the edges $xp'$, $xq'$ are red.
Then within $k-2$ rounds Builder can force a red $C_k$ or a blue path $R$ on at least $\lceil k/2\rceil$ vertices of $P\cup Q$, with its endpoints $p$ and $q$. \end{lemma} \begin{proof}
As the endpoints $p'$ and $q'$ of the paths $P$ and $Q$ are connected by a red path of length $2$ through $x$, Builder may use the strategy from the proof of \Cref{lem:join_2_paths}, with the edge joining these two endpoints replaced by the red path through $x$.
Denote the vertices of $P$ by $u_1,\dots,u_{\lfloor k/2\rfloor}$ with $u_1=p'$, and the vertices of $Q$ by $v_1,\dots,v_{\lfloor k/2\rfloor}$ with $v_1=q'$.
More precisely, Builder selects the edges
\begin{align*}
u_iv_{\lfloor\frac k2\rfloor-i+1} & \quad\text{for all $i \leq \Big\lfloor\frac k2\Big\rfloor$ and}\\
u_{i+1}v_{\lfloor\frac k2\rfloor-i+1} & \quad\text{for all $i \leq \Big\lfloor\frac k2\Big\rfloor-1$.}
\end{align*}
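These $k-2$ edges form a path from $u_1$ to $v_1$ that alternates between $P$ and $Q$ and, together with the red edges $u_1x$ and $xv_1$, closes a cycle on $k$ vertices.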
\begin{figure}
\caption{
A red $C_{13}$ created by Builder using the strategy of \Cref{lem:join_2_paths}, assuming he had the red connections $xu_1$ and $xv_1$ to start with.
}
\label{fig:forced_odd_red_cycle_or_join}
\end{figure}
With these $k-2$ edges Builder either creates a red cycle of length $k$ or he joins the blue paths as desired; see \Cref{fig:forced_odd_red_cycle_or_join}. \end{proof}
We now show that we are able to join two blue paths if we have a wish triangle in hand.
\begin{lemma}\label{lem:gluepath}
Let $k\ge 3$ be an odd integer.
Suppose that the graph $H$ contains three vertex disjoint graphs: two blue paths $P$ and $Q$, both on at least $\lfloor k/2\rfloor$ vertices, and a wish triangle.
Let $p$ and $q$ be one of the endpoints of $P$ and $Q$, respectively.
Then within $k+4$ rounds Builder can force a red $C_k$ or a blue path $R$ on at least $\lceil k/2\rceil$ vertices of $P\cup Q$, with its endpoints $p$ and $q$. \end{lemma} \begin{proof}
Let us denote the wish triangle vertices by $t_1$, $t_2$, and $t_3$ such that $t_1t_3$ is a red edge.
Denote the vertices of $P$ by $u_1,u_2,\ldots,u_s$, the vertices of $Q$ by $v_1,v_2,\ldots,v_t$ and suppose that $u_s$ and $v_t$ will be the endpoints of the target path $R$.
First, Builder selects edges $u_1t_1$, $v_1t_1$, $u_2t_3$, and $v_2t_3$.
If both $u_1t_1$ and $v_1t_1$ are blue, then the blue paths are joined through $t_1$ into a blue path on at least $k$ vertices, so we have reached the goal.
If both $u_1t_1$ and $v_1t_1$ are red, then Builder uses \Cref{lem:red_gluepath} to join the paths within $k$ rounds, see \Cref{fig:two_same_edges_case}.
Similarly, we argue the case where $u_2t_3$ and $v_2t_3$ are both blue or both red.
\begin{figure}
\caption{
Case analysis for the first four edges.
Vertex $x$ represents either $t_1$ or $t_3$.
}
\label{fig:two_same_edges_case}
\end{figure}
If none of the previous cases occurs, then the new edges from $t_1$ have distinct colors, and similarly for $t_3$.
We may assume, without loss of generality by swapping $P$ and $Q$, that $t_1u_1$ is blue and $t_1v_1$ is red.
Now we distinguish two cases depending on colors of edges from $t_3$.
If the edge $t_3u_1$ is red and $t_3v_1$ is blue, then depending on the colors of the wish triangle we have two subcases, depicted in \Cref{fig:cycle_knot2}.
If the two edges $t_1t_2$ and $t_2t_3$ of the wish triangle are blue, then we are done because the blue paths $P$ and $Q$ are joined through the blue path $(u_1,t_1,t_2,t_3,v_1)$.
If the wish triangle is red, then we may use the strategy of \Cref{lem:red_gluepath} because $t_1$ and $t_3$ can be thought of as parts of the blue paths and they are joined with a red path of length $2$.
\begin{figure}
\caption{
Case analysis if blue edges from $t_1$ and $t_3$ end up in either different (left and middle) or the same vertices (right).
\textbf{Left:}
The wish path $(t_1,t_2,t_3)$ is blue -- the blue paths get connected.
\textbf{Middle:}
The wish path is red -- use \Cref{lem:red_gluepath} on the outlined subgraph.
\textbf{Right:}
Regardless of the wish triangle colors we use \Cref{lem:red_gluepath} on the outlined subgraph.
}
\label{fig:cycle_knot2}
\end{figure}
If the edge $t_3u_1$ is blue and $t_3v_1$ is red, then we have a single case that does not depend on the colors of the wish triangle, depicted in \Cref{fig:cycle_knot2}.
We can use \Cref{lem:red_gluepath} on the blue paths because $P$ is extended by the blue edge $u_1t_1$ and the resulting paths are connected by the red path $t_1t_3v_1$.
This covers the remaining cases.
In every case Builder selects the four initial edges and then at most $k-2$ further edges following \Cref{lem:red_gluepath}, i.e., at most $k+2\le k+4$ rounds in total, which concludes the proof. \end{proof}
The following two lemmata are corollaries of the previous one. We omit their proofs since the first one is almost the same as the proof of \Cref{lem:join_2_paths2}, while \Cref{lem:gluepath2cycle} follows almost immediately from \Cref{lem:gluepath}.
\begin{lemma}\label{lem:gluepath2}
Let $k\geq 3$ be an odd integer and $m>\lfloor k/2\rfloor$.
Suppose that a colored graph $H$ contains two vertex disjoint graphs: a blue $L^{(2)}_m$ and a wish triangle.
Then $\tilde{r}_H(C_k,P_{m-\lfloor k/2\rfloor})\le k+4$. \end{lemma}
\begin{lemma}\label{lem:gluepath2cycle} Let $k\geq 3$ be an odd integer, $m\ge k$ and let ${\mathcal C}$ be the family of all cycles of length at least $m-\lfloor k/2\rfloor$ and not greater than $m$. Suppose that a colored graph $H$ contains two vertex disjoint graphs: a blue path on $m$ vertices and a wish triangle. Then $\tilde{r}_H(C_k,{\mathcal C})\le k+4$. \end{lemma}
Although wish triangles help to glue blue paths together, they are not sufficient on their own. We now present lemmata that gradually lead to the full proof. Note that our main goal is to produce a long blue path and then to proceed in a way similar to the even case. To get there, we either use wish triangles or exploit the fact that they cannot be created. Throughout the procedure, the longest blue path may be slightly broken, which is captured by the following definition.
\begin{definition}
Let an \emph{almost blue path} be a path that has at most one red edge and its remaining edges are blue. \end{definition}
\begin{lemma}\label{lem:longerpath}
Suppose the colored graph $F$ consists of two components: an almost blue path on $t \ge 1$ vertices and a wish triangle.
Let $s \ge 1$.
Then $\tilde{r}_F(C_k,P_{t+s})\le 2s+12k$. \end{lemma} \begin{proof}
We assume that Painter never creates a red $C_k$.
Let $T$ be the wish triangle in $F$.
If the almost blue path $F\setminus T$ contains a red edge, then we denote by $H$, $H'$ the two maximal blue paths contained in $F\setminus T$; otherwise we put $H=F\setminus T$ and make the technical assumption that $v(H')=0$.
We divide the game into three stages.
In the first stage, Builder applies a strategy from \Cref{cor:gryt2}, extending $H$ or creating a new red path.
This results in a red path $P_{2k-4}$ or a blue path $P_{v(H)+s+k}$.
If we got a blue path on $v(H)+s+k$ vertices, the game proceeds to the third stage (the second stage is omitted) and then the first stage lasts at most $2(s+k+2k-6)-1<2s+6k$ rounds.
Otherwise, for some $m<s+k$ after at most $2(m+2k-5)-1<2m+4k$ rounds new vertex disjoint paths appear in the host graph: a red path $P'$ on $2k-4$ vertices and a blue path $P$ on $v(H)+m$ vertices.
The game proceeds to the second stage.
In the second stage, Builder pretends that he plays the game $\tilde R_P(C_k, {\mathcal L}^{(\le k)}_{t'})$ with $t'=t-v(H')+s+k$, using, apart from $P$, only vertices of the board outside $(V(F)\setminus V(H))\cup V(P')$.
Based on \Cref{lem:icicle}, after at most $2t'-2v(P)=2(t-v(H')+s+k)-2(v(H)+m)=2s+2k-2m$ rounds of the second stage we have a blue line forest on $t-v(H')+s+k$ vertices, with at most $k$ blue components.
This line forest together with $H'$ (if any) form a blue line forest $L$ with at most $k+1$ components, on $t+s+k$ vertices.
Let us recall that the host graph contains also the red path $P'$ on $2k-4$ vertices.
Next, we use \Cref{lem:join_to_2_paths} to join the components of $L$ and to obtain a blue line forest with at most two components and at least $t+s+k$ vertices within $\tilde{r}_L(C_k,{\mathcal L}^{(\le 2)}_{t+s+k})\le 5(k+1-2)= 5(k-1)$ rounds, since $L$ has at most $k+1$ components.
The second stage lasts at most $2s+2k-2m+5(k-1)=2s+7k-2m-5$ rounds.
The third stage begins after less than $2s+6k$ rounds if the second stage was omitted, or after less than $(2m+4k)+(2s+7k-2m-5)=2s+11k-5$ rounds of the first and the second stages otherwise.
In both cases, at the start of the third stage the host graph contains a blue line forest on at least $t+s+k$ vertices with at most two components, as well as the wish triangle $T$.
We use the wish triangle and apply \Cref{lem:gluepath2} to join the blue paths of this forest into a single blue path within $k+4$ rounds.
Hence we obtain a blue path on at least $t+s+k-\lfloor k/2\rfloor>t+s$ vertices.
In total, the number of rounds in the game $\tilde R_F(C_k,P_{t+s})$ is upper bounded by $2s+11k-5+k+4 < 2s+12k$. \end{proof}
\begin{lemma}\label{lem:extendblue}
Suppose $H$ consists of two components: a blue edge $v_1v_2$ and an almost blue path $P_t$ on $t\ge 3$ vertices.
Then Builder in the game $\tilde R_H(C_k,C_n)$ has a strategy such that
\begin{itemize}
\item either after at most 2 rounds there is an almost blue path on $t+2$ vertices,
\item or after at most 4 rounds there is a colored graph with two components: a~wish triangle and an almost blue path on $t-2$ vertices.
\end{itemize} \end{lemma} \begin{proof}
If the colored path $P_t$ contains no red edge, then Builder connects one of its endpoints to $v_1$.
No matter what the color of the new edge is, we get an almost blue path on $t+2$ vertices.
In the other case, let us denote by $u_1u_2$ the red edge of $P_t$.
Builder selects the edges $u_1v_1$ and $u_2v_2$; see \Cref{fig:almost_blue_path_extension} for a depiction of the following case analysis.
If one of the new edges is blue, then the path obtained from $P_t$ by replacing $u_1u_2$ with the path $(u_1,v_1,v_2,u_2)$ gives the result.
If both are red, Builder selects the edge $u_1v_2$ and, if $t\ge 4$, also the edge joining the blue neighbors of $u_1$ and $u_2$ on the path $P_t$.
If $u_1v_2$ is red, then $u_1u_2v_2$ forms a red wish triangle; if it is blue, then $u_1v_1v_2$ is a blue-blue-red wish triangle.
In both cases we obtained a wish triangle and the almost blue path gets shorter by two vertices.
\begin{figure}
\caption{
Builder's strategy for extending an almost blue path by two or forcing a wish triangle.
Final results of the strategy are shown by the outlined edges.
}
\label{fig:almost_blue_path_extension}
\end{figure} \end{proof}
\begin{lemma}\label{lem:path2cycle}
Suppose $k\ge 3$ is an odd integer and $H$ is an almost blue path on $t\ge 2k$ vertices.
Let ${\mathcal C}$ be the family of all cycles of length at least $(t-k)/2$ and not greater than $t$.
Then $\tilde{r}_H(C_k,{\mathcal C})\le k+1$. \end{lemma} \begin{proof}
We assume that Painter never creates a red $C_k$.
We divide the game into two stages but skip the first stage if $H$ is a blue path.
In the first stage, assuming $H$ is not a blue path, denote its red edge by $v_1v'_1$.
There are two maximal blue paths $P$, $P'$ in $H$ so assume that $P$ has endpoints $v_1$ and $v_2$, while $v'_1$ and $v'_2$ are the endpoints of $P'$.
We can assume that $P$ is not shorter than $P'$.
Builder selects the edge $v'_1v_2$.
If Painter colors it blue, we obtain a blue path $H'$ on the vertex set $V(H)$ and proceed to the second stage.
Otherwise, both edges $v_1v'_1$ and $v'_1v_2$ are red.
Then the end segments of $P$ can be thought of as two paths $Q_1,Q_2$, on $\lfloor k/2\rfloor$ vertices each, such that their ends $v_1\in V(Q_1)$ and $v_2\in V(Q_2)$ are connected with a red path of length 2.
Furthermore, $Q_1$ and $Q_2$ are disjoint since $v(Q_1)+v(Q_2)<k\le t/2\le v(P)$.
Based on \Cref{lem:red_gluepath}, within the next $k-2$ rounds Builder can join them so that at most $\lfloor k/2\rfloor$ vertices of $Q_1\cup Q_2$ are not contained in the resulting blue path $R$.
Taking into account the other blue edges of $P$, we have a blue cycle on at least $v(P)-\lfloor k/2\rfloor\ge (t-k)/2$ vertices of $P$.
The game ends in this case.
In the second stage, we have a blue path $H'$ on $t$ vertices in the host graph (if $H$ was a blue path, then $H'=H$).
Let $u_1,u_2,\dots,u_{t}$ be consecutive vertices of $H'$.
Builder selects edges $u_1u_{\lfloor t/2\rfloor}$ and $u_{\lfloor t/2\rfloor}u_{t}$.
If Painter colors one of them blue, then Builder gets a blue cycle on at least $\lfloor t/2\rfloor$ vertices and we are done.
Otherwise, both of the edges are red, so we apply \Cref{lem:red_gluepath} to the two end segments of $H'$, similarly to the previous stage, and conclude that within the next $k-2$ rounds Builder can force a blue cycle on at least $t-\lfloor k/2\rfloor$ vertices of $H'$.
Summarizing, the second stage lasts at most $2+k-2=k$ rounds and results in a blue cycle of length at least $(t-k)/2$.
It is not hard to verify that in all cases the game lasts at most $1+k$ rounds and the vertex set of the target blue cycle is contained in $V(H)$. \end{proof}
Aside from an almost blue path, Builder's strategy will exploit another structure, which we formally define next: a blue path interlaced with every other vertex of a red path of twice the length. \begin{definition}
A \emph{dragon tail} of length $m$, denoted by $\mathcal T(m)$, is a graph that consists of a red path of length $2m$ whose odd vertices are connected into a blue path; see \Cref{fig:dragon_tail}.
\begin{figure}
\caption{Dragon tail graph $\mathcal T(7)$}
\label{fig:dragon_tail}
\end{figure} \end{definition}
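In particular, $\mathcal T(m)$ has $2m+1$ vertices: writing its red path as $u_0w_1u_1w_2\dots w_mu_m$, the blue path is $u_0u_1\dots u_m$; this is the notation used in the proof of \Cref{lem:tail2cycle} below.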
\begin{lemma}\label{lem:redmatching}
Suppose $H$ is a red matching of $m$ edges.
Then Builder in the game $\tilde R_H(C_k,C_n)$ has a strategy such that for two starting vertices $s_1,s_2\notin V(H)$ one of the following holds.
\begin{itemize}
\item For some $t<m$, after at most $2t+6$ rounds there is a colored graph that contains vertex disjoint: two wish triangles and a blue path on $t$ vertices such that if $t\ge 2$, then the path starts in $s_1$ or $s_2$.
\item After $2m$ rounds there is a dragon tail of length $m-2$ that starts in $s_1$ or $s_2$.
\end{itemize} \end{lemma}
\begin{proof}
Builder gradually builds a dragon tail of length $d$.
In every step, Builder will select two edges that either form a new wish triangle or they extend the dragon tail by one.
Initially, the dragon tail has $0$ length and it starts in $s_1$.
Having a partially built dragon tail of length $d$ in hand, let $u$ be its last vertex and let $vw$ be an unused red edge, i.e.,~$vw$ is an edge of $H$ such that Builder has not selected any edge incident to $v,w$ yet.
Builder selects edges $uv$ and $uw$.
Suppose that Painter colors edges $uv$ and $uw$ with different colors.
Without loss of generality suppose that $uv$ is red and $uw$ is blue.
Then the dragon tail is extended by one as the new blue edge $uw$ is adjacent to red edges $uv$ and $vw$.
For the next step, Builder assigns $u:=w$, i.e., in the next step Builder selects the two edges from the vertex that was the endpoint of the blue edge, see \Cref{fig:dragon_tail2}.
\begin{figure}
\caption{
Initially red matching after six steps of Builder's strategy.
The resulting blue path and the wish triangle are outlined.
Observe the dragon tail $\mathcal T(5)$ spanning from $s$ to $u$.
}
\label{fig:dragon_tail2}
\end{figure}
If the selected edges $uv$ and $uw$ are of the same color, then a wish triangle $uvw$ was created.
Builder adds this wish triangle to a set of wish triangles $T$ and shortens the dragon tail by one so that it does not overlap with the wish triangle.
Let us remark that if the first wish triangle appears in the first step, then Builder chooses $s_2$ and starts building a dragon tail again.
If the second wish triangle was created with $s_2$ as its vertex, then Builder finishes the game with the two wish triangles and any free vertex as the blue path on $t=0$ vertices.
Builder continues increasing the dragon tail as long as there are less than two wish triangles and there are unused red edges of the matching.
In the case where the second wish triangle is created at the moment the dragon tail has length $d\ge 1$, in addition to the two wish triangles Builder has a blue path $P$ of length $d-1$ as an induced subgraph of the dragon tail $\mathcal T(d)$ built so far, and the path has one of its endpoints in $s_1$ or $s_2$.
Notice that then the game lasts at most $2(d-1)+8$ rounds, since Builder has selected $d-1$ pairs of edges incident to $P$ and an additional $4$ pairs of edges, two pairs for every wish triangle disjoint from the partially built dragon tail.
After $2m$ rounds there are no unused red edges, since each of the $m$ red edges of $H$ is used in exactly one step consisting of two rounds.
If there is at most one wish triangle in the host graph, then Builder obtains a dragon tail of length $m-2$ (or longer if there is no wish triangle): every step either lengthens the tail by one or creates a wish triangle and shortens it by one. \end{proof}
Having a dragon tail in hand, let us now show that Builder can force a long blue cycle.
\begin{lemma}\label{lem:tail2cycle}
Suppose $H$ is a dragon tail of length $m$ and $u_0u_1$ is its first blue edge.
Then Builder can play so that after less than $\log_2 m+8$ rounds of the game $\tilde R_H(C_k,C_n)$ there is a red $C_k$ or a blue cycle that consists of more than $m-k$ but at most $m+1$ vertices of the dragon tail and contains the edge $u_0u_1$. \end{lemma}
\begin{proof}
We assume that Painter does not create a red $C_k$.
Suppose that $m>k\ge 3$ and a dragon tail $H$ is a union of a blue path $u_0 u_1 \dots u_m$ and a red path $u_0w_1u_1\dots w_m u_m$.
In the first round of $\tilde R_H(C_k,C_n)$ Builder selects the edge $u_0u_m$.
If Painter colors it blue, then Builder reached his goal by obtaining a blue $C_{m+1}$.
From now on, we assume that $u_0u_m$ is red.
Note that if Builder selected every edge from $u_0$ to $u_s$ for each $0 < s < m$, then for some index $j$ there would be a blue $u_0u_j$ and a red $u_0u_{j+1}$, because $u_0u_1$ is blue and $u_0u_m$ is red.
However, in order to find such an index $j$, Builder can select far fewer edges: he performs a standard binary search (see \Cref{fig:dragon_tail_halving}).
More precisely, Builder selects $u_0u_{\lfloor m/2\rfloor}$.
If Painter colors it red, then Builder focuses on part of the path from $u_0$ to $u_{\lfloor m/2\rfloor}$; otherwise, he focuses on part of the path from $u_{\lfloor m/2\rfloor}$ to $u_m$.
He then continues selecting edges $u_0u_i$ using the same approach on the focused part.
In the end he ends up focusing on a part that contains only two vertices and therefore finds an index $1\le j\le m-1$ such that the edge $u_0u_j$ is blue and $u_0u_{j+1}$ is red.
At each step the number of vertices between endpoints of the focused path is halved so this procedure takes at most $\lceil \log_2 m\rceil$ rounds.
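For instance, for $m=8$ Builder first selects $u_0u_4$; if it is red he continues with $u_0u_2$ inside the part $u_0,\dots,u_4$, and if it is blue he continues with $u_0u_6$ inside the part $u_4,\dots,u_8$, so that after $\lceil\log_2 8\rceil=3$ selected edges the index $j$ is located; cf.~\Cref{fig:dragon_tail_halving}.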
If $j \ge m-k$, then we have a blue cycle $u_0 u_1 \dots u_j$ on more than $m-k$ vertices.
\begin{figure}
\caption{
Binary halving example on $\mathcal T(8)$ with order of edge placements by the Builder.
If the outlined blue cycle is long enough, then Builder wins immediately.
}
\label{fig:dragon_tail_halving}
\end{figure}
From now on suppose that $j<m-k$.
In the next round Builder selects the edge $u_{j+1}u_m$.
We consider two possible Painter's responses.
If $u_{j+1}u_m$ is red, then Builder achieves his goal by selecting $u_0u_s$ for $s=m-(k-3)/2$, see \Cref{fig:dragon_red_case}.
If Painter colors $u_0u_s$ red, then Builder obtains a red cycle on $k$ vertices $u_s w_{s+1} u_{s+1} w_{s+2} \dots u_{m-1} w_m u_m u_{j+1} u_0$.
If Painter colors it blue, Builder obtains a blue cycle on $m-(k-3)/2$ vertices $u_j u_{j+1}\dots u_s u_1 u_2 \dots u_{j-1}$ (note that this is more than $m-k$).
\begin{figure}
\caption{
Cases where $u_{j+1}u_m$ is red within $\mathcal T(8)$ assuming $k=11$.
Resulting cycles are outlined; the outline below edges is for the red cycle and the outline above edges is for the blue one.
}
\label{fig:dragon_red_case}
\end{figure}
Suppose that $u_{j+1}u_m$ is blue.
We consider two cases based on the value of $k$ as we need to perform extra steps when $k$ is very small.
For the first case, assume that $k\ge 7$.
Builder achieves his goal by playing $u_{j-1}u_t$ with $t=j+(k-3)/2$, see \Cref{fig:dragon_blue_case}.
If Painter colors $u_{j-1}u_t$ red, then we have a red cycle on $k$ vertices $u_{j-1} w_j u_j w_{j+1} \dots u_{t-1} w_t u_t$.
If Painter colors it blue, then we have a blue cycle on at least $m-k$ vertices $u_0 u_1 \dots u_{j-1} u_t u_{t+1} \dots u_m u_{j+1} u_j$.
\begin{figure}
\caption{
Cases where $u_{j+1}u_m$ is blue within $\mathcal T(8)$ assuming $k=11$.
Resulting cycles are outlined; the outline below edges is for the red cycle and the outline above edges is for the blue one.
}
\label{fig:dragon_blue_case}
\end{figure}
In the other case where $k\le 5$, Builder always selects $u_{j-1}u_{j+2}$.
If it is colored blue, then we have the same long blue cycle as in the first case.
Hence, assume that the edge $u_{j-1} u_{j+2}$ is red.
Builder proceeds based on the exact value of $k$, see \Cref{fig:dragon_small_cases}.
\begin{figure}
\caption{
Closeup of vertices from $u_{j-1}$ to $u_{j+2}$ of the dragon tail; we see the final set of cases where $u_{j+1}u_m$ is blue for $k=5$ (left) and $k=3$ (right).
Note how the created (dotted) edges form a red cycle of length $k$ if Painter colors them red.
The final blue cycle is outlined.
}
\label{fig:dragon_small_cases}
\end{figure}
If $k=5$, then in the next two rounds Builder forces edges $u_{j-1}w_{j+1}$ and $w_{j+1}u_{j+2}$ blue and hence we get a blue cycle longer than $m-k$, on vertices
\[
u_0 u_1 \dots u_{j-1} w_{j+1} u_{j+2} u_{j+3} \dots u_m u_{j+1} u_j.
\]
If $k=3$, then in the next four rounds Builder forces a blue path $u_{j+2}w_{j}w_{j+1}w_{j+2}w_{j-1}$ and again a cycle longer than $m-k$ arises on vertices
\[
u_0 u_1 \dots u_{j-1} w_j w_{j+1} w_{j+2} u_{j+2} u_{j+3} \dots u_m u_{j+1} u_j.
\]
Finally, observe that in all cases Builder achieves his goal within at most $1+\lceil \log_2 m\rceil+6$ rounds of the game $\tilde R_H(C_k,C_n)$. \end{proof}
\begin{definition}
The \emph{dragon graph} $\mathfrak{D}(t,m)$ is a graph that consists of a blue cycle on $t$ vertices and a dragon tail $\mathcal T(m)$ of length $m$ that shares one of its endpoints $u$ with the blue cycle, together with a blue edge between a neighbor of $u$ on the cycle and the neighbor of $u$ on the blue path of the dragon tail; see \Cref{fig:dragon}. \end{definition}
\begin{figure}
\caption{Dragon graph $\mathfrak{D}(13,8)$ with outlined path that is used instead of edge $u_0u_1$.}
\label{fig:dragon}
\end{figure}
\begin{lemma}\label{lem:blueredending}
Suppose $H$ is a dragon graph $\mathfrak{D}(t,m)$, $t\ge 3$ and $m>k\ge 3$.
Let ${\mathcal C}$ be the family of all cycles of length at least $t+m-k$ and not greater than $t+m$.
Then
\[
\tilde{r}_{\mathfrak{D}(t,m)}(C_k,{\mathcal C})\le \log_2 m +8.
\] \end{lemma}
\begin{proof}
Let $u_0u_1$ be the first edge of the tail of the dragon graph.
Suppose that $u_0$ lies on the blue cycle $C$ of the dragon graph, $x$ is its neighbor on the cycle, and the edge $xu_1$ is blue.
Based on \Cref{lem:tail2cycle}, Builder can play so that after at most $\log_2 m+8$ rounds of the game $\tilde R_H(C_k,C_n)$ there is a blue cycle $C'$ on more than $m-k$ vertices of the dragon tail with $u_0u_1\in E(C')$, see \Cref{fig:dragon}.
Observe that $(C\cup C'\cup\{xu_1\})\setminus\{u_0u_1,xu_0\}$ is a blue cycle on $v(C)+v(C')-1\ge t+(m-k+1)-1=t+m-k$ vertices of $H$. \end{proof}
\subsection{Builder's strategy}
Throughout, assume that Painter never creates a red $C_k$; otherwise Builder has already reached his goal. Builder's strategy for odd $k$ is presented in three separate stages. The first stage ends if either the colored graph has $2n+8k$ vertices or it contains two wish triangles. In the second stage, Builder creates a blue cycle on at least $n$ vertices. In the third stage, Builder shortens the blue cycle to exactly $n$ vertices.
\subsubsection*{Stage I}
Throughout the first stage, Builder keeps three vertex disjoint structures: a red matching $M$, an almost blue path $B$, and a set of wish triangles $T$. As long as the host graph has fewer than $2n+8k$ vertices and $T$ contains fewer than two triangles, Builder draws a free edge. If it is colored red, he adds it to $M$. Otherwise, he plays according to \Cref{lem:extendblue}, which either extends $B$ or creates a wish triangle and shortens $B$ by two vertices. If a wish triangle was created, Builder puts it into $T$, and once $T$ contains two triangles, the first stage ends. See the decision diagram in \Cref{fig:stagesCases}.
Suppose that at the end of Stage I we have $v(B)=t$, $e(M)=m$ and the host graph is $H_1$. Based on \Cref{lem:extendblue}, one can calculate that the number of rounds in Stage I is not greater than \[
e(M)+\frac32 (v(B)+4)+10=m+\frac32 t+16. \] The first term accounts for the one round used for each edge of $M$, the second term accounts for the three edges created for every two vertices of $B$ (plus $4$ because of the vertices lost when wish triangles are created), and the last term accounts for the $5$ rounds spent creating each of the at most two wish triangles.
The number of vertices of the host graph at the end of Stage I satisfies the inequalities \begin{equation}\label{eq:first_stage}
v(H_1)\le 2n+8k\text{ and }2m+t\le v(H_1)\le 2m+t+8. \end{equation} Furthermore, $v(H_1)= 2n+8k$ if $T$ contains fewer than two wish triangles.
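For later reference, note that the inequality $2m+t\le v(H_1)$ from (\ref{eq:first_stage}) turns the Stage I round bound into
\[
m+\frac32 t+16\le \frac{v(H_1)-t}{2}+\frac32 t+16=\frac12 v(H_1)+t+16,
\]
which is the form in which it enters several of the estimates in Stage II.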
\subsubsection*{Stage II} This stage starts with the host graph $H_1$, containing $M$, $B$, and $T$ from Stage I. The goal is to obtain a blue cycle of length between $n$ and $2n+\mathcal O(k)$.
We consider three cases; the first one is based on the appearance of two wish triangles. In the other cases, only an almost blue path $B$ on $t$ vertices and a red matching $M$ with $m$ edges are important for Builder. Then, in view of the calculations at the end of Stage I, we have $t+2m\ge v(H_1)-8 = 2n+8k-8$, hence either $M$ is large or $B$ is long -- these constitute the second and the third case of this stage. See \Cref{fig:stagesCases} for an overview of how the cases interact.
\begin{figure}
\caption{
Overview of Builder's strategy in Stage I, Stage II, and Stage III.
$\bigtriangleup$ denotes a wish triangle.
Thick rectangles signify Builder's steps (with number of rounds in brackets); angled corners are decisions; purple boxes with rounded corners are structures that Builder obtained in that particular phase of the strategy.
Arrows from lemmata point to graphs that may be obtained by them.
}
\label{fig:stagesCases}
\end{figure}
\paragraph*{Two wish triangles Case} In this case Builder focuses on two wish triangles in $T$ and the almost blue path $B$ on $t\le 2n+8k$ vertices ($t=1$ for an empty path). If $t> n+\lfloor k/2\rfloor$, then we define $B'$ as an almost blue path on $t'=n+\lfloor k/2\rfloor$ vertices, contained in $B$; otherwise we put $B'=B$ and $t'=t$. We apply \Cref{lem:longerpath} for $s=n-t'+\lfloor k/2\rfloor$, the path $B'$ and the first of the wish triangles from $T$. Thereby Builder gets a blue path $B''$ on $t'+s=n+\lfloor k/2\rfloor$ vertices, within $2s+12k\le 2n-2t'+13k$ rounds. Then, having the blue path $B''$ and the second wish triangle from $T$, in view of \Cref{lem:gluepath2cycle} Builder can force a blue cycle of length between $n$ and $n+\lfloor k/2\rfloor$ within the next $k+4$ rounds. This ends Stage II. The number of rounds of the second stage in the Two wish triangles Case is thus at most $(2n-2t'+13k)+(k+4)=2n-2t'+14k+4$.
The other cases may end up invoking this case as a subroutine, so the above bound will also be taken into account in the analysis of the total number of rounds.
In order to calculate the total number of rounds, observe that the definition of $t'$ implies that $t-2t'<0$ for $t>n+\lfloor k/2\rfloor$, while for $t\le n+\lfloor k/2\rfloor$ we have \begin{equation}\label{eq:eight_k}
t-2t'\le v(H_1)-2(n+\lfloor k/2\rfloor)<8k. \end{equation} Now we are ready to bound the number of rounds of the first two stages in the Two wish triangles Case. This number is not greater than \[
(m+\frac32 t+16)+(2n-2t'+14k+4) \putabove{\le}{(\ref{eq:first_stage})} \frac12v(H_1)+t-2t'+2n+14k+20\putabove{\le}{(\ref{eq:first_stage},\ref{eq:eight_k})} 3n+26k+20. \]
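Spelled out, the first estimate uses $2m+t\le v(H_1)$ from (\ref{eq:first_stage}) in the form $m+\frac32 t\le\frac12 v(H_1)+t$, and the second estimate uses $v(H_1)\le 2n+8k$ from (\ref{eq:first_stage}) together with $t-2t'<8k$ from (\ref{eq:eight_k}):
\[
\frac12v(H_1)+t-2t'+2n+14k+20< (n+4k)+8k+2n+14k+20= 3n+26k+20.
\]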
\paragraph*{Red matching Case} In this case we assume that there are less than two wish triangles in $T$ and the almost blue path $B$ is short, namely $t<2k$. Then Builder ignores $T$ and $B$, focuses on $M$ and plays according to \Cref{lem:redmatching} choosing as starting points any two free vertices. Then two situations can happen.
\begin{enumerate}
\item
For some $t'<m$, after at most $2t'+6$ rounds, we have a blue path on $t'$ vertices and two wish triangles.
Then Builder continues as in the Two wish triangles Case
and creates a blue cycle of length between $n$ and $n+\lfloor k/2\rfloor$.
The number of rounds of Stage II in this case is at most $(2t'+6)+(2n-2t'+14k+4)=2n+14k+10$.
Together with Stage I, for $t<2k$ we have at most
\[
(m+\frac32 t+16)+(2n+14k+10)\le \frac12 v(H_1)+t+2n+14k+26<3n+20k+26
\]
rounds.
\item
After $2m$ rounds we have a dragon tail of length $m-2$.
Then Builder plays as in \Cref{lem:tail2cycle} and creates a blue cycle of length between $m-2-k$ and $m-1$, within less than $\log_2 m+8$ rounds.
Observe that $m-1\le(v(H_1)-t)/2\le n+4k$ and $m-2-k\ge (v(H_1)-8-t)/2-2-k> v(H_1)/2-2k-6\ge n$ (the arithmetic behind these inequalities is spelled out right after the present case distinction).
Thus we have a blue cycle of length between $n$ and $n+4k$ and the number of rounds of Stage II in this case is less than $2m+\log_2 m+8$.
The game proceeds to Stage III.
The number of rounds of Stages I and II in this case is at most
\[
(m+\frac32 t+16)+(2m+\log_2 m+8)\le \frac32 v(H_1)+\log_2 m+24\le 3n+\log_2 n+12k+26.
\]
\end{enumerate}
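For completeness, we verify the two inequalities used in the second subcase above. Since fewer than two wish triangles were created, Stage I ended because the host graph reached $v(H_1)=2n+8k$ vertices, and in the Red matching Case we have $t<2k$. Together with (\ref{eq:first_stage}) this gives
\[
m-1\le\frac{v(H_1)-t}{2}\le\frac{v(H_1)}{2}=n+4k
\]
and
\[
m-2-k\ge\frac{v(H_1)-8-t}{2}-2-k>\frac{v(H_1)-8-2k}{2}-2-k=\frac{v(H_1)}{2}-2k-6=n+2k-6\ge n,
\]
where the last inequality uses $k\ge 3$.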
\paragraph*{Almost blue path Case} In this case we assume that there are less than two wish triangles in $T$ and the almost blue path $B$ has at least $2k$ vertices. Then Builder first focuses on $B$ and based on \Cref{lem:path2cycle} he forces a blue cycle $C$ such that $(t-k)/2\le v(C)\le t$, within at most $k+1$ rounds. If $v(C)\ge n$, then we end Stage II. Otherwise, Builder applies his strategy from \Cref{lem:redmatching}, using as starting vertices two adjacent vertices $s_1,s_2$ of the blue cycle $C$. Furthermore, he never selects edges incident to vertices of $V(C)\setminus \{s_1,s_2\}$. This results in two subcases. \begin{enumerate}
\item
For some $s<m$ after $2s+6$ rounds the part of the host graph disjoint from $V(C)\setminus \{s_1,s_2\}$ contains three vertex disjoint graphs: two wish triangles and a blue path $B'$ on $s$ vertices such that if $s\ge 2$, then $B'$ starts in $s_1$ or $s_2$.
If $s=1$, then we have two wish triangles and a blue path on the vertex set $V(C)\setminus\{s_1,s_2\}$, disjoint from the wish triangles.
If $s\ge 2$, then $B'$ starts in $s_1$ or $s_2$, say $s_2$.
Then $s_1$ is the only vertex of $C$ which may be a vertex of a wish triangle.
Thus the sum of $B'$ and the blue path $C\setminus\{s_1\}$ is a blue path on $t'=s+v(C)-1$ vertices.
Further Builder proceeds as in Two wish triangles Case and creates a blue cycle of length between $n$ and $n+\lfloor k/2\rfloor$.
The number of rounds of Stage II in this case is at most
\[
(k+1)+(2s+6)+(2n-2t'+14k+4)=2n-2v(C)+15k+13\le 2n-t+16k+13.
\]
The number of rounds of Stages I and II is
\[
(m+\frac32 t+16)+(2n-t+16k+13)\le \frac12 v(H_1)+2n+16k+29\le 3n+20k+29.
\]
\item
After $2m$ rounds Builder obtains a dragon $\mathfrak{D}(v(C),m-2)$.
Then we can apply \Cref{lem:blueredending} to that dragon, since
\begin{eqnarray*}
m-2 \ge \frac12(v(H_1)-8-v(B))-2&\ge& \frac12(v(H_1)-2v(C)-k)-6\\
&\ge& \frac12(v(H_1)-2n-k)-10>k.
\end{eqnarray*}
Because the dragon has $v(C)+m-2$ vertices and
\[
2n+8k>v(C)+m-2\ge \frac12(t-k)+m-2\ge\frac12(v(H_1)-8-k)-2> n,
\]
Builder gets a blue cycle of length between $n+1$ and $2n+8k$ and it takes him less than $\log_2 m + 8$ next rounds.
The number of rounds of Stage II in this case is at most
\[
(k+1)+2m+(\log_2 m + 8)=2m+\log_2 m+k+9.
\]
Together with Stage I it gives
\begin{eqnarray*}
(m+\frac32 t+16)+(2m+\log_2 m+k+9)&\le& \frac32 v(H_1)+\log_2 m+k+25\\
&\le& 3n+\log_2 n+13k+27
\end{eqnarray*}
rounds.
\end{enumerate}
\subsubsection*{Stage III}
Builder now has a blue cycle of length between $n$ and $2n+8k$. For $n\ge 8k$ we apply \Cref{thm:shortenall} to get
a blue $C_n$, which finishes the game. The number of rounds in Stage III is at most $3k+10$.
Summarizing, the total number of rounds in the game is less than \[
3n+\log_2 n+26k+27+3k+10< 3n+\log_2 n+50k. \]
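Indeed, the bound $3n+\log_2 n+26k+27$ dominates each of the five case bounds obtained for Stages I and II ($3n+26k+20$, $3n+20k+26$, $3n+\log_2 n+12k+26$, $3n+20k+29$ and $3n+\log_2 n+13k+27$), and the final comparison amounts to
\[
26k+27+3k+10=29k+37<50k\qquad\text{for }k\ge 3.
\]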
\section{Short odd cycle vs long path}
It remains to prove \Cref{thm:oddpath}. The proof is similar to the argument in the previous section; however, it is simpler because a single wish triangle suffices for Builder's needs and we do not have to perform the binary search on a dragon tail. Thus we repeat Stage I; then, in Stage II, the Two wish triangles Case ends as soon as the blue path $B''$ is built. As for the Red matching Case and the Almost blue path Case, we repeat the argument but finish immediately after creating a path of length at least $n$. We do not need Stage III. Furthermore, we do not need the assumption that $n\ge 8k$, since it is used in Stage III only. Summarizing, after some rough calculations, we obtain that Builder can force a red $C_k$ or a blue $P_n$ within less than $3n+40k$ rounds.
\section{Future work}
We have proved that $\tilde{r}(C_{k},P_n)=2n+o(n)$ for every fixed even $k\ge 3$. We have no such asymptotically tight result in the case of odd $k$. However, our results, as well as the lower bound on $\tilde{r}(C_k,C_n)$, suggest that for fixed odd $k$ the online Ramsey number $\tilde{r}(C_k,C_n)$ is not far from $3n$. We conjecture that it is $(3+o(1))n$.
\begin{conjecture}
$\tilde{r}(C_{k},C_n)=3n+o(n)$ for every fixed odd $k\ge 3$. \end{conjecture}
Another question is how the order of the host graph affects the number of rounds in the game. One can verify that in the proofs of \Cref{thm:even} and \Cref{thm:odd} Builder can keep the order of the host graph not greater than $n+ck$ for even $k$, and $2n+ck$ for odd $k$, where $c$ is some absolute (and not large) constant. For large $n$ these two numbers are quite close to the corresponding Ramsey numbers $r(C_k,C_n)$, equal to $n+k/2-1$ and $2n-1$, respectively. It would be interesting to know what happens in a restricted version of the Ramsey game, when Builder is allowed to keep the order of the host graph not greater than $r(C_k,C_n)$. Clearly, the number of rounds in such a game is bounded by the restricted size Ramsey number $r^*(C_k,C_n)$, but this number is known only for odd $k$ and is quadratic in $n$. More precisely, $r^*(C_k,C_n)=\lceil (2n-1)(n+1)/2 \rceil$ for any fixed odd $k\ge 3$ and large enough $n$, as shown by \L{}uczak, Polcyn and Rahimi \cite{LPR}.
\end{document}
\begin{document}
\markboth{J.~Struckmeier} {Extended Hamilton-Lagrange formalism}
\title{EXTENDED HAMILTON-LAGRANGE FORMALISM AND\\ ITS APPLICATION TO FEYNMAN'S PATH INTEGRAL\\ FOR RELATIVISTIC QUANTUM PHYSICS} \author{J\"URGEN STRUCKMEIER} \address{Gesellschaft f\"ur Schwerionenforschung (GSI), Planckstrasse~1, 64291~Darmstadt, Germany\\ Goethe University, Max-von-Laue-Str.~1, 60438~Frankfurt am Main, Germany\\ [email protected]} \maketitle \begin{history} \received{10 October 2008}
\end{history} \begin{otherinfo} Published in: Int.~J.~Mod.~Phys~E, Vol.~18, No.~1 (2009), pp.~79--108 \end{otherinfo} \begin{abstract} With this paper, a consistent and comprehensive treatise on the foundations of the \emph{extended Hamilton-Lagrange formalism} will be presented. In this formalism, the system's dynamics is parametrized along a time-like system evolution parameter $s$, and the physical time $t$ is treated as a \emph{dependent} variable $t(s)$ on equal footing with all other configuration space variables $q^{i}(s)$. In the action principle, the conventional classical action $L\,dt$ is then replaced by the generalized action $L_{\mathrm{e}}ds$, with $L$ and $L_{\mathrm{e}}$ denoting the conventional and the extended Lagrangian, respectively. Supposing that both Lagrangians describe the same physical system then provides the correlation of $L$ and $L_{\mathrm{e}}$. In the existing literature, the discussion is restricted to only those extended Lagrangians $L_{\mathrm{e}}$ that are \emph{homogeneous forms of first order} in the velocities. As a result, the Legendre transformation of $L_{\mathrm{e}}$ to a corresponding extended Hamiltonian is \emph{singular} and thus does not provide us with an equivalent extended Hamiltonian $H_{\mathrm{e}}$.
In this paper, it is shown that a class of extended Lagrangians $L_{\mathrm{e}}$ exists that are correlated to corresponding conventional Lagrangians $L$ \emph{without being homogeneous functions in the velocities}. Then the Legendre transformation of $L_{\mathrm{e}}$ to an \emph{extended Hamiltonian} $H_{\mathrm{e}}$ exists. With this class of extended Hamiltonians, an extended canonical formalism is presented that is completely analogous to the conventional Hamiltonian formalism. The physical time $t$ and the negative \emph{value} of the conventional Hamiltonian then constitute an additional pair of conjugate canonical variables. The extended formalism also includes a theory of extended canonical transformations, where the time variable $t(s)$ is also subject to transformation.
In the extended formalism, the system's dynamics is described as a motion on a hypersurface within an \emph{extended} phase space of even dimension. It is shown that the hypersurface condition does \emph{not} embody a constraint as the condition is automatically satisfied on the system path that is given by the solution of the extended set of canonical equations.
It is furthermore demonstrated that the value of the extended Hamiltonian and the parameter~$s$ constitute a second \emph{additional pair} of canonically conjugate variables. In the corresponding quantum system, we thus encounter an additional \emph{uncertainty relation}.
As a consequence of the formal similarity of conventional and extended Hamilton-Lagrange formalisms, Feynman's \emph{non-relativistic} path integral approach can be converted \emph{on a general level} into a form appropriate for \emph{relativistic} quantum physics. In the emerging parametrized quantum description, the additional uncertainty relation serves as the means to incorporate the hypersurface condition and hence to finally eliminate the parametrization.
As the starting point, the non-homogeneous extended Lagrangian $L_{\mathrm{e}}$ of a classical relativistic point particle in an external electromagnetic field will be presented. It will be shown that this extended Lagrangian can be transformed into a corresponding extended Hamiltonian $H_{\mathrm{e}}$ by a \emph{regular} Legendre transformation. With this $L_{\mathrm{e}}$, it is shown that the generalized path integral approach yields the Klein-Gordon equation as the corresponding quantum description. Moreover, the space-time propagator for a free relativistic particle will be derived. These results can be regarded as the proof of principle of the \emph{relativistic generalization} of Feynman's path integral approach to quantum physics. \end{abstract} \keywords{Extended Hamilton-Lagrange formalism, relativity, path integral, relativistic quantum physics} \ccode{PACS numbers: 04.20.Fy, 03.65.-w, 03.65.Pm} \section{Introduction} Even more than a hundred years after the emergence of Einstein's special theory of relativity, the presentation of classical dynamics in terms of the Lagrangian and the Hamiltonian formalisms is still usually based in the literature on the Newtonian absolute time as the system evolution parameter\cite{abraham,greiner,arnold,frankel,saletan,ratiu,kleinert}. The way to generalize the Hamilton-Lagrange formalism in order to render it compatible with special relativity is obvious and well-established. It consists of introducing a system evolution parameter, $s$, as a new time-like independent variable, and of subsequently treating the physical time $t=t(s)$ as a \emph{dependent} variable of $s$, in parallel to all configuration space variables $q^{i}(s)$. This idea has been pursued in numerous publications, only a few of them being cited here.
Despite this unambiguity in the foundations and the huge pile of publications on the matter --- dating back to P.~Dirac\cite{dirac} and C.~Lanczos\cite{lanczos} --- a truly consistent extended Hamilton-Lagrange formalism is still missing. The reason for this is that the discussion in the existing literature is restricted to only those extended Lagrangians that are \emph{homogeneous forms of first order} in the velocities. In this paper, this class of Lagrangians will be referred to as \emph{trivial extended Lagrangians}. For the class of trivial extended Lagrangians, corresponding trivial extended Hamiltonians cannot be directly derived by a Legendre transformation as the transformation is singular. Yet, trivial extended Hamiltonians can always be set up on the basis of a given conventional Hamiltonian.
As will be shown in this paper, \emph{extended} Lagrangians $L_{\mathrm{e}}$ indeed exist for given conventional Lagrangians $L$ that both describe the same physical system and that are \emph{not homogeneous forms} in the velocities $\mathrm{d} q^{\mu}/\mathrm{d} s$. In other words, the correlation of $L$ and $L_{\mathrm{e}}$ is \emph{not unique} in the sense that we can find more than one extended Lagrangian $L_{\mathrm{e}}$ that can be reduced to the same conventional Lagrangian $L$. This will be demonstrated for the simple case of the free relativistic point particle.
If for a given conventional Lagrangian $L$ a non-trivial extended Lagrangian $L_{\mathrm{e}}$ can be found, then the Legendre transformation is regular, and hence an equivalent extended Hamiltonian $H_{\mathrm{e}}\not\equiv0$ can be derived directly. This will be shown for the case of a relativistic particle in an external electromagnetic field, whose extended Hamiltonian will be derived by Legendre-transforming the corresponding non-homogeneous extended Lagrangian. Remarkably, we thus \emph{derive} an extended Hamiltonian which coincides with the ``super-Hamiltonian'' that was \emph{postulated} earlier by Misner, Thorne, and Wheeler\cite{misner}.
For extended Hamiltonians $H_{\mathrm{e}}$, the subsequent extended set of canonical equations is found to perfectly coincide in its \emph{form} with the conventional one. This also applies for the theory of extended canonical transformations. The \emph{trivial extended generating function} $F_{2}$ is shown to generate exactly the subgroup of conventional canonical transformations within the group of extended canonical transformations. This subgroup consists of exactly those extended canonical mappings that transform the time variables identically.
On grounds of the \emph{formal similarity} of conventional and extended Hamilton-Lagrange formalisms, it is possible to formally convert non-relativistic approaches that are based on conventional Lagrangians into relativistic approaches in terms of extended Lagrangians. This idea is worked out exemplarily for Feynman's path integral approach to quantum physics.\cite{feynman}
The paper is organized as follows. We start in Sect.~\ref{sec:eleq} with the Lagrangian description and derive from the extended form of the action integral the extended Lagrangian $L_{\mathrm{e}}$, together with its relation to the conventional Lagrangian $L$. It is shown that this relation reduces to the factor $\mathrm{d} t/\mathrm{d} s$. The extended set of Euler-Lagrange equations then follows from the dependencies of the extended Lagrangian.
In the extended Hamilton-Lagrange description of dynamics, the system's motion takes place on \emph{hypersurfaces in extended phase spaces}. In the extended Lagrangian formalism, this space is given by the tangent bundle $T(\mathbb{M}\times\mathbb{R})$, whereas in the extended Hamiltonian formalism, the hypersurface lies within the cotangent bundle $T^{*}(\mathbb{M}\times\mathbb{R})$, both cases built over the space-time configuration manifold $\mathbb{M}\times\mathbb{R}$. It is proved that the emerging of a hypersurface condition does \emph{not} imply the system to be constrained as the condition is always satisfied on the system path that is given by the solution of the (unconstrained) extended set of canonical equations. This perception corresponds to the case of a conventional Hamiltonian system with no explicit time dependence, where the system's motion takes place on a phase-space hypersurface of constant energy. Likewise, the correlation of the dynamical variables that is induced by this hypersurface of constant energy is not considered to be a constraint as for autonomous systems the energy is automatically maintained by any solution of the set of canonical equations. The hypersurface condition thus distinguishes physical from unphysical phase-space locations that cannot represent at any time the system's state for the given canonical equations and the initial conditions. In this sense, the hypersurface condition is the classical particle analogue of the \emph{mass shell condition} of quantum field theory.
To provide a simple example, we derive in Sect.~\ref{sec:lag1-fp} the non-homogeneous extended Lagrangian $L_{\mathrm{e}}$ for a free relativistic point particle. This Lorentz-invariant Lagrangian $L_{\mathrm{e}}$ has the remarkable feature to be \emph{quadratic} in the velocities. This contrasts with the conventional Lorentz-invariant Lagrangian $L$ that describes the identical dynamics. For this system, the hypersurface condition depicts the constant square of the four-velocity vector.
We show in Sect.~\ref{sec:lag1-em} that the extended Lagrangian $L_{\mathrm{e}}$ of a relativistic particle in an external electromagnetic field agrees \emph{in its form} with the corresponding non-relativistic conventional Lagrangian $L$. The difference between both is that the derivatives in the extended Lagrangian $L_{\mathrm{e}}$ are being defined with respect to the particle's \emph{proper time}, which are converted into derivatives with respect to the Newtonian \emph{absolute time} in the non-relativistic limit.
In Sect.~\ref{sec:caneq}, we switch to the extended Hamiltonian description. As the extended Hamiltonian $H_{\mathrm{e}}$ springs up from a non-homogeneous extended Lagrangian $L_{\mathrm{e}}$ by means of a regular Legendre transformation, both functions equally contain the total information on the dynamical system in question. The Hamiltonian counterparts of the Lagrangian description, namely, the extended set of canonical equations, the hypersurface condition, and the correlation of the extended Hamiltonian $H_{\mathrm{e}}$ to the conventional Hamiltonian $H$ are presented. On this basis, the theory of extended canonical transformations and the extended version of the Hamilton-Jacobi equation are worked out as straightforward generalizations of the conventional theory. As a mapping of the time $t$ is incorporated in an extended canonical transformation, not only the transformed coordinates emerging from the Hamilton-Jacobi equation are constants, as usual, but also the transformed time $T$. The extended Hamilton-Jacobi equation may thus be interpreted as defining the mapping of the entire dynamical system into its state at a \emph{fixed instant of time}, i.e., for instance, into its initial state. In the extended formulation, the Hamilton-Jacobi equation thus reappears in a new perspective.
We furthermore show that the \emph{value} of the extended Hamiltonian $H_{\mathrm{e}}$ and the system evolution parameter $s$ yield an additional pair of canonically conjugate variables. For the corresponding quantum system, we thus encounter an additional \emph{uncertainty relation}. Based on both the extended Lagrangian $L_{\mathrm{e}}$ and the additional uncertainty relation, we present in Sect.~\ref{sec:pathint} the path integral formalism in a form appropriate for \emph{relativistic quantum systems}. An extension of Feynman's approach was worked out earlier\cite{duru} for a particular system. Nevertheless, the most general form of the extended path integral formalism that applies for any extended Lagrangian $L_{\mathrm{e}}$ is presented here for the first time. By consistently treating space and time variables on equal footing, the generalized path integral formalism is shown to apply as well for Lagrangians that \emph{explicitly} depend on time. In particular, the transition of a wave function is presented here as a space-time integral over a space-time propagator. In this context, we address the physical meaning of the additional integration over $t$. The uncertainty relation is exhibited as the \emph{quantum physics' means} to incorporate the hypersurface condition in order to finally eliminate the parameterization.
On grounds of a generalized understanding of the action principle, Feynman showed that the Schr\"odinger equation emerges as the non-relativistic \emph{quantum description} of a dynamical system if the corresponding \emph{classical} system is described by the non-relativistic Lagrangian $L$ of a point particle in an external potential. Parallel to this beautiful approach, we derive in Section~\ref{sec:kg} the Klein-Gordon equation as the \emph{relativistic quantum description} of a system, whose classical counterpart is described by the \emph{non-homogeneous extended Lagrangian} $L_{\mathrm{e}}$ of a relativistic point particle in an external electromagnetic field. The reason for this to work is twofold. As the extended Lagrangian $L_{\mathrm{e}}$ agrees in its form with the conventional non-relativistic Lagrangian $L$, the generalized path integral formalism can be worked out similarly to the non-relativistic case. Furthermore, as we proceed in our derivation by an infinitesimal proper time step $\Delta s$ only and consider the limit $\Delta s\to0$, the hypersurface condition disappears by virtue of the uncertainty relation.
We finally derive in Sect.~\ref{sec:prop} the space-time propagator for the wave function of a free particle with spin zero from the extended Lagrangian of a free relativistic point particle. The hypersurface condition, as the companion of the classical extended description, is taken into account in the quantum description by integrating over all possible parameterizations of the system's variables. This integration is now explained in terms of the uncertainty relation. We regard these results as the ultimate confirmation of the relativistic generalization of Feynman's path integral formalism. \section{Extended Hamilton-Lagrange formalism} \subsection{\label{sec:eleq}Extended set of Euler-Lagrange equations} The conventional formulation of the principle of least action is based on the action functional $S[\bm{q}(t)]$, defined by \begin{equation}\label{principle0} S[\bm{q}(t)]=\int_{t_{a}}^{t_{b}}L\left(\bm{q},\dfrac{\bm{q}}{t},t\right)\mathrm{d} t, \end{equation} with $L(\bm{q},\dot{\bm{q}},t)$ denoting the system's conventional Lagrangian, and the vector of configuration space variables $\bm{q}(t)=(q^{1}(t),\ldots,q^{n}(t))$ as a function of time. In this formulation, the independent variable time $t$ plays the role of the Newtonian \emph{absolute time}. The actual path $(\bar{\bm{q}}(t),\dot{\bar{\bm{q}}}(t))$ the physical system ``realizes'' is given as the \emph{extremum} of the action $S$, hence for $\delta S=0$. The path representing this extremum of $S$ is the solution of the set of Euler-Lagrange equations ($i=1,\ldots,n$) for the given initial conditions $\bm{q}_{0},\dot{\bm{q}}_{0}$, \begin{equation}\label{lageqm0} \dfrac{}{t}\left(\pfrac{L}{\left(\dfrac{q^{i}}{t} \right)}\right)-\pfrac{L}{q^{i}}=0. \end{equation} The reformulation of the least action principle~(\ref{principle0}) that is eligible for relativistic physics is accomplished by treating the time $t(s)=q^{0}(s)/c\,$ --- like the vector $\bm{q}(s)$ of configuration space variables --- as a \emph{dependent} variable of a newly introduced timelike independent variable, $s$~\cite{lanczos,fanchi,rohrlich,struck}. The action functional then writes in terms of an \emph{extended Lagrangian} $L_{\mathrm{e}}$ \begin{equation}\label{principle1} S_{\mathrm{e}}[\bm{q}(s),t(s)]=\int_{s_{a}}^{s_{b}}L_{\mathrm{e}}\left(\bm{q},\dfrac{\bm{q}}{s},t, \dfrac{t}{s}\right)\mathrm{d} s\equiv \int_{s_{a}}^{s_{b}}L_{\mathrm{e}}\left(q^{\mu},\dfrac{q^{\mu}}{s}\right)\mathrm{d} s. \end{equation} Herein, the index $\mu=0,\ldots,n$ denotes the entire range of extended configuration space variables. As the action functional~(\ref{principle1}) has the form of (\ref{principle0}), the subsequent Euler-Lagrange equations that determine the particular path $(\bar{\bm{q}}(s),\bar{t}(s))$ on which the value of the functional~(\ref{principle1}) takes on an extreme value, adopt the customary form of Eq.~(\ref{lageqm0}) \begin{equation}\label{lageqm} \dfrac{}{s}\left(\pfrac{L_{\mathrm{e}}}{\left(\dfrac{q^{\mu}}{s} \right)}\right)-\pfrac{L_{\mathrm{e}}}{q^{\mu}}=0. \end{equation} For the index $\mu=0$, the Euler-Lagrange equation can be expressed equivalently in terms of $t(s)$ as \begin{equation}\label{lageqm-t} \dfrac{}{s}\left(\pfrac{L_{\mathrm{e}}}{\left(\dfrac{t}{s} \right)}\right)-\pfrac{L_{\mathrm{e}}}{t}=0. \end{equation} The equations of motion for both $\bm{q}(s)$ and $t(s)$ are thus determined by the extended Lagrangian $L_{\mathrm{e}}$. 
The solution $\bm{q}(t)$ of the Euler-Lagrange equations that equivalently emerges from the corresponding conventional Lagrangian $L$ may then be constructed by eliminating the evolution parameter $s$.
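Concretely, if $\mathrm{d} t/\mathrm{d} s>0$ holds along the solution (an assumption that is not part of the formalism itself, but merely makes the elimination explicit), then $t(s)$ is invertible and the conventional trajectory is recovered as
$$
\bm{q}(t)=\bm{q}\big(s(t)\big),
$$
with $s(t)$ denoting the inverse function of $t(s)$.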
As the actions, $S$ and $S_{\mathrm{e}}$, are supposed to be alternative characterizations of the \emph{same} underlying physical system, the action principles $\delta S=0$ and $\delta S_{\mathrm{e}}=0$ must hold simultaneously. This means that $$ \delta\int_{s_{a}}^{s_{b}}L\dfrac{t}{s}\,\mathrm{d} s= \delta\int_{s_{a}}^{s_{b}}L_{\mathrm{e}}\,\mathrm{d} s, $$ which, in turn, is assured if both integrands differ at most by the $s$-derivative of an arbitrary differentiable function $F(\bm{q},t)$ $$ L\dfrac{t}{s}=L_{\mathrm{e}}+\dfrac{F}{s}. $$ Functions $F(\bm{q},t)$ define a particular class of point transformations of the dynamical variables, namely those ones that preserve the form of the Euler-Lagrange equations. Such a transformation can be applied at any time in the discussion of a given Lagrangian system and should be distinguished from correlating $L_{\mathrm{e}}$ and $L$. We may thus restrict ourselves without loss of generality to those correlations of $L$ and $L_{\mathrm{e}}$, where $F\equiv0$. In other words, we correlate $L$ and $L_{\mathrm{e}}$ \emph{without} performing simultaneously a transformation of the dynamical variables. We will discuss this issue in the more general context of \emph{extended canonical transformations} in Sect.~\ref{sec:cantra}. The extended Lagrangian $L_{\mathrm{e}}$ is then related to the conventional Lagrangian, $L$, by \begin{equation}\label{lag1} L_{\mathrm{e}}\left(\bm{q},\dfrac{\bm{q}}{s},t,\dfrac{t}{s}\right)= L\left(\bm{q},\dfrac{\bm{q}}{t},t\right)\dfrac{t}{s},\qquad \dfrac{\bm{q}}{t}=\dfrac{\bm{q}/\mathrm{d} s}{t/\mathrm{d} s}. \end{equation} The derivatives of $L_{\mathrm{e}}$ from Eq.~(\ref{lag1}) with respect to its arguments can now be expressed in terms of the conventional Lagrangian $L$ as \begin{align} \quad\pfrac{L_{\mathrm{e}}}{q^{\mu}}&=\pfrac{L}{q^{\mu}}\dfrac{t}{s}, \qquad\mu=1,\ldots,n\\ \quad\pfrac{L_{\mathrm{e}}}{t}&=\pfrac{L}{t} \dfrac{t}{s}\\ \pfrac{L_{\mathrm{e}}}{\left(\dfrac{q^{\mu}}{s}\right)}&= \pfrac{L}{\left(\dfrac{q^{\mu}}{t}\right)},\qquad \mu=1,\ldots,n\label{L1-deri}\\ \pfrac{L_{\mathrm{e}}}{\left(\dfrac{t}{s}\right)}&=L+ \sum_{\mu=1}^{n}\pfrac{L}{\left(\dfrac{q^{\mu}}{t}\right)} \pfrac{\left(\dfrac{q^{\mu}/\mathrm{d} s}{t/\mathrm{d} s}\right)}{\left(\dfrac{t}{s}\right)}\dfrac{t}{s}= L-\sum_{\mu=1}^{n}\pfrac{L}{\left(\dfrac{q^{\mu}}{t}\right)}\dfrac{q^{\mu}}{s} {\left(\dfrac{s}{t}\right)}^{2}\dfrac{t}{s}\nonumber\\ &=L-\sum_{\mu=1}^{n}\pfrac{L}{\left(\dfrac{q^{\mu}}{t}\right)} \dfrac{q^{\mu}}{t}.\label{L1-deri2} \end{align} Equations~(\ref{L1-deri}) and (\ref{L1-deri2}) yield for the following sum over the extended range $\mu=0,\ldots,n$ of dynamical variables \begin{align*} \sum_{\mu=0}^{n}\pfrac{L_{\mathrm{e}}}{\left(\dfrac{q^{\mu}}{s}\right)} \dfrac{q^{\mu}}{s}&=L\dfrac{t}{s}- \sum_{\mu=1}^{n}\pfrac{L}{\left(\dfrac{q^{\mu}}{t}\right)} \dfrac{q^{\mu}}{t}\dfrac{t}{s}+ \sum_{\mu=1}^{n}\pfrac{L}{\left(\dfrac{q^{\mu}}{t}\right)} \dfrac{q^{\mu}}{s}\\ &=L_{\mathrm{e}}. \end{align*} The extended Lagrangian $L_{\mathrm{e}}$ thus satisfies the equation \begin{equation}\label{lagid} L_{\mathrm{e}}-\sum_{\mu=0}^{n}\pfrac{L_{\mathrm{e}}}{\left(\dfrac{q^{\mu}}{s}\right)} \dfrac{q^{\mu}}{s} \begin{cases}\stackrel{\not\equiv}{=}0 & \mbox{ if $L_{\mathrm{e}}$ not homogeneous}\\ \equiv0 & \mbox{ if $L_{\mathrm{e}}$ homogeneous}\end{cases} \mbox{ in }\dfrac{q^{\mu}}{s}. \end{equation} Regarding the correlation~(\ref{lag1}) and the pertaining condition~(\ref{lagid}), two different cases must be distinguished. 
In the first case, an extended Lagrangian $L_{\mathrm{e}}$ can be set up immediately by multiplying a given conventional Lagrangian $L$ with $\mathrm{d} t/\mathrm{d} s$ and expressing all velocities $\mathrm{d}\bm{q}/\mathrm{d} t$ in terms of $\mathrm{d}\bm{q}/\mathrm{d} s$ according to Eq.~(\ref{lag1}). Such an extended Lagrangian $L_{\mathrm{e}}$ is called a \emph{trivial extended Lagrangian} as it contains no additional information on the underlying dynamical system. A trivial extended Lagrangian $L_{\mathrm{e}}$ constitutes a homogeneous form of first order in the $n+1$ variables $\mathrm{d} q^{0}/\mathrm{d} s,\ldots,\mathrm{d} q^{n}/\mathrm{d} s$. This may be seen by replacing all derivatives $\mathrm{d} q^{\mu}/\mathrm{d} s$ with $a\cdot\mathrm{d} q^{\mu}/\mathrm{d} s$, $a\in\mathbb{R}$ in Eq.~(\ref{lag1}), which yields \begin{align*} L_{\mathrm{e}}\left(\bm{q},a\dfrac{\bm{q}}{s},t,a\dfrac{t}{s}\right)&= L\left(\bm{q},\dfrac{\bm{q}}{t},t\right)a\dfrac{t}{s}\\ &=aL_{\mathrm{e}}\left(\bm{q},\dfrac{\bm{q}}{s},t,\dfrac{t}{s}\right). \end{align*} Consequently, Euler's theorem on homogeneous functions states that Eq.~(\ref{lagid}) constitutes an \emph{identity}\cite{lanczos}. In that case, we may differentiate the identity with respect to the velocity $\mathrm{d} q^{\nu}/\mathrm{d} s$ to get \begin{equation}\label{lagid-deri} \sum_{\mu=0}^{n}\pfrac{^{2}L_{\mathrm{e}}} {\left(\dfrac{q^{\mu}}{s}\right)\partial\left(\dfrac{q_{\nu}}{s}\right)}\dfrac{q^{\mu}}{s}\equiv0. \end{equation} This is a homogeneous set of $n$ equations for the velocities $\mathrm{d} q^{\mu}/\mathrm{d} s$. It has a non-trivial solution ($\mathrm{d}\bm{q}/\mathrm{d} s\neq0$) only if the coefficient matrix is singular \begin{equation}\label{singularLegTrans} \det\left(\pfrac{^{2}L_{\mathrm{e}}} {\left(\dfrac{q^{\mu}}{s}\right)\partial\left(\dfrac{q_{\nu}}{s}\right)}\right)=0. \end{equation} Due to Eq.~(\ref{singularLegTrans}), a corresponding extended Hamiltonian $H_{\mathrm{e}}$ does not follow from a trivial extended Lagrangian $L_{\mathrm{e}}$ as the mediating Legendre transformation is \emph{singular}.
The Euler-Lagrange equation~(\ref{lageqm-t}) for $\mathrm{d} t/\mathrm{d} s$ then reduces to the conventional set of Eqs.~(\ref{lageqm0}) for arbitrary $t(s)$, hence, we do not obtain a substantial equation of motion for $t(s)$. Inserting Eq.~(\ref{L1-deri2}) into Eq.~(\ref{lageqm-t}), one finds $$ \sum_{\mu=1}^{n}\underbrace{\dfrac{q^{\mu}}{t}}_{\neq0}\bigg[ \underbrace{\pfrac{L}{q^{\mu}}-\dfrac{}{t}\left( \pfrac{L}{\dot{q}^{\mu}}\right)}_{\Rightarrow\;\;=0}\bigg]=0. $$ The parametrization of time $t(s)$ is thus left undetermined --- which reflects the fact that a conventional Lagrangian does not provide any information on a parametrization of time and that a trivial extended Lagrangian does not incorporate additional information.
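As an elementary illustration (this simple example is added here for illustration only and is not taken from the later sections), consider the conventional Lagrangian $L=\frac{m}{2}\,(\mathrm{d} q/\mathrm{d} t)^{2}$ of a free non-relativistic particle with a single spatial degree of freedom. The pertaining trivial extended Lagrangian according to Eq.~(\ref{lag1}) is
$$
L_{\mathrm{e}}=\frac{m}{2}\,\frac{(\mathrm{d} q/\mathrm{d} s)^{2}}{\mathrm{d} t/\mathrm{d} s},
$$
which is indeed a homogeneous form of first order in $\mathrm{d} q/\mathrm{d} s$ and $\mathrm{d} t/\mathrm{d} s$. Its second derivatives with respect to these two velocities are
$$
\frac{\partial^{2}L_{\mathrm{e}}}{\partial(\mathrm{d} q/\mathrm{d} s)^{2}}=\frac{m}{\mathrm{d} t/\mathrm{d} s},\qquad
\frac{\partial^{2}L_{\mathrm{e}}}{\partial(\mathrm{d} q/\mathrm{d} s)\,\partial(\mathrm{d} t/\mathrm{d} s)}=-\frac{m\,\mathrm{d} q/\mathrm{d} s}{(\mathrm{d} t/\mathrm{d} s)^{2}},\qquad
\frac{\partial^{2}L_{\mathrm{e}}}{\partial(\mathrm{d} t/\mathrm{d} s)^{2}}=\frac{m\,(\mathrm{d} q/\mathrm{d} s)^{2}}{(\mathrm{d} t/\mathrm{d} s)^{3}},
$$
so that the determinant of the $2\times2$ Hesse matrix vanishes identically, in agreement with Eq.~(\ref{singularLegTrans}).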
The second case is completely overlooked in literature (cf, for instance\cite{dirac,lanczos,johns}), namely that extended Lagrangians $L_{\mathrm{e}}$ exist that are related to a given conventional Lagrangian $L$ according to Eq.~(\ref{lag1}) \emph{without being homogeneous forms} in the $n+1$ velocities $\mathrm{d} q^{\mu}/\mathrm{d} s$. In Sect.~\ref{sec:lag1-fp}, a simple example will be furnished by setting up such a non-homogeneous extended Lagrangian $L_{\mathrm{e}}$ for the \emph{free relativistic point particle}. For a non-homogeneous extended Lagrangian $L_{\mathrm{e}}$, the extended set of Euler-Lagrange equations~(\ref{lageqm}) is not redundant and the Legendre transformation to an extended Hamiltonian $H_{\mathrm{e}}$ exists. In that case, Eq.~(\ref{lagid}) does \emph{not} represent an identity, which implies that Eq.~(\ref{lagid-deri}) and, subsequently, Eq.~(\ref{singularLegTrans}) do not hold. Then, Eq.~(\ref{lagid}), regarded as an \emph{implicit equation}, is always satisfied on the extended system evolution path parametrized by $s$, which is given by the solution of the extended set of Euler-Lagrange equations~(\ref{lageqm}). This can be seen by calculating the total $s$-derivative of Eq.~(\ref{lagid}) and inserting the Euler-Lagrange equations~(\ref{lageqm}) \begin{align} &\quad\,\dfrac{}{s}L_{\mathrm{e}}\left(q^{\mu},\dfrac{q^{\mu}}{s}\right)- \sum_{\mu=0}^{n}\dfrac{q^{\mu}}{s}\dfrac{}{s}\pfrac{L_{\mathrm{e}}} {\left(\dfrac{q^{\mu}}{s}\right)}-\sum_{\mu=0}^{n}\pfrac{L_{\mathrm{e}}} {\left(\dfrac{q^{\mu}}{s}\right)}\dfrac{\left(\dfrac{q^{\mu}}{s}\right)}{s}\nonumber\\ &=\dfrac{L_{\mathrm{e}}}{s}-\sum_{\mu=0}^{n}\pfrac{L_{\mathrm{e}}}{q^{\mu}}\dfrac{q^{\mu}}{s}-\sum_{\mu=0}^{n} \pfrac{L_{\mathrm{e}}}{\left(\dfrac{q^{\mu}}{s}\right)}\dfrac{\left(\dfrac{q^{\mu}}{s}\right)}{s}\nonumber\\ &=0. \label{lagid-deri2} \end{align} For this reason, Eq.~(\ref{lagid}) actually does \emph{not} impose a constraint on the system's evolution along~$s$ but separates \emph{unphysical states} that do not satisfy Eq.~(\ref{lagid}) from the physical states that are solutions of the Euler-Lagrange equations~(\ref{lageqm}).
In this respect, Eq.~(\ref{lagid}) exactly corresponds to the case of the conserved energy function $e(t)=e_{0}$ of a conventional Lagrangian system $L(\bm{q},\dot{\bm{q}})$ with no explicit time dependence. In that case, the quantity $e(t)$ \begin{equation}\label{lagid-conv} e(t)=\sum_{\mu=1}^{n}\pfrac{L}{\dot{q}^{\mu}}\dot{q}^{\mu}-L(\bm{q},\dot{\bm{q}})=e_{0} \end{equation} is a constant of motion and hence defines a surface in $TM$ on which the system's motion takes place. Nevertheless, it is not considered a constraint as the condition~(\ref{lagid-conv}) is automatically satisfied by means of the conventional Euler-Lagrange equations~(\ref{lageqm0}), \begin{align*} \dfrac{e(t)}{t}&=\sum_{\mu=1}^{n}\left( \dot{q}^{\mu}\dfrac{}{t}\pfrac{L}{\dot{q}^{\mu}}+ \cancel{\pfrac{L}{\dot{q}^{\mu}}\ddot{q}^{\mu}}- \pfrac{L}{q^{\mu}}\dot{q}^{\mu}-\cancel{\pfrac{L}{\dot{q}^{\mu}}\ddot{q}^{\mu}}\right)\\ &=\sum_{\mu=1}^{n}\dot{q}^{\mu}\underbrace{\left(\dfrac{}{t}\pfrac{L}{\dot{q}^{\mu}}-\pfrac{L}{q^{\mu}}\right)}_{=0}\\ &=0. \end{align*} To summarize, by switching from the conventional variational principle~(\ref{principle0}) to the extended representation~(\ref{principle1}), we have introduced an extended Lagrangian $L_{\mathrm{e}}$ that in addition depends on $\mathrm{d} t(s)/\mathrm{d} s$. Due to the emerging conserved quantity that follows from Eq.~(\ref{lagid-deri2}), the actual number of degrees of freedom is unchanged. In the language of Differential Geometry, the system's motion along the parameter $s$ now takes place on a \emph{hypersurface}, defined by Eq.~(\ref{lagid}), within the tangent bundle $T\mathbb{M}_\mathrm{e}\equiv T(\mathbb{M}\times\mathbb{R})$ over the ``spatial-plus-time'' configuration manifold \mbox{$\mathbb{M}_\mathrm{e}\equiv\mathbb{M}\times\mathbb{R}$}. This is the basis required for the description of \emph{relativistic} point particle dynamics, which mandates configuration space coordinates and time to be treated on equal footing in a chart representation. It contrasts with the conventional Lagrangian description in $T\mathbb{M}$ over the spatial configuration manifold $\mathbb{M}$ for Lagrangians that do not explicitly depend on the system's parameter $t$, which is commonly identified in applications with Newton's absolute time. \subsection{\label{sec:caneq}Extended set of canonical equations} The Lagrangian formulation of particle dynamics can \emph{equivalently} be expressed as a Hamiltonian description. The complete information on the given dynamical system is then contained in a Hamiltonian $H$, which carries the same information content as the corresponding Lagrangian $L$. It is defined by the Legendre transformation \begin{equation}\label{legendre} H(\bm{q},\bm{p},t)=\sum_{\mu=1}^{n}p_{\mu}\dfrac{q^{\mu}}{t}- L\left(\bm{q},\dfrac{\bm{q}}{t},t\right), \end{equation} with the covariant momentum vector components $p_{\mu}$ being defined by $$ p_{\mu}=\pfrac{L}{\left(\dfrac{q^{\mu}}{t}\right)}. $$ Correspondingly, the \emph{extended} Hamiltonian $H_{\mathrm{e}}$ is defined as the extended Legendre transform of the extended Lagrangian $L_{\mathrm{e}}$ as
\begin{equation}\label{legendre1} H_{\mathrm{e}}(\bm{q},\bm{p},q^{0},p_{0})=\sum_{\mu=0}^{n}p_{\mu}\dfrac{q^{\mu}}{s}- L_{\mathrm{e}}\left(q^{\nu},\dfrac{q^{\nu}}{s}\right), \end{equation} wherein $q^{0}(s)=ct(s)$ and $p_{0}(s)$ denotes the canonical conjugate variable of $q^{0}(s)$. In order for $H_{\mathrm{e}}$ to take over the complete information on the dynamical system from $L_{\mathrm{e}}$, the Hesse matrix must be non-singular $$ \det\left(\pfrac{^{2}L_{\mathrm{e}}}{\left(\dfrac{q^{\mu}}{s}\right) \partial\left(\dfrac{q_{\nu}}{s}\right)}\right)\ne0. $$ We know from Eq.~(\ref{L1-deri}) that for $\mu=1,\ldots,n$ the momentum variable $p_{\mu}$ is equally obtained from the extended Lagrangian $L_{\mathrm{e}}$, \begin{equation}\label{p-def} p_{\mu}=\pfrac{L_{\mathrm{e}}}{\left(\dfrac{q^{\mu}}{s}\right)}. \end{equation} This fact ensures the Legendre transformations~(\ref{legendre}) and (\ref{legendre1}) to be compatible. For the index $\mu=0$, i.e., for $q^{0}=ct$ we must take some care as the derivative of $L_{\mathrm{e}}$ with respect to $\mathrm{d} t/\mathrm{d} s$ evaluates to $$ \pfrac{L_{\mathrm{e}}}{\left(\dfrac{t}{s}\right)}=L-\sum_{\mu=1}^{n} \pfrac{L}{\left(\dfrac{q^{\mu}}{t}\right)}\dfrac{q^{\mu}}{t}=-H(\bm{q},\bm{p},t). $$ The momentum coordinate $p_{0}(s)$ that is conjugate to $q^{0}=ct(s)$ must therefore be defined as \begin{equation}\label{p0-def0} p_{0}(s)=-\frac{e(s)}{c},\qquad e(s)\stackrel{\not\equiv}{=} H\big(\bm{q}(s),\bm{p}(s),t(s)\big), \end{equation} with $e(s)$ representing the instantaneous \emph{value} of the Hamiltonian $H$ at $s$, but \emph{not} the \emph{function} $H$ proper as these functions are different. The canonical coordinate $p_{0}$ must be conceived --- like all other canonical coordinates --- as a function of the independent variable, $s$, only. Thus, $p_{0}$ has solely a derivative with respect to $s$. In contrast, the Hamiltonian $H$ contains the complete information on the underlying dynamical system --- which is provided as the dependence of the value $e(s)$ of $H$ on the \emph{individual} values of the $q^{\mu}(s)$, $p_{\mu}(s)$, and $t(s)$ --- and thus has derivatives with respect to all these canonical coordinates. We may express the definition of $p_{0}(s)$, and $e(s)$, by means of the comprehensible notation \begin{equation}\label{p0-def} p_{0}(s)=\pfrac{L_{\mathrm{e}}}{\left(\dfrac{q^{0}}{s}\right)}(s) \quad\Longleftrightarrow\quad e(s)=-\pfrac{L_{\mathrm{e}}}{\left(\dfrac{t}{s}\right)}(s). \end{equation} According to the extended Legendre transformation~(\ref{legendre1}), the condition~(\ref{lagid}) translates in the extended Hamiltonian description simply into \begin{equation}\label{hamid} H_{\mathrm{e}}\big(\bm{q}(s),\bm{p}(s),t(s),e(s)\big)=0. \end{equation} This means that the extended Hamiltonian $H_{\mathrm{e}}$ directly defines the hypersurface within the extended phase space the classical particle motion is restricted to. Geometrically, the hypersurface lies in the cotangent bundle $T^*\mathbb{M}_\mathrm{e}\equiv T^{*}(\mathbb{M}\times\mathbb{R})$ over the same extended configuration manifold $\mathbb{M}_\mathrm{e}\equiv\mathbb{M}\times\mathbb{R}$ as in the case of the Lagrangian description. This is exactly the higher-dimensional analogue of the case of an \emph{autonomous} conventional Hamiltonian system, hence a Hamiltonian with no \emph{explicit} time dependence, $H\big(\bm{q}(t),\bm{p}(t)\big)=e_{0}$ --- where the system's initial energy $e_{0}$ embodies a \emph{constant of motion}.
In that case, the system's motion again takes place on a hypersurface that is now defined by $H(\bm{q},\bm{p})=e_{0}$ and represents the phase-space surface of constant energy within the cotangent bundle $T^{*}\mathbb{M}$ over the configuration manifold $\mathbb{M}$.
By virtue of the Legendre transformations~(\ref{legendre}) and~(\ref{legendre1}), the correlation from Eq.~(\ref{lag1}) of extended and conventional Lagrangians is finally converted into \begin{align} H_{\mathrm{e}}(\bm{q},\bm{p},t,e)&=\sum_{\mu=1}^{n}p_{\mu}\dfrac{q^{\mu}}{s}- e\dfrac{t}{s}-L_{\mathrm{e}}\left(\bm{q},\dfrac{\bm{q}}{s},t,\dfrac{t}{s}\right)\nonumber\\ &=\sum_{\mu=1}^{n}p_{\mu}\dfrac{q^{\mu}}{s}- e\dfrac{t}{s}-L\left(\bm{q},\dfrac{\bm{q}}{s},t\right)\dfrac{t}{s}\nonumber\\ &=\cancel{\sum_{\mu=1}^{n}p_{\mu}\dfrac{q^{\mu}}{s}}-e\dfrac{t}{s}+ \left(H(\bm{q},\bm{p},t)-\cancel{\sum_{\mu=1}^{n}p_{\mu}\dfrac{q^{\mu}}{t}}\right)\dfrac{t}{s}\nonumber\\ &=\big(H(\bm{q},\bm{p},t)-e\big)\dfrac{t}{s}. \label{H1-def} \end{align} The extended Legendre transformation~(\ref{legendre1}) in conjunction with (\ref{p-def}) and the extended set of Euler-Lagrange equations~(\ref{lageqm}) immediately yields the extended set of canonical equations ($\mu=0,\ldots,n$), \begin{equation}\label{caneq-def} \pfrac{H_{\mathrm{e}}}{p_{\mu}}=\dfrac{q^{\mu}}{s},\qquad \pfrac{H_{\mathrm{e}}}{q^{\mu}}=-\pfrac{L_{\mathrm{e}}}{q^{\mu}}=-\dfrac{p_{\mu}}{s}. \end{equation} The right-hand sides of these equations follow directly from the Legendre transformation~(\ref{legendre1}) as the Lagrangian $L_{\mathrm{e}}$ does not depend on the momenta $p_{\mu}$ and has, up to the sign, the same space-time dependence as the Hamiltonian $H_{\mathrm{e}}$. The extended set is characterized by the additional pair of canonical equations for the index $\mu=0$, which reads in terms of $t(s)$ and $e(s)$ \begin{equation}\label{caneq-def1} \dfrac{e}{s}=\pfrac{H_{\mathrm{e}}}{t},\qquad \dfrac{t}{s}=-\pfrac{H_{\mathrm{e}}}{e}. \end{equation} For the total derivative of $H_{\mathrm{e}}(\bm{q},\bm{p},t,e)$ we thus find \begin{align*} \dfrac{H_{\mathrm{e}}}{s}&=\pfrac{H_{\mathrm{e}}}{p_{i}}\dfrac{p_{i}}{s}+ \pfrac{H_{\mathrm{e}}}{q^{i}}\dfrac{q^{i}}{s}+ \pfrac{H_{\mathrm{e}}}{t}\dfrac{t}{s}+\pfrac{H_{\mathrm{e}}}{e}\dfrac{e}{s}\\ &=\dfrac{q^{i}}{s}\dfrac{p_{i}}{s}-\dfrac{p_{i}}{s}\dfrac{q^{i}}{s}+ \dfrac{e}{s}\dfrac{t}{s}-\dfrac{t}{s}\dfrac{e}{s}\\ &\equiv0. \end{align*} Thus, if $e(0)=e_{0}$ is identified with the system's initial energy $e_{0}=H(\bm{q}_{0},\bm{p}_{0},0)$ at $t=0$, then the condition $H_{\mathrm{e}}(\bm{q},\bm{p},t,e)=0$, $\mathrm{d} H_{\mathrm{e}}(\bm{q},\bm{p},t,e)/\mathrm{d} s=0$ is \emph{automatically} fulfilled along the system's trajectory that is given by the solution of the extended set of canonical equations~(\ref{caneq-def}).
The extended phase-space variable $e(s)$ is defined as the particular function of the independent variable, $s$, that represents the \emph{value} of the conventional Hamiltonian, $H$. In accordance with Eqs.~(\ref{p0-def0}) and (\ref{hamid}), we thus determine $H$ for any given extended Hamiltonian $H_{\mathrm{e}}$ by solving $H_{\mathrm{e}}=0$ for $e$. Then, $H$ emerges as the right-hand side of the equation $e=H$.
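As a purely illustrative example (the quadratic form below is an assumption made for this illustration and is not quoted from the following sections), suppose an extended Hamiltonian of a free particle is given by
$$
H_{\mathrm{e}}(\bm{p},e)=\frac{1}{2m}\left(\bm{p}^{2}-\frac{e^{2}}{c^{2}}+m^{2}c^{2}\right).
$$
Solving $H_{\mathrm{e}}=0$ for $e$ and taking the positive branch yields $e=c\sqrt{\bm{p}^{2}+m^{2}c^{2}}$, so the familiar conventional Hamiltonian of a free relativistic particle emerges as the right-hand side of the equation $e=H$.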
In the converse case, if the conventional Hamiltonian $H$ is given and $H_{\mathrm{e}}$ is set up according to Eq.~(\ref{H1-def}), then the canonical equation for $\mathrm{d} t/\mathrm{d} s$ yields an \emph{identity}, hence allows arbitrary parametrizations of time, $$ \dfrac{t}{s}=-\pfrac{H_{\mathrm{e}}}{e}=-\pfrac{}{e}\left[\big(H(\bm{q},\bm{p},t)- e\big)\dfrac{t}{s}\right]=\dfrac{t}{s}. $$ Exactly as in the Lagrangian description, this is not astonishing as a conventional Hamiltonian $H$ generally does not provide the information for an equation of motion for $t(s)$, i.e., for a particular parametrization of time $t$. Furthermore, setting up the extended Hamiltonian $H_{\mathrm{e}}$ according to Eq.~(\ref{H1-def}) on the basis of a given conventional Hamiltonian $H$ does not generate additional information on the actual dynamical system.
Corresponding to Eq.~(\ref{p0-def0}), we may introduce the variable $e_{\mathrm{e}}$ as the \emph{value} of the extended Hamiltonian $H_{\mathrm{e}}$. We can formally imagine $H_{\mathrm{e}}$ to be also a function of $s$ in addition to its dependence on the extended phase-space variables, \begin{equation}\label{p0-def1} e_{\mathrm{e}}\stackrel{\not\equiv}{=}H_{\mathrm{e}}(\bm{q},\bm{p},t,e,s). \end{equation} By virtue of the extended set of canonical equations~(\ref{caneq-def}), we find that $e_{\mathrm{e}}$ is a constant of motion if and only if $H_{\mathrm{e}}$ does \emph{not} explicitly depend on $s$, $$ e_{\mathrm{e}}(s)=\mathrm{const.}\qquad\Longleftrightarrow\qquad H_{\mathrm{e}}=H_{\mathrm{e}}(\bm{q},\bm{p},t,e). $$ In this case, $s$ can be regarded as a \emph{cyclic variable}, with $e_{\mathrm{e}}$ the pertaining constant of motion, and hence its conjugate. Thus, in the same way as $(e,t)$ constitutes a pair of canonically conjugate variables, so does the pair $(e_{\mathrm{e}},s)$, i.e., the \emph{value} $e_{\mathrm{e}}$ of the extended Hamiltonian $H_{\mathrm{e}}$ and the parameterization of the system's variables in terms of $s$. In the context of a corresponding quantum description, this additional pair of canonically conjugate variables gives rise to the additional uncertainty relation \begin{equation}\label{uncertain} \Delta e_{\mathrm{e}}\,\Delta s\geq{\textstyle\frac{1}{2}}\hbar. \end{equation} Thus, in a quantum system whose classical limit is described by an extended Hamiltonian $H_{\mathrm{e}}$, we cannot simultaneously measure exactly both a deviation $\Delta e_{\mathrm{e}}$ from the hypersurface condition $e_{\mathrm{e}}(s)=0$ from Eqs.~(\ref{hamid}), (\ref{p0-def1}) \emph{and} the actual value of the system evolution parameter $s$. For the particular extended Hamiltonian $H_{\mathrm{e}}$ of a relativistic particle in an external electromagnetic field, to be discussed in Sect.~\ref{sec:ham1-em}, the condition reflects the \emph{relativistic energy-momentum correlation}, whereas the parameter $s$ represents the particle's \emph{proper time}. For this particular system, the uncertainty relation~(\ref{uncertain}) thus states that we cannot have simultaneous knowledge on a deviation from the relativistic energy-momentum correlation~(\ref{constraint-em}) \emph{and} the particle's proper time. The extended Lagrangian $L_{\mathrm{e}}$ and the uncertainty relation~(\ref{uncertain}) constitute together the cornerstones for deriving the \emph{relativistic generalization} of Feynman's path integral approach to non-relativistic quantum physics, to be presented in Sect.~\ref{sec:pathint}.
To end this section, we remark that the extended Hamiltonian $H_{\mathrm{e}}$ most frequently found in the literature is given by (cf., for instance, Refs.~\cite{lanczos,siegel,thirring,stiefel,tsiga,synge}) \begin{equation}\label{H1-triv} H_{\mathrm{e}}(\bm{q},\bm{p},t,e)=H(\bm{q},\bm{p},t)-e. \end{equation} According to Eqs.~(\ref{caneq-def1}), the canonical equation for $\mathrm{d} t/\mathrm{d} s$ is obtained as $$ \dfrac{t}{s}=-\pfrac{H_{\mathrm{e}}}{e}=1. $$ Up to arbitrary shifts of the origin of our time scale, we thus \emph{identify} $t(s)$ with $s$. As all other partial derivatives of $H_{\mathrm{e}}$ coincide with those of $H$, so do the respective canonical equations. The system description in terms of $H_{\mathrm{e}}$ from Eq.~(\ref{H1-triv}) is thus \emph{identical} to the conventional description and does not provide any additional information. The extended Hamiltonian~(\ref{H1-triv}) thus constitutes the simplest form of a \emph{trivial extended Hamiltonian}. \subsection{\label{sec:cantra}Extended canonical transformations} The conventional theory of canonical transformations is built upon the conventional action integral from Eq.~(\ref{principle0}). In this theory, the Newtonian absolute time $t$ plays the role \emph{of the common independent variable} of both original and destination system. Similarly to the conventional theory, we may build the \emph{extended theory of canonical transformations} on the basis of the extended action integral from Eq.~(\ref{principle1}). With the time $t=q^{0}/c$ and the configuration space variables $q^{i}$ treated on equal footing, we are enabled to correlate two Hamiltonian systems, $H$ and $H^{\prime}$, with different time scales, $t(s)$ and $T(s)$, hence to canonically map the system's time $t$ and its conjugate quantity $e$ in addition to the mapping of generalized coordinates $\bm{q}$ and momenta $\bm{p}$. The global timelike evolution parameter $s$ then plays the role of the common independent variable of both systems, $H$ and $H^{\prime}$. A general mapping of all dependent variables may be formally expressed as \begin{equation}\label{can1} Q^{\mu}=Q^{\mu}(q^{\nu},p_{\nu}),\qquad P_{\mu}=P_{\mu}(q^{\nu},p_{\nu}),\qquad\mu=0,\ldots,n \end{equation} Completely parallel to the conventional theory, the subgroup of general transformations~(\ref{can1}) that satisfy the principle $\delta S_{\mathrm{e}}=0$ of the action functional~(\ref{principle1}) is referred to as ``canonical'', \begin{equation}\label{canbed2a} \delta\int_{s_{a}}^{s_{b}} L_{\mathrm{e}}\left(q^{\nu},\dfrac{q^{\nu}}{s}\right)\mathrm{d} s =\delta\int_{s_{a}}^{s_{b}} L_{\mathrm{e}}^{\prime}\left(Q^{\nu},\dfrac{Q^{\nu}}{s}\right)\mathrm{d} s. \end{equation} The action integrals may be expressed equivalently in terms of an extended Hamiltonian by means of the Legendre transformation~(\ref{legendre1}). We thus get the following condition for a transformation~(\ref{can1}) to be canonical \begin{equation}\label{canbed2} \delta\int_{s_{a}}^{s_{b}}\left[ \sum_{\mu=0}^{n}p_{\mu}\dfrac{q^{\mu}}{s}- H_{\mathrm{e}}\big(q^{\nu},p_{\nu}\big)\right]\mathrm{d} s =\delta\int_{s_{a}}^{s_{b}}\left[ \sum_{\mu=0}^{n}P_{\mu}\dfrac{Q^{\mu}}{s}- H^{\prime}_{\mathrm{e}}\big(Q^{\nu},P_{\nu}\big)\right]\mathrm{d} s.
\end{equation} As we are operating with \emph{functionals}, the conditions~(\ref{canbed2a}) and (\ref{canbed2}) hold if the \emph{integrands} differ at most by the derivative $\mathrm{d} F_{1}/\mathrm{d} s$ of an arbitrary differentiable function $F_{1}(q^{\nu},Q^{\nu})$ \begin{align} L_{\mathrm{e}}&=L_{\mathrm{e}}^{\prime}+\dfrac{F_{1}}{s}\label{can2a}\\ \sum_{\mu=0}^{n}p_{\mu}\dfrac{q^{\mu}}{s}-H_{\mathrm{e}}&= \sum_{\mu=0}^{n}P_{\mu}\dfrac{Q^{\mu}}{s}-H^{\prime}_{\mathrm{e}}+\dfrac{F_{1}}{s}. \label{can2} \end{align} Because of $$ \delta\int_{s_{a}}^{s_{b}}\dfrac{F_{1}}{s}\,\mathrm{d} s=
\delta\left({\left.F_{1}\right|}_{s_{b}}\right)-
\delta\left({\left.F_{1}\right|}_{s_{a}}\right)\equiv0, $$ a term $\mathrm{d} F_{1}/\mathrm{d} s$ does not contribute to the variation of the action functional~(\ref{principle1}). This means that the particular path $\left(\bar{\bm{q}}(s),\bar{t}(s)\right)$ on which the action integral takes on an extremum is maintained.
We restrict ourselves to functions $F_{1}(q^{\nu},Q^{\nu})$ of the old and the new extended configuration space variables, hence to a function of those variables, whose derivatives match those of the integrands in Eq.~(\ref{canbed2}). Calculating the $s$-derivative of $F_{1}$, \begin{equation}\label{genf1} \dfrac{F_{1}}{s}=\sum_{\mu=0}^{n}\left[ \pfrac{F_{1}}{q^{\mu}}\dfrac{q^{\mu}}{s}+ \pfrac{F_{1}}{Q^{\mu}}\dfrac{Q^{\mu}}{s}\right], \end{equation} we then get \emph{unique} transformation rules by comparing the coefficients of Eq.~(\ref{genf1}) with those of (\ref{can2}) \begin{equation}\label{F1} p_{\mu}=\pfrac{F_{1}}{q^{\mu}},\qquad P_{\mu}=-\pfrac{F_{1}}{Q^{\mu}},\qquad H^{\prime}_{\mathrm{e}}=H_{\mathrm{e}}. \end{equation} $F_{1}$ is referred to as the \emph{extended generating function} of the --- now generalized --- canonical transformation. The extended Hamiltonian $H_{\mathrm{e}}$ has the important property that its \emph{value} is conserved under extended canonical transformations. This means that the system's physical evolution remains confined to the surface $H^{\prime}_{\mathrm{e}}=0$, hence that the condition~$(\ref{hamid})$ is maintained in the transformed system, as required. Corresponding to the extended set of canonical equations, the additional transformation rule is given for the index $\mu=0$. This transformation rule may be expressed equivalently in terms of $t(s)$, $e(s)$, and $T(s)$, $E(s)$ as \begin{equation}\label{F1a} e=-\pfrac{F_{1}}{t},\qquad E=\pfrac{F_{1}}{T}, \end{equation} with $E$, correspondingly to Eq.~(\ref{p0-def0}), the value of the transformed Hamiltonian $H^{\prime}$ \begin{equation}\label{P0-def0} P_{0}(s)=-\frac{E(s)}{c},\qquad E(s)\stackrel{\not\equiv}{=} H^{\prime}(\bm{Q}(s),\bm{P}(s),T(s)). \end{equation} The addressed transformed Hamiltonian $H^{\prime}$ is finally obtained from the general correlation of conventional and extended Hamiltonians from Eq.~(\ref{H1-def}), and the transformation rule $H^{\prime}_{\mathrm{e}}=H_{\mathrm{e}}$ for the extended Hamiltonian from Eq.~(\ref{F1}) $$ \Big[H^{\prime}(\bm{Q},\bm{P},T)-E\Big]\dfrac{T}{s}= \Big[H(\bm{q},\bm{p},t)-e\Big]\dfrac{t}{s}. $$ Eliminating the evolution parameter $s$, we arrive at the following two equivalent transformation rules for the conventional Hamiltonians under extended canonical transformations \begin{align} \Big[H^{\prime}(\bm{Q},\bm{P},T)-E\Big]\pfrac{T}{t}&=H(\bm{q},\bm{p},t)-e\nonumber\\ \Big[H(\bm{q},\bm{p},t)-e\Big]\pfrac{t}{T}&=H^{\prime}(\bm{Q},\bm{P},T)-E. \label{canham1} \end{align} The transformation rules (\ref{canham1}) are generalizations of the rule for conventional canonical transformations as now cases with $T\ne t$ are included. We will see at the end of this section that the rules~(\ref{canham1}) merge for the particular case $T=t$ into the corresponding rules of conventional canonical transformation theory.
By means of the Legendre transformation \begin{equation}\label{legendre-F1} F_{2}(q^{\nu},P_{\nu})=F_{1}(q^{\nu},Q^{\nu})+ \sum_{\mu=0}^{n}Q^{\mu}P_{\mu},\qquad P_{\mu}=-\pfrac{F_{1}}{Q^{\mu}}, \end{equation} we may express the extended generating function of a generalized canonical transformation equivalently as a function of the original extended configuration space variables $q^{\nu}$ and the extended set of transformed canonical momenta $P_{\nu}$. As, by definition, the functions $F_{1}$ and $F_{2}$ agree in their dependence on the $q^{\mu}$, so do the corresponding transformation rules $$ \pfrac{F_{1}}{q^{\mu}}=\pfrac{F_{2}}{q^{\mu}}=p_{\mu}. $$ This means that the $q^{\mu}$ do not take part in the transformation defined by~(\ref{legendre-F1}). As $F_{1}$ does not depend on the $P_{\nu}$, the new transformation rule pertaining to $F_{2}$ thus follows immediately as \begin{align*} \pfrac{F_{2}}{P_{\nu}}&=\sum_{\mu=0}^{n}Q^{\mu} \pfrac{P_{\mu}}{P_{\nu}}=\sum_{\mu=0}^{n}Q^{\mu}\delta_{\mu}^{\nu}\\ &=Q^{\nu}. \end{align*} The new set of transformation rules, which is, of course, equivalent to the previous set from Eq.~(\ref{F1}), is thus \begin{equation}\label{F2} p_{\mu}=\pfrac{F_{2}}{q^{\mu}},\qquad Q^{\mu}=\pfrac{F_{2}}{P_{\mu}},\qquad H^{\prime}_{\mathrm{e}}=H_{\mathrm{e}}. \end{equation} Expressed in terms of the variables $\bm{q}$, $\bm{p}$, $t$, $e$, and $\bm{Q}$, $\bm{P}$, $T$, $E$ the new set of coordinate transformation rules takes on the more elaborate form \begin{equation}\label{rules} p_{i}=\pfrac{F_{2}}{q^{i}},\qquad Q^{i}=\pfrac{F_{2}}{P_{i}},\qquad e=-\pfrac{F_{2}}{t},\qquad T=-\pfrac{F_{2}}{E}. \end{equation} Similarly to the conventional theory of canonical transformations, there are two more possibilities to define a generating function of an extended canonical transformation. By means of the Legendre transformation $$ F_{3}(p_{\nu},Q^{\nu})=F_{1}(q^{\nu},Q^{\nu})- \sum_{\mu=0}^{n}q^{\mu}p_{\mu},\qquad p_{\mu}=\pfrac{F_{1}}{q^{\mu}}, $$ we find in the same manner as above the transformation rules \begin{equation}\label{F3} q^{\mu}=-\pfrac{F_{3}}{p_{\mu}},\qquad P_{\mu}=-\pfrac{F_{3}}{Q^{\mu}},\qquad H^{\prime}_{\mathrm{e}}=H_{\mathrm{e}}. \end{equation} Finally, applying the Legendre transformation, defined by $$ F_{4}(p_{\nu},P_{\nu})=F_{3}(p_{\nu},Q^{\nu})+ \sum_{\mu=0}^{n}Q^{\mu}P_{\mu},\qquad P_{\mu}=-\pfrac{F_{3}}{Q^{\mu}}, $$ the following equivalent version of transformation rules emerges $$ q^{\mu}=-\pfrac{F_{4}}{p_{\mu}},\qquad Q^{\mu}= \pfrac{F_{4}}{P_{\mu}},\qquad H^{\prime}_{\mathrm{e}}=H_{\mathrm{e}}. $$ Calculating the second derivatives of the generating functions, we conclude that the following correlations for the derivatives of the general mapping from Eq.~(\ref{can1}) must hold for the entire set of extended phase-space variables, $$ \pfrac{Q^{\mu}}{q^{\nu}}=\pfrac{p_{\nu}}{P_{\mu}},\qquad \pfrac{Q^{\mu}}{p_{\nu}}=-\pfrac{q^{\nu}}{P_{\mu}},\qquad \pfrac{P_{\mu}}{q^{\nu}}=-\pfrac{p_{\nu}}{Q^{\mu}},\qquad \pfrac{P_{\mu}}{p_{\nu}}=\pfrac{q^{\nu}}{Q^{\mu}}. $$ If and only if these conditions are fulfilled for all $\mu,\nu=0,\ldots,n$, the extended coordinate transformation~(\ref{can1}) is canonical and preserves the form of the extended set of canonical equations~(\ref{caneq-def}). Otherwise, we are dealing with a general, non-canonical coordinate transformation that does \emph{not} preserve the form of the canonical equations.
The connection of the extended canonical transformation theory with the conventional one is furnished by the particular extended generating function \begin{equation}\label{F2-triv} F_{2}(\bm{q},\bm{P},t,E)=f_{2}(\bm{q},\bm{P},t)-tE, \end{equation} with $f_{2}(\bm{q},\bm{P},t)$ denoting a conventional generating function. According to Eqs.~(\ref{rules}), the coordinate transformation rules following from~(\ref{F2-triv}) are $$ p_{i}=\pfrac{f_{2}}{q^{i}},\qquad Q^{i}=\pfrac{f_{2}}{P_{i}}, \qquad e=-\pfrac{f_{2}}{t}+E,\qquad T=t. $$ With $\partial T/\partial t=1$, the general transformation rule~(\ref{canham1}) for conventional Hamiltonians now yields the well-known rule for Hamiltonians $H^{\prime}$ under conventional canonical transformations, $$ H^{\prime}(\bm{Q},\bm{P},t)=H(\bm{q},\bm{p},t)+E-e=H(\bm{q},\bm{p},t)+\pfrac{f_{2}}{t}. $$ Canonical transformations that are defined by extended generating functions of the form of Eq.~(\ref{F2-triv}) leave the time variable unchanged and thus define the subgroup of conventional canonical transformations within the general group of extended canonical transformations. Corresponding to the trivial extended Hamiltonian from Eq.~(\ref{H1-triv}), we may refer to (\ref{F2-triv}) as the \emph{trivial extended generating function}. \subsection{\label{sec:hj}Extended Hamilton-Jacobi equation} In the context of the extended canonical transformation theory, we may derive an extended version of the Hamilton-Jacobi equation. We are looking for a generating function $F_{2}(q^{\nu},P_{\nu})$ of an extended canonical transformation that maps a given extended Hamiltonian $H_{\mathrm{e}}=0$ into a transformed extended Hamiltonian $H^{\prime}_{\mathrm{e}}=0$ with the property that \emph{all} partial derivatives of $H^{\prime}_{\mathrm{e}}(Q^{\nu},P_{\nu})$ vanish. Hence, according to the extended set of canonical equations~(\ref{caneq-def}), the derivatives of all canonical variables $Q^{\mu}(s),P_{\mu}(s)$ with respect to the system's evolution parameter $s$ must vanish \begin{equation}\label{hj-cond} \pfrac{H^{\prime}_{\mathrm{e}}}{P_{\mu}}=\dfrac{Q^{\mu}}{s}=0,\qquad -\pfrac{H^{\prime}_{\mathrm{e}}}{Q^{\mu}}=\dfrac{P_{\mu}}{s}=0,\qquad \mu=0,\ldots,n. \end{equation} This means that \emph{all} transformed canonical variables $Q^{\mu},P_{\mu}$ must be constants of motion. Writing the variables for the index $\mu=0$ separately, we thus have $$ T=\mathrm{const.},\quad Q^{i}=\mathrm{const.},\quad E=\mathrm{const.},\quad P_{i}=\mathrm{const.} $$ Thus, corresponding to the conventional Hamilton-Jacobi formalism, the vectors of the transformed canonical variables, $\bm{Q}$ and $\bm{P}$, are constant. Yet, in the extended formalism, the transformed time $T$ is also a constant. The particular generating function $F_{2}(q^{\nu},P_{\nu})$ that defines transformation rules for the extended set of canonical variables such that Eqs.~(\ref{hj-cond}) hold for the transformed variables thus defines a mapping of the entire system into its state at a fixed instant of time, hence --- up to trivial shifts in the origin of the time scale --- into its initial state at $T=t(0)$ $$ T=t(0),\quad Q^{i}=q^{i}(0),\quad P_{i}=p_{i}(0),\quad E=H(\bm{q}(0),\bm{p}(0),t(0)). $$ We may refer to this particular generating function as the \emph{extended Hamiltonian action function} $F_{2}\equiv S_{\mathrm{e}}(q^{\nu},P_{\nu})$.
According to the transformation rule $H^{\prime}_{\mathrm{e}}=H_{\mathrm{e}}$ for extended Hamiltonians from Eq.~(\ref{F1}), we obtain the transformed extended Hamiltonian $H^{\prime}_{\mathrm{e}}\equiv0$ simply by expressing the original extended Hamiltonian $H_{\mathrm{e}}=0$ in terms of the transformed variables. This means for the conventional Hamiltonian $H(\bm{q},\bm{p},t)$ according to Eq.~(\ref{H1-def}) in conjunction with the transformation rules from Eqs.~(\ref{rules}), $$ \left[H\left(\bm{q},\pfrac{S_{\mathrm{e}}}{\bm{q}},t\right)+ \pfrac{S_{\mathrm{e}}}{t}\right]\dfrac{t}{s}=0. $$ As we have $\mathrm{d} t/\mathrm{d} s\ne0$ in general, we finally get the generalized form of the Hamilton-Jacobi equation, \begin{equation}\label{gen-hjgl} H\left(q^{1},\ldots,q^{n},\pfrac{S_{\mathrm{e}}}{q^{1}},\ldots, \pfrac{S_{\mathrm{e}}}{q^{n}},t\right)+\pfrac{S_{\mathrm{e}}}{t}=0. \end{equation} Equation (\ref{gen-hjgl}) has exactly the \emph{form} of the conventional Hamilton-Jacobi equation. Yet, it is actually a \emph{generalization} as the extended action function $S_{\mathrm{e}}$ represents an \emph{extended} generating function of type $F_{2}$, as defined by Eq.~(\ref{legendre-F1}). This means that $S_{\mathrm{e}}$ is also a function of the (constant) transformed energy $E=-cP_{0}$.
Summarizing, the extended Hamilton-Jacobi equation may be interpreted as defining the mapping of all canonical coordinates $\bm{q}$, $\bm{p}$, $t$, and $e$ of the actual system into constants $\bm{Q}$, $\bm{P}$, $T$, and $E$. In other words, it defines the mapping of the entire dynamical system from its actual state at time $t$ into its state at a \emph{fixed instant of time}, $T$, which may be identified with the initial time. \subsection{\label{sec:pathint}Generalized path integral with extended Lagrangians} In Feynman's path integral approach to quantum mechanics, the space and time evolution of a wave function $\psi(\bm{q},t)$ is formulated in terms of a transition amplitude density $K(b,a)$, also referred to as a \emph{kernel} or a \emph{propagator}:
\begin{equation}\label{wave-evol} \psi(\bm{q}_{b},t_{b})=\int_{-\infty}^{\infty} K(\bm{q}_{b},t_{b};\bm{q}_{a},t_{a})\,\psi(\bm{q}_{a},t_{a})\,\mathrm{d}^3\bm{q}_{a}. \end{equation}
The parameterized kernel $K_{\sigma}(b,a)$ for a parameterized action $S_{\mathrm{e}}$ is given by the multiple path integral \begin{equation}\label{kernel-para} K_{\sigma}(b,a)=\iint\exp\left\{\frac{i}{\hbar} S_{\mathrm{e}}[\bm{q}(s),t(s)]\right\}\mathscr{D}^3\bm{q}(s)\mathscr{D}t(s). \end{equation} Herein, the integrals are to be taken over all paths that go from $(\bm{q}_{a},t_{a})$ at $s_{a}$ to $(\bm{q}_{b},t_{b})$ at $s_{b}$. The justification for integrating over all times is that in relativistic physics we must treat space and time on equal footing. Hence, we must allow the laboratory time $t$ to take any value --- positive and even negative ones --- if we regard $t$ from the viewpoint of a particle with its proper time $s$. We thus additionally integrate over all \emph{histories} of the particle. The integration over all \emph{futures} can then be interpreted as integration over all histories of the \emph{anti-particle}, whose proper timescale runs backwards in terms of the particle's proper timescale.\cite{stueckel}
If the time paths and the spatial paths are taken to be independent of each other, hence if we do not incorporate the shell condition (\ref{lagid}) into the integration boundaries, we also sum over all particles off the mass shell. The action functional $S_{\mathrm{e}}$ stands for the $s$-integral over the extended Lagrangian $L_{\mathrm{e}}$, as defined by Eq.~(\ref{principle1}).
In classical dynamics, the parameterization of space and time variables can be eliminated by means of the shell condition~(\ref{lagid}). For the corresponding quantum description, the uncertainty principle from Eq.~(\ref{uncertain}) applies. It tells us that an \emph{accurate} fulfillment of the condition $\Delta e_{\mathrm{e}}(s)=0$ is related to a \emph{complete uncertainty} about the parameterization of the system's phase-space variables in terms of $s$. Therefore, in the context of the path integral approach, the condition $\Delta e_{\mathrm{e}}(s)=0$ is incorporated by \emph{integrating} the parameterized kernel $K_{\sigma}(b,a)$ over all possible parameterizations $\sigma=s_{b}-s_{a}>0$ of coordinates $\bm{q}(s)$ and time~$t(s)$. The final kernel, hence the transition amplitude density, is thus given by \begin{equation}\label{kernel-gen} K(b,a)=\int_{0}^{\infty}K_{\sigma}(b,a)\,\mathrm{d}\sigma. \end{equation} This means that \emph{all} parameterized kernels $K_{\sigma}(b,a)$ contribute with \emph{equal weight} to the total transition amplitude $K(b,a)$. As an example, we calculate in Sect.~\ref{sec:prop} the explicit form of the space-time propagator for the wave function of a relativistic free particle from the extended Lagrangian $L_{\mathrm{e}}$ of the pertaining classical system.
For an infinitesimal step $\delta\epsilon=s_{b}-s_{a}$, we may approximate the action functional $S_{\mathrm{e}}$ from Eq.~(\ref{principle1}) by $$ S_{\mathrm{e},\delta\epsilon}[q^{\mu}(s)]=\delta\epsilon\,L_{\mathrm{e}}\left(\frac{q^{\mu}_{b}+ q^{\mu}_{a}}{2},\frac{q^{\mu}_{b}-q^{\mu}_{a}}{\delta\epsilon}\right). $$ For $s_{b}=s_{a}+\delta\epsilon$, the kernel $K_{\sigma}(s_{a}+\delta\epsilon,s_{a})$ from Eq.~(\ref{kernel-para}) that yields the transition amplitude density for a particle along this infinitesimal interval $s_{b}-s_{a}$ is accordingly given by $$ K(b,a)=\frac{1}{M} \exp\left[\frac{i}{\hbar}S_{\mathrm{e},\delta\epsilon}\right]. $$ As we proceed an infinitesimal step $\delta\epsilon$ only, and then take the limit $\delta\epsilon\to0$, the integration~(\ref{kernel-gen}) over all possible parameterizations of this step must be omitted. For, in contrast to the situation discussed above, a small $\delta\epsilon=\Delta s$ is related to a large uncertainty with respect to satisfying the condition $\Delta e_{\mathrm{e}}(s)=0$, so that in the limit $\delta\epsilon\to0$ the condition ceases to exist.
The yet to be determined normalization factor $M$ represents the integration measure for one step of the multiple path integral~(\ref{kernel-para}). Clearly, this measure must depend on the step size~$\delta\epsilon$. The transition of a given wave function $\psi(q^{\mu}_{a})$ at the particle's proper time $s_{a}$ to the wave function $\psi(q^{\mu}_{b})$ that is separated by an infinitesimal proper time interval $\delta\epsilon=s_{b}-s_{a}$ can now be formulated according to Eq.~(\ref{wave-evol}) as \begin{equation}\label{trans-infini} \psi(q^{\mu}_{b})=\frac{1}{M}\int\exp\left[ \frac{i}{\hbar}S_{\mathrm{e},\delta\epsilon}\right]\psi(q^{\mu}_{a})\,\mathrm{d}^{4}q_{a}. \end{equation} Note that we integrate here over the entire space-time. To serve as a test of this approach, we derive in Sect.~\ref{sec:kg} the Klein-Gordon equation on the basis of the extended Lagrangian $L_{\mathrm{e}}$ for a relativistic particle in an external electromagnetic field. \section{Examples of extended Hamilton-Lagrange systems} \subsection{\label{sec:lag1-fp}Extended Lagrangian for a relativistic free particle} As only expressions of the form $\bm{q}^{2}-c^{2}t^{2}$ are preserved under the Lorentz group, the conventional Lagrangian for a \emph{free point particle} of mass $m$, given by \begin{equation}\label{lagnr-fp} L^{\mathrm{nr}}\left(\bm{q},\dfrac{\bm{q}}{t},t\right)=T-V= {\textstyle\frac{1}{2}} m{\left(\dfrac{\bm{q}}{t}\right)}^{2}-mc^{2}, \end{equation} is obviously not Lorentz-invariant. Yet, in the extended description, a corresponding Lorentz-invariant Lagrangian $L_{\mathrm{e}}$ can be constructed by introducing $s$ as the new independent variable, and by treating the space and time variables, $\bm{q}(s)$ and $q^{0}=ct(s)$, equally. This is achieved by adding the corresponding derivative of the time variable $t(s)$, \begin{equation}\label{lag1-fp} L_{\mathrm{e}}\left(\bm{q},\dfrac{\bm{q}}{s},t,\dfrac{t}{s}\right)= {\textstyle\frac{1}{2}} mc^{2}\left[{\frac{1}{c^{2}}\left(\dfrac{\bm{q}}{s}\right)}^{2}- {\left(\dfrac{t}{s}\right)}^{2}-1\right]. \end{equation} The constant third term has been defined accordingly to ensure that $L_{\mathrm{e}}$ converges to $L^{\mathrm{nr}}$ in the limit $\mathrm{d} t/\mathrm{d} s\to1$. Of course, the dynamics following from (\ref{lagnr-fp}) and (\ref{lag1-fp}) are \emph{different} --- which reflects the modification our dynamics encounters if we switch from a non-relativistic to a relativistic description. The Lagrangian~(\ref{lag1-fp}) is not a homogeneous form of first order in the velocities $\mathrm{d} q^{\mu}/\mathrm{d} s,\mu=0,\ldots,3$. Therefore, we obtain from Eq.~(\ref{lagid}) the hypersurface condition, also referred to as the mass shell condition: \begin{equation}\label{constraint-lag} \frac{1}{c^{2}}{\left(\dfrac{\bm{q}}{s}\right)}^{2}-{\left(\dfrac{t}{s}\right)}^{2}+1=0\qquad\Leftrightarrow\qquad \frac{1}{c^{2}}{\left(\dfrac{\bm{q}}{t}\right)}^{2}+{\left(\dfrac{s}{t}\right)}^{2}-1=0. \end{equation} We thus encounter the reciprocal value of the relativistic scale factor, $\gamma$, \begin{equation}\label{gamma-inv} \dfrac{s}{t}=\sqrt{1-\frac{1}{c^{2}}{\left(\dfrac{\bm{q}}{t}\right)}^{2}}=\gamma^{-1}, \end{equation} which shows that in the case of the Lagrangian~(\ref{lag1-fp}) the system evolution parameter $s$ is physically nothing else than the particle's proper time. Inserting the condition~(\ref{constraint-lag}) into the Lagrangian yields the constant \emph{value} of $L_{\mathrm{e}}$, $$
L_{\mathrm{e}}\big|_{\text{mass shell}}=-mc^{2}. $$ In contrast to the non-relativistic description, the constant rest energy term $-{\textstyle\frac{1}{2}} mc^{2}$ in the extended Lagrangian~(\ref{lag1-fp}) is essential. As the extended Lagrangian~(\ref{lag1-fp}) is consequently not a homogeneous form of first order in the velocities $\mathrm{d} q^{\mu}/\mathrm{d} s,\;\mu=0,\ldots,3$, the condition~(\ref{lagid}) is not satisfied \emph{identically}. Yet, in the derivation of~(\ref{lagid}), we have assumed that a corresponding conventional Lagrangian $L$ exists, hence a Lagrangian that depends on $\mathrm{d} t/\mathrm{d} s$ only indirectly via the reparameterization condition $$ \dfrac{\bm{q}}{t}=\frac{\mathrm{d}\bm{q}/\mathrm{d} s}{\mathrm{d} t/\mathrm{d} s} $$ from Eq.~(\ref{lag1}) applied to its velocities. We must, therefore, make sure that such a corresponding \emph{conventional} Lagrangian $L$ exists, hence a function $L=L_{\mathrm{e}}\,\mathrm{d} s/\mathrm{d} t$ that no longer depends on $s$. For the extended Lagrangian $L_{\mathrm{e}}$ from Eq.~(\ref{lag1-fp}), a corresponding conventional Lagrangian $L$ indeed exists. Inserting Eq.~(\ref{constraint-lag}) into Eq.~(\ref{lag1-fp}), we find with Eq.~(\ref{gamma-inv}) \begin{align}
L\left(\bm{q},\dfrac{\bm{q}}{t},t\right)&=\left. L_{\mathrm{e}}\left(\bm{q},\dfrac{\bm{q}}{s},t,\dfrac{t}{s}\right)\right|_{\text{mass shell}}\dfrac{s}{t}\nonumber\\ &=-mc^{2}\,\dfrac{s}{t}\nonumber\\ &=-mc^{2}\sqrt{1-\frac{1}{c^{2}}{\left( \dfrac{\bm{q}}{t}\right)}^{2}}. \label{lag-fp} \end{align} We thus encounter the well-known conventional Lagrangian of a relativistic free particle. In contrast to the equivalent extended Lagrangian from Eq.~(\ref{lag1-fp}), the Lagrangian~(\ref{lag-fp}) is \emph{not} quadratic in the derivatives of the dependent variables, $\bm{q}(t)$. The loss of the quadratic form originates from the \emph{projection} of the hypersurface description within the tangent bundle $T(\mathbb{M}\times\mathbb{R})$ to the description within $(T\mathbb{M})\times\mathbb{R}$. The quadratic form is recovered in the non-relativistic limit by expanding the square root, which yields the Lagrangian $L^{\mathrm{nr}}$ from Eq.~(\ref{lagnr-fp}).
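As a brief check --- added here for illustration, not part of the original derivation --- expanding the square root of Eq.~(\ref{lag-fp}) for velocities small compared to $c$ makes the recovery of the quadratic form explicit, $$ -mc^{2}\sqrt{1-\frac{1}{c^{2}}{\left(\dfrac{\bm{q}}{t}\right)}^{2}}= -mc^{2}+{\textstyle\frac{1}{2}} m{\left(\dfrac{\bm{q}}{t}\right)}^{2}+ \frac{m}{8c^{2}}{\left[{\left(\dfrac{\bm{q}}{t}\right)}^{2}\right]}^{2}+\ldots, $$ whose first two terms coincide with the non-relativistic Lagrangian $L^{\mathrm{nr}}$ from Eq.~(\ref{lagnr-fp}).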
Denoting by $q^{\mu}$ the components of the contravariant four-vector of space-time variables $(q^{0},\ldots,q^{3})=(ct,x,y,z)$, the corresponding covariant vector is then $(q_{0},\ldots,q_{3})=(-ct,x,y,z)$ for the metric $\eta_{\mu\nu}=\mathrm{diag}(-1,1,1,1)$. Adopting the ``summation convention,'' which means to sum over all quantities with pairs of identical covariant and contravariant indices, the non-homogeneous extended Lagrangian from Eq.~(\ref{lag1-fp}) can then be rewritten in covariant notation as $$ L_{\mathrm{e}}\left(q^{\mu},\dfrac{q^{\mu}}{s}\right)= {\textstyle\frac{1}{2}} m\left(\dfrac{q^{\alpha}}{s}\dfrac{q_{\alpha}}{s}-c^{2}\right). $$ The hypersurface condition~(\ref{lagid}) is then expressed as $$ \dfrac{q^{\alpha}}{s}\dfrac{q_{\alpha}}{s}=-c^{2}, $$ which depicts the constant length of the four-velocity vector.
To summarize, with $L_{\mathrm{e}}$ from Eq.~(\ref{lag1-fp}), we have found a \emph{non-trivial} extended Lagrangian $L_{\mathrm{e}}$, i.e.\ an extended Lagrangian that is \emph{non-homogeneous} in its velocities and possesses a corresponding conventional Lagrangian $L=L_{\mathrm{e}}\,\mathrm{d} s/\mathrm{d} t$, with $\mathrm{d} s/\mathrm{d} t$ determined by Eq.~(\ref{lagid}) that now embodies an implicit equation rather than an identity. In addition to the equations of motion for $\bm{q}(s)$, this $L_{\mathrm{e}}$ determines uniquely the correlation $t(s)$ of the laboratory time $t$ with the particle's proper time~$s$. \subsection{\label{sec:lag1-triv-fp}Trivial extended Lagrangian for a relativistic free particle} Given the conventional Lagrangian~(\ref{lag-fp}), we may immediately set up the corresponding \emph{trivial} extended Lagrangian according to Eq.~(\ref{lag1}) by multiplying $L$ with $\mathrm{d} t/\mathrm{d} s$ \begin{align} L_{\mathrm{e}}^{\mathrm{triv}}\left(\bm{q},\dfrac{\bm{q}}{s},t,\dfrac{t}{s}\right)&= -mc\sqrt{c^{2}{\left(\dfrac{t}{s}\right)}^{2}- {\left(\dfrac{\bm{q}}{s}\right)}^{2}}\nonumber\\ &=-mc\sqrt{-\dfrac{q^{\alpha}}{s}\dfrac{q_{\alpha}}{s}}. \label{eq:lag1-triv-fp} \end{align} We easily convince ourselves that the trivial extended Lagrangian satisfies Eq.~(\ref{lagid}) \emph{identically} \begin{align*} \pfrac{L_{\mathrm{e}}^{\mathrm{triv}}}{\left(\dfrac{q^{\mu}}{s}\right)}\dfrac{q^{\mu}}{s}&= \frac{mc}{\sqrt{-\dfrac{q^{\alpha}}{s}\dfrac{q_{\alpha}}{s}}}\dfrac{q_{\mu}}{s}\dfrac{q^{\mu}}{s}\\ &=-mc\sqrt{-\dfrac{q^{\alpha}}{s}\dfrac{q_{\alpha}}{s}}\\ &\equiv L_{\mathrm{e}}^{\mathrm{triv}} \end{align*} and thus fulfills Eq.~(\ref{lagid-deri}), $$ \pfrac{^{2}L_{\mathrm{e}}^{\mathrm{triv}}} {\left(\dfrac{q^{\mu}}{s}\right)\partial\left(\dfrac{q_{\nu}}{s}\right)}= \frac{mc}{{\left(-\dfrac{q^{\alpha}}{s}\dfrac{q_{\alpha}}{s}\right)}^{3/2}} \left(\dfrac{q^{\nu}}{s}\dfrac{q_{\mu}}{s}-\delta_{\mu}^{\nu}\, \dfrac{q^{\beta}}{s}\dfrac{q_{\beta}}{s}\right), $$ hence \begin{align*} \pfrac{^{2}L_{\mathrm{e}}^{\mathrm{triv}}} {\left(\dfrac{q^{\mu}}{s}\right)\partial\left(\dfrac{q_{\nu}}{s}\right)}\dfrac{q^{\mu}}{s}&= mc{{\left(-\dfrac{q^{\alpha}}{s}\dfrac{q_{\alpha}}{s}\right)}^{-\frac{3}{2}}} \left(\dfrac{q^{\nu}}{s}\dfrac{q_{\mu}}{s}-\delta_{\mu}^{\nu}\, \dfrac{q^{\beta}}{s}\dfrac{q_{\beta}}{s}\right)\dfrac{q^{\mu}}{s}\\ &\equiv0. \end{align*} The subsequent equation of motion for $t(s)$ does \emph{not} determine a parametrization of time $t$ but rather allows for arbitrary parametrizations. As a trivial extended Lagrangian $L_{\mathrm{e}}^{\mathrm{triv}}$ generally follows by multiplying a given conventional Lagrangian $L$ by $\mathrm{d} t/\mathrm{d} s$, a formally covariant description is encountered in the sense that space and time variables are then treated on equal footing. Yet, no additional information on the dynamical system is provided by the transition from $L$ to $L_{\mathrm{e}}^{\mathrm{triv}}$. \subsection{\label{sec:ham1-triv-fp}Trivial extended Hamiltonian for a relativistic free particle} For a trivial extended Lagrangian, it is not possible to derive the corresponding trivial extended Hamiltonian as the Legendre transformation of a homogeneous extended Lagrangian is singular. This does not mean that a corresponding extended Hamiltonian does not exist, as is frequently claimed in the literature\cite{johns}.
On the contrary, for any conventional Lagrangian $L$ that can be Legendre-transformed into a corresponding conventional Hamiltonian $H$, one can always set up $L_{\mathrm{e}}$ according to Eq.~(\ref{lag1}) and $H_{\mathrm{e}}$ according to Eq.~(\ref{H1-def}). Setting up the extended set of Euler-Lagrange equations for a trivial extended Lagrangian then yields exactly the same description of the given dynamical system as setting up the extended set of canonical equations for the trivial extended Hamiltonian obtained this way.
In order to set up the trivial extended Hamiltonian $H_{\mathrm{e}}^{\mathrm{triv}}$ that corresponds to the trivial extended Lagrangian $L_{\mathrm{e}}^{\mathrm{triv}}$ from Eq.~(\ref{eq:lag1-triv-fp}) of the free relativistic point particle, one must first Legendre-transform the underlying conventional Lagrangian~(\ref{lag-fp}) to the corresponding conventional Hamiltonian according to $$ H(\bm{q},\bm{p},t)=\bm{p}\dot{\bm{q}}-L(\bm{q},\dot{\bm{q}},t),\qquad\bm{p}=\pfrac{L}{\dot{\bm{q}}}. $$ For the particular Lagrangian~(\ref{lag-fp}), one finds $$ \bm{p}=\frac{m\dot{\bm{q}}}{\sqrt{1-\frac{\dot{\bm{q}}^{2}}{c^{2}}}}\quad\Rightarrow\quad H=\frac{mc^{2}}{\sqrt{1-\frac{\dot{\bm{q}}^{2}}{c^{2}}}}. $$ A Hamiltonian must be expressed in terms of the canonical momenta rather than by the velocities, hence $\dot{\bm{q}}$ must be expressed in terms of $\bm{p}$, $$ H^{2}=\frac{m^{2}c^{4}}{1-\frac{\dot{\bm{q}}^{2}}{c^{2}}},\qquad \bm{p}^{2}=\frac{m^{2}\dot{\bm{q}}^{2}}{1-\frac{\dot{\bm{q}}^{2}}{c^{2}}}\quad\Rightarrow\quad H^{2}-\bm{p}^{2}c^{2}=\frac{m^{2}c^{4}}{1-\frac{\dot{\bm{q}}^{2}}{c^{2}}} \left(1-\frac{\dot{\bm{q}}^{2}}{c^{2}}\right)=m^{2}c^{4}, $$ hence $$ H(\bm{q},\bm{p},t)=\sqrt{\bm{p}^{2}c^{2}+m^{2}c^{4}}. $$ The corresponding trivial extended Hamiltonian $H_{\mathrm{e}}^{\mathrm{triv}}$ can now be set up according to the general recipe from Eq.~(\ref{H1-def}) \begin{equation}\label{eq:ham1-triv-fp} H_{\mathrm{e}}^{\mathrm{triv}}(\bm{q},\bm{p},t,e)=\left(\sqrt{\bm{p}^{2}c^{2}+m^{2}c^{4}}-e\right)\dfrac{t}{s}. \end{equation} In contrast to the Lagrangian description, the factor $\mathrm{d} t/\mathrm{d} s$ does not represent a conjugate variable but enters into the canonical equations as an external factor. The trivial extended Hamiltonian~(\ref{eq:ham1-triv-fp}) has exactly the same information content on the underlying dynamical system as the trivial extended Lagrangian from~(\ref{eq:lag1-triv-fp}) and thus yields identical equations of motion. In particular, $H_{\mathrm{e}}^{\mathrm{triv}}$ equally does \emph{not} determine a parametrization of time, $t=t(s)$, but rather allows for arbitrary parametrizations. This can be seen by setting up the respective canonical equation $$ \dfrac{t}{s}=-\pfrac{H_{\mathrm{e}}^{\mathrm{triv}}}{e}=\dfrac{t}{s}. $$ One thus finds an \emph{identity} but no substantial canonical equation for $t=t(s)$. \subsection{\label{sec:lag1-em}Extended Lagrangian for a relativistic particle in an external electromagnetic field} The non-homogeneous extended Lagrangian $L_{\mathrm{e}}$ of a point particle of mass $m$ and charge $\zeta$ in an external electromagnetic field that is described by the potentials $(\phi,\bm{A})$ is given by \begin{align} L_{\mathrm{e}}\!\left(\bm{q},\dfrac{\bm{q}}{s},t,\dfrac{t}{s}\right)= {\textstyle\frac{1}{2}} mc^{2}\!\left[{\frac{1}{c^{2}}\left(\dfrac{\bm{q}}{s}\right)}^{2}\!- {\left(\dfrac{t}{s}\right)}^{2}\!-1\right]\!+\frac{\zeta}{c}\bm{A}(\bm{q},t) \dfrac{\bm{q}}{s}-\zeta\,\phi(\bm{q},t)\dfrac{t}{s}.\nonumber\\ \label{lag1-em} \end{align} The associated hypersurface condition~(\ref{lagid}) for $L_{\mathrm{e}}$ coincides with that for the free-particle Lagrangian from Eq.~(\ref{constraint-lag}) as all terms linear in the velocities drop out \begin{equation}\label{hypersurface-em} {\left(\dfrac{t}{s}\right)}^{2}-\frac{1}{c^{2}} {\left(\dfrac{\bm{q}}{s}\right)}^{2}-1=0. 
\end{equation} Similar to the free particle case from Eq.~(\ref{lag-fp}), the extended Lagrangian~(\ref{lag1-em}) may be projected into $(T\mathbb{M})\times\mathbb{R}$ to yield the well-known conventional relativistic Lagrangian $L$ \begin{equation}\label{lagr-em} L\left(\bm{q},\dfrac{\bm{q}}{t},t\right)= -mc^{2}\sqrt{1-\frac{1}{c^{2}}{\left( \dfrac{\bm{q}}{t}\right)}^{2}}+\frac{\zeta}{c}\bm{A} \dfrac{\bm{q}}{t}-\zeta\,\phi. \end{equation} Again, the quadratic form of the velocity terms is lost owing to the projection.
For small velocity $\mathrm{d}\bm{q}/\mathrm{d} t$, the quadratic form is regained as the square root in (\ref{lagr-em}) may be expanded to yield the conventional non-relativistic Lagrangian for a point particle in an external electromagnetic field, \begin{equation}\label{lag-em} L^{\mathrm{nr}}\left(\bm{q},\dfrac{\bm{q}}{t},t\right)= {\textstyle\frac{1}{2}} m{\left(\dfrac{\bm{q}}{t}\right)}^{2}+\frac{\zeta}{c}\bm{A} \dfrac{\bm{q}}{t}-\zeta\,\phi-mc^{2}. \end{equation} Significantly, this Lagrangian can be derived \emph{directly}, hence without the detour over the projected Lagrangian~(\ref{lagr-em}), from the extended Lagrangian~(\ref{lag1-em}) by letting $\mathrm{d} t/\mathrm{d} s\to1$.
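To make this step explicit (a short verification added here), setting $\mathrm{d} t/\mathrm{d} s=1$ in Eq.~(\ref{lag1-em}) gives $$ L_{\mathrm{e}}\Big|_{\mathrm{d} t/\mathrm{d} s=1}= {\textstyle\frac{1}{2}} m{\left(\dfrac{\bm{q}}{t}\right)}^{2}-mc^{2}+ \frac{\zeta}{c}\bm{A}(\bm{q},t)\dfrac{\bm{q}}{t}-\zeta\,\phi(\bm{q},t), $$ which is precisely the non-relativistic Lagrangian~(\ref{lag-em}).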
It is instructive to review the Lagrangian~(\ref{lag1-em}) and its non-relativistic limit~(\ref{lag-em}) in covariant notation. With Einstein's summation convention and the notation $A_{0}(q^{\mu})=-\phi(q^{\mu})$ for the particular constant metric $\eta_{\mu\nu}=\mathrm{diag}(-1,1,1,1)$, the extended Lagrangian~(\ref{lag1-em}) then writes \begin{equation}\label{lag1-em2} L_{\mathrm{e}}\left(q^{\mu},\dfrac{q^{\mu}}{s}\right)= {\textstyle\frac{1}{2}} m\,\eta_{\alpha\beta}\dfrac{q^{\alpha}}{s}\dfrac{q^{\beta}}{s}+ \frac{\zeta}{c}A_{\alpha}\dfrac{q^{\alpha}}{s}-{\textstyle\frac{1}{2}} mc^{2}. \end{equation} The hypersurface condition~(\ref{hypersurface-em}) is then converted into \begin{equation}\label{hypersurface-em2} \eta_{\alpha\beta}\dfrac{q^{\alpha}}{s}\dfrac{q^{\beta}}{s}=-c^{2}. \end{equation} Correspondingly, the non-relativistic Lagrangian~(\ref{lag-em}) has the equivalent representation \begin{equation}\label{lag-em2} L^{\mathrm{nr}}\left(q^{\mu},\dfrac{q^{\mu}}{t}\right)= {\textstyle\frac{1}{2}} m\,\eta_{\alpha\beta}\dfrac{q^{\alpha}}{t}\dfrac{q^{\beta}}{t}+ \frac{\zeta}{c}A_{\alpha}\dfrac{q^{\alpha}}{t}-{\textstyle\frac{1}{2}} mc^{2}. \end{equation} Note that $(\mathrm{d} q^{0}/\mathrm{d} t)(\mathrm{d} q_{0}/\mathrm{d} t)=-c^{2}$, which yields the second half of the rest energy term, so that (\ref{lag-em2}) indeed agrees with (\ref{lag-em}). Comparing the Lagrangian~(\ref{lag-em2}) with the extended Lagrangian from Eq.~(\ref{lag1-em2}) --- and correspondingly the Lagrangians (\ref{lag1-em}) and (\ref{lag-em}) --- we notice that the transition to the non-relativistic description is made by identifying the proper time $s$ with the laboratory time $t=q^{0}/c$. The remarkable formal similarity of the Lorentz-invariant extended Lagrangian~(\ref{lag1-em2}) with the non-invariant conventional Lagrangian~(\ref{lag-em2}) suggests that approaches based on non-relativistic Lagrangians $L^{\mathrm{nr}}$ may be transposed to a relativistic description by (i) introducing the proper time $s$ as the new system evolution parameter, (ii) treating the time $t(s)$ as an \emph{additional dependent variable} on equal footing with the configuration space variables $\bm{q}(s)$ --- commonly referred to as the ``principle of homogeneity in space-time'' --- and (iii) by replacing the conventional non-relativistic Lagrangian $L^{\mathrm{nr}}$ with the corresponding Lorentz-invariant extended Lagrangian $L_{\mathrm{e}}$, similar to the transition from (\ref{lag-em2}) to (\ref{lag1-em2}). \subsection{\label{sec:ham1-em}Extended Hamiltonian for a relativistic particle in an external electromagnetic field} The extended Hamiltonian counterpart $H_{\mathrm{e}}$ of the non-homogeneous extended Lagrangian~(\ref{lag1-em}) for a relativistic point particle in an external electromagnetic field is obtained via the Legendre transformation prescription from Eqs.~(\ref{legendre1}) and (\ref{p-def}). The transition to the extended Hamiltonian $H_{\mathrm{e}}$ is easiest calculated by starting form the covariant form~(\ref{lag1-em2}) of $L_{\mathrm{e}}$ and afterwards converting the results to $3$-vector notation. According to Eqs.~(\ref{p-def}) and (\ref{p0-def}), the canonical momenta $p_{\mu}$ are introduced by \begin{equation}\label{p-em} p_{\mu}=\pfrac{L_{\mathrm{e}}}{\left(\dfrac{q^{\mu}}{s}\right)}= m\,\eta_{\mu\alpha}\dfrac{q^{\alpha}}{s}+\frac{\zeta}{c}A_{\mu}= p_{\mu,\mathrm{kin}}+\frac{\zeta}{c}A_{\mu}. 
\end{equation} We notice that the \emph{kinetic} momentum $p_{\mu,\mathrm{kin}}=m\,\mathrm{d} q_{\mu}/\mathrm{d} s$ differs from the \emph{canonical} momentum $p_{\mu}$ in the case of a non-vanishing external potential $A_{\mu}\neq0$. The condition for the Legendre transform of $L_{\mathrm{e}}$ to exist is that its Hesse matrix $\partial^{2}L_{\mathrm{e}}/ [\partial(\mathrm{d} q^{\mu}/\mathrm{d} s)\partial(\mathrm{d} q_{\nu}/\mathrm{d} s)]$ must be non-singular, hence that the determinant of this matrix does not vanish. For the Lagrangian $L_{\mathrm{e}}$ from Eq.~(\ref{lag1-em2}), this is actually the case as $$ \det\left(\pfrac{^{2}L_{\mathrm{e}}}{\left(\dfrac{q^{\mu}}{s}\right) \partial\left(\dfrac{q_{\nu}}{s}\right)}\right)= \det\left(m\,\delta_{\mu}^{\nu}\right)= m^{4}\ne0. $$ This falsifies claims made in the literature\cite{johns} that the Hesse matrix associated with an extended Lagrangian $L_{\mathrm{e}}$ is \emph{generally singular}, and that for this reason an extended Hamiltonian $H_{\mathrm{e}}$ \emph{generally} could not be obtained by a Legendre transformation of an extended Lagrangian $L_{\mathrm{e}}$. The necessary condition for an extended Hamiltonian $H_{\mathrm{e}}$ to emerge from a Legendre transformation of an extended Lagrangian $L_{\mathrm{e}}$ is that $L_{\mathrm{e}}$ must not be a \emph{homogeneous} function of first order in its velocities $\mathrm{d} q^{\mu}/\mathrm{d} s$.
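The non-singularity of the Hesse matrix can also be checked symbolically. The following minimal SymPy sketch --- an illustration added here, with one spatial dimension kept for brevity and the Hessian taken with respect to the contravariant velocity components, so that its determinant differs from the mixed-index form quoted above only by the determinant of the metric --- contrasts the non-homogeneous extended Lagrangian with the homogeneous (trivial) one; the terms of Eq.~(\ref{lag1-em2}) that are linear in the velocities do not contribute to the Hessian and are therefore omitted:
\begin{verbatim}
import sympy as sp

m, c = sp.symbols('m c', positive=True)
u0, u1 = sp.symbols('u0 u1', real=True)   # u0 = dq^0/ds, u1 = dq^1/ds

# Non-homogeneous extended Lagrangian (free part, metric diag(-1, 1)):
L_ext  = sp.Rational(1, 2)*m*(-u0**2 + u1**2) - sp.Rational(1, 2)*m*c**2
# Trivial (homogeneous) extended Lagrangian of the free particle:
L_triv = -m*c*sp.sqrt(u0**2 - u1**2)

print(sp.hessian(L_ext, (u0, u1)).det())                 # -m**2 : non-singular
print(sp.simplify(sp.hessian(L_triv, (u0, u1)).det()))   #  0    : singular
\end{verbatim}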
With the Hesse condition being actually satisfied, the extended Hamiltonian $H_{\mathrm{e}}$ that follows as the Legendre transform~(\ref{legendre1}) of $L_{\mathrm{e}}$ reads \begin{align*} \qquad H_{\mathrm{e}}(q^{\mu},p_{\mu})&=\dfrac{q^{\alpha}}{s}\left( m\dfrac{q_{\alpha}}{s}+\frac{\zeta}{c}A_{\alpha}\right)- {\textstyle\frac{1}{2}} m\dfrac{q^{\alpha}}{s}\dfrac{q_{\alpha}}{s}- \frac{\zeta}{c}A_{\alpha}\dfrac{q^{\alpha}}{s}+{\textstyle\frac{1}{2}} mc^{2}\\ &={\textstyle\frac{1}{2}} m\dfrac{q^{\alpha}}{s}\dfrac{q_{\alpha}}{s}+{\textstyle\frac{1}{2}} mc^{2}. \end{align*} As any Hamiltonian must be expressed in terms of the canonical momenta rather than through velocities, $H_{\mathrm{e}}$ takes on the more elaborate final form according to Eq.~(\ref{p-em}) \begin{equation}\label{h1-em} H_{\mathrm{e}}(q^{\mu},p_{\mu})=\frac{1}{2m} \left(p_{\alpha}-\frac{\zeta}{c}A_{\alpha}\right) \left(p^{\alpha}-\frac{\zeta}{c}A^{\alpha}\right)+{\textstyle\frac{1}{2}} mc^{2}. \end{equation} This extended Hamiltonian coincides with the ``super-Hamiltonian'' that was postulated by Misner, Thorne, and Wheeler\cite{misner}.
In covariant notation, the condition $H_{\mathrm{e}}=0$ thus follows as $$ \left(p_{\alpha}-\frac{\zeta}{c}A_{\alpha}\right) \left(p^{\alpha}-\frac{\zeta}{c}A^{\alpha}\right)+m^{2}c^{2}=0, $$ which follows equivalently if the velocities in the hypersurface condition~(\ref{hypersurface-em2}) are replaced by the canonical momenta according to Eq.~(\ref{p-em}). In terms of the conventional $3$-vectors for the canonical momentum $\bm{p}$ and vector potential $\bm{A}$, and the scalars, energy $e$ and electric potential $\phi$, the extended Hamiltonian $H_{\mathrm{e}}$ is equivalently expressed as \begin{equation}\label{h1-em2} \qquad H_{\mathrm{e}}(\bm{q},\bm{p},t,e)=\frac{1}{2m} \left[{\left(\bm{p}-\frac{\zeta}{c}\bm{A}(\bm{q},t)\right)}^{2}- {\left(\frac{e-\zeta\phi(\bm{q},t)}{c}\right)}^{2}\right]+ {\textstyle\frac{1}{2}} mc^{2}, \end{equation} and the condition $H_{\mathrm{e}}=0$ furnishes the usual relativistic energy relation \begin{equation}\label{constraint-em} {\big(e-\zeta\phi(\bm{q},t)\big)}^{2}=c^{2} {\left(\bm{p}-\frac{\zeta}{c}\bm{A}(\bm{q},t)\right)}^{2}+m^{2}c^{4}. \end{equation} The \emph{conventional} Hamiltonian $H$ that describes the same dynamics is determined according to Eq.~(\ref{p0-def0}) as the particular \emph{function}, whose \emph{value} coincides with $e$. Solving $H_{\mathrm{e}}=0$ from Eq.~(\ref{h1-em2}) for $e$, we directly find $H$ as the left-hand side of the equation $H=e$, \begin{equation}\label{h-em} H(\bm{q},\bm{p},t)=\sqrt{c^{2}{\left(\bm{p}-\frac{\zeta}{c}\bm{A}(\bm{q},t)\right)}^{2}+ m^{2}c^{4}}+\zeta\phi(\bm{q},t)=e. \end{equation} The conventional Hamiltonian $H^{\mathrm{nr}}$ that describes the particle dynamics in the non-relativistic limit is obtained from the Lorentz-invariant Hamiltonian~(\ref{h-em}) by expanding the square root $$ H^{\mathrm{nr}}(\bm{q},\bm{p},t)=\frac{1}{2m}{\left(\bm{p}-\frac{\zeta}{c}\bm{A}(\bm{q},t) \right)}^{2}+\zeta\phi(\bm{q},t)+mc^{2}. $$ In contrast to the extended Lagrangian description, a \emph{direct} way to transpose the relativistic extended Hamiltonian from Eq.~(\ref{h1-em2}) into the non-relativistic Hamiltonian $H^{\mathrm{nr}}$ does not exist. We conclude that the Lagrangian approach is more appropriate if we want to ``translate'' a given non-relativistic Hamilton-Lagrange system into the corresponding Lorentz-invariant description.
In order to show that the extended Hamiltonian~(\ref{h1-em2}) and the well-known conventional Hamiltonian~(\ref{h-em}) indeed yield the same dynamics, we now set up the extended set of canonical equations~(\ref{caneq-def}) for the covariant extended Hamiltonian~(\ref{h1-em}) \begin{align} -\pfrac{H_{\mathrm{e}}}{q^{\mu}}=\dfrac{p_{\mu}}{s}&= \frac{\zeta}{mc}\eta^{\alpha\beta}\left(p_{\alpha}-\frac{\zeta}{c}A_{\alpha}\right) \pfrac{A_{\beta}}{q^{\mu}}\nonumber\\ \hphantom{-}\pfrac{H_{\mathrm{e}}}{p_{\mu}}=\dfrac{q^{\mu}}{s}&= \frac{1}{m}\eta^{\mu\alpha}\left(p_{\alpha}-\frac{\zeta}{c}A_{\alpha}\right).\label{eqmo-em} \end{align} In the notation of scalars and $3$-vectors, the pair of equations~(\ref{eqmo-em}) separates into the following equivalent set of four equations \begin{align} \dfrac{p_{i}}{s}&= \frac{\zeta}{mc}\left(p_j-\frac{\zeta}{c}A_j\right) \pfrac{A^j}{q^{i}}-\frac{\zeta}{mc^{2}} \left(e-\zeta\phi\right)\pfrac{\phi}{q^{i}}\nonumber\\ \dfrac{e}{s}&= -\frac{\zeta}{mc}\left(p_j-\frac{\zeta}{c}A_j\right) \pfrac{A^j}{t}+\frac{\zeta}{mc^{2}} \left(e-\zeta\phi\right)\pfrac{\phi}{t}\nonumber\\ \dfrac{q^{i}}{s}&= \frac{1}{m}\left(p^{i}-\frac{\zeta}{c}A^{i}\right)\nonumber\\ \dfrac{t}{s}&= \frac{1}{mc^{2}}\left(e-\zeta\phi\right). \label{eqmo-em2} \end{align} From the last equation, we deduce the derivative of the inverse function $s=s(t)$ and insert the condition from Eq.~(\ref{constraint-em}) \begin{equation}\label{ds-dt} \dfrac{s}{t}=\frac{mc^{2}}{e-\zeta\phi}= \frac{mc^{2}}{\sqrt{c^{2}{\left(\bm{p}-\frac{\zeta}{c} \bm{A}(\bm{q},t)\right)}^{2}+m^{2}c^{4}}}. \end{equation} The canonical equations~(\ref{eqmo-em2}) can now be expressed equivalently with the time $t$ as the independent variable \begin{align} -\dfrac{p_{i}}{t}&=-\dfrac{p_{i}}{s}\dfrac{s}{t}= -\frac{\zeta c}{\sqrt{c^{2}{\left(\bm{p}-\frac{\zeta}{c} \bm{A}(\bm{q},t)\right)}^{2}+m^{2}c^{4}}} \left(\bm{p}-\frac{\zeta}{c}\bm{A}\right) \pfrac{\bm{A}}{q^{i}}+\zeta\pfrac{\phi}{q^{i}}\nonumber\\ \hphantom{-}\dfrac{e}{t}&=\hphantom{-}\dfrac{e}{s}\dfrac{s}{t}= -\frac{\zeta c}{\sqrt{c^{2}{\left(\bm{p}-\frac{\zeta}{c} \bm{A}(\bm{q},t)\right)}^{2}+m^{2}c^{4}}}\left(\bm{p}-\frac{\zeta}{c}\bm{A}\right) \pfrac{\bm{A}}{t}+\zeta\pfrac{\phi}{t}\nonumber\\ \hphantom{-}\dfrac{q^{i}}{t}&=\hphantom{-}\dfrac{q^{i}}{s}\dfrac{s}{t}= \frac{c^{2}}{\sqrt{c^{2}{\left(\bm{p}-\frac{\zeta}{c} \bm{A}(\bm{q},t)\right)}^{2}+m^{2}c^{4}}} \left(p^{i}-\frac{\zeta}{c}A^{i}\right). \label{eqmo-em3} \end{align} The right-hand sides of Eqs.~(\ref{eqmo-em3}) are exactly the partial derivatives $\partial H/\partial q^{i}$, $\partial H/\partial t$, and $\partial H/\partial p_{i}$ of the Hamiltonian~(\ref{h-em}) --- and hence its canonical equations, which was to be shown.
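A minimal numerical sketch may serve to illustrate this equivalence. The setting below is an assumption made purely for illustration --- one spatial dimension, units $m=c=\zeta=1$, and a static uniform field described by $\phi(q)=-E_{0}q$, $\bm{A}=0$ --- and is not taken from the text. Integrating the extended equations~(\ref{eqmo-em2}) in $s$, the condition~(\ref{constraint-em}) remains satisfied and the momentum follows the conventional law $\mathrm{d} p/\mathrm{d} t=E_{0}$ implied by Eqs.~(\ref{eqmo-em3}):
\begin{verbatim}
import numpy as np

E0, Q0, P0 = 0.3, 0.0, 0.5            # field strength and initial data (arbitrary)

def ext_rhs(y):
    """Extended canonical equations (eqmo-em2) with A = 0; parameter is s."""
    q, p, t, e = y
    phi = -E0*q
    return np.array([p,               # dq/ds = p/m
                     (e - phi)*E0,    # dp/ds = -(zeta/mc^2)(e - zeta phi) dphi/dq
                     e - phi,         # dt/ds = (e - zeta phi)/(m c^2)
                     0.0])            # de/ds = 0 for static potentials

def rk4(f, y, h):
    k1 = f(y); k2 = f(y + 0.5*h*k1); k3 = f(y + 0.5*h*k2); k4 = f(y + h*k3)
    return y + (h/6.0)*(k1 + 2*k2 + 2*k3 + k4)

# Initial energy fixed by the condition (constraint-em): e = sqrt(p^2 + 1) + phi.
y = np.array([Q0, P0, 0.0, np.sqrt(P0**2 + 1.0) - E0*Q0])
h, shell_err, p_err = 1.0e-3, 0.0, 0.0
for _ in range(5000):                 # integrate up to s = 5
    y = rk4(ext_rhs, y, h)
    q, p, t, e = y
    shell_err = max(shell_err, abs((e + E0*q)**2 - p**2 - 1.0))
    p_err = max(p_err, abs(p - (P0 + E0*t)))

print("max violation of (constraint-em):", shell_err)
print("max |p - (P0 + E0*t)|          :", p_err)
\end{verbatim}
Both error measures stay at the level of the integration error, in accordance with the equivalence established above.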
The physical meaning of the derivative $\mathrm{d} t/\mathrm{d} s$ is worked out by casting it into the equivalent form $$ \dfrac{t}{s}=\sqrt{1+\frac{{\left(\bm{p}-\frac{\zeta}{c} \bm{A}(\bm{q},t)\right)}^{2}}{m^{2}c^{2}}}= \sqrt{1+{\left(\frac{\bm{p}_{\mathrm{kin}}(s)}{mc}\right)}^{2}}=\gamma(s), $$ with $\bm{p}_{\mathrm{kin}}(s)$ the instantaneous \emph{kinetic} momentum of the particle. The dimensionless quantity $\mathrm{d} t/\mathrm{d} s$ thus represents the instantaneous value of the relativistic scale factor $\gamma$. \subsection{\label{sec:lortra}Lorentz transformation as an extended canonical transformation} We know that the Lorentz transformation provides the rules according to which a physical system is transformed from one inertial reference system into another. On the other hand, a mapping of one Hamiltonian into another is constituted by a canonical transformation. Consequently, the Lorentz transformation must be a particular canonical transformation. As the Lorentz transformation \emph{always} involves a transformation of the time scales \mbox{$t\mapsto T$}, this transformation can only be represented by an \emph{extended} canonical transformation. Its generating function $F_{2}$ is given by \begin{align} F_{2}(\bm{q},\bm{P}_{\mathrm{kin}},t,E_{\mathrm{kin}})=\bm{P}_{\mathrm{kin}}\bm{q}- \gamma\!\left[E_{\mathrm{kin}}t+\bm{\beta}\!\left( \bm{P}_{\mathrm{kin}}ct-\frac{E_{\mathrm{kin}}}{c}\,\bm{q}\right)\!\right]\!+ \frac{\gamma-1}{\beta^{2}}\big(\bm{\beta}\bm{P}_{\mathrm{kin}}\big) \big(\bm{\beta}\bm{q}\big)\nonumber\\ \label{gen-lorentz} \end{align} with $\bm{\beta}=\bm{v}/c$ the constant vector that delineates the scaled relative velocity $\bm{v}$ of both reference systems, and $\gamma$ the dimensionless relativistic scale factor $\gamma=1/\sqrt{1-\bm{\beta}^{2}}$. In order to also cover cases where the particle moves within an external potential, the index ``kin'' indicates that the momenta and the energy are to be understood as the ``kinetic'' quantities, as defined in Eq.~(\ref{p-em}). The generating function~(\ref{gen-lorentz}) generalizes the free-particle generator presented earlier in Ref.~\cite{struck}. The general transformation rules~(\ref{rules}) for extended generating functions of type $F_{2}$ yield for the particular generator from Eq.~(\ref{gen-lorentz}) \begin{align*} \bm{p}_{\mathrm{kin}}=\pfrac{F_{2}}{\bm{q}}&=\bm{P}_{\mathrm{kin}}+ \frac{\gamma\bm{\beta}}{c}\,E_{\mathrm{kin}}+\frac{\gamma-1}{\beta^{2}} \bm{\beta}\big(\bm{\beta}\bm{P}_{\mathrm{kin}}\big),\, e_{\mathrm{kin}}=-\pfrac{F_{2}}{t}=\gamma E_{\mathrm{kin}}+c\gamma\bm{\beta}\bm{P}_{\mathrm{kin}},\\ \bm{Q}=\pfrac{F_{2}}{\bm{P}_{\mathrm{kin}}}&=\bm{q}- \gamma\bm{\beta}\,ct+\frac{\gamma-1}{\beta^{2}} \bm{\beta}\big(\bm{\beta}\bm{q}\big),\qquad\qquad\;\;\: \,T=-\pfrac{F_{2}}{E_{\mathrm{kin}}}=\gamma t- \frac{\gamma}{c}\,\bm{\beta}\bm{q}. \end{align*} In matrix form, the transformation rules for the space-time coordinates, $\bm{Q}$ and $T$, are \begin{equation}\label{lorentz-rules1} \begin{pmatrix} \bm{Q}\\ cT \end{pmatrix} = \begin{pmatrix} 1+\left(\frac{\gamma-1}{\beta^{2}} \bm{\beta}\right)\bm{\beta} & \quad & -\gamma\bm{\beta}\;\\ -\gamma\bm{\beta} & \quad & \gamma \end{pmatrix} \begin{pmatrix} \bm{q}\\ ct \end{pmatrix}.
\end{equation} The corresponding linear relation for the kinetic momentum vector $\bm{p}_{\mathrm{kin}}$ and the kinetic energy $e_{\mathrm{kin}}$ is \begin{equation}\label{lorentz-rules2} \begin{pmatrix} \bm{p}_{\mathrm{kin}}\\ e_{\mathrm{kin}}/c \end{pmatrix} = \begin{pmatrix} 1+\left(\frac{\gamma-1}{\beta^{2}} \bm{\beta}\right)\bm{\beta} & \quad & \gamma\bm{\beta}\;\\ \gamma\bm{\beta} & \quad & \gamma \end{pmatrix} \begin{pmatrix} \bm{P}_{\mathrm{kin}}\\ E_{\mathrm{kin}}/c \end{pmatrix}. \end{equation} If we replace the kinetic momenta with the canonical momenta according to Eq.~\!(\ref{p-em}), it is not astonishing to find that the external potentials obey the same transformation rule as the momenta, $$ \begin{pmatrix} \bm{A}\\ \phi \end{pmatrix} = \begin{pmatrix} 1+\left(\frac{\gamma-1}{\beta^{2}} \bm{\beta}\right)\bm{\beta} & \quad & \gamma\bm{\beta}\;\\ \gamma\bm{\beta} & \quad & \gamma \end{pmatrix} \begin{pmatrix} \bm{A}^{\prime}\\ \phi^{\prime} \end{pmatrix}. $$ We easily convince ourselves that the transformation~(\ref{lorentz-rules1}) preserves the condition (\ref{constraint-lag}) that equally applies for a particle in an external potential. Correspondingly, the transformation (\ref{lorentz-rules2}) preserves the condition (\ref{constraint-em}). As a consequence, we have established the important result that the extended Hamiltonian $H_{\mathrm{e}}$ from Eq.~(\ref{h1-em2}) is also preserved under Lorentz transformations $$ H^{\prime}_{\mathrm{e}}(\bm{P},\bm{Q},T,E)=H_{\mathrm{e}}(\bm{p},\bm{q},t,e). $$ This is in agreement with the general canonical transformation rule for extended Hamiltonians from Eq.~(\ref{F1}).
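This invariance is easily verified numerically. The following short sketch --- an illustration added here, with an arbitrarily chosen boost vector $\bm{\beta}$ --- confirms that the matrix of Eq.~(\ref{lorentz-rules1}) satisfies $\Lambda^{\mathsf{T}}G\Lambda=G$ for $G=\mathrm{diag}(1,1,1,-1)$ and hence preserves the quadratic form $\bm{q}^{2}-c^{2}t^{2}$; the matrix of Eq.~(\ref{lorentz-rules2}), which differs only in the sign of its off-diagonal blocks, passes the same test and therefore preserves the condition~(\ref{constraint-em}):
\begin{verbatim}
import numpy as np

beta = np.array([0.3, -0.2, 0.5])     # arbitrary boost velocity v/c with |beta| < 1
b2 = beta @ beta
gamma = 1.0/np.sqrt(1.0 - b2)

Lam = np.zeros((4, 4))                # block matrix of Eq. (lorentz-rules1)
Lam[:3, :3] = np.eye(3) + (gamma - 1.0)/b2*np.outer(beta, beta)
Lam[:3, 3] = -gamma*beta
Lam[3, :3] = -gamma*beta
Lam[3, 3] = gamma

G = np.diag([1.0, 1.0, 1.0, -1.0])    # quadratic form q^2 - (ct)^2
print(np.abs(Lam.T @ G @ Lam - G).max())   # ~1e-15: the form is preserved
\end{verbatim}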
According to the subsequent rule for the conventional Hamiltonians, $H$ and $H^{\prime}$, from Eq.~(\ref{canham1}), and $\partial T/\partial t=\gamma$, we find \begin{equation}\label{canham2} \big(H^{\prime}-E_{\mathrm{kin}}\big)\gamma=H-e_{\mathrm{kin}}. \end{equation} In conjunction with the energy transformation rule from Eq.~(\ref{lorentz-rules2}), $e_{\mathrm{kin}}=\gamma E_{\mathrm{kin}}+\bm{\beta}\gamma\bm{P}_{\mathrm{kin}} c$, we get from Eq.~(\ref{canham2}) the transformation rule for a Hamiltonian $H$ under Lorentz transformations $$ H=\gamma\big(H^{\prime}+\bm{\beta} c\bm{P}_{\mathrm{kin}}\big). $$ As expected, the Hamiltonians, $H$ and $H^{\prime}$, transform in the same way as their respective values, $e_{\mathrm{kin}}$ and $E_{\mathrm{kin}}$. \subsection{\label{ex:gen-noether} Infinitesimal canonical transformations, generalized Noether theorem} A general infinitesimal extended transformation is generated by \begin{equation}\label{gen-infini} F_{2}(q^{\nu},P_{\nu})=\sum_{\alpha=0}^{n}q^{\alpha}P_{\alpha}+ \delta\epsilon\,I(q^{\nu},p_{\nu}). \end{equation} In this generating function, $\delta\epsilon\in\mathbb{R}$ denotes an infinitesimal parameter, whereas the differentiable function $I(q^{\nu},p_{\nu})$ quantifies the deviation of the actual infinitesimal transformation from the \emph{identity}. We first derive the coordinate transformation rules for the particular generating function~(\ref{gen-infini}) according to the general rules~(\ref{F2}), \begin{align} p_{\mu}&=\pfrac{F_{2}}{q^{\mu}}=P_{\mu}+ \delta\epsilon\,\pfrac{I}{q^{\mu}},\nonumber\\ Q^{\mu}&=\pfrac{F_{2}}{P_{\mu}}=q^{\mu}\,+ \delta\epsilon\,\pfrac{I}{P_{\mu}},\label{genrules-infini}\\ H_{\mathrm{e}}^{\prime}&=H_{\mathrm{e}}\nonumber. \end{align} To first order in $\delta\epsilon$, the variations $\delta p_{\mu}$, $\delta q^{\mu}$, and $\delta H_{\mathrm{e}}$ are obtained from the transformation rules~(\ref{genrules-infini}) as \begin{eqnarray} \delta p_{\mu}&\equiv& P_{\mu}-p_{\mu}\,\,=-\delta\epsilon\,\pfrac{I}{q^{\mu}},\nonumber\\ \delta q^{\mu}&\equiv& Q^{\mu}-q^{\mu}\;=\hphantom{-}\delta\epsilon\,\pfrac{I}{p_{\mu}}, \label{rules-infini}\\ \delta H_{\mathrm{e}}&\equiv&H_{\mathrm{e}}^{\prime}-H_{\mathrm{e}}=0.\nonumber \end{eqnarray} Obviously, any function $I(q^{\nu},p_{\nu})$ is \emph{invariant} under the infinitesimal transformation it defines, $$ \delta I=\sum_{\alpha=0}^{n}\left( \pfrac{I}{q^{\alpha}}\,\delta q^{\alpha}+ \pfrac{I}{p_{\alpha}}\,\delta p_{\alpha}\right)= \delta\epsilon\sum_{\alpha=0}^{n}\left( \pfrac{I}{q^{\alpha}}\,\pfrac{I}{p_{\alpha}}- \pfrac{I}{p_{\alpha}}\,\pfrac{I}{q^{\alpha}}\right)\equiv0. $$ This is \emph{not} necessarily true for the extended Hamiltonian $H_{\mathrm{e}}$. The condition $H_{\mathrm{e}}=0$ from Eq.~(\ref{hamid}) enters into the extended canonical transformation theory in the sense that we must \emph{explicitly verify} that $H_{\mathrm{e}}^{\prime}=H_{\mathrm{e}}$ actually holds under the transformation rules of the canonical variables that are defined by the generating function. Only then does the physical motion of the transformed system remain confined to the phase-space surface $H_{\mathrm{e}}^{\prime}=0$, as required for the system to be \emph{physical}. In the case of the \emph{infinitesimal} transformation~(\ref{rules-infini}), the transformation rule for the extended Hamiltonian $H_{\mathrm{e}}$ is satisfied exactly if $\delta H_{\mathrm{e}}=0$ under the infinitesimal variations of the canonical variables.
For the transformation rules~(\ref{rules-infini}), the variation of $H_{\mathrm{e}}$ due to the variations $\delta q^{\nu}$ and $\delta p_{\nu}$ of the canonical variables is given by \begin{align*} \delta H_{\mathrm{e}}&=\sum_{\alpha=0}^{n}\left( \pfrac{H_{\mathrm{e}}}{q^{\alpha}}\,\delta q^{\alpha}+ \pfrac{H_{\mathrm{e}}}{p_{\alpha}}\,\delta p_{\alpha}\right)\\ &=\delta\epsilon\sum_{\alpha=0}^{n}\left( \pfrac{H_{\mathrm{e}}}{q^{\alpha}}\pfrac{I}{p_{\alpha}}- \pfrac{H_{\mathrm{e}}}{p_{\alpha}}\pfrac{I}{q^{\alpha}}\right)\\ &=\delta\epsilon\,{\left[H_{\mathrm{e}},I\right]}_{\text{ext}}, \end{align*} with the last expression defining the extended Poisson bracket. Thus, the canonical transformation rule $\delta H_{\mathrm{e}}=0$ from Eqs.~(\ref{rules-infini}) is actually fulfilled if and only if the characteristic function $I(q^{\nu},p_{\nu})$ in~(\ref{gen-infini}) satisfies \begin{equation}\label{noetherinvariant} \sum_{\alpha=0}^{n}\left(\pfrac{I}{q^{\alpha}}\pfrac{H_{\mathrm{e}}}{p_{\alpha}}- \pfrac{I}{p_{\alpha}}\pfrac{H_{\mathrm{e}}}{q^{\alpha}}\right)= {\left[I,H_{\mathrm{e}}\right]}_{\text{ext}}=0. \end{equation} Along the system trajectory, the canonical equations~(\ref{caneq-def}) apply. As a consequence, the partial derivatives of $H_{\mathrm{e}}$ in~(\ref{noetherinvariant}) may be replaced accordingly to yield \begin{equation}\label{invar-g} \sum_{\alpha=0}^{n}\left(\pfrac{I}{q^{\alpha}}\dfrac{q^{\alpha}}{s}+ \pfrac{I}{p_{\alpha}}\dfrac{p_{\alpha}}{s}\right)=\dfrac{I}{s}=0. \end{equation} Thus, $I(q^{\nu},p_{\nu})$ must ``commute'' with the extended Hamiltonian $H_{\mathrm{e}}$, hence must be \emph{invariant} along the system's phase-space trajectory in order for the transformation~(\ref{gen-infini}) to comply with the requirement $\delta H_{\mathrm{e}}=0$ for an extended canonical transformation. Then and only then does the generating function~(\ref{gen-infini}) define an extended \emph{canonical} transformation and thus ensure that the action functional~(\ref{canbed2a}) is preserved. The correlation~(\ref{invar-g}) of a system invariant $I$ to a transformation that preserves the action functional --- hence to a \emph{canonical} transformation --- establishes the most general form of Noether's theorem in the realm of the extended Hamilton-Lagrange formulation of point mechanics, \begin{equation}\label{gen-noether} {\left[I,H_{\mathrm{e}}\right]}_{\text{ext}}=0\quad\Longleftrightarrow\quad \dfrac{I}{s}=0\quad\Longleftrightarrow\quad\delta H_{\mathrm{e}}=0. \end{equation} We may rewrite the condition~(\ref{noetherinvariant}) in terms of a conventional Hamiltonian $H$ if we distinguish the space coordinates $q^{i},\;i=1,\ldots,n$ from the time coordinate $t$. With the replacements $q^{0}=ct, p_{0}=-e/c$, $e$ denoting the instantaneous \emph{value} of the conventional Hamiltonian $H$, and $$ \pfrac{H_{\mathrm{e}}}{t}=\pfrac{H}{t}\dfrac{t}{s},\qquad \pfrac{H_{\mathrm{e}}}{e}=-\dfrac{t}{s},\qquad \pfrac{H_{\mathrm{e}}}{q^{i}}=\pfrac{H}{q^{i}}\dfrac{t}{s},\qquad \pfrac{H_{\mathrm{e}}}{p_{i}}=\pfrac{H}{p_{i}}\dfrac{t}{s}, $$ according to the correlation~(\ref{H1-def}) of extended and conventional Hamiltonians, we find for $I=I(\bm{p},\bm{q},t,e)$ \begin{equation}\label{noetherinvariant3} \pfrac{I}{t}+\pfrac{I}{e}\pfrac{H}{t}+ \sum_{i=1}^{n}\left(\pfrac{I}{q^{i}}\pfrac{H}{p_{i}}- \pfrac{I}{p_{i}}\pfrac{H}{q^{i}}\right)=0.
\end{equation} Due to the conventional canonical equations $$ \pfrac{H}{t}=\dfrac{e}{t},\qquad \pfrac{H}{p_{i}}=\dfrac{q^{i}}{t},\qquad \pfrac{H}{q^{i}}=-\dfrac{p_{i}}{t}, $$ Eq.~(\ref{noetherinvariant3}) is equivalent to \begin{equation}\label{noetherinvariant4} \dfrac{I}{t}=0. \end{equation} In this notation, the symmetry transformation rules~(\ref{rules-infini}) pertaining to the invariant~(\ref{noetherinvariant4}) assume the equivalent form \begin{equation}\label{rules-infini2} \delta p_{i}=-\delta\epsilon\,\pfrac{I}{q^{i}},\qquad \delta q^{i}=\delta\epsilon\,\pfrac{I}{p_{i}},\qquad \delta e=\delta\epsilon\,\pfrac{I}{t},\qquad \delta t=-\delta\epsilon\,\pfrac{I}{e}. \end{equation} We can always eliminate or induce an $e$-dependence of $I$ by inserting the conventional Hamiltonian according to $e=H$. A representation $I=I(\bm{p},\bm{q},t)$ of the invariant $I$ does \emph{not} depend on $e$, which means that $\delta t=0$. Then, the resulting symmetry transformation does not involve a transformation of time. In contrast, if $I=I(\bm{p},\bm{q},t,e)$, then the invariant defines a symmetry transformation that includes a transformation of time, $\delta t\not=0$. Equivalent representations $I=I(\bm{p},\bm{q},t,e)$ and $I=I(\bm{p},\bm{q},t)$ of the invariant $I$ reflect the same underlying system symmetry, yet depicted at different instants of time $t$.
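As a simple illustration of these rules --- an example added here for clarity --- consider a system whose conventional Hamiltonian does not depend on time explicitly. Then $I=e$ satisfies Eq.~(\ref{noetherinvariant4}) because of $\mathrm{d} e/\mathrm{d} t=\partial H/\partial t=0$, and the rules~(\ref{rules-infini2}) yield $$ \delta q^{i}=0,\qquad\delta p_{i}=0,\qquad\delta e=0,\qquad\delta t=-\delta\epsilon, $$ hence a pure shift of the time origin. Inserting $e=H(\bm{q},\bm{p})$ gives the equivalent representation $I=H(\bm{q},\bm{p})$, for which $$ \delta q^{i}=\delta\epsilon\,\pfrac{H}{p_{i}}=\dfrac{q^{i}}{t}\,\delta\epsilon,\qquad \delta p_{i}=-\delta\epsilon\,\pfrac{H}{q^{i}}=\dfrac{p_{i}}{t}\,\delta\epsilon,\qquad \delta t=0, $$ i.e.\ a shift of the system along its own trajectory at fixed $t$. Both representations describe the same symmetry --- invariance under time translations --- depicted at different instants of time, exactly in the sense stated above.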
Summarizing, the set of extended canonical transformations covers \emph{all} transformations that leave the action functional in the generalized form of Eq.~(\ref{canbed2}) invariant. As each canonical transformation can be defined in terms of an infinitesimal generating function $F_{2}$ from Eq.~(\ref{gen-infini}), the characteristic function $I(\bm{p},\bm{q},t,e)$ that is contained in $F_{2}$ then constitutes the corresponding constant of motion. Conversely, \emph{each} invariant $I$ of a dynamical system can be inserted into the generating function $F_{2}$ of the infinitesimal canonical transformation. The subsequent canonical transformation rules then define the corresponding infinitesimal symmetry transformation of the respective dynamical system. With the extended canonical transformation approach, we thus encounter a \emph{generalization} of Noether's theorem in the realm of Hamiltonian point dynamics. \subsubsection{\label{ex:Ham-Symm} Example: Symmetry generated by the extended Hamiltonian $H_{\mathrm{e}}$} A trivial yet important example of an invariant $I$ is furnished by the extended Hamiltonian $H_{\mathrm{e}}$ itself $$ \delta H_{\mathrm{e}}=\delta\epsilon\,{\left[H_{\mathrm{e}},H_{\mathrm{e}}\right]}_{\text{ext}}=0, \qquad\dfrac{H_{\mathrm{e}}}{s}=0. $$ The infinitesimal transformation rules~(\ref{rules-infini}) thus define a \emph{canonical} transformation. With $\delta\epsilon=\delta s$, their explicit form is $$ \delta p_{\mu}=-\delta\epsilon\,\pfrac{H_{\mathrm{e}}}{q^{\mu}}= \dfrac{p_{\mu}}{s}\,\delta s,\qquad \delta q^{\mu}=\delta\epsilon\,\pfrac{H_{\mathrm{e}}}{p_{\mu}}= \dfrac{q^{\mu}}{s}\,\delta s. $$ This is obviously the infinitesimal transformation that shifts the extended set of canonical coordinates one step $\delta s$ along the system's extended phase-space trajectory, which always resides on the surface $H_{\mathrm{e}}(q^{\nu},p_{\nu})\stackrel{\not\equiv}{=}0$. Thus, the symmetry transformation corresponding to the constant value of $H_{\mathrm{e}}$ is that the system's symplectic structure is maintained along its evolution parameter, $s$. \subsubsection{\label{ex:tdho} Example: Symmetry of the time-dependent harmonic oscillator at $\delta t=0$} The time-dependent harmonic oscillator is a simple one-degree-of-freedom example of a non-autonomous dynamical system, i.e., a system whose Hamiltonian depends explicitly on the independent variable, $t$, \begin{equation}\label{tdham} H(q,p,t)={\textstyle\frac{1}{2}} p^{2}+{\textstyle\frac{1}{2}}\omega^{2}(t)\,q^{2}. \end{equation} Herein, $\omega(t)$ denotes the system's time-dependent circular frequency. The value $e$ of the Hamiltonian $H$ is thus not a conserved quantity. The canonical equations and the equation of motion immediately follow as $$ \dot{q}=\pfrac{H}{p}=p,\quad\dot{p}=-\pfrac{H}{q}=-\omega^{2}(t)\,q, \qquad\ddot{q}+\omega^{2}(t)\,q=0. $$ A conserved quantity $I$ for this system is constituted by the quadratic form \begin{equation}\label{tdinv} I=\beta_{e}(t)\,p^{2}+2\alpha_{e}(t)\,pq+\gamma_{e}(t)\,q^{2}, \end{equation} provided that the time functions $\beta_{e}(t)$, $\alpha_{e}(t)$, and $\gamma_{e}(t)$ satisfy the equations \begin{equation}\label{tdinvcond} {\textstyle\frac{1}{2}}\beta_{e}\ddot{\beta}_{e}-{\textstyle\frac{1}{4}}{\dot{\beta}_{e}}^{2}+ \omega^{2}(t)\,\beta_{e}^{2}=1,\qquad\dot{\beta}_{e}=-2\alpha_{e}, \qquad\beta_{e}\gamma_{e}-\alpha_{e}^{2}=1. 
\end{equation} We easily prove the invariance of $I$ directly by calculating its total time derivative and inserting the canonical equations and the conditions~(\ref{tdinvcond}).
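A direct numerical check is equally straightforward. In the sketch below --- an illustration added here, with an arbitrarily chosen frequency $\omega(t)$ and initial data --- the conditions~(\ref{tdinvcond}) are propagated in the equivalent first-order form $\dot{\beta}_{e}=-2\alpha_{e}$, $\dot{\alpha}_{e}=\omega^{2}\beta_{e}-\gamma_{e}$, $\dot{\gamma}_{e}=2\alpha_{e}\omega^{2}$, which preserves $\beta_{e}\gamma_{e}-\alpha_{e}^{2}=1$ if it holds initially; the value of $I$ then stays constant to within the integration error:
\begin{verbatim}
import numpy as np

omega = lambda t: 1.0 + 0.3*np.sin(0.7*t)      # arbitrary time-dependent frequency

def rhs(t, y):
    q, p, b, a, g = y                          # q, p and beta_e, alpha_e, gamma_e
    w2 = omega(t)**2
    return np.array([p, -w2*q, -2.0*a, w2*b - g, 2.0*a*w2])

def rk4(f, t, y, h):
    k1 = f(t, y); k2 = f(t + h/2, y + h/2*k1)
    k3 = f(t + h/2, y + h/2*k2); k4 = f(t + h, y + h*k3)
    return y + h/6.0*(k1 + 2*k2 + 2*k3 + k4)

y = np.array([1.0, 0.0, 1.0, 0.0, 1.0])        # beta*gamma - alpha^2 = 1 initially
I0 = y[2]*y[1]**2 + 2*y[3]*y[1]*y[0] + y[4]*y[0]**2
t, h, drift = 0.0, 1.0e-3, 0.0
for _ in range(20000):                         # integrate up to t = 20
    y = rk4(rhs, t, y, h); t += h
    q, p, b, a, g = y
    drift = max(drift, abs(b*p**2 + 2*a*p*q + g*q**2 - I0))

print("max |I(t) - I(0)| =", drift)            # stays at the integration-error level
\end{verbatim}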
Geometrically, the quadratic form~(\ref{tdinv}) represents an ellipse centered at the origin of the $(q,p)$-phase space, with the actual coordinates $q,p$ lying on its boundary; the ellipse varies its shape in time but retains its area $\pi I$. Thus, the invariant $I$ represents, up to the factor $\pi$, the conserved area of an ellipse with time-dependent parameters $\beta_{e}(t)$, $\alpha_{e}(t)$, and $\gamma_{e}(t)$ that passes through $(q(t),p(t))$.
The symmetry transformation corresponding to the invariant~(\ref{tdinv}) follows from Eqs.~(\ref{rules-infini2}) $$ \delta p=-\delta\epsilon\pfrac{I}{q}= \delta\sigma\left(\gamma_{e}q+\alpha_{e}p\right),\quad \delta q=\delta\epsilon\pfrac{I}{p}= -\delta\sigma\left(\alpha_{e}q+\beta_{e}p\right),\quad\! \delta t=-\delta\epsilon\pfrac{I}{e}\!=\!0, $$ introducing the abbreviation $-2\delta\epsilon\equiv\delta\sigma$. In matrix notation, this infinitesimal canonical transformation of coordinate $q$ and momentum $p$ reads \begin{equation}\label{tdho-infini} \begin{pmatrix}Q\\ P\end{pmatrix}= \left[\Eins+\mathbb{A}_{\delta\sigma}\right] \begin{pmatrix}q\\ p\end{pmatrix},\qquad \mathbb{A}_{\delta\sigma}=\delta\sigma\begin{pmatrix}-\alpha_{e}&-\beta_{e}\\ \hphantom{-}\gamma_{e}&\hphantom{-}\alpha_{e}\end{pmatrix}, \end{equation} with $\Eins$ denoting the $2\times 2$ unit matrix. As the coefficients of $\mathbb{A}_{\delta\sigma}$ do not depend on the canonical variables $q,p$, we may directly set up the pertaining \emph{finite} transformation. Equation~(\ref{tdho-infini}) may be regarded as a Taylor expansion that could by truncated after the linear term because of very small $\delta\sigma$. The finite transformation for arbitrary $\sigma\in\mathbb{R}$ is then given by the exponential of $\mathbb{A}_{\sigma}$, hence $$ \begin{pmatrix} Q\\ P \end{pmatrix} =\mathbb{M} \begin{pmatrix} \,q\\ \,p \end{pmatrix}, \qquad\mathbb{M}=\exp{(\mathbb{A}_{\sigma})}. $$ The general scheme for deriving the matrix exponential $\exp{(\mathbb{A})}$ for a $2\times 2$ matrix $\mathbb{A}=(a_{ij}),\;i,j=1,2$ is expressed in terms of the expression $D$, $$ D=\sqrt{{\textstyle\frac{1}{4}}{\left(a_{11}-a_{22}\right)}^{2}+a_{12}\,a_{21}} $$ as \begin{align} \mathbb{M}=\exp{\left({\textstyle\frac{1}{2}}(a_{11}+a_{22})\right)} \begin{pmatrix} \cosh D+{\textstyle\frac{1}{2}}(a_{11}-a_{22})D^{-1} \sinh D&a_{12}D^{-1}\sinh D\\[
1ex] \hspace*{-22mm}a_{21}D^{-1}\sinh D&\hspace*{-22mm} \cosh D-{\textstyle\frac{1}{2}}(a_{11}-a_{22})D^{-1}\sinh D \end{pmatrix}.\nonumber\\ \label{general-M} \end{align} For the particular matrix $\mathbb{A}_{\sigma}$ from Eq.~(\ref{tdho-infini}), we find $a_{11}+a_{22}=0$ and $D=i\sigma$. Due to the purely imaginary $D$, the hyperbolic sine and cosine functions in the matrix exponential are thus converted into trigonometric sines and cosines, which finally yields \begin{equation}\label{tdho-fini} \begin{pmatrix} Q\\[
1ex]P \end{pmatrix} = \begin{pmatrix} \cos\sigma-\alpha_{e}\sin\sigma& -\beta_{e}\sin\sigma\\[
1ex] \gamma_{e}\sin\sigma&\cos\sigma+ \alpha_{e}\sin\sigma \end{pmatrix} \begin{pmatrix} q\\[
1ex]p \end{pmatrix}. \end{equation} Note that $(Q,P)$ and $(q,p)$ as well as the ellipse parameters $\alpha_{e}$, $\beta_{e}$, and $\gamma_{e}$ refer to the same instant of time, as the actual symmetry transformation is associated with $\delta t=0$. The inverse transformation is then obtained as $$ \begin{pmatrix}q\\[
1ex]p \end{pmatrix} = \begin{pmatrix}\cos\sigma+\alpha_{e}\sin\sigma& \beta_{e}\sin\sigma\\[
1ex] -\gamma_{e}\sin\sigma&\cos\sigma- \alpha_{e}\sin\sigma \end{pmatrix} \begin{pmatrix} Q\\[
1ex]P \end{pmatrix}. $$ Inserting $q$ and $p$ as functions of $Q$ and $P$ into the invariant~(\ref{tdinv}), we find that the representation of $I$ retains its form in the transformed variables $$ I=\beta_{e}(t)\,P^{2}+2\alpha_{e}(t)\,PQ+\gamma_{e}(t)\,Q^{2}. $$ Thus, $(Q,P)$ and $(q,p)$ both lie on the same ellipse, but shifted with respect to each other on the ellipse's perimeter. The geometric meaning of the one-parameter symmetry transformation $\mathbb{M}$ from Eq.~(\ref{tdho-fini}) that is associated with the invariant $I$ from Eq.~(\ref{tdinv}) is thus to map any point on this ellipse into another point on the \emph{same} ellipse. The free parameter $\sigma$ of the transformation group then specifies the particular destination point $(Q,P)$ with respect to the source point, $(q,p)$. This can be seen from the parametric representation of the ellipse~(\ref{tdinv}) \begin{equation}\label{ellipara} q=\sqrt{\frac{I}{\gamma_{e}}}\left(\cos\phi-\alpha_{e}\sin\phi \right),\qquad p=\sqrt{I\gamma_{e}}\sin\phi. \end{equation} Letting $\phi$ run along the interval $0\leq\phi\leq 2\pi$, we perform one turn on the ellipse's perimeter. The symmetry transformation~(\ref{tdho-fini}) then acts on $(q,p)$ according to \begin{align*} \begin{pmatrix} Q\\[
1ex]P \end{pmatrix} &= \begin{pmatrix} \cos\sigma-\alpha_{e}\sin\sigma&-\beta_{e}\sin\sigma\\[
1ex] \gamma_{e}\sin\sigma&\cos\sigma+\alpha_{e}\sin\sigma \end{pmatrix} \begin{pmatrix} \sqrt{I/\gamma_{e}}\left(\cos\phi-\alpha_{e}\sin\phi\right)\\[
1ex] \sqrt{I\gamma_{e}}\sin\phi \end{pmatrix}\\ &= \begin{pmatrix} \sqrt{I/\gamma_{e}}\left(\cos(\phi+\sigma)- \alpha_{e}\sin(\phi+\sigma)\right)\\[
1ex] \sqrt{I\gamma_{e}}\sin(\phi+\sigma) \end{pmatrix}. \end{align*} Thus, $(Q,P)$ is shifted counterclockwise with respect to $(q,p)$ on the ellipse's perimeter exactly by the phase angle $\sigma$ in the parameter representation~(\ref{ellipara}). This accounts for $\sigma$ being referred to as a ``phase advance''. \begin{figure}
\caption{Visualization of the finite symmetry transformation~(\ref{tdho-fini}) pertaining to the invariant $I$ from Eq.~(\ref{tdinv}) of the time-dependent harmonic oscillator.}
\label{tdho-fig}
\end{figure} The integral over the closed curve $C$ comprising the shaded region $A_{\sigma}$ of Fig.~\ref{tdho-fig} measures the enclosed area \begin{align*} A_{\sigma}&={\textstyle\frac{1}{2}}\oint_{C}q\mathrm{d} p-p\mathrm{d} q ={\textstyle\frac{1}{2}}\int_{\phi}^{\phi+\sigma}\left( q\dfrac{p}{\phi}-p\dfrac{q}{\phi}\right)\mathrm{d}\phi\\ &={\textstyle\frac{1}{2}} I\int_{\phi}^{\phi+\sigma}\left( \cos^{2}\phi-\alpha_{e}\sin\phi\cos\phi+ \sin^{2}\phi+\alpha_{e}\sin\phi\cos\phi\right)\mathrm{d}\phi\\ &={\textstyle\frac{1}{2}} I\sigma. \end{align*} Note that the phase advance $\sigma$ does \emph{not} depict the polar angle from vectors $(q,p)$ to $(Q,P)$. Instead, $\sigma$ is proportional to the shaded area $A_{\sigma}$. \subsubsection{\label{ex:tdho2} Example: Symmetry of the time-dependent harmonic oscillator at $\delta t\not=0$} Replacing the quadratic $p$-dependence in the invariant~(\ref{tdinv}) of the time-dependent harmonic oscillator~(\ref{tdham}) according to $$ e={\textstyle\frac{1}{2}} p^{2}+{\textstyle\frac{1}{2}}\omega^{2}(t)\,q^{2}, $$ we arrive at an \emph{equivalent} representation of the invariant that now depends on the energy variable, $e$ \begin{equation}\label{tdinv2} I=2\beta_{e}(t)\,e-\dot{\beta}_{e}(t)\,pq+{\textstyle\frac{1}{2}}\ddot{\beta}_{e}(t)\,q^{2}. \end{equation} Of course, the function $\beta_{e}(t)$ must again satisfy the second-order equation from Eq.~(\ref{tdinvcond}) in order for $I$ to actually establish an invariant. The particular infinitesimal rules for the corresponding symmetry transformation from Eq.~(\ref{rules-infini2}) are \begin{equation}\label{tdho-infini2}
\left.\begin{pmatrix}Q\\ P\end{pmatrix}\right|_{T}= \left[\Eins+\mathbb{A}_{\delta\epsilon}\right]
\left.\begin{pmatrix}q\\ p\end{pmatrix}\right|_{t},\quad \mathbb{A}_{\delta\epsilon}=\delta\epsilon\begin{pmatrix}-\dot{\beta}_{e}&0\\ -\ddot{\beta}_{e}&\dot{\beta}_{e}\end{pmatrix},\qquad T=t-2\delta\epsilon\,\beta_{e}(t). \end{equation} As the coefficients of $\mathbb{A}_{\delta\epsilon}$ do not explicitly depend on $\epsilon$, we can set up the matrix exponential $\mathbb{M}=\exp(\mathbb{A}_{\delta\epsilon})$ according to the general scheme~(\ref{general-M}) in order to finally derive the \emph{finite} transformation matrix that corresponds to the infinitesimal mapping~(\ref{tdho-infini2}), $$ \mathbb{M}=\begin{pmatrix}\exp(-\delta\epsilon\,\dot{\beta}_{e})&0\\[
1ex] -(\delta\epsilon\,\ddot{\beta}_{e}/\delta\epsilon\,\dot{\beta}_{e}) \sinh(\delta\epsilon\,\dot{\beta}_{e})&\;\; \exp(\delta\epsilon\,\dot{\beta}_{e})\end{pmatrix},\qquad \delta\epsilon=-\frac{\delta t}{2\beta_{e}}. $$ Here, $\delta\epsilon$ still denotes an infinitesimal $\epsilon$ interval. The actual one-parameter symmetry transformation~(\ref{tdho-infini2}) is associated with a transformation of time $t\mapsto T$. As the coefficients of $\mathbb{A}_{\delta\epsilon}$ are time-derivatives of the ellipse function $\beta_{e}(t)$ and thus generally depend on time $t$, we must substitute $\delta\epsilon=-\delta t/2\beta_{e}(t)$ and \emph{integrate} all terms in $\mathbb{M}$ that are proportional to $\delta t$ over the finite interval $T-t$ that corresponds to a \emph{finite} interval $\Delta\epsilon=\epsilon_{1}-\epsilon_{0}$, \begin{align*} m_{11}=\exp(-\delta\epsilon\,\dot{\beta}_{e})\quad\to\quad m_{11}&=\exp\left(\int_{t}^{T} \frac{\dot{\beta}_{e}(\tau)}{2\beta_{e}(\tau)}\mathrm{d}\tau\right)\\ &=\exp\left(\int_{t}^{T}\dfrac{}{\tau}\ln\sqrt{\beta_{e}}\, \mathrm{d}\tau\right)=\sqrt{\frac{\beta_{e}(T)}{\beta_{e}(t)}}=\frac{1}{m_{22}}. \end{align*} With the identity $\sinh\ln\,x=\left(x-x^{-1}\right)/2$, the matrix element $m_{21}$ follows as \begin{align*} m_{21}=-\frac{\delta t\,\ddot{\beta}_{e}}{\delta t\,\dot{\beta}_{e}} \sinh(\delta\epsilon\,\dot{\beta}_{e})\quad\to\quad m_{21}&=-\frac{\dot{\beta}_{e}(T)-\dot{\beta}_{e}(t)} {\beta_{e}(T)-\beta_{e}(t)}\sinh\ln\sqrt{\frac{\beta_{e}(t)}{\beta_{e}(T)}}\\ &=\frac{\dot{\beta}_{e}(T)-\dot{\beta}_{e}(t)} {2\sqrt{\beta_{e}(T)\beta_{e}(t)}}. \end{align*} The \emph{finite} symmetry mapping $(q,p)_{t}\mapsto(Q,P)_{T}$ is thus finally obtained as \begin{align}
\left.\begin{pmatrix}Q\\[
1ex] P\end{pmatrix}\right|_{T}&= \frac{1}{\sqrt{\beta_{e}(T)\beta_{e}(t)}} \begin{pmatrix}\beta_{e}(T)&0\\[
1ex] \alpha_{e}(t)-\alpha_{e}(T)&\;\; \beta_{e}(t)\end{pmatrix}
\left.\begin{pmatrix}q\\[
1ex] p\end{pmatrix}\right|_{t} \label{tdho-fini2}\\ \Delta\sigma&=-2\Delta\epsilon=\int_{t}^{T} \frac{\mathrm{d}\tau}{\beta_{e}(\tau)}.\nonumber \end{align} The symmetry mapping~(\ref{tdho-fini2}) is referred to as the \emph{Floquet transformation}. \subsubsection{\label{ex:rot-kepler} Example: Rotational symmetry of the Kepler system} The classical Kepler system is a two-body problem with the mutual interaction following an inverse square force law. In Cartesian coordinates, where no distinction between covariant and contravariant coordinates is needed (all indexes lowered), this system is described by a Hamiltonian \begin{equation}\label{kepham} H(\bm{q},\bm{p},t)={\textstyle\frac{1}{2}} p_{1}^{2}+{\textstyle\frac{1}{2}} p_{2}^{2}+V(\bm{q},t) \end{equation} containing the interaction potential \begin{equation}\label{keppot} V(\bm{q},t)=-\frac{\mu(t)}{\sqrt{q_{1}^{2}+q_{2}^{2}}}=-\frac{\mu(t)}{r}, \end{equation} with $\mu(t)=G\big[m_{1}(t)+m_{2}(t)\big]$ the possibly time-dependent gravitational coupling strength that is induced by possibly time-dependent masses $m_{1}$ and $m_{2}$ of the interacting bodies. As the potential~(\ref{keppot}) spatially depends on $r$ only, it is obviously invariant with respect to rotations in configuration space $(q_{1},q_{2})$, \begin{equation}\label{Kepler-rotation} \begin{pmatrix} Q_{1}\\ Q_{2} \end{pmatrix} = \begin{pmatrix} \hphantom{-}\cos\epsilon&\sin\epsilon\\ -\sin\epsilon&\cos\epsilon \end{pmatrix} \begin{pmatrix} q_{1}\\ q_{2} \end{pmatrix} \end{equation} where $\epsilon$ denotes the counterclockwise rotation angle. This symmetry is not affected if we choose $\epsilon\equiv\delta\epsilon$ to be very small. We may then restrict ourselves in Eq.~(\ref{Kepler-rotation}) to first-order terms in $\delta\epsilon$ and insert the replacements $\cos\delta\epsilon\approx 1$, $\sin\delta\epsilon\approx\delta\epsilon$. This yields the infinitesimal transformation rules \begin{equation}\label{Kepler-rotation-infini} Q_{1}=q_{1}+\delta\epsilon\,q_{2},\qquad Q_{2}=q_{2}-\delta\epsilon\,q_{1}. \end{equation} This transformation can be regarded as being defined by a generating function of the form of Eq.~(\ref{gen-infini}), namely \begin{equation}\label{Kepler-rotation-gen} F_{2}(q_{1},q_{2},P_{1},P_{2},t,E)=-tE+q_{1}P_{1}+q_{2}P_{2}+ \delta\epsilon\,(p_{1}q_{2}-p_{2}q_{1}). \end{equation} The transformation rules for the canonical momenta, energy, and time emerge from the generating function~(\ref{Kepler-rotation-gen}) by applying the general canonical rules from Eqs.~(\ref{rules}), $$ p_{1}=\pfrac{F_{2}}{q_{1}}=P_{1}-\delta\epsilon\,p_{2},\quad\! p_{2}=\pfrac{F_{2}}{q_{2}}=P_{2}+\delta\epsilon\,p_{1},\quad\! T=-\pfrac{F_{2}}{E}=t,\quad\!e=-\pfrac{F_{2}}{t}=E. $$ The rules from Eqs.~(\ref{Kepler-rotation-infini}) are indeed reproduced since, to first order in $\delta\epsilon$, we find the configuration space transformation rules $$ Q_{1}=\pfrac{F_{2}}{P_{1}}=q_{1}+\delta\epsilon\,q_{2},\qquad Q_{2}=\pfrac{F_{2}}{P_{2}}=q_{2}-\delta\epsilon\,q_{1}. $$ According to Eq.~(\ref{noetherinvariant4}), the expression proportional to $\delta\epsilon$ in Eq.~(\ref{Kepler-rotation-gen}) must be a constant of motion in order for the infinitesimal generating function $F_{2}$ to define a \emph{canonical} transformation, hence to comply with the finite symmetry transformation~(\ref{Kepler-rotation}) that preserves the physical system.
Thus $$ I=p_{1}q_{2}-p_{2}q_{1},\qquad\dfrac{I}{t}=0, $$ which establishes the well-known conservation law of angular momentum in --- possibly time-dependent --- central-force fields. As the transformation rules~(\ref{Kepler-rotation}) only depend on the parameter $\epsilon$ and \emph{not} on the canonical variables, the transformation is referred to as a \emph{global} symmetry transformation.
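As a consistency check, this conservation law holds for an arbitrary, possibly time-dependent central potential $V(r,t)$: with $\dot{q}_{i}=p_{i}$ and $\dot{p}_{i}=-\partial V/\partial q_{i}$ we find $$ \dfrac{I}{t}=\dot{p}_{1}q_{2}+p_{1}\dot{q}_{2}-\dot{p}_{2}q_{1}-p_{2}\dot{q}_{1}= q_{1}\pfrac{V}{q_{2}}-q_{2}\pfrac{V}{q_{1}}=0, $$ since $\partial V/\partial q_{i}\propto q_{i}$ for a potential that depends on the coordinates only through $r$.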
As with any generating function of a canonical transformation, we can derive from Eq.~(\ref{Kepler-rotation-gen}) the rules of both the configuration space coordinates and the respective canonical momenta. In matrix form, the infinitesimal rules for the momenta can be rewritten as $$ \begin{pmatrix}P_{1}\\ P_{2}\end{pmatrix}= \left[\Eins+\mathbb{A}_{\delta\epsilon}\right] \begin{pmatrix}p_{1}\\ p_{2}\end{pmatrix},\qquad \mathbb{A}_{\delta\epsilon}=\delta\epsilon \begin{pmatrix}\hphantom{-}0&1\\-1&0\end{pmatrix}, $$ with $\Eins$ denoting the $2\times 2$ unit matrix. The corresponding \emph{finite} transformation is then $$ \begin{pmatrix}P_{1}\\ P_{2}\end{pmatrix}= \exp{(\mathbb{A}_{\epsilon})} \begin{pmatrix}p_{1}\\ p_{2}\end{pmatrix},\qquad \exp{(\mathbb{A}_{\epsilon})}=\begin{pmatrix}\hphantom{-}\cos\epsilon& \sin\epsilon\\ -\sin\epsilon&\cos\epsilon\end{pmatrix}, $$ which coincides with the rules of the configuration space variables from Eq.~(\ref{Kepler-rotation}). This reflects the fact that the Hamiltonian~(\ref{kepham}) is equally invariant under rotations in momentum space. \subsubsection{\label{ex:RL-kepler} Example: Symmetry associated with the Runge-Lenz invariant of the time-independent Kepler system} As Noether's theorem associates the constants of motion of a dynamical system with system symmetries, it can be applied in both directions. In Sect.~\ref{ex:rot-kepler}, the constant of motion was determined for a system symmetry that could be deduced directly from the form of the Hamiltonian. Conversely, if a constant of motion is known to exist, then we can derive the related system symmetry. For the time-independent Kepler system~(\ref{kepham}), (\ref{keppot}) with $\mu=\text{const.}$, one component of the Runge-Lenz vector is given by \begin{equation}\label{Runge-Lenz1} I_{1}=-q_{1}p_{2}^{2}+q_{2}p_{1}p_{2}+\mu\frac{q_{1}}{\sqrt{q_{1}^{2}+q_{2}^{2}}}. \end{equation} We easily convince ourselves that $I_{1}$ commutes with the Hamiltonian $H$ from~(\ref{kepham}) with (\ref{keppot}). Along the system's phase-space trajectory, we then have $$ [I_{1},H]=0\quad\Longleftrightarrow\quad\dfrac{I_{1}}{t}=0. $$ Using the invariant $I_{1}$ as the characteristic function $I$ in the generating function~(\ref{gen-infini}), the subsequent transformation rules~(\ref{rules-infini}) then define the corresponding infinitesimal symmetry transformation that preserves the action functional~(\ref{canbed2a}). The transformation so obtained is not particularly enlightening. Yet, a better representation of the symmetry that is associated with the Runge-Lenz invariant can be derived in the extended Hamiltonian formalism. In this context, we may express the invariant $I_{1}$ equivalently as a function of $\bm{q}$, $\bm{p}$, and $e$, with $e$ being defined as the \emph{value} of the Hamiltonian $H$ from Eq.~(\ref{kepham}), $$ e={\textstyle\frac{1}{2}} p_{1}^{2}+{\textstyle\frac{1}{2}} p_{2}^{2}- \frac{\mu}{\sqrt{q_{1}^{2}+q_{2}^{2}}}. $$ The $\mu$-dependent term of the invariant $I_{1}$ can thus be replaced by an $e$-term according to $$ \mu\frac{q_{1}}{\sqrt{q_{1}^{2}+q_{2}^{2}}}= {\textstyle\frac{1}{2}} q_{1}p_{1}^{2}+{\textstyle\frac{1}{2}} q_{1}p_{2}^{2}-q_{1}e, $$ which yields an equivalent extended phase-space representation of the Runge-Lenz invariant $I_{1}=I_{1}(\bm{q},\bm{p},e)$ as a \emph{symmetric} quadratic form in the canonical momenta, \begin{equation}\label{Runge-Lenz1a} I_{1}={\textstyle\frac{1}{2}} q_{1}p_{1}^{2}+q_{2}p_{1}p_{2}- {\textstyle\frac{1}{2}} q_{1}p_{2}^{2}-q_{1}e.
\end{equation} As expected, the invariant $I_{1}$ commutes with the Hamiltonian of the time-indepen\-dent Kepler system ($\mu=\text{const.}$) $$ {\left[I_{1},H\right]}_{\text{ext}}=p_{1}(H-e)=0, $$ hence establishes an invariant along the system's phase-space trajectory as $H=e$ by definition. Due to the $e$-dependence of the invariant $I_{1}$, the corresponding symmetry transformation now includes a transformation of time according to rules~(\ref{rules-infini2}). Explicitly, the infinitesimal transformation rules are obtained as \begin{align} \delta p_{1}&=-\delta\epsilon\pfrac{I_{1}}{q_{1}}=\delta\epsilon\left( {\textstyle\frac{1}{2}} p_{2}^{2}-{\textstyle\frac{1}{2}} p_{1}^{2}+e\right)\qquad \delta p_{2}=-\delta\epsilon\pfrac{I_{1}}{q_{2}}= -\delta\epsilon\,p_{1}p_{2}\nonumber\\ \delta q_{1}&=\hphantom{-}\delta\epsilon\pfrac{I_{1}}{p_{1}}= \delta\epsilon\left(q_{1}p_{1}+q_{2}p_{2}\right)\qquad\quad\:\, \delta q_{2}=\hphantom{-}\delta\epsilon\pfrac{I_{1}}{p_{2}}= \delta\epsilon\left(p_{1}q_{2}-p_{2}q_{1}\right)\nonumber\\ \delta e&=\hphantom{-}\delta\epsilon\pfrac{I_{1}}{t}=0 \qquad\qquad\qquad\qquad\qquad\;\: \delta t=-\delta\epsilon\pfrac{I_{1}}{e}=\delta\epsilon\,q_{1}. \label{rl-rules-infini} \end{align} The transformation rules for the new configuration space $Q_{1},Q_{2}$ variables depend \emph{linearly} on the original ones, $q_{1},q_{2}$. We may thus rewrite the infinitesimal configuration space transformation $Q_{i}=q_{i}+\delta q_{i},\; i=1,2$ in matrix form as \begin{equation}\label{RL-rotation-infini} {\left.\begin{pmatrix}Q_{1}\\ Q_{2}\end{pmatrix}
\right|}_{t+q_{1}\delta\epsilon}= \left[\Eins+\mathbb{A}_{\delta\epsilon}\right]
{\left.\begin{pmatrix}q_{1}\\ q_{2}\end{pmatrix}\right|}_{t},\qquad \mathbb{A}_{\delta\epsilon}(p_{1},p_{2})= \delta\epsilon{\left.\begin{pmatrix}\hphantom{-}p_{1}&p_{2}\\
-p_{2}&p_{1}\end{pmatrix}\right|}_{t}, \end{equation} with $\Eins$ denoting the $2\times 2$ unit matrix. The form of the $2\times 2$ matrix $\mathbb{A}_{\delta\epsilon}=(a_{ij})$ from Eq.~(\ref{RL-rotation-infini}) with $a_{11}=a_{22}$ and $a_{12}=-a_{21}$ results from the particular representation~(\ref{Runge-Lenz1a}) of the Runge-Lenz invariant $I_{1}$. With $\delta\epsilon$ still an \emph{infinitesimal} variation of the parameter $\epsilon$, the transformation~(\ref{RL-rotation-infini}) can be expressed equivalently in terms of the matrix exponential $\exp(\mathbb{A}_{\delta\epsilon})$. The \emph{infinitesimal} symmetry transformation then takes on the exceptionally simple form \begin{equation}\label{RL-rotation} {\left.\begin{pmatrix}Q_{1}\\ Q_{2}\end{pmatrix}
\right|}_{t+q_{1}\delta\epsilon}= \exp{(p_{1}\,\delta\epsilon)} \begin{pmatrix}\hphantom{-}\cos(p_{2}\,\delta\epsilon)& \sin(p_{2}\,\delta\epsilon)\\ -\sin(p_{2}\,\delta\epsilon)&\cos(p_{2}\,\delta\epsilon) \end{pmatrix}
{\left.\begin{pmatrix}q_{1}\\ q_{2}\end{pmatrix}\right|}_{t}. \end{equation} The system symmetry that corresponds to the Runge-Lenz invariant from Eq.~(\ref{Runge-Lenz1a}) is thus given by a \emph{local scaled rotation} of the configuration space variables. In contrast to the example of Sect.~\ref{ex:rot-kepler}, the transformation~(\ref{RL-rotation}) depends on the actual coordinates $q_{1},p_{1},p_{2}$. It is, therefore, referred to as a \emph{local} symmetry transformation.
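For completeness, the extended Poisson bracket quoted below Eq.~(\ref{Runge-Lenz1a}) results from a direct calculation: with $\partial V/\partial q_{i}=\mu q_{i}/r^{3}$ one obtains $$ {\left[I_{1},H\right]}_{\text{ext}}= \pfrac{I_{1}}{q_{1}}\,p_{1}+\pfrac{I_{1}}{q_{2}}\,p_{2} -\pfrac{I_{1}}{p_{1}}\,\frac{\mu q_{1}}{r^{3}} -\pfrac{I_{1}}{p_{2}}\,\frac{\mu q_{2}}{r^{3}} =p_{1}\left({\textstyle\frac{1}{2}} p_{1}^{2}+{\textstyle\frac{1}{2}} p_{2}^{2} -\frac{\mu}{r}-e\right)=p_{1}\left(H-e\right), $$ while the $(t,e)$-contribution to the extended bracket vanishes because neither $I_{1}$ nor $H$ depends explicitly on $t$.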
Owing to the fact that the Hamiltonian~(\ref{kepham}) with potential~(\ref{keppot}) is invariant under swappings $q_{1}\leftrightarrow q_{2}$ \emph{and} $p_{1}\leftrightarrow p_{2}$, the second component $I_{2}$ of the invariant Runge-Lenz vector is obtained by flipping all indexes of $I_{1}$, $$ I_{2}={\textstyle\frac{1}{2}} q_{2}p_{2}^{2}+q_{1}p_{1}p_{2}- {\textstyle\frac{1}{2}} q_{2}p_{1}^{2}-q_{2}e. $$ The infinitesimal transformation of the configuration space coordinates follows as $$ {\left.\begin{pmatrix}Q_{1}\\ Q_{2}\end{pmatrix}
\right|}_{t+q_{2}\delta\epsilon}= \left[\Eins+\mathbb{B}_{\delta\epsilon}\right]
{\left.\begin{pmatrix}q_{1}\\ q_{2}\end{pmatrix}\right|}_{t},\qquad \mathbb{B}_{\delta\epsilon}(p_{1},p_{2})=\delta\epsilon{\left. \begin{pmatrix}p_{2}&-p_{1}\\
p_{1}&\hphantom{-}p_{2}\end{pmatrix}\right|}_{t}. $$ Again, the transformation can be expressed equivalently in terms of the matrix exponential $\exp(\mathbb{B}_{\delta\epsilon})$, where $\delta\epsilon$ denotes an infinitesimal shift of the symmetry transformation's parameter $$ {\left.\begin{pmatrix}Q_{1}\\ Q_{2}\end{pmatrix}
\right|}_{t+q_{2}\delta\epsilon}= \exp{(p_{2}\,\delta\epsilon)} \begin{pmatrix}\cos(p_{1}\,\delta\epsilon)& -\sin(p_{1}\,\delta\epsilon)\\ \sin(p_{1}\,\delta\epsilon)&\hphantom{-}\cos(p_{1}\,\delta\epsilon) \end{pmatrix}
{\left.\begin{pmatrix}q_{1}\\ q_{2}\end{pmatrix}\right|}_{t}. $$ \subsection{\label{ex:conv-noether} Extended point transformations, conventional Noether theorem} The derivation of Noether's theorem in the context of the Lagrangian formalism is restricted to \emph{extended point transformations}, hence canonical transformations for which the new space-time coordinates only depend on the old space-time coordinates and \emph{not} on the set of old momentum coordinates. Yet, the extended canonical transformation approach makes it possible to describe more general symmetry mappings, as the rules~(\ref{rules-infini}) are \emph{not} restricted to point transformations. Consequently, equation~(\ref{gen-noether}) in conjunction with the infinitesimal canonical mapping~(\ref{rules-infini}) represents a \emph{generalized formulation} of Noether's theorem. In order to derive the \emph{conventional} Noether theorem in the Hamiltonian description, we restrict ourselves to the case of an infinitesimal point transformation, which is defined by a generating function~(\ref{gen-infini}) with characteristic function $I$ that is \emph{linear} in the momenta $p_{\nu}$ \begin{equation}\label{gen-infini2} I(q^{\nu},p_{\nu})=-\sum_{\alpha=0}^{n}\eta^{\alpha}(q^{\nu})\,p_{\alpha}+ f(q^{\nu}), \end{equation} hence with functions $\eta^{\mu}=\eta^{\mu}(q^{\nu}),\,f=f(q^{\nu})$ that depend on the space-time coordinates only. With this $I$, the transformation rules for space and time coordinates follow as ($\mu,\nu=0,\ldots,n,\,i=1,\ldots,n$) $$ \delta q^{\mu}=-\epsilon\eta^{\mu}(q^{\nu})\quad \Leftrightarrow\quad\delta q^{i}=-\epsilon\eta^{i}(\bm{q},t),\quad \delta t=-\epsilon\xi(\bm{q},t),\;\xi=\eta^{0}/c. $$ The condition~(\ref{noetherinvariant}) for this transformation to preserve the extended Hamiltonian $H_{\mathrm{e}}$, hence for the function~(\ref{gen-infini2}) to represent a conserved quantity along the system's evolution is \begin{equation}\label{noetherinvariant2} \sum_{\beta=0}^{n}\left[\eta^{\beta}\pfrac{H_{\mathrm{e}}}{q^{\beta}}+ \pfrac{H_{\mathrm{e}}}{p_{\beta}}\left(\pfrac{f}{q^{\beta}}-\sum_{\alpha=0}^{n} p_{\alpha}\pfrac{\eta^{\alpha}}{q^{\beta}}\right)\right]=0. \end{equation} Distinguishing the canonical time and energy variables from the canonical space and momentum coordinates, the Noether function~(\ref{gen-infini2}) has the equivalent representation \begin{equation}\label{gen-infini2a} I(\bm{q},\bm{p},e,t)=\xi(\bm{q},t)\,e-\sum_{i=1}^{n}\eta^{i}(\bm{q},t)\,p_{i}+f(\bm{q},t), \end{equation} which represents a conserved quantity if Eq.~(\ref{noetherinvariant3}) is satisfied. In the last step, the energy variable $e$ may be replaced by the conventional Hamiltonian $H$. We thus find the conventional Noether function in the Hamiltonian formulation \begin{equation}\label{gen-infini2b} I(\bm{q},\bm{p},t)=\xi(\bm{q},t)\,H-\sum_{i=1}^{n}\eta^{i}(\bm{q},t)\,p_{i}+f(\bm{q},t), \end{equation} which is an invariant provided that Eq.~(\ref{noetherinvariant3}) holds with $\partial I/\partial e=0$. Due to their different dependence on the canonical variables, the Noether functions~(\ref{gen-infini2a}) and~(\ref{gen-infini2b}) yield different transformation rules from Eqs.~(\ref{rules-infini2}).
However, these rules are compatible, since \begin{equation}\label{gen-infini2c} \delta\bar{p}_{i}=\delta p_{i}-\dfrac{p_{i}}{t}\delta t,\quad \delta\bar{q}^{i}=\delta q^{i}-\dfrac{q^{i}}{t}\delta t,\quad \delta\bar{e}=\delta e-\dfrac{H}{t}\delta t,\quad \delta\bar{t}=0, \end{equation} where the barred quantities denote the variations derived from Eq.~(\ref{gen-infini2b}) and the unbarred ones those derived from Eq.~(\ref{gen-infini2a}). As the function $I(\bm{q},\bm{p},t)$ does \emph{not depend} on the energy variable, $e$, the subsequent transformation rules are associated with an \emph{identical} time transformation, $T=t,\;\delta\bar{t}=0$. In contrast, $I(\bm{q},\bm{p},e,t)$ from Eq.~(\ref{gen-infini2a}) accounts for an infinitesimal time shift transformation $T=t-\epsilon\xi,\;\delta t=-\epsilon\xi$. The connection of both equally valid sets of transformation rules is given by Eqs.~(\ref{gen-infini2c}).
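For instance, for the space coordinates the rules~(\ref{rules-infini2}) yield with the Noether function~(\ref{gen-infini2b}) and the canonical equation $\dot{q}^{i}=\pfrac{H}{p_{i}}$ $$ \delta\bar{q}^{i}=\epsilon\,\pfrac{I}{p_{i}}= \epsilon\left(\xi\,\pfrac{H}{p_{i}}-\eta^{i}\right)= -\epsilon\,\eta^{i}+\epsilon\,\xi\,\dot{q}^{i}= \delta q^{i}-\dfrac{q^{i}}{t}\,\delta t, $$ in agreement with Eqs.~(\ref{gen-infini2c}); the relations for $\delta\bar{p}_{i}$ and $\delta\bar{e}$ follow in the same manner.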
With these formulations, we are led to interpreting the conventional Noether theorem in the reverse direction. If we can find functions $f(\bm{q},t)$, $\xi(\bm{q},t)$, and $\eta^{i}(\bm{q},t)$ such that for a given conventional Hamiltonian $H$ the total time derivative of $I$ vanishes, $\mathrm{d} I/\mathrm{d} t=0$, then the invariant $I$ in the forms of Eqs.~(\ref{gen-infini2a}) or~(\ref{gen-infini2b}) defines a corresponding extended canonical point transformation according to Eqs.~(\ref{rules-infini2}). \subsection{\label{sec:cq}Canonical quantization in the extended Hamiltonian formalism} The transition from classical dynamics to the corresponding quantum description is most easily made in terms of the ``canonical quantization prescription.'' The quantum description of a dynamical system whose classical limit is represented by a Hamiltonian $H$ is accordingly obtained by reinterpreting our dynamical variables $q^{\mu}(s)$ and $p_{\mu}(s)$ as \emph{operators} $\hat{q}^{\mu}(s)$ and $\hat{p}_{\mu}(s)$ that act on a \emph{wave function} $\psi$. In the configuration space representation, the quantum mechanical operators are \begin{equation}\label{oper-def} \hat{q}^{\mu}=q^{\mu}\Eins,\qquad \hat{p}_{\mu}=-i\hbar\pfrac{}{q^{\mu}}, \end{equation} with $\Eins$ denoting the identity operator. In the \emph{extended formalism}, an additional pair of operators is given for the index $\mu=0$. Because of $q^{0}\equiv ct$, $p_{0}\equiv-e/c$, these operators are expressed equivalently as $$ \hat{t}=t\Eins,\qquad\hat{e}=i\hbar\pfrac{}{t}. $$ With $e_{\mathrm{e}}$ denoting the \emph{value} of the extended Hamiltonian $H_{\mathrm{e}}$, we encountered in Sect.~\ref{sec:caneq} another additional pair of canonically conjugate variables, $(e_{\mathrm{e}},s)$. The corresponding operators are $$ \hat{s}=s\Eins,\qquad\hat{e}_{\mathrm{e}}=i\hbar\pfrac{}{s}. $$ For explicitly $s$-dependent extended Hamiltonians $H_{\mathrm{e}}$ and wave functions $\psi(q^{\mu},s)$, the classical equation $H_{\mathrm{e}}=e_{\mathrm{e}}$ from Eq.~(\ref{p0-def1}) thus translates into the equation of motion for the wave function $\psi(q^{\mu},s)$, $$ \hat{H}_{\mathrm{e}}\,\psi=i\hbar\pfrac{\psi}{s}. $$ This equation was postulated earlier by Feynman.\cite{feynman50} The usual cases with no $s$-dependence of $H_{\mathrm{e}}$ and $\psi$ are then \emph{directly} obtained from the condition $H_{\mathrm{e}}=0$ for the classical extended Hamiltonian~(\ref{hamid}) \begin{equation}\label{eq:gen-schroedinger} \hat{H}_{\mathrm{e}}\,\psi(q^{\mu})=0. \end{equation} Equation~(\ref{eq:gen-schroedinger}) is the relativistic extension of the Schr\"odinger equation.
For the extended Hamiltonian of a point particle in an external electromagnetic field from Eq.~(\ref{h1-em}), we immediately find the Klein-Gordon equation, inserting Eqs.~(\ref{oper-def}) \begin{equation}\label{klein-gordon0} \left[\left(i\hbar\pfrac{}{q^{\alpha}}+\frac{\zeta}{c}A_{\alpha}\right) \left(i\hbar\pfrac{}{q_{\alpha}}+\frac{\zeta}{c}A^{\alpha}\right)+ m^{2}c^{2}\right]\psi(q^{\mu})=0. \end{equation} The non-relativistic limit is encountered by letting $s\to t$. The corresponding extended Hamiltonian $H_{\mathrm{e}}=H-e=0$ from~(\ref{H1-triv}) with $H(\bm{q},\bm{p},t)$ a conventional \emph{non-relativistic} Hamiltonian then yields the associated non-relativistic wave equation for $\psi(q^{\mu})\equiv\psi(\bm{q},t)$: $$ \hat{H}\psi=i\hbar\pfrac{\psi}{t}, $$ which is referred to as the Schr\"odinger equation. \subsection{\label{sec:kg}Path integral derivation of the Klein-Gordon equation for a relativistic point particle in an electromagnetic field} Apart from the important additional rest energy term $-{\textstyle\frac{1}{2}} mc^{2}$, the extended Lagrangian~(\ref{lag1-em2}) for a relativistic classical point particle in an external electromagnetic field agrees with the Lagrangian proposed by Feynman\cite{feynman48} on the basis of formal reasoning. We have seen that this Lagrangian $L_{\mathrm{e}}$ is actually \emph{not} a mere formal construction, but has the physical meaning of describing the \emph{same dynamics} as the corresponding conventional Lorentz-invariant Lagrangian from Eq.~(\ref{lagr-em}). As the extended Lagrangian~(\ref{lag1-em2}) is thus identified as \emph{physically significant}, it can be concluded that the path integral erected on this Lagrangian yields the correct quantum description of a relativistic point particle in an external electromagnetic field.
For an infinitesimal proper time step $\epsilon\equiv\Delta s$, the action $S_{\mathrm{e},\epsilon}$ for the extended Lagrangian~(\ref{lag1-em2}) reads, to first order in $\epsilon$, \begin{equation}\label{action1} \qquad S_{\mathrm{e},\epsilon}=\epsilon L_{\mathrm{e}}={\textstyle\frac{1}{2}} m\,\eta_{\alpha\beta} \frac{(q^{\alpha}_{b}-q^{\alpha}_{a}) (q^{\beta}_{b}-q^{\beta}_{a})}{\epsilon}+ \frac{\zeta}{c}(q^{\alpha}_{b}-q^{\alpha}_{a})\, A_{\alpha}(q^{\mu}_{c})-{\textstyle\frac{1}{2}} mc^{2}\epsilon. \end{equation} The potentials $A_{\alpha}$ are to be taken at the space-time location $q^{\mu}_{c}=(q^{\mu}_{b}+q^{\mu}_{a})/2$. We insert this particular action function into Eq.~(\ref{trans-infini}) and perform a transformation of the integration variables $q^{\mu}_{a}$, $$ q^{\mu}_{b}-q^{\mu}_{a}=\xi^{\mu}\quad\Rightarrow\quad \mathrm{d}^{4}q_{a}=\mathrm{d}^{4}\xi. $$ The integral~(\ref{trans-infini}) now has the equivalent representation \begin{equation}\label{trans-infini2} \psi(q^{\mu}_{b})=\frac{1}{M}\int\exp\left[ \frac{i}{\hbar}S_{\mathrm{e},\epsilon}\right]\psi(q^{\mu}_{b}-\xi^{\mu})\,\mathrm{d}^{4}\xi, \end{equation} while the action $S_{\mathrm{e},\epsilon}$ from Eq.~(\ref{action1}) takes on the form $$ S_{\mathrm{e},\epsilon}=\frac{m}{2}\eta_{\alpha\beta}\frac{\xi^{\alpha}\xi^{\beta}}{\epsilon}+ \frac{\zeta}{c}\xi^{\alpha}\!\left[A_{\alpha}(q^{\mu}_{b})\!-\!{\textstyle\frac{1}{2}} \xi^{\beta}\pfrac{A_{\alpha}(q^{\mu}_{b})}{q^{\beta}}\right]- \epsilon\frac{mc^{2}}{2}. $$ Here, we expressed the potentials $A_{\alpha}(q^{\mu}_{c})$ to first order in terms of their values at $q^{\mu}_{b}$. In the following, we omit the index ``$b$'' in the coordinate vector since, from this point of the derivation on, all $q^{\mu}$ refer to that particular space-time event.
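The only integrals needed in the following are the standard Gaussian (Fresnel) integrals for the individual degrees of freedom, which we quote here for reference, $$ \int_{-\infty}^{\infty}\left(1,\;\xi,\;\xi^{2}\right) \exp\left[i\left({\textstyle\frac{1}{2}} a\,\xi^{2}+k\,\xi\right)\right]\mathrm{d}\xi= \sqrt{\frac{2\pi i}{a}}\, \left(1,\;-\frac{k}{a},\;\frac{k^{2}}{a^{2}}+\frac{i}{a}\right) \exp\left(-\frac{ik^{2}}{2a}\right), $$ to be applied below, component by component, with $a$ and $k$ read off from the exponent of Eq.~(\ref{trans-infini2}).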
In order to match the quadratic terms in $S_{\mathrm{e},\epsilon}$, the wave function $\psi(q^{\mu}-\xi^{\mu})$ under the integral~(\ref{trans-infini2}) must be expanded up to second order in the $\xi^{\mu}$, $$ \psi(q^{\mu}-\xi^{\mu})=\psi(q^{\mu})-\xi^{\alpha} \pfrac{\psi(q^{\mu})}{q^{\alpha}}+{\textstyle\frac{1}{2}}\xi^{\alpha}\xi^{\beta} \pfrac{^{2}\psi(q^{\mu})}{q^{\alpha}\partial q^{\beta}}-\ldots $$ The rest energy term in $S_{\mathrm{e},\epsilon}$ depends only on $\epsilon$. It can, therefore, be taken as a factor in front of the integral and expanded up to first order in $\epsilon$. The total expression~(\ref{trans-infini2}) for the transition of the wave function $\psi$ thus follows as \begin{align} \psi=\frac{1}{M} \left(1-\epsilon\frac{imc^{2}}{2\hbar}\right) &\int_{-\infty}^{\infty}\exp\left\{\frac{i}{\hbar\epsilon}\left[ \frac{m}{2}\eta_{\alpha\beta}\xi^{\alpha}\xi^{\beta}+ \frac{\zeta\epsilon}{c}A_{\alpha}\xi^{\alpha}-\frac{\zeta\epsilon}{2c} \pfrac{A_{\alpha}}{q^{\beta}}\xi^{\alpha}\xi^{\beta}\right]\right\} \nonumber\\ &\times\left[\psi-\xi^{\alpha} \pfrac{\psi}{q^{\alpha}}+{\textstyle\frac{1}{2}}\xi^{\alpha}\xi^{\beta} \pfrac{^{2}\psi}{q^{\alpha}\partial q^{\beta}}\right]\mathrm{d}^{4}\xi. \label{trans-infini3} \end{align} Prior to actually calculating the Gaussian type integrals, we may simplify the integrand in~(\ref{trans-infini3}) by taking into account that the third term in the exponential function is of order of $\epsilon$ smaller than the first one. We may thus factor out this term and expand it up to first order in $\epsilon$ $$ \exp\left[-\frac{i\zeta\epsilon}{2\hbar c}\pfrac{A_{\alpha}}{q^{\beta}} \xi^{\alpha}\xi^{\beta}\right]=1-\frac{i\zeta\epsilon}{2\hbar c} \pfrac{A_{\alpha}}{q^{\beta}}\xi^{\alpha}\xi^{\beta}+\ldots $$ Omitting terms of higher order than quadratic in the $\xi^{\mu}$, the integral becomes \begin{align*} \psi=\frac{1}{M} \left(1-\epsilon\frac{imc^{2}}{2\hbar}\right) &\int_{-\infty}^{\infty}\exp\left\{\frac{i}{\hbar}\left[ \frac{m}{2\epsilon}\eta_{\alpha\beta}\xi^{\alpha}\xi^{\beta}+ \frac{\zeta}{c}A_{\alpha}\xi^{\alpha}\right]\right\}\\ &\!\!\times\left[\psi-\xi^{\alpha} \pfrac{\psi}{q^{\alpha}}+{\textstyle\frac{1}{2}}\xi^{\alpha}\xi^{\beta}\left( \pfrac{^{2}\psi}{q^{\alpha}\partial q^{\beta}}-\frac{i\zeta}{\hbar c} \pfrac{A_{\alpha}}{q^{\beta}}\,\psi\right)\right]\mathrm{d}^{4}\xi. \end{align*} The integral over the entire space-time can now be solved analytically to yield \begin{align*} \psi&=\frac{1}{M}{\left( \frac{2\pi\hbar\epsilon}{im}\right)}^{2} \left(1-\epsilon\frac{imc^{2}}{2\hbar}\right) \exp\left\{-\epsilon\frac{i\zeta^{2}}{2\hbar mc^{2}} A^{\alpha}A_{\alpha}\right\}\\ &\quad\times\left[\psi+\epsilon\frac{\zeta}{mc}A^{\alpha} \pfrac{\psi}{q^{\alpha}}+\frac{\epsilon}{2}\left( \pfrac{^{2}\psi}{q^{\alpha}\partial q^{\beta}} -\frac{i\zeta}{\hbar c}\pfrac{A_{\alpha}}{q^{\beta}}\psi\right) \left(\frac{\epsilon\zeta^{2}}{m^{2}c^{2}}A^{\alpha}A^{\beta}+ \frac{i\hbar}{m}\eta^{\alpha\beta}\right)\right]. 
\end{align*} We may omit the term quadratic in $\epsilon$ that is contained in the rightmost factor and finally expand the exponential function up to first order in $\epsilon$ \begin{align} \psi&=\frac{1}{M}{\left(\frac{2\pi\hbar\epsilon}{im}\right)}^{2} \left(1-\epsilon\frac{imc^{2}}{2\hbar}\right) \left(1-\epsilon\frac{i\zeta^{2}}{2\hbar mc^{2}} A^{\alpha}A_{\alpha}\right)\nonumber\\ &\quad\times\left[\psi+\epsilon\frac{\zeta}{mc}A^{\alpha} \pfrac{\psi}{q^{\alpha}}+\epsilon\frac{i\hbar}{2m}\left( \pfrac{^{2}\psi}{q^{\alpha}\partial q_{\alpha}} -\frac{i\zeta}{\hbar c}\pfrac{A^{\alpha}}{q^{\alpha}}\psi\right)\right]. \label{kg1} \end{align} The normalization factor $M$ is now obvious. As the equation must hold to zero order in $\epsilon$, we directly conclude that $M={\left(2\pi\hbar\epsilon/im\right)}^{2}$. This means, furthermore, that the sum over all terms proportional to $\epsilon$ must vanish. The five terms in~(\ref{kg1}) that are linear in $\epsilon$ thus establish the equation $$ \frac{m^{2}c^{2}}{\hbar^{2}}\psi= \pfrac{^{2}\psi}{q^{\alpha}\partial q_{\alpha}} -\frac{\zeta^{2}A^{\alpha}A_{\alpha}}{\hbar^{2}c^{2}} \psi+\frac{2\zeta A^{\alpha}}{i\hbar c} \pfrac{\psi}{q^{\alpha}}+\frac{\zeta}{i\hbar c} \pfrac{A^{\alpha}}{q^{\alpha}}\psi. $$ This equation has the equivalent product form \begin{equation}\label{klein-gordon} \left(\pfrac{}{q^{\alpha}}-\frac{i\zeta}{\hbar c}A_{\alpha}\right) \left(\pfrac{}{q_{\alpha}}-\frac{i\zeta}{\hbar c}A^{\alpha}\right) \psi={\left(\frac{mc}{\hbar}\right)}^{2}\psi, \end{equation} which constitutes exactly the Klein-Gordon equation for our metric $\eta_{\mu\nu}$. It coincides with the wave equation~(\ref{klein-gordon0}) that emerged from the canonical quantization formalism.
We remark that Feynman\cite{feynman50} followed the procedure developed here in the opposite direction. He started with the Klein-Gordon equation and deduced from analogies with the non-relativistic case a classical Lagrangian similar to that of Eq.~(\ref{lag1-em2}), but without its rest energy term $-{\textstyle\frac{1}{2}} mc^{2}$. The obtained Lagrangian was \emph{not} identified as \emph{physically significant}, i.e., as exactly the extended Lagrangian $L_{\mathrm{e}}$ that describes the corresponding classical system, but rated as ``purely formal.''\cite{feynman48} \subsection{\label{sec:prop}Space-time kernel for the free relativistic point particle} The hypersurface condition~(\ref{constraint-lag}) is to be disregarded in setting up the parameterized kernel~(\ref{kernel-para}), as virtual particles are to be included. The components of the extended free-particle Lagrangian~(\ref{lag1-fp}) can then be treated as \emph{independent}. The corresponding action functional $S$ from Eq.~(\ref{principle1}) thus splits into a sum of independent action functionals, \begin{equation}\label{actint-fp} S_{\mathrm{e}}[q^{\nu}(s)]={\textstyle\frac{1}{2}} m\int_{s_{a}}^{s_{b}}\left( \dfrac{q^{\alpha}}{s}\dfrac{q_{\alpha}}{s}-c^{2}\right)\mathrm{d} s= \sum_{\alpha}S[q^{\alpha}(s)]. \end{equation} Hence, the parameterized space-time kernel~(\ref{kernel-para}) separates into a product of path integrals. For the free particle, the individual path integrals can be solved analyti\-cally.\cite{feynman,kleinert} Expressed in terms of $s$ as the independent variable, the result for one degree of freedom $q^{k}$ is \begin{equation}\label{freekernel-1d} K_{s}(q^{k}_b,q^{k}_a)=\sqrt{\frac{m}{2\pi i\hbar(s_{b}-s_{a})}}\exp \left[\frac{im}{2\hbar}\frac{{(q^{k}_{b}-q^{k}_{a})}^{2}}{s_{b}-s_{a}}\right]. \end{equation} The total parameterized space-time kernel $K_{\sigma}(b,a)$ is then obtained for $S_{\mathrm{e}}$ from Eq.~(\ref{actint-fp}) as $$ K_{s}(b,a)=-\frac{m^{2}c}{4\pi^{2}\hbar^{2}{(s_{b}-s_{a})}^{2}} \exp\left\{\frac{im}{2\hbar}\left[\frac{(q^{\alpha}_{b}-q^{\alpha}_{a})(q_{\alpha,b}- q_{\alpha,a})}{s_{b}-s_{a}}-c^{2}(s_{b}-s_{a})\right]\right\}. $$ The term proportional to \mbox{$(s_{b}-s_{a})$} in the exponential function originates from the rest energy term $-{\textstyle\frac{1}{2}} mc^{2}$ in the extended Lagrangian~(\ref{lag1-fp}) and, correspondingly, in the action integral~(\ref{actint-fp}). The integration over the parameter variable $s$ is worked out by means of a Wick rotation. The parameter interval is then $\sigma=i(s_{b}-s_{a})$. With $\tau$ defined by $$ \tau^{2}=\frac{(q^{\alpha}_{b}-q^{\alpha}_{a})(q_{\alpha,b}-q_{\alpha,a})}{c^{2}}, $$ the parameterized space-time kernel $K_{\sigma}(b,a)$ takes on the equivalent form $$ K_{\sigma}(b,a)=\frac{m^{2}c}{4\pi^{2}\hbar^{2}}\,\sigma^{-2} \exp\left[-\frac{mc^{2}}{2\hbar}\left( \frac{\tau^{2}}{\sigma}+\sigma\right)\right]. $$ According to Eq.~(\ref{kernel-gen}), the space-time propagator $K(b,a)$ for a free relativistic wave packet is finally acquired by integrating $K_{\sigma}(b,a)$ over all parameter intervals $\sigma$ \begin{equation}\label{kernel0-fp} K(b,a)=\frac{m^{2}c}{4\pi^{2}\hbar^{2}}\int_{0}^{\infty} \sigma^{-2}\exp\left[-\frac{mc^{2}}{2\hbar}\left( \frac{\tau^{2}}{\sigma}+\sigma\right)\right]\mathrm{d}\sigma.
\end{equation} The integral is proportional to the integral representation of the Bessel function $K_1$ of the second kind and order one\cite{magnus}, which is also referred to as the MacDonald function, \begin{equation}\label{intrep} \int_{0}^{\infty}\sigma^{-2}\exp\left[ -\frac{M}{2}\left(\frac{\tau^{2}}{\sigma}+ \sigma\right)\right]\mathrm{d}\sigma=\frac{2}{\tau}K_{1}(M\tau),\qquad M=\frac{mc^2}{\hbar}. \end{equation} For our metric $\eta_{\mu\nu}=\mathrm{diag}(-1,1,1,1)$, a positive $\tau^{2}$ represents a \emph{space-like} connection of the events $a$ and $b$. The kernel $K(b,a)$ from Eq.~(\ref{kernel0-fp}) is then given by \begin{equation}\label{kernel-expl}
K(b,a)=\frac{m^{2}c^2}{2\pi^{2}\hbar^{2}}\,\frac{1}{|q_b-q_a|}K_{1}\left(\frac{mc}{\hbar}|q_b-q_a|\right),\qquad\tau^2>0. \end{equation} If $\tau^{2}$ is negative, one encounters a \emph{time-like} connection of the events $a$ and $b$. The kernel $K(b,a)$ is then expressed in terms of the Hankel function $H_1^{(1)}(x)=-\frac{2}{\pi}K_1(i x)$ as: \begin{equation}\label{kernel-expl-hankel}
K(b,a)=\frac{im^{2}c^2}{4\pi\hbar^{2}}\,\frac{1}{|q_b-q_a|}H_{1}^{(1)}\left(\frac{mc}{\hbar}|q_b-q_a|\right),\qquad\tau^2<0. \end{equation} We may convince ourselves by direct substitution that the kernels~(\ref{kernel-expl}) and~(\ref{kernel-expl-hankel}) satisfy the zero-potential case \mbox{($A_{\mu}=0$)} of the Klein-Gordon equation~(\ref{klein-gordon}): \begin{equation*} \ppfrac{}{q^{\alpha}}{q_{\alpha}}K(b,a)=\pm\frac{m^2c^2}{\hbar^2}\,K(b,a). \end{equation*} As a consequence, so does a free-particle wave function $\psi(\bm{q},t)$ if its space-time propagation is calculated according to Eq.~(\ref{wave-evol}).
In order to determine the non-relativistic limit $c\to\infty$ of Eq.~(\ref{kernel-expl}), we consider the asymptotic behavior of $\tau$ and the Bessel function $K_{1}$: \begin{align*} \tau=\sqrt{-{(t_{b}-t_{a})}^{2}+{(\bm{q}_{b}-\bm{q}_{a})}^{2}/c^{2}} &\quad\stackrel{c\to\infty}{=}\quad i(t_{b}-t_{a})\\ \frac{1}{\tau}K_1(M\tau)&\quad\stackrel{c\to\infty}{=}\quad\sqrt{\frac{\pi}{2M\tau^3}}\,\exp(-M\tau)\\ \exp\left(-\frac{mc^{2}}{\hbar}\tau\right) &\quad\stackrel{c\to\infty}{=}\quad\exp\left[\frac{im}{2\hbar} \frac{{(\bm{q}_{b}-\bm{q}_{a})}^{2}}{t_{b}-t_{a}}\right]. \end{align*} The non-relativistic limit of $K(b,a)$, namely the kernel for the three spatial degrees of freedom, thus becomes $$ K_{\bm{q}}(b,a)={\left[\frac{m}{2\pi i\hbar(t_{b}-t_{a})}\right]}^{3/2}\, \exp\left[\frac{im}{2\hbar}\frac{{(\bm{q}_{b}-\bm{q}_{a})}^{2}}{t_{b}-t_{a}}\right]. $$ This kernel generalizes the one-dimensional case (Eq.~{\ref{freekernel-1d}}) and satisfies again the Schr\"odinger equation\cite{feynman,kleinert}. \section{Conclusions} Starting from the space-time formulation of the action principle, we have demonstrated that the Lagrangian as well as the Hamiltonian description of classical dynamics can consistently be reformulated in order to be compatible with special relativity. In the emerging \emph{extended} version of the Hamilton-Lagrange formalism, the dynamics is described as a motion on a hypersurface within an \emph{extended} phase space. With the specific correlations of extended Lagrangian $L_{\mathrm{e}}$ and extended Hamiltonian $H_{\mathrm{e}}$ to their conventional counterparts $L$ and $H$ given in this paper, the extended formalism retains the \emph{form} of the long-established conventional Hamilton-Lagrange formalism. The extended Hamilton-Lagrange formalism thus provides an \emph{equivalent physical description} of dynamical systems that is particularly appropriate for special relativity.
The physical significance of the Lorentz invariant extended Hamiltonian $H_{\mathrm{e}}$ of a point particle in an external electromagnetic field was demonstrated by showing that the subsequent \emph{extended} set of canonical equations, in conjunction with the condition $H_{\mathrm{e}}=0$, is \emph{equivalent} to the set of canonical equations that follows from the well-known conventional Hamiltonian $H$ for this system. It was shown that the condition $H_{\mathrm{e}}=0$ is automatically satisfied on the system path that is defined by the solution of the canonical equations. For this reason, the hypersurface condition $H_{\mathrm{e}}=0$ actually does \emph{not} represent a constraint for the system. The corresponding non-homogeneous extended Lagrangian $L_{\mathrm{e}}$ was shown to be \emph{quadratic} in its velocity terms, hence similar in its \emph{form} with the conventional Lagrangian $L$ that describes the non-relativistic limit. This makes the extended formalism particularly suited for analytical approaches that depend on the Lagrangian to be quadratic in the velocities --- like Feynman's path integral formalism. Devising the ``quantum version'' of the action principle, one of Feynman's achievements was to derive --- by means of his path integral approach to quantum physics --- the Schr\"odinger equation as the quantum description of a physical system whose classical limit is described by the non-relativistic Lagrangian $L$ for a point particle in an external potential. This is generally regarded as the \emph{proof of principle} for the path integral formalism.
Similar to the extension of the conventional Hamilton-Lagrange formalism in the realm of classical physics, the general form of the relativistic extension of Feynman's path integral approach is obtained by consistently treating space and time variables on equal footing. We have shown that the hypersurface condition from the classical extended formalism appears in the context of the extended path integral formalism as an \emph{additional uncertainty relation}.
On the basis of the extended Lagrangian $L_{\mathrm{e}}$ of a classical relativistic point particle in an external electromagnetic field, we could derive the Klein-Gordon equation as the corresponding quantum description by means of the space-time version of the path integral formalism. Correspondingly, we can regard the emerging of the Klein-Gordon equation as the proof of principle of the \emph{relativistic generalization} of Feynman's path integral approach that is based on Lorentz invariant \emph{extended Lagrangians} $L_{\mathrm{e}}$ in conjunction with the additional \emph{uncertainty relation}. \section*{Acknowledgment} The author is indebted to Prof.~Dr.~Walter Greiner from the \emph{Frankfurt Institute of Advanced Studies} (FIAS) for his critical comments and encouragement.
\end{document}
\begin{document}
\title{A set of moves for Johansson representation of 3-manifolds. An outline.\footnote{This research has been partially supported by a predoctoral grant from the U.N.E.D. (1999).}} \author {Rub\'en Vigara \\Departamento de Matem\'aticas Fundamentales \\U.N.E.D., Spain\\[email protected]} \date{July 2004} \maketitle \begin{abstract} A Dehn sphere $\Sigma$ \cite{Papa} in a closed 3-manifold $M$ is a 2-sphere immersed in $M$ with only double curve and triple point singularities. The sphere $\Sigma$ fills $M$ \cite{Montesinos} if it defines a cell-decomposition of $M$. The inverse image in $S^{2}$ of the double curves of $\Sigma$ is the Johansson diagram of $\Sigma$ \cite{Johansson1} and if $\Sigma$ fills $M$ it is possible to reconstruct $M$ from the diagram. A Johansson representation of $M$ is the Johansson diagram of a filling Dehn sphere of $M$. In \cite{Montesinos} it is proved that every closed 3-manifold has a Johansson representation coming from a nulhomotopic filling Dehn sphere. In this paper a set of moves for Johansson representations of 3-manifolds is given. In a forthcoming paper \cite{RHomotopies} it is proved that this set of moves suffices for relating different Johansson representations of the same 3-manifold coming from nulhomotopic filling Dehn spheres. The proof of this result is outlined here.(Math. Subject Classification: 57N10, 57N35) \end{abstract}
\section{Introduction.\label{SECTION Introduction}}
Throughout the whole paper all 3-manifolds are assumed to be closed, that is, compact, connected and without boundary, and all surfaces are assumed to be compact and without boundary. A surface may have more than one connected component. We will denote a 3-manifold by $M$ and a surface by $S$.
Let $M$ be a 3-manifold.
A subset $\Sigma\subset M$ is a \textit{Dehn surface} in $M$ \cite{Papa} if there exists a surface $S$ and a transverse immersion $f:S\rightarrow M$ such that $\Sigma=f\left( S\right) $. In this situation we say that $f$ \textit{parametrizes} $\Sigma$. If $S$ is a 2-sphere then $\Sigma$ is a \textit{Dehn sphere}. For a Dehn surface $\Sigma\subset M$, its singularities are divided into \textit{double points }(Figure \ref{fig1a}), and \textit{triple points} (Figure \ref{fig1b})\textit{, }and they are arranged along \textit{double curves }(see section \ref{SECTION Preliminaries} below for definitions). A Dehn surface $\Sigma\subset M$ \textit{fills} $M$ \cite{Montesinos} if it defines a cell-decomposition of $M$ in which the 0-skeleton is the set of triple points of $\Sigma$; the 1-skeleton is the set of double and triple points of $\Sigma$; and the 2-skeleton is $\Sigma$ itself. Filling Dehn spheres of 3-manifolds are defined in \cite{Montesinos} following ideas of W. Haken (see \cite{Haken1}). In \cite{Montesinos} the following theorem is proved (see also \cite{Anewproof}):
\begin{theorem} [\cite{Montesinos}]\label{ThmMontesinos}Every closed orientable 3-manifold has a nulhomotopic filling Dehn sphere. \end{theorem}
A filling Dehn sphere is \textit{nulhomotopic} if one (and hence any) of its parametrizations is nulhomotopic, that is, homotopic to a constant map.
\begin{figure}\label{fig1a}
\label{fig1b}
\end{figure}
Let $\Sigma\subset M$ be a filling Dehn sphere and $f:S^{2}\rightarrow M$ a transverse immersion parametrizing $\Sigma$. In this case we say that $f$ is a \textit{filling immersion}. The inverse image by $f$ in $S^{2}$ of the set of double and triple points of $\Sigma$ is the \textit{singular set} of $f$. The singular set of $f$, together with the information of how its points are identified by $f$ in $M$, is the \textit{Johansson diagram} of $\Sigma$ in the notation of \cite{Montesinos}. As it is stated in \cite{Montesinos}, for a given diagram in $S^{2}$ it is possible to know if it is the Johansson diagram for a filling Dehn sphere $\Sigma$ in some 3-manifold $M$. If this is the case, it is possible also to reconstruct such $M$ from the diagram. Thus, Johansson diagrams are a suitable way for representing all closed, orientable 3-manifolds and it is interesting to further study them. For a 3-manifold $M$, we say that a Johansson diagram of a filling Dehn sphere of $M$ is a \textit{Johansson representation} of $M$ (see \cite{Montesinos}). In \cite{Montesinos} an algorithm is given for obtaining a Johansson representation of a closed orientable 3-manifold $M$ from any Heegaard diagram of $M$. A simpler algorithm is given in \cite{Anewproof}. In both papers, the Johansson representations obtained come from nulhomotopic filling Dehn spheres of $M$.
We will deal here with the problem of deciding how different Johansson representations of the same 3-manifold are related to each other. With this problem in mind, we study how different filling Dehn spheres of the same 3-manifold are related to each other. In the forthcoming paper \cite{RHomotopies}, the following Theorem is proved.
\begin{theorem} \label{MAINtheorem}Let $M$ be a closed 3-manifold. Let $f,g:S^{2}\rightarrow M$ be two nulhomotopic filling immersions. Then, there is a finite sequence of filling immersions $f=f_{0},f_{1},...,f_{n}=g$ such that for each $i=0,...,n-1$ the immersions $f_{i}$ and $f_{i+1}$ differ by an ambient isotopy of $S^{2}$, or by an ambient isotopy of $M$, or by one of the moves depicted in Figure 2. \end{theorem}
\begin{figure}\label{fig2a}
\label{fig2b}
\label{fig2c}
\end{figure}
This theorem gives a complete set of moves for relating Johansson representations of the same 3-manifold coming from nulhomotopic filling Dehn spheres (see Corollary \ref{CORtoMAINtheorem}).
The detailed proof of Theorem \ref{MAINtheorem} is quite long, and uses both smooth and combinatorial techniques. In this paper we will give an outline of this proof. The paper is organized as follows.
In section \ref{SECTION Preliminaries}, we give some preliminary definitions about Dehn surfaces and cell complexes. Most of section \ref{SECTION FillingHomotopy} and sections \ref{SECTION Pushing Disks} to \ref{SECTION Growing} introduce some partial results for giving a sketch of the proof of Theorem \ref{MAINtheorem}. This sketch of the proof is given in section \ref{SECTION ProofofMainTheorem}. A reader wishing to skip details can jump directly from sections \ref{SECTION FillingHomotopy} to \ref{SECTION Diagrams}.
The proof of Theorem \ref{MAINtheorem} in \cite{RHomotopies} relies on three Key Lemmas that we will state here without proof. In section \ref{SECTION FillingHomotopy} we present some results about regular homotopies of immersions of surfaces in 3-manifolds, and we introduce the concepts of \textit{filling-preserving moves} and \textit{filling homotopy} for filling immersions. Key Lemma 1 is stated in section \ref{SECTION Pushing Disks}, where we define the modifications of immersions of surfaces in 3-manifolds by \textit{pushing-disks}. This kind of modification was defined in \cite{HommaNagase}, and Key Lemma 1 asserts that every regular homotopy can be decomposed into pushing disks with some nice properties.
In section \ref{SECTION Spiral Piping} we introduce a surgery method for modifying Dehn surfaces that will be useful later, and in section \ref{SECTION What can be done} we present three examples of modifications of filling Dehn surfaces that can be done using only the filling-preserving moves defined in section \ref{SECTION FillingHomotopy}.
In section \ref{SECTION Shellability} we introduce some combinatorial tools that will be essential in Key Lemmas 2 and 3: the concept of \textit{shelling} of a cell complex and the concept of \textit{simplicial collapsing} for a simplicial complex. These concepts will appear almost everywhere in sections \ref{SECTION InflatingTriangulations} to \ref{SECTION ProofofMainTheorem}. In the same section \ref{SECTION Shellability} we introduce smooth triangulations of manifolds, which give us the theoretical basis for applying these previously defined combinatorial concepts to our case.
We explain in section~\ref{SECTION InflatingTriangulations} how any smooth triangulation $T$ of a 3-manifold $M$ can be ``inflated'' to obtain a filling Dehn sphere of $M$. This inflated filling Dehn sphere of $M$ has the property that it is transverse to any Dehn sphere of $M$ that lies in the 2-skeleton of $T$. When the triangulation $T$ of $M$ is ``sufficiently good'' with respect to a filling Dehn surface $\Sigma$ of $M$ we can use it to obtain from $\Sigma$ other filling surfaces ``as complicated as we want'' using only filling-preserving moves. These constructions are used in Key Lemma 2, which is stated in the same section \ref{SECTION InflatingTriangulations}. Key Lemma 2 gives a method for transforming any pushing disk (as defined in section \ref{SECTION Pushing Disks}) into a pushing disk which can be performed using only filling-preserving moves by putting many new sheets of the Dehn surface in the middle of the pushing ball (also defined in section \ref{SECTION Pushing Disks}). The basis for proving Key Lemma 2 in \cite{RHomotopies} is that when a pushing disk is ``sufficiently good'', it can be performed using only filling-preserving moves.
In section \ref{SECTION FillingPairs} we discuss briefly how two filling Dehn spheres of the same 3-manifold can intersect each other, and this discussion is used in section \ref{SECTION Growing}, where we state Key Lemma 3. Key Lemma 3 asserts that when two filling Dehn surfaces intersect in a ``sufficiently good'' way, the inflating constructions introduced in section \ref{SECTION InflatingTriangulations} can be made simultaneously for one of them and for the union of both.
All the constructions mentioned above as ``sufficiently good'' are intimately related to the concept of shelling.
In section \ref{SECTION Diagrams} we translate Theorem \ref{MAINtheorem} for Johansson representations of 3-manifolds and we give some examples, and in section \ref{SECTION Duplication} we explain briefly how we can obtain a nulhomotopic Johansson representation of a 3-manifold $M$ from any Johansson representation of $M$.
In the final section \ref{SECTION Miscelany} we briefly discuss some related problems.
This paper is part of the Ph.D. Thesis of the author, written under the supervision of Prof. J. M. Montesinos. I am very grateful to him for all his valuable advice, especially for his suggestions and comments during the writing of this paper and his careful reading of the previous versions of this manuscript.
\section{Preliminaries.\label{SECTION Preliminaries}}
Because our starting point is Theorem \ref{ThmHassHughes} below, we will work in the smooth category. Nevertheless, if one could check that the analogue of Theorem \ref{ThmHassHughes} in the PL category is true (we do not know of any reference), all our constructions would have their translation to the PL case and so Theorem \ref{MAINtheorem} would also be true in the PL case.
Thus, all the manifolds are assumed equipped with a smooth structure and maps between two manifolds are assumed to be smooth with respect to their respective smooth structures.
For the standard definitions of differential topology (immersions, tranversality, etc.), see \cite{Hirsch} or \cite{Guillemin}, for example. For a general treatment about PL topology we refer to \cite{Hudson}, for example.
For a subset $X$ of a manifold, we denote the interior, the closure and the boundary of $X$ by $int(X)$, $cl(X)$ and $\partial X$ respectively.
Let $A$ and $B$ be two sets. For a map $f:A\rightarrow B$ the \textit{singular values} or \textit{singularities} of $f$ are the points $x\in B$ with $\#\left\{ f^{-1}(x)\right\} >1$, and the \textit{singular points} of $f $ are the inverse image points by $f$ of the singularities of $f$. The \textit{singular set} $S(f)$ of $f$ is the set of singular points of $f$ in $A$, and the \textit{singularity set} $\bar{S}(f)$ of $f$ is the set of singularities of $f$ in $B$. Of course $f(S(f))=\bar{S}(f)$. This is a notation similar but slightly different to that of \cite{A.Shima2}.
From now on, $M$ will denote a 3-manifold in the conditions indicated at the beginning of section \ref{SECTION Introduction}.
Let $\Sigma$ be a Dehn surface in $M$.
Let $S$ be a surface and $f:S\rightarrow M$ a transverse immersion parametrizing $\Sigma$. In this case we say that the surface $S$ is the \textit{domain} of $\Sigma$. For any $x\in M$ we have $\#\left\{ f^{-1}(x)\right\} \leq3$ \cite{Hempel}. The singularities of $f$ are divided into double points of $f$, with $\#\left\{ f^{-1}(x)\right\} =2$, and triple points of $f$, with $\#\left\{ f^{-1}(x)\right\} =3$. A small neighbourhood of a double or a triple point looks as in Figures \ref{fig1a} and \ref{fig1b} respectively. The singularity set $\bar{S}(f)$ of $f$, the set of triple points of $f$, and the domain $S$ (up to homeomorphism) do not depend upon the chosen parametrization $f$ of $\Sigma$. We define the singularity set of $\Sigma$, denoted by $\bar{S}(\Sigma)$, as the singularity set of any parametrization of $\Sigma$. A \textit{double curve} of $\Sigma$ is the image of an immersion $\bar{\gamma}:S^{1}\rightarrow M$ contained in the singularity set of $\Sigma$ \cite{A.Shima2}. The singularity set of $\Sigma$ is the union of the double curves of $\Sigma$. Because $S$ is compact, $\Sigma$ has a finite number of double curves. Following \cite{A.Shima2}, we denote by $T(\Sigma)$ the set of triple points of $\Sigma$. The Dehn surface $\Sigma$ is \textit{embedded} if its singularity set is empty. A \textit{standardly embedded sphere} in $M$ is a 2-sphere embedded in $M$ that bounds a 3-ball in $M$.
A \textit{component} of $\Sigma$ is the image by $f$ of a connected component of the domain $S$. Note that the components of $\Sigma$ may not coincide with the connected components of $\Sigma$.
A Dehn surface $\Sigma$ in $M$ fills $M$ if it defines a cell decomposition of $M$ as indicated in section \ref{SECTION Introduction}. This definition extends to arbitrary surfaces a definition given in \cite{Montesinos} for Dehn spheres.
The following Proposition gives an equivalent definition of filling Dehn surface.
\begin{proposition} \label{PROPfillsMifandonlyIF}$\Sigma$ fills $M$ if and only if
\begin{enumerate} \item $M-\Sigma$ is a disjoint union of open 3-balls,
\item $\Sigma-\bar{S}(\Sigma)$ is a disjoint union of open 2-disks, and
\item $\bar{S}(\Sigma)-T(\Sigma)$ is a disjoint union of open intervals. \end{enumerate} \end{proposition}
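As a simple illustration of condition 3, consider two standardly embedded 2-spheres in $S^{3}$ meeting transversely in a single circle (two coordinate great spheres, for example). Their union $\Sigma$ satisfies conditions 1 and 2: $S^{3}-\Sigma$ consists of four open 3-balls and $\Sigma-\bar{S}(\Sigma)$ of four open 2-disks. However, $T(\Sigma)=\varnothing$ and $\bar{S}(\Sigma)$ is a whole circle, so condition 3 fails and $\Sigma$ does not fill $S^{3}$. In particular, a filling Dehn surface with nonempty singularity set must have triple points.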
The statements and definitions that we now give for cell complexes are also valid for simplicial complexes. We consider the cells of a cell complex as \textit{open} cells. If $K$ is a cell complex, and $\epsilon,\epsilon^{\prime}$ are two cells of $K$, we write $\epsilon<\epsilon^{\prime}$ when $\epsilon$ is a face of $\epsilon^{\prime}$, that is, when $cl(\epsilon)\subset cl(\epsilon^{\prime})$. The cells $\epsilon$ and $\epsilon^{\prime}$ are \textit{incident} if $\epsilon <\epsilon^{\prime}$ or $\epsilon^{\prime}<\epsilon$, and they are \textit{adjacent} if $cl(\epsilon)\cap cl(\epsilon^{\prime})\neq\varnothing$. For a cell $\epsilon$ of $K$, we define the (open) \textit{star} of $\epsilon$ as the union of all the cells $\epsilon^{\prime}$ of $K$ with $\epsilon <\epsilon^{\prime}$. The star of $\epsilon$ is denoted by $star(\epsilon)$.
If $\epsilon$ is a cell of the cell complex $K$, and $P$ is a vertex (0-cell) of $\epsilon$, we say that $\epsilon$ is \textit{self-adjacent} at $P$ if a regular neighbourhood of $P$ in $K$ intersects $\epsilon$ in more than one connected component. Otherwise we say that $\epsilon$ is \textit{regular at }$P$. We say that $\epsilon$ is \textit{regular} if it is regular at every vertex of $\epsilon$. The complex $K$ is \textit{regular at }$P$ if every cell of $K$ incident with $P$ is regular at $P$, and $K$ is \textit{regular} if every cell of $K$ is regular (compare \cite{Massey}). A filling Dehn surface $\Sigma$ of $M$ is \textit{regular (regular at a triple point)} if the cell decomposition of $M$ that defines $\Sigma$ is regular (at this triple point).
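For example, the cell decomposition of the circle $S^{1}$ with one vertex $P$ and one 1-cell $\epsilon$ is not regular: a regular neighbourhood of $P$ intersects $\epsilon$ in two connected components (the two ends of the loop), so $\epsilon$ is self-adjacent at $P$.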
If $\Sigma$ is a filling Dehn surface, then a connected component of $M-\Sigma$ is called a \textit{region} of $M-\Sigma$, and a connected component of $\Sigma-\bar{S}(\Sigma)$ is sometimes called a \textit{face} of $\Sigma$.
\section{Filling homotopy.\label{SECTION FillingHomotopy}}
An \textit{ambient isotopy} of a manifold $N$ is a map $\varsigma :N\times\left[ 0,1\right] \rightarrow N$ such that $\varsigma_{t} =\varsigma(\cdot,t)$ is a diffeomorphism for each $t\in\left[ 0,1\right] $ and $\varsigma_{0}=id_{N}$. Two immersions $f,g:S\rightarrow M$ are \textit{ambient isotopic in }$M$ if there is an ambient isotopy $\bar {\varsigma}$ of $M$ with $\bar{\varsigma}\circ f=g$. The same immersions are \textit{ambient isotopic in }$S$ if there is an ambient isotopy $\varsigma$ of $S$ with $f\circ\varsigma=g$. We generally say that $f$ and $g$ are \textit{ambient isotopic} if they are related by ambient isotopies of $S$ and ambient isotopies of $M$.
Two immersions $f,g:S\rightarrow M$ from a surface $S$ into the 3-manifold $M$ are \textit{regularly homotopic} if there is a homotopy $H:S\times\left[ 0,1\right] \rightarrow M$ with $H(\cdot,0)=f$ and $H(\cdot,1)=g$, and such that $H(\cdot,t)$ is an immersion for each $t\in\left[ 0,1\right] $. The homotopy $H$ defines a smooth path of immersions from $S$ into $M$ having $f$ and $g$ as its endpoints. If $f$ and $g$ are regularly homotopic, they are in particular homotopic. The converse is not true in general. Nevertheless, an immediate corollary of Theorem 1.1 in \cite{HassHugues} or Theorem 6 in \cite{Li Banghe} is the following Theorem:
\begin{theorem} \label{ThmHassHughes}Two immersions from the 2-sphere $S^{2}$ into a 3-manifold are regularly homotopic if and only if they are homotopic. \end{theorem}
In particular, two parametrizations of nulhomotopic filling Dehn spheres of $M$ must be regularly homotopic.
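A classical instance of this phenomenon is Smale's theorem that any two immersions of $S^{2}$ into $\mathbb{R}^{3}$ are regularly homotopic (the ``sphere eversion''); since all maps $S^{2}\rightarrow\mathbb{R}^{3}$ are homotopic, this agrees with the statement of Theorem \ref{ThmHassHughes}.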
In \cite{HommaNagase} a set of \textit{elementary deformations} for immersions of surfaces in 3-manifolds is introduced. This set of moves is composed of the \textit{saddle move} (which is called \textit{elementary deformation of type VI} in the notation of \cite{HommaNagase}) of Figure \ref{fig2a}, together with the moves depicted in Figure 3 (Figures \ref{fig3a}, \ref{fig3b} and \ref{fig3c} have been taken from \cite{Yashiro}). We will call these elementary deformations the \textit{Homma-Nagase moves}. The following Theorem is proved in \cite{HommaNagase2}:
\begin{theorem} \label{THM hommaNagase}Two transverse immersions from a closed surface $S$ into a 3-manifold $M$ are regularly homotopic if and only if we can deform them into one another by a finite sequence of Homma-Nagase moves, together with ambient isotopies of $M$. \end{theorem}
The proof of this Theorem in \cite{HommaNagase2} is in the PL category. A proof of the smooth version of this result is indicated in \cite{Roseman}. An equivalent result, also in the differentiable case, is Theorem 3.1 of \cite{HassHugues}.
\begin{figure}\label{fig3a}
\end{figure} \addtocounter{figure}{-1} \begin{figure}\label{fig3b}
\label{fig3c}
\end{figure}
We propose another set of moves (the Haken moves), obtained from the Homma-Nagase set of moves by replacing the moves of type II and III with the \textit{finger moves} 1 and 2 depicted in Figures \ref{fig2b} and \ref{fig2c}, respectively. The following Lemma can be easily proved:
\begin{lemma} \label{EQUIVALENCEHOmma_NAGASE-HAKEN}The Homma-Nagase set of moves and the Haken set of moves are equivalent. \end{lemma}
To prove this Lemma one must show that each Homma-Nagase move can be obtained using Haken moves (and ambient isotopies), and vice versa. Thus, in Theorem \ref{THM hommaNagase} we can substitute the Homma-Nagase moves by the Haken moves.
The Haken moves are better suited than the Homma-Nagase moves for dealing with filling Dehn surfaces. In the Haken set of moves, the Homma-Nagase move of type $I$ is called \textit{finger move 0}. For $i=0,1,2$, a finger move $i$ is a finger move $+i$ when it is performed from left to right in the corresponding figure, and a finger move $-i$ when it is performed in the opposite direction. A saddle move is equivalent (symmetric) in both directions.
\begin{lemma} \label{LemmaTYPE???ISFILLINGPRESERVING}Let $f,g:S\rightarrow M$ be two immersions. Then:
\begin{enumerate} \item if $f$ and $g$ are related by a finger move 0, then one of them is not a filling immersion;
\item if $f$ and $g$ are related by a finger move 1 or 2, then $f$ is a filling immersion if and only if $g$ is a filling immersion; and
\item if $f$ and $g$ are related by a saddle move and $f$ is a filling immersion, then $g$ is not necessarily a filling immersion. \end{enumerate} \end{lemma}
This Lemma can be proved by inspection, using the characterization of filling immersions given by Proposition \ref{PROPfillsMifandonlyIF}.
Lemma \ref{LemmaTYPE???ISFILLINGPRESERVING} inspires the following definition. If $f:S\rightarrow M$ is a filling immersion and we modify $f$ by a Haken move, we say that the move is \textit{filling-preserving} if the immersion $g$ we get after the move is again a filling immersion. With this notation, Lemma \ref{LemmaTYPE???ISFILLINGPRESERVING} means that a finger move 0 cannot be filling-preserving, finger moves 1 and 2 are always filling-preserving, and saddle moves are sometimes filling-preserving and sometimes not. The next step is the following Definition:
\begin{definition} \label{DEFfillinghomotopic}Let $f,g:S\rightarrow M$ be two filling immersions. We say that $f$ and $g$ are \textit{filling homotopic} if there is a finite sequence $f=f_{0},f_{1},...,f_{n}=g$ of immersions such that for each $i=0,...,n-1$ the immersions $f_{i}$ and $f_{i+1}$ are ambient isotopic or they are related by a filling-preserving move. \end{definition}
Note that in the previous Definition, all the immersions of the sequence $f_{0},...,f_{n}$ are filling immersions. With this notation, Theorem \ref{MAINtheorem} can be restated as follows
\begin{theorem} If $f,g:S^{2}\rightarrow M$ are nulhomotopic filling immersions, then they are filling homotopic. \end{theorem}
This gives a partial answer to the following Conjecture:
\begin{conjecture} \label{CONJECture}Regularly homotopic filling immersions of arbitrary surfaces are filling homotopic. \end{conjecture}
Perhaps the proof of Theorem \ref{MAINtheorem} given in \cite{RHomotopies} and sketched here can be adapted to a more general case, but we do not yet know how to do this.
\section{Pushing disks.\label{SECTION Pushing Disks}}
Let $f,g:S\rightarrow M$ be two immersions. Assume that there is a closed disk $D\subset S$ such that:
\begin{enumerate} \item $f$ and $g$ agree in $S-D$;
\item $f\mid_{D}$ and $g\mid_{D}$ are both embeddings;
\item $f\left( D\right) $ and $g\left( D\right) $ intersect only in $f\left( \partial D\right) =g\left( \partial D\right) $; and
\item $f\left( D\right) \cup g\left( D\right) $ bounds a 3-ball $B$ in $M$ (Figure 4). \end{enumerate}
Then we say that $g$ is obtained from $f$ by pushing the disk $D$ through $B$ or along $B$ (see Figure 4). The pair $\left( D,B\right) $ is a \textit{pushing disk} (see \cite{HommaNagase}). In the pushing disk $\left( D,B\right) $, the disk $D$ is \textit{the pushed disk}, $B$ is the \textit{pushing ball}, and we say also that $f\left( \partial D\right) =g\left( \partial D\right) $ is the \textit{equator} of $B$, denoted by $eq(B)$. If both $f$ and $g$ are transverse immersions, we say that the pushing disk $\left( D,B\right) $ is \textit{transverse}. In the pushing disk $\left( D,B\right) $, the ``rest'' of the immersed surface, $f(S-D)$, may intersect $B$ in any manner (Figure \ref{fig4b}). If we are given the immersion $f$ and the pushing disk $\left( D,B\right) $, then the immersion $g$ is well defined up to an ambient isotopy of $S$.
\begin{figure}\label{fig4a}
\label{fig4b}
\end{figure}
We will say that two (transverse) immersions $f,g:S\rightarrow M$ are \textit{regularly homotopic by (transverse) pushing disks} if there is a finite sequence $f=f_{0},f_{1},...,f_{n}=g$ of (transverse) immersions such that $f_{i}$ is obtained from $f_{i-1}$ by a pushing disk for $i=1,...,n$.
The first step in the proof of Theorem \ref{MAINtheorem} is the following Lemma, whose proof is given in \cite{RHomotopies}.
\begin{lemma} [Key Lemma 1]\label{KeyLemma1}Let $f,g:S\rightarrow M$ be two regularly homotopic immersions. Then:
\begin{enumerate} \item [ A)]they are regularly homotopic by pushing disks.
\item[ B)] if they are transverse, then they are regularly homotopic by transverse pushing disks.
\item[ C)] if $f$ and $g$ agree over a disk $D$ of $S$, such that the restrictions $f\mid_{D}=g\mid_{D}$ are embeddings, then in cases A) and B) the pushing disks can be chosen keeping $D$ fixed. \end{enumerate} \end{lemma}
Thus, we can decompose any regular homotopy into a finite sequence of pushing disks. Note that the Homma-Nagase moves and the Haken moves are special kinds of transverse pushing disks. However, Theorem \ref{THM hommaNagase} decomposes a regular homotopy into transverse pushing disks \underline{and} ambient isotopies of $M$. Getting rid of this ambient isotopy is the hardest part of the proof of Key Lemma 1 in \cite{RHomotopies}. In the same way as an immersion behaves locally as an embedding, a regular homotopy behaves locally as an isotopy. Using this, the proof of Key Lemma 1 is obtained after a detailed study of isotopies of embedded surfaces in 3-manifolds, and it is mainly inspired by \cite{HudsonZeeman}.
\section{Spiral piping.\label{SECTION Spiral Piping}}
In \cite{Banchoff} it is explained how to modify Dehn surfaces by \textit{surgery}, also called \textit{piping} (see \cite{RourkeSanderson}, p. 67). We introduce now a special kind of piping that will be useful later. Let $\Sigma$ be a Dehn surface in $M$, and let $P$ be a triple point of $\Sigma$. If $P$ is the triple point depicted in Figure \ref{fig5a}, consider the surface $\Sigma^{\prime}$ that coincides with $\Sigma$ except in a neighbourhood of $P$ that can be taken as small as necessary. In this neighbourhood of $P$, the Dehn surface $\Sigma^{\prime}$ looks like Figure \ref{fig5b}, and we say that $\Sigma^{\prime}$ is obtained from $\Sigma$ by a \textit{spiral piping around} $P$.
\begin{proposition} \label{PROPSpiralPipingPreserveFillingness}In this situation, if $\Sigma$ is a (regular) filling Dehn surface of $M$, then $\Sigma^{\prime}$ is a (regular) filling Dehn surface of $M$. \end{proposition}
See \cite{Anewproof} for more details.
\begin{figure}\label{fig5a}
\label{fig5b}
\end{figure}
If the two sheets of $\Sigma$ that become connected by the piping (the two vertical sheets in Figure 5) belong to different components $\Sigma_{1}$ and $\Sigma_{2}$ of $\Sigma$, then after performing the spiral piping these two components of $\Sigma$ become a single component $\Sigma_{1}\#\Sigma_{2}$ of $\Sigma^{\prime}$.
If $S$ is the domain of $\Sigma$, and $S^{\prime}$ is the domain of $\Sigma^{\prime}$, it is easy to check that $S^{\prime}$ is the result of removing the interiors of two small closed disks $\delta_{1},\delta_{2}$ from $S$ and identifying their boundaries in a suitable way. If $\delta_{1}$ and $\delta_{2}$ belong to different connected components $S_{1},S_{2}$ of $S$ respectively, then $S^{\prime}$ is the result of replacing the union $S_{1}\cup S_{2}$ in $S$ with the connected sum $S_{1}\#S_{2}$.
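For instance, if $S_{1}$ and $S_{2}$ are both 2-spheres, then $S_{1}\#S_{2}$ is again a 2-sphere; this is the case used in Theorem \ref{THM fillingcollection implies fillingSPHERE} below.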
The following Definition and Theorem appear in \cite{Anewproof}.
\begin{definition} \label{DEFfillingCOLLECTION}A Dehn surface $\Sigma\subset M$ that fills $M$ is called a \textit{filling collection of spheres} in $M$ if its domain is a disjoint union of a finite number of 2-spheres. \end{definition}
\begin{theorem} \label{THM fillingcollection implies fillingSPHERE}If $M$ has a filling collection of spheres $\Sigma$, then $M$ has a filling Dehn sphere $\Sigma^{\prime}$. If each component of $\Sigma$ is nulhomotopic, we can choose $\Sigma^{\prime}$ to be nulhomotopic. \end{theorem}
\begin{proof} Let $\Sigma$ be a filling collection of spheres in $M$, and let $\Sigma _{1},...,\Sigma_{m}$ be the different components of $\Sigma$.
The 2-skeleton of any cell decomposition of $M$ is connected because $M$ is connected. Then, $\Sigma$ is connected. Thus, we can assume that $\Sigma_{1},...,\Sigma_{m}$ are ordered in such a way that $\Sigma_{1} \cup...\cup\Sigma_{k}$ is connected for every $k\in\left\{ 1,...,m\right\} $. In particular, $\Sigma_{k}$ intersects $\Sigma_{1}\cup...\cup\Sigma_{k-1}$ for all $k\in\left\{ 2,...,m\right\} $.
Because $\Sigma_{1}\cap\Sigma_{2}$ is nonempty, the intersection $\Sigma _{1}\cap\Sigma_{2}$ contains a double curve of $\Sigma$, and because $\Sigma$ fills $M$, this double curve contains at least one triple point $P$ of $\Sigma$. Connecting $\Sigma_{1}$ and $\Sigma_{2}$ near $P$ by a spiral piping, we obtain a new Dehn sphere $\Sigma_{1}\#\Sigma_{2}$ such that $\left( \Sigma_{1}\#\Sigma_{2}\right) \cup\Sigma_{3}\cup...\cup\Sigma_{m}$ still fills $M$.
Because $\Sigma_{3}$ intersects $\Sigma_{1}\cup\Sigma_{2}$, it intersects $\Sigma_{1}\#\Sigma_{2}$. As before, this intersection contains a double curve, and because $\Sigma$ fills $M$, this double curve contains a triple point of $\Sigma$ (and therefore of $\left( \Sigma_{1}\#\Sigma_{2}\right) \cup...\cup\Sigma_{m}$). We can perform another piping operation (as before) obtaining a new Dehn sphere $\Sigma_{1}\#\Sigma_{2}\#\Sigma_{3}$, such that the new Dehn surface $\left( \Sigma_{1}\#\Sigma_{2}\#\Sigma_{3}\right) \cup\Sigma_{4} \cup...\cup\Sigma_{m}$ still fills $M$.
Inductively, for $k>3$, we obtain a Dehn sphere $\Sigma_{1}\#\Sigma _{2}\#...\#\Sigma_{k}$ piping $\Sigma_{1}\#\Sigma_{2}\#...\#\Sigma_{k-1}$ with $\Sigma_{k}$ around a triple point of $\Sigma$ lying in the intersection of $\Sigma_{1}\#\Sigma_{2}\#...\#\Sigma_{k-1}$ and $\Sigma_{k}$, with the property that $\left( \Sigma_{1}\#\Sigma_{2}\#...\#\Sigma _{k}\right) \cup\Sigma_{k+1}\cup...\cup\Sigma_{m}$ still fills $M$.
Repeating this operation we finally obtain a Dehn sphere $\Sigma^{\prime }=\Sigma_{1}\#\Sigma_{2}\#...\#\Sigma_{m}$ that fills $M$.
If all the components of $\Sigma$ are nulhomotopic, then we can deform the Dehn sphere $\Sigma_{m}$ continuously to a point. If $g_{m}:S^{2}\rightarrow M$ is an immersion parametrizing $\Sigma^{\prime}$, we can use this deformation to construct a homotopy between $g_{m}$ and an immersion $g_{m-1}$ parametrizing $\Sigma_{1}\#\Sigma_{2}\#...\#\Sigma_{m-1}$. In the same way, we can construct a homotopy between $g_{m-1}$ and an immersion $g_{m-2}$ parametrizing $\Sigma_{1}\#\Sigma_{2}\#...\#\Sigma_{m-2}$. Repeating this process, we finally obtain that $g_{m}$ is homotopic to an immersion $g_{1}$ parametrizing $\Sigma_{1}$; since $\Sigma_{1}$ is nulhomotopic, $g_{m}$ is nulhomotopic. \end{proof}
\begin{figure}\label{fig6a}
\label{fig6b}
\end{figure}
Another property of spiral pipings is that \textit{they do not disturb filling homotopies}, as stated in the following Lemma, which we give without proof:
\begin{lemma} \label{LEMMAIgnoringPipings}Let $f,g:S\rightarrow M$ be two filling immersions such that $g$ is obtained from $f$ after a finger move +2 through the triple point $P$ of $f$. Let $S^{\prime},f^{\prime},g^{\prime}$ be the surface and immersions that come from $S,f,g$ respectively after performing a spiral piping around $P$. We assume that this spiral piping is as small as necessary, in comparison with the finger move (Figure \ref{fig6b}). Then, $f^{\prime}$ and $g^{\prime}$ are filling homotopic. \end{lemma}
In the situation explained in the previous Lemma we say that the immersions $f^{\prime},g^{\prime}$ are related by a \textit{piping passing move} through $P$.
\section{What can be done using filling-preserving moves.\label{SECTION What can be done}}
We will give here three examples of operations that can be performed on a filling Dehn surface using only filling-preserving moves.
We start by considering a filling Dehn surface $\Sigma$ of the 3-manifold $M$.
\subsection{Inflating a double point.\label{SUBSECTION InflatingDoublePoint}}
\begin{figure}\label{fig7}
\end{figure}
Let $P$ be a double point of $\Sigma$. Consider a standardly embedded 2-sphere $\Sigma_{P}$ in $M$ as in Figure \ref{fig7}. The sphere $\Sigma_{P}$ contains $P$, and its intersection with $\Sigma$ is the union of two circles. These two circles intersect each other at $P$ and at another point $Q$, which are the unique double points of $\Sigma$ lying in $\Sigma_{P}$. Note that the union $\Sigma\cup\Sigma_{P}$ is a filling Dehn surface of $M$. Consider a filling Dehn surface $\Sigma\#\Sigma_{P}$ that is the result of modifying $\Sigma\cup\Sigma_{P}$ by a spiral piping around $P$ (see section \ref{SECTION Spiral Piping}).
\begin{proposition} \label{PropositionINFLATINGdoublepoint}We can choose the piping such that $\Sigma$ is filling homotopic to $\Sigma\#\Sigma_{P}$. \end{proposition}
\begin{proof} We consider the filling Dehn surface $\Sigma\#\Sigma_{P}$ as in Figure \ref{fig8a}. This surface is identical with $\Sigma\cup\Sigma_{P}$ except in a small neighbourhood of $P$, where it looks like Figure \ref{fig8b}.
\begin{figure}\label{fig8a}
\label{fig8b}
\label{fig8c}
\label{fig8d}
\label{fig8e}
\label{fig8f}
\end{figure}
We can uninflate $\Sigma_{P}$ through $P$ by an ambient isotopy of $M$, until we reach the situation depicted in Figure \ref{fig8c}.
We ``open the entrance of the tunnel'' using two filling-preserving saddle moves, over and under the sheet of $\Sigma$ containing the spiral piping (Figure \ref{fig8c}), and we get the situation of Figure \ref{fig8d}. Now, after three consecutive finger moves -1 we make $\Sigma_{P}$ disappear completely (Figures \ref{fig8e} and \ref{fig8f}). \end{proof}
In the previous Proposition, the statement should be ``every parametrization of $\Sigma$ is filling homotopic to a parametrization of $\Sigma\#\Sigma_{P}$'', which is a little stronger than the chosen statement, but we use this language for the sake of simplicity.
We say that the Dehn surface $\Sigma\#\Sigma_{P}$ as in the previous proposition is obtained from $\Sigma$ \textit{by inflating} $P$.
\subsection{Inflating 2-disks.\label{SUBSECTION InflatingDisks}}
Let $R$ be a region of $M-\Sigma$. Consider a closed disk $w$ in $M$ such that: (i) $(\partial w,int(w))\subset(\partial R,R)$; (ii) $\partial w$ contains no triple point of $\Sigma$; and (iii) $\partial w$ contains a finite number $n>0$ of double points of $\Sigma$ ($n=4$ in Figure \ref{fig9a}). In this situation, we say that $w$ is an $n$\textit{-gon} in $R$. We say that the double points of $\Sigma$ in $\partial w$ are the \textit{vertices} of $w$ and that the connected components of $\partial w-\left\{ \text{vertices of }w\right\} $ are the \textit{edges} of $w$.
We can take a closed 3-ball $B_{w}\subset M$ with $w$ in its interior as in Figure \ref{fig9b}. We take $B_{w}$ in such a way that, if $\Sigma_{w}$ denotes the 2-sphere bounding $B_{w}$, then $\Sigma\cup\Sigma_{w}$ is a Dehn surface in $M$ (that is, $\Sigma_{w}$ intersects $\Sigma$ transversely). The complement of $\Sigma$ in $B_{w}$ has $2n+1$ connected components. The closure of one of the connected components of $B_{w}-\Sigma$ contains $w$ and it is a prism $w\times\left[ -1,1\right] $ over $w$ with $w\times\left\{ 0\right\} \approx w$; its intersection with $\Sigma$ is the ``boundary wall'' $\partial w\times\left[ -1,1\right] $, and $w\times\left\{ -1\right\} ,w\times\left\{ 1\right\} $ are two $n$-gons in $R$ ``parallel'' to $w$. There is a 2-gon prism (with one quadrangular face in $\partial w\times\left[ -1,1\right] $ and the other quadrangular face in $\Sigma_{w}$) along each of the $n$ edges of $w$. Close to each vertex of $w$ there appears a trihedron with two faces in $\Sigma$ and the other face in $\Sigma_{w}$. These pieces account for all $2n+1$ components of $B_{w}-\Sigma$: the prism containing $w$, the $n$ 2-gon prisms and the $n$ trihedra. In this situation, we say that the 2-sphere $\Sigma_{w}$ is obtained by \textit{inflating} $w$. Note that the new Dehn surface $\Sigma\cup\Sigma_{w}$ is another filling Dehn surface of $M$.
\begin{figure}\label{fig9a}
\label{fig9b}
\end{figure}
Now, consider a point $P$ in $\Sigma_{w}$ which is a triple point of $\Sigma\cup\Sigma_{w}$, and consider a filling Dehn surface $\Sigma \#\Sigma_{w}$ that is obtained from $\Sigma\cup\Sigma_{w}$ by a spiral piping around $P$. Then, we have the following Proposition:
\begin{proposition} \label{PROPinflating2-cells}We can choose the piping such that $\Sigma$ is filling homotopic to $\Sigma\#\Sigma_{w}$. \end{proposition}
\begin{proof} We start with the filling Dehn surface $\Sigma$. The point $P$ is a double point of $\Sigma$. We inflate $P$ to obtain a 2-sphere $\Sigma_{P}$ (piped with $\Sigma$), such that $\Sigma\#\Sigma_{P}$ is filling homotopic to $\Sigma$. We choose $\Sigma_{P}$ inside $B_{w}$ (see Figure \ref{fig10a}). Now, we modify $\Sigma\#\Sigma_{P}$ by $n-1$ consecutive finger moves +1 along the edges of $w$ until we reach the situation of Figure \ref{fig10c}. We modify the Dehn surface of Figure \ref{fig10c} by an ambient isotopy of $M$ until we reach the filling Dehn surface $\Sigma^{\prime}$ of Figure \ref{fig10d}. Note that $\Sigma^{\prime}$ differs from $\Sigma\#\Sigma_{w}$ by a saddle move. This saddle move is filling-preserving because both $\Sigma^{\prime}$ and $\Sigma\#\Sigma_{w}$ are filling Dehn surfaces of $M$. Thus, $\Sigma$ and $\Sigma\#\Sigma_{w}$ are filling homotopic.
\begin{figure}\label{fig10a}
\label{fig10b}
\label{fig10c}
\label{fig10d}
\end{figure}
\end{proof}
The same operation of inflating $n$-gons can be performed if we start with a closed disk $w$ in $M$ contained in $\Sigma$. Consider a closed disk $w$ in $M$ which is the closure of a regular face of $\Sigma$. In this case we say that $w$ is an $n$\textit{-gon in} $\Sigma$ if $n>0$ is the number of triple points of $\Sigma$ lying in $\partial w$. We can inflate it in the same way as an $n$-gon in a region of $M-\Sigma$, but in this case we must introduce in Figures 9 and 10 a ``horizontal plane'' corresponding to the sheet of $\Sigma$ that contains $w$. We inflate a 2-sphere $\Sigma_{P}$ on one side of the $n$-gon $w$. Then, we use a finger move +1 to make it cross the $n$-gon $w$. Using finger moves +2 instead of finger moves +1, and taking a little care with the final saddle move of the previous proof, it can also be seen that we can connect the 2-sphere $\Sigma_{w}$ with $\Sigma$ by a spiral piping such that the resulting filling Dehn surface $\Sigma\#\Sigma_{w}$ is filling homotopic to $\Sigma$. The final care to which we referred above consists of using two saddle moves (one above and the other below the $n$-gon $w$) and a final finger move of type -1 (we leave the details to the reader).
In both cases (the $n$-gon $w$ in a region of $M-\Sigma$ or in the Dehn sphere $\Sigma$), we say that the Dehn surface $\Sigma\#\Sigma_{w}$ is obtained from $\Sigma$ \textit{by inflating} $w$.
Thus, in the previous two paragraphs, we have seen that starting with a filling Dehn surface $\Sigma$ and using only filling-preserving moves we can obtain filling Dehn surfaces of $M$ that are more and more complicated, in analogy with the finer and finer subdivisions of simplicial complexes or cell complexes.
\subsection{Passing over 3-cells.\label{SUBSECTION PassingCells}}
Let $\Delta$ be a regular face of $\Sigma$ and $R$ a regular region of $M-\Sigma$ incident with $\Delta$. The regularity implies that $cl(\Delta)$ is a closed disk and $cl(R)$ is a closed 3-ball. Take a parametrization $f:S\rightarrow M$ of $\Sigma$, and take the immersion $g:S\rightarrow M$ that is obtained from $f$ by a pushing disk $(D,B)$ as indicated in Figure \ref{fig11}. There is an open disk $\tilde{\Delta}$ in $S$ such that the restriction of $f$ to $\tilde{\Delta}$ is an embedding and $f(\tilde{\Delta})=\Delta$. The pushed disk $D$ in $S$ contains $cl(\tilde{\Delta})$ in its interior and is as close to $\tilde{\Delta}$ as necessary, such that $f\mid_{D}$ is an embedding. The pushing ball $B$ contains the region $R$ and $cl(R)\cap\partial B=cl(\Delta)$. The disk $g(D)$ is a closed disk outside $cl(R)$ running parallel to $\partial R-\Delta$. The following is proved in \cite{RHomotopies}:
\begin{lemma} \label{LemmaPassing3-cells}If $g$ is a filling immersion, then it is filling homotopic to $f$. \end{lemma}
\begin{figure}\label{fig11}
\end{figure}
In \cite{RHomotopies} it is studied which properties of the pair $\Delta,R$ are necessary and sufficient for $g$ to be a filling immersion.
\section{Shellability. Smooth triangulations.\label{SECTION Shellability}}
In the proof of Theorem \ref{MAINtheorem} we make extensive use of the concept of \textit{shelling}.
\begin{definition} \label{DEFfreen-cell}Let $N$ be an $n$-manifold with boundary, and let $C\subset N$ be a closed $n$-ball in $N$. We say that $C$ is free in $N$ if $C\cap\partial N$ is a closed $(n-1)$-ball. \end{definition}
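For instance, if $N$ is a closed 3-ball, a closed 3-ball $C\subset N$ meeting $\partial N$ in a closed disk (a ``half-ball'') is free in $N$, while a closed 3-ball contained in $int(N)$ is not free, since its intersection with $\partial N$ is empty and hence not a closed 2-ball.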
Let $B$ be a closed $n$-ball, and let $K$ be a regular cell decomposition of $B$.
\begin{definition} \label{DEFShellable}$K$ is shellable if there exists an ordering $C_{1} ,C_{2},...,C_{k}$ of its $n$-cells such that $cl(C_{i})$ is free in the closure of $(\underset{j\geq i}{\cup}C_{j})$. If this is the case, we say that the ordering $C_{1},C_{2},...,C_{k}$ is a shelling of $K$. \end{definition}
While cell decompositions of 2-disks are always shellable (see Lemma 1 of \cite{Sanderson}), non-shellable cell decompositions of $n$-balls exist for $n>2$. In \cite{Bruggeser-Mani} it is proved that every cell decomposition of an $n$-ball has a shellable subdivision (a cell decomposition $\sigma K$ of $B$ is a \textit{subdivision} of the cell decomposition $K$ of $B$ if every cell of $\sigma K$ is contained in a cell of $K$).
For the proof of Theorem \ref{MAINtheorem}, we will work with triangulations of a 3-manifold $M$, and we will require that a set of not necessarily disjoint 3-balls in $M$ (endowed with the induced cell decompositions) are all shellable at once, and with some special properties. For this, we will use the work of Whitehead on simplicial collapsings (see \cite{Whitehead} or \cite{Glaser}).
A simplicial complex whose underlying polyhedron is a ball induces in a natural way a cell decomposition of the ball. Thus, we can consider simplicial complexes also as particular cell complexes.
If $K$ is a simplicial complex, a simplex of $K$ is \textit{maximal} if it is not a proper face of another simplex of $K$. If $\epsilon^{i}$ is a maximal $i$-simplex of $K$, an $(i-1)$-face $\epsilon^{i-1}$ of $\epsilon^{i}$ is \textit{free} in $K$ if it is not a face of another $i$-simplex of $K$ different from $\epsilon^{i}$. If $\epsilon^{i}$ is maximal in $K$ and $\epsilon^{i-1}$ is free in $K$, then the result of removing $(\epsilon ^{i},\epsilon^{i-1})$ from $K$ is another simplicial complex $K^{\prime}$ and it is said that $K^{\prime}$ is obtained from $K$ by a \textit{simplicial collapsing}. The complex $K$ \textit{collapses simplicially} into a subcomplex $K^{\prime}$ if $K^{\prime}$ is obtained from $K$ after a finite sequence of simplicial collapsings. In particular, $K$ is \textit{collapsible} if it collapses simplicially into a point. If $\sigma K$ is a subdivision of $K$ and $K^{\prime}$ is a subcomplex of $K$, then $\sigma K^{\prime}$ will denote the corresponding subcomplex of $\sigma K$.
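For example, the closed 2-simplex with vertices $a,b,c$ (with all its faces) is collapsible: the edge $ab$ is free, so we may remove the pair $(abc,ab)$; then the vertex $a$ is free in the resulting complex and we remove $(ac,a)$; finally we remove $(bc,b)$, leaving only the vertex $c$.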
\begin{theorem} \cite{Whitehead}\label{THWhiteheadsubdivisions}If $K$ is any finite simplicial complex, there is a (stellar) subdivision $\sigma K$ of $K$ such that $\sigma B^{n}$ collapses simplicially into $\sigma B^{n-1}$, where $B^{n}$ is any subcomplex of $K$ which is a closed $n$-ball and $B^{n-1}$ is any subcomplex of $\partial B^{n}$ which is a closed $(n-1)$-ball. \end{theorem}
For triangulations of an $n$-ball, shellability obviously implies collapsibility. Note that the converse is not obvious because in shellability we require that the space after each step remains a ball, while in ``collapsings'' it might not even be a manifold. (The example given in \cite{Rudin} is not shellable, but it is simplicially collapsible, compare \cite{Chillingworth}.) However, the converse is almost true in our case according to the following observation, which arises from the proof of Theorem 6 in \cite{Bing}:
\begin{lemma} \label{LEMABing}If $K$ is a collapsible triangulation of the 3-ball, then the second derived subdivision of $K$ is shellable. \end{lemma}
Smooth triangulations of manifolds are introduced in \cite{Whitehead2} for relating the smooth and PL categories in manifold theory. A triangulation of an $n$-manifold $N$ is a homeomorphism $h:K\rightarrow N$, where $K$ is a rectilinear simplicial complex of some euclidean space. If $N$ has a smooth structure, the triangulation $h$ is \textit{smooth} (with respect to this structure) if the restriction of $h$ to each simplex of $K$ is a smooth map. We identify the manifold $N$ with the simplicial complex $K$. In \cite{Whitehead2} (see also \cite{Munkres}) it is proved that: (i) any $n$-manifold with a smooth structure admits smooth triangulations; and (ii) two smooth triangulations of the same smooth manifold have a common smooth subdivision (!). If $f:S\rightarrow M$ is a transverse immersion from the surface $S$ into the 3-manifold $M$, then there are smooth triangulations $K$ and $T$ of $S$ and $M$ respectively such that $f$ is simplicial with respect to them (for more general results of this kind, see \cite{Verona}).
If $f:S\rightarrow M$ is a filling immersion and $K,T$ are triangulations of $S,M$ respectively such that $f$ is simplicial with respect to them, then the triangulation $T$ triangulates also the closure of each region of $M-f(S) $. If $R$ is a regular region of $M-f(S)$, we say that $T$ \textit{shells} $R $ if it induces a shellable triangulation on $cl(R)$. If $R$ is not regular, we cut first $cl(R)$ along its self-adjacencies to obtain a closed 3-ball $\widetilde{cl(R)}$. The triangulation $T$ on $cl(R)$ lifts naturally to $\widetilde{cl(R)}$, and so we say that $T$ \textit{shells} $R$ if the induced triangulation on $\widetilde{cl(R)}$ is shellable. The triangulation $T$ \textit{shells} the filling immersion $f$ (or the filling Dehn surface $f(S)$) if $T$ shells each region of $M-f(S)$.
All these results imply the following
\begin{theorem} \label{THMsmoothtriangulations exist}Let $S_{1},...,S_{k}$ be a finite collection of surfaces, and for each $i=1,...,k$ let $f_{i}:S_{i}\rightarrow M $ be a transverse immersion. Then, there is a smooth triangulation $T$ of $M$ such that:
\begin{enumerate} \item the Dehn surfaces $f_{1}(S_{1}),...,f_{k}(S_{k})$ are contained in the 2-skeleton of $T$;
\item if $f_{i}$ is a filling immersion for some $i=1,...,k$, the triangulation $T$ shells $f_{i}$;
\item if $f_{i}$ and $f_{j}$ differ by a pushing disk $(D,B)$ for some $i,j\in\left\{ 1,...,k\right\} $, then the triangulation $T$ restricted to $B $ collapses simplicially into $f_{j}(D)$. \end{enumerate} \end{theorem}
\begin{proof} First of all, we have seen that for each $i=1,...,k$ there are smooth triangulations $K_{i},T_{i}$ such that $f_{i}$ is simplicial with respect to them. According to \cite{Whitehead2}, the smooth triangulations $T_{1} ,...,T_{k}$ have a common smooth subdivision $T_{0}$. Then, all the Dehn surfaces $f_{1}(S_{1}),...,f_{k}(S_{k})$ are contained in the 2-skeleton of $T_{0}$. Take a subdivision $T_{0}^{\prime}$ of $T_{0}$ in the conditions of Theorem \ref{THWhiteheadsubdivisions}, and take $T$ as the second derived subdivision of $T_{0}^{\prime}$.
If $f_{i}$ and $f_{j}$ differ by a pushing disk $(D,B)$, then $T_{0}$ triangulates $B$ and $f_{j}(D)$, and so because $T_{0}^{\prime}$ has been chosen following Theorem \ref{THWhiteheadsubdivisions}, the triangulation $T_{0}^{\prime}$ restricted to $B$ collapses simplicially into $f_{j}(D)$. Simplicial collapsing is preserved by stellar subdivisions \cite{Whitehead} and so it is also preserved by derived subdivisions. Thus, $T$ restricted to $B$ collapses simplicially into $f_{j}(D)$.
If $f_{i}$ is a filling immersion and $R$ is a regular region of $M-f_{i}(S_{i})$, then $T_{0}$ triangulates $cl(R)$. By the choice of $T_{0}^{\prime}$, $T_{0}^{\prime}$ induces a collapsible triangulation of $cl(R)$, and by Lemma \ref{LEMABing}, $T$ induces a shellable triangulation on $cl(R)$. If $R$ is not regular, we may need to perform further stellar subdivisions on $cl(R)$ to obtain the required shelling property on $\widetilde{cl(R)}$, but this does not alter the previous construction because stellar subdivisions preserve shellability \cite{Bruggeser-Mani} and collapsibility. \end{proof}
\begin{definition} \label{DEF GoodTriangulations}Under the hypotheses of the previous Theorem, we say that $T$ is a good triangulation of $M$ with respect to $f_{1},...,f_{k}$. \end{definition}
With these results, we have prepared the ground for the following sections.
\section{Inflating triangulations.\label{SECTION InflatingTriangulations}}
Now we will explain how we can associate to any triangulation of the 3-manifold $M$ a filling collection of spheres of $M$.
Let $B_{1},B_{2}$ be two closed 3-balls in $M$. We say that $B_{1},B_{2}$ \textit{intersect normally} if they intersect as in Figure \ref{fig12a}. The 2-spheres $\partial B_{1},\partial B_{2}$ must intersect transversely in a single simple closed curve. If $B_{1},B_{2}$ intersect normally, then $B_{1}\cap B_{2}$, $cl(B_{1}-B_{2})$ and $cl(B_{2}-B_{1})$ are 3-balls. If $B_{1},B_{2},B_{3}$ are 3-balls in $M$, they \textit{intersect normally} if they intersect as in Figure \ref{fig12b}. Each pair $B_{i},B_{j}$ with $i\neq j$ intersects normally, and $\partial B_{1},\partial B_{2}$ and $\partial B_{3}$ intersect transversely at two triple points.
\begin{figure}\label{fig12a}
\label{fig12b}
\end{figure}
Let $T$ be a smooth triangulation of $M$. (We refer to the 0-simplexes, 1-simplexes, 2-simplexes and 3-simplexes of $T$ as vertices, edges, triangles and tetrahedra of $M$, respectively.) We can construct a filling collection of spheres of $M$ by ``inflating'' $T$, assigning to each simplex $\epsilon$ of the 2-skeleton $T^{2}$ of $T$ a 2-sphere $S\epsilon$ embedded in $M$ in such a way that their union $\mathbb{T}=\underset{\epsilon\in T^{2}}{\cup}S\epsilon$ fills $M$. We will do this as follows.
\begin{figure}\label{fig13a}
\label{fig13b}
\end{figure}
First, if $v_{1},...,v_{m_{0}}$ are the vertices of $M$, for $i=1,...,m_{0}$ the 2-sphere $Sv_{i}\subset M$ bounds a closed 3-ball $Bv_{i}$ in $M$ contained in the open star $star\left( v_{i}\right) $ and with $v_{i}$ in its interior. The 2-spheres $Sv_{1},...,Sv_{m_{0}}$ are pairwise disjoint (Figure \ref{fig13a}) and the triangulation $T$ of $M$ induces a triangulation of $Bv_{i}$ as a \textit{cone} from $v_{i}$ over $Sv_{i}$ for each $i=1,...,m_{0}$ (see Figure \ref{fig13b}). The 2-sphere $Sv_{i}$ intersects transversely each $j$-simplex $\epsilon^{j}\in star\left( v_{i}\right) \subset T$ in a $\left( j-1\right) $-simplex of this induced triangulation of $Sv_{i}$.
If $e_{1},...,e_{m_{1}}$ are the edges of $M$, for $j=1,...,m_{1}$ the 2-sphere $Se_{j}\subset M$ bounds a closed 3-ball $Be_{j}$ in $M$ as in Figure \ref{fig14a}. The 3-ball $Be_{j}$ is contained in the open star $star\left( e_{j}\right) $ and it intersects $e_{j}$ in a closed sub-arc $\tilde{e}_{j}\subset e_{j}$. The 2-sphere $Se_{j}$ and $e_{j}$ intersect transversely at the endpoints of the arc $\tilde{e}_{j}$. We take $Be_{1},...,Be_{m_{1}}$ pairwise disjoint, and for each $i\in\left\{ 1,...,m_{0}\right\} $ and $j\in\left\{ 1,...,m_{1}\right\} $ the 3-balls $Bv_{i}$ and $Be_{j}$ are also disjoint unless $v_{i}$ and $e_{j}$ are incident. In this case, $Bv_{i}$ and $Be_{j}$ intersect normally (Figure \ref{fig14b}) and $Bv_{i}\cap Be_{j}$ intersects $e_{j}$ in another closed sub-arc of $e_{j}$. Considering the two points of the intersection $Se_{j}\cap e_{j}$ as the ``poles'' of $Se_{j}$, each triangle $t$ of $M$ incident with $e_{j}$ intersects $Se_{j}$ transversely in an open arc which is the interior of a ``meridian'' $a$ with its endpoints at the poles. The intersection $cl(t)\cap Be_{j}$ is a closed disk bounded by $a\cup\tilde{e}_{j}$.
\begin{figure}\label{fig14a}
\label{fig14b}
\end{figure}
Finally, if $t_{1},...,t_{m_{2}}$ are the triangles of $M$, for $k=1,...,m_{2}$ the 2-sphere $St_{k}$ bounds a 3-ball $Bt_{k}$ as in Figure \ref{fig15a}. The 3-ball $Bt_{k}$ is contained in the (open) star $star\left( t_{k}\right) $ and it intersects $t_{k}$ in a closed disk $\tilde{t}_{k}\subset t_{k}$, and the intersection of $St_{k}$ with $t_{k}$ is transverse. The 3-ball $Bt_{k}$ is disjoint from $B\epsilon$ for every $\epsilon\in T^{2}$ different from $t_{k}$ unless $\epsilon$ is incident with $t_{k}$. In this case, $Bt_{k}$ and $B\epsilon$ intersect normally. Moreover, if we have $v_{i}<e_{j}<t_{k}$, then the 3-balls $Bv_{i},Be_{j},Bt_{k}$ intersect normally (Figure \ref{fig15b}) and there is one of the two triple points of $Sv_{i}\cap Se_{j}\cap St_{k}$ in each of the two tetrahedra of $star\left( t_{k}\right) $.
\begin{figure}\label{fig15a}
\label{fig15b}
\end{figure}
If $T$ is a cell decomposition of $M$ instead of a triangulation, the previous construction can easily be generalized.
It is easy to check that the Dehn surface $\mathbb{T}=\underset{\epsilon\in T^{2}}{\cup}S\epsilon$ so constructed is a filling collection of spheres in $M$. Moreover, $\mathbb{T}$ is regular and it is transverse to the (smooth) simplexes of the triangulation $T$ of $M$.
In particular, as a Corollary of Theorem \ref{THM fillingcollection implies fillingSPHERE} this construction implies the main Theorem of \cite{Montesinos}:
\begin{theorem} $M$ has a nulhomotopic filling Dehn sphere. \end{theorem}
Note that in this case, in contrast with \cite{Montesinos} or \cite{Anewproof}, we have not made any assumption about the orientability of $M$.
The following result follows directly from the construction.
\begin{proposition} \label{PROPinflatetriangulationsFILLS}Let $S$ be a surface and $f:S\rightarrow M$ a transverse immersion. Let $K,T$ be triangulations of $S,M$ respectively such that $f$ is simplicial with respect to them. Then $f\left( S\right) \cup\mathbb{T}$ is a regular filling Dehn surface of $M$. \end{proposition}
In the previous Proposition the immersion $f$ can be any transverse immersion, filling or not. Assume now that $f:S\rightarrow M$ is a filling immersion and put $\Sigma:=f(S)$. Let $K,T$ be triangulations of $S$ and $M$ respectively such that $f$ is simplicial with respect to them. By the previous Proposition, $\Sigma\cup\mathbb{T}$ fills $M$, and by the same methods as in the proof of Theorem \ref{THM fillingcollection implies fillingSPHERE}, we can obtain from $\Sigma\cup\mathbb{T}$ a single filling Dehn surface of $M$. If we look at the proof of Theorem \ref{THM fillingcollection implies fillingSPHERE}, this can be done in many different ways because there are many possibilities for performing the spiral pipings. We say that each filling Dehn surface $\Sigma^{\prime}$ of $M$ that is obtained from $\Sigma$ in this way is a $T$\textit{-inflating} of $\Sigma$. Let $\Sigma^{\prime}$ be a $T$-inflating of $\Sigma$. By Proposition \ref{PROPinflatetriangulationsFILLS} and Proposition \ref{PROPSpiralPipingPreserveFillingness}, $\Sigma^{\prime}$ is regular because spiral pipings preserve regularity. There is an immersion $f^{\prime}:S\rightarrow M$ parametrizing $\Sigma^{\prime}$ that \textit{comes from} $f$ in a natural way, that is, $f^{\prime}$ agrees with $f$ on all of $S$ except on the small disks where we perform the pipings. We say also that $f^{\prime}$ is a $T$\textit{-inflating} of $f$. The first application of shellability is the next result, proved in \cite{RHomotopies}.
\begin{proposition} \label{PROPshellableimpliesfillinghomotopictoinflated}If $T$ shells $f$, then there is a $T$-inflating $f^{\prime}$ of $f$ filling homotopic to $f$. Moreover, we can choose $f^{\prime}$ such that there are only two spiral pipings connecting $\Sigma$ with components of $\mathbb{T}$ and such that the rest of the spiral pipings are performed around triple points of $\mathbb{T}$. \end{proposition}
By Theorem \ref{THMsmoothtriangulations exist}, passing to suitable subdivisions we can assume that $T$ shells $f$, and thus we have:
\begin{corollary} \label{CorolarYfillinghomotopictoREGULAR}If $f:S\rightarrow M$ is a filling immersion, then $f$ is filling homotopic to a regular filling immersion. \end{corollary}
The proof of Proposition \ref{PROPshellableimpliesfillinghomotopictoinflated} proceeds by repeated application of the constructions of sections \ref{SUBSECTION InflatingDoublePoint} and \ref{SUBSECTION InflatingDisks}, using that each region of $M-\Sigma$ has a shellable triangulation and that each triangulation of a 2-disk is shellable \cite{Sanderson}. As an example, we will illustrate the starting point of this construction, in which we ``inflate'' a tetrahedron of $T$.
\begin{example} \label{EXAMPLE Inflating a Tetrahedron}Inflating a tetrahedron.
\vspace {10pt} \begin{figure}\label{fig16}
\end{figure}
Let $f$, $\Sigma$ and $T$ be as in Proposition \ref{PROPshellableimpliesfillinghomotopictoinflated}. Let $R$ be a region of $M-\Sigma$. Because $T$ induces a shellable triangulation on $R$, there is a tetrahedron $\sigma$ of $T$ such that $cl(\sigma)$ is free in $cl(R)$. Assume that $\sigma$ is as in Figure \ref{fig16}. In this case, the intersection $cl(\sigma)\cap cl(R)$ is the closure of a triangle $t_{0}$ of $\sigma$, and $t_{0}$ has exactly one vertex $v_{1}$ which is a triple point of $\Sigma$ and exactly one edge $e_{3}$ (incident with $v_{1}$) contained in a double curve of $\Sigma$.
We can think of the triangle $t_{0}$ as the triangle $t$ of Figure \ref{fig15b} above.
\begin{enumerate} \item Consider the intersection point $Q_{0}$ of the 2-sphere $Sv_{1}$ with the edge $e_{3}$. We inflate $Q_{0}$ to obtain a small 2-sphere $\Sigma_{Q_{0}}$ piped with $\Sigma$ (Fig. \ref{fig17}(b)). After a finger move +2 through $v_{1}$ (Fig. \ref{fig17}(c)) and an ambient isotopy of $M$ (Fig. \ref{fig17}(d)), $\Sigma_{Q_{0}}$ is transformed into $Sv_{1}$ and $\Sigma\#\Sigma_{Q_{0}}$ is transformed into $\Sigma\#Sv_{1}$.
\vspace {10pt}
\begin{figure}\label{fig17}
\end{figure}
\item Take the point $Q_{1}$ of intersection of $Se_{3}$ with $e_{3}$ that lies inside $Bv_{1}$ (Fig. \ref{fig18}(a)). We inflate $Q_{1}$ (Fig. \ref{fig18}(b)) and apply a piping passing move through $Q_{0}$ (Fig. \ref{fig18}(c)) to obtain $\Sigma \#Sv_{1}\#Se_{3}$ (Fig. \ref{fig18}(d)).
\item Take the triple point $P_{0}$ of $\mathbb{T}$ in the intersection $Sv_{1}\cap Se_{3}\cap St_{0}$ that lies in $\sigma$ (Fig. \ref{fig19a}). This point $P_{0}$ is now a double point of $\Sigma\#Sv_{1}\#Se_{3}$. We inflate $P_{0}$. If $A$ is the intersection point of the double curve $Sv_{1}\cap Se_{3}$ of $\mathbb{T}$ with the triangle $t_{0}$, after a finger move +2 through $A$ (Fig. \ref{fig19b}) and an ambient isotopy of $M$ (Figs. \ref{fig19c} and \ref{fig19d}), $\Sigma_{P_{0}}$ is transformed into $St_{0}$ and $\Sigma\#Sv_{1}\#Se_{3}$ into $\Sigma\#Sv_{1}\#Se_{3}\#St_{0}$.
\vspace {10pt}
\begin{figure}\label{fig18}
\end{figure}
\item Consider now the other vertex $v_{2}$ of $e_{3}$, different from $v_{1}$. Take the triple point $P_{1}$ of $\mathbb{T}$ in the intersection $Sv_{2}\cap Se_{3}\cap St_{0}$ that lies in $\sigma$. We inflate $P_{1}$ to obtain the (piped) 2-sphere $\Sigma_{P_{1}}$ contained in $Bv_{2}$. We need now two consecutive finger moves +2 and an ambient isotopy of $M$ to transform $\Sigma_{P_{1}}$ into $Sv_{2}$ (Fig. \ref{fig20}).
\item In a similar way as in 4, if $e_{1}$ is the edge of $t_{0}$ incident with $v_{2}$ and different from $e_{3}$, we inflate $Se_{1}$ from the intersection $Sv_{2}\cap St_{0}$ (Fig. \ref{fig21}(a)). After this, we inflate $Sv_{3} $ from $Se_{1}\cap St_{0}$ and $Se_{2}$ from $Sv_{3}\cap St_{0}$ (Fig. \ref{fig21}(b)). After a final finger move +2 (Fig. \ref{fig21}(c)) we have just inflated all the 2-spheres $S\epsilon$ for $\epsilon<t_{0}$.
\begin{figure}\label{fig19a}
\label{fig19b}
\label{fig19c}
\label{fig19d}
\end{figure}
\item Now, consider the triangle $t_{1}$ of $\sigma$ different from $t_{0}$ which is incident with $e_{3}$. Let $P_{2}$ be the triple point of $Sv_{1}\cap Se_{3}\cap St_{1}$ that lies in $\sigma$. We inflate $P_{2}$ to obtain the 2-sphere $\Sigma_{P_{2}}$ contained in $Bt_{1}$. The triangle $t_{1}$ is not contained in the Dehn surface $\Sigma$, and because of this we need only a finger move +1 to make $\Sigma_{P_{2}}$ cross $Sv_{2}$, and then an ambient isotopy of $M$ for transforming $\Sigma_{P_{2}}$ into $St_{1}$.
\vspace {10pt}
\begin{figure}\label{fig20}
\end{figure}
\item Now, in a similar (but simpler because $t_{1}\not \subset\Sigma$) way as in 4 and 5, we inflate inductively all the remaining 2-spheres $S\epsilon$ for $\epsilon<t_{1}$.
\item If $t_{2}$ is another triangle of $\sigma$ different from $t_{1}$, in a similar way as in 6 and 7 we inflate the 2-sphere $St_{2}$ and the remaining 2-spheres $S\epsilon$ for $\epsilon<t_{2}$. When inflating $St_{2}$ there will be a slight difference with the case of $t_{1}$ because we have already inflated the 2-spheres corresponding to two edges of $t_{2}$.
\vspace {10pt}
\begin{figure}\label{fig21}
\end{figure}
\item For the final triangle $t_{3}$, all the 2-spheres $S\epsilon$ with $\epsilon<t_{3}$ have already been inflated, and only $St_{3}$ remains. If $\Sigma^{\prime}$ is the filling Dehn surface that we have at this moment, we can see that the closure of the part of $t_{3}$ lying outside the 3-balls $B\epsilon$ with $\epsilon<t_{3}$ is a 6-gon $w$ in a region of $M-\Sigma^{\prime}$. Inflating $w$ as in section \ref{SUBSECTION InflatingDisks} and applying an ambient isotopy of $M$, we also obtain $St_{3}$. \end{enumerate} \end{example}
In the previous example, we see that the way of constructing the filling Dehn surface $\Sigma^{\prime}$ from the filling Dehn surface $\Sigma$ in Proposition \ref{PROPshellableimpliesfillinghomotopictoinflated} is, in some sense, to make $\Sigma$ \textit{grow} inductively following a path given by the triangulation $T$. The growing path we follow for proving Proposition \ref{PROPshellableimpliesfillinghomotopictoinflated} in \cite{RHomotopies} is not exactly as in Example \ref{EXAMPLE Inflating a Tetrahedron}. There (in \cite{RHomotopies}) we first inflate from $\Sigma$ all the 2-spheres of $\mathbb{T}$ corresponding to the simplexes of $T$ contained in $\Sigma$, starting in a similar way as in Steps 1 to 5 of Example \ref{EXAMPLE Inflating a Tetrahedron}. Then, the shellability conditions imposed on $T$ give us the growing path of $\Sigma$ on the regions of $M-\Sigma$, using methods similar to those of Steps 6 to 9 of Example \ref{EXAMPLE Inflating a Tetrahedron}.
We will say that the $T$-inflating $\Sigma^{\prime}$ of $\Sigma$ ($f^{\prime}$ of $f$) as in the previous Proposition is a $T$\textit{-growth} of the filling Dehn sphere $\Sigma$ (of the filling immersion $f$). Note that being a $T$-growth is stronger than being a $T$-inflating.
The next (but not the last) application of the constructions of section \ref{SECTION Shellability} is the following.
Let $f,g:S\rightarrow M$ be transverse immersions that differ by the pushing disk $(D,B)$, and assume that $f$ is a filling immersion. In this situation, the immersion $g$ will not necessarily be a filling immersion. Consider triangulations $K,T$ of $S$ and $M$ respectively such that $f$ and $g$ are simplicial with respect to them. Take a $T$-inflating $f^{\prime}$ of $f$ such that $f^{\prime}$ agrees with $f$ over $D$ (because $f^{\prime}$ agrees with $f$ on all of $S$ except on some small disks of $S$, we require that these small disks do not intersect $D$), and consider the immersion $g^{\prime}$ that is obtained from $f^{\prime}$ after the pushing disk $(D,B)$. We can assume that $g^{\prime}$ agrees with $g$ except in the disks of $S$ where $f^{\prime}$ ``disagrees'' with $f$. The Dehn surface $g^{\prime}(S)$ is obtained from $g(S)\cup\mathbb{T}$ by spiral pipings, and because $g$ is also simplicial (with respect to $K,T$), $g^{\prime}$ is a $T$-inflating of $g$.
\begin{figure}\label{fig22a}
\label{fig22b}
\label{fig22c}
\end{figure}
Because both $f,g$ are simplicial (with respect to $K,T$), the triangulation $T$ induces a triangulation of the pushing ball $B$.
\begin{lemma} [Key Lemma 2]\label{KeyLemma2}If the (induced) triangulation of $B$ collapses simplicially into $g(D)$, then $f^{\prime}$ is filling homotopic to $g^{\prime}$. \end{lemma}
\begin{proof} [Sketch of the Proof]Because $B$ is triangulated by $T$, it can be easily shown that $f(S)\cup g(S)\cup\mathbb{T}$ induces a cell decomposition of $B$. If $B$ collapses simplicially into $g(D)$, then we can define a special shelling of this cell decomposition of $B$ induced by $f(S)\cup g(S)\cup\mathbb{T}$. This special shelling has the property that it allows us to apply Lemma \ref{LemmaPassing3-cells} repeatedly (Figure \ref{fig22b}) to the filling Dehn surface $f(S)\cup\mathbb{T}$ until we get $g(S)\cup\mathbb{T}$. Replacing finger moves 2 with piping passing moves where required, this deformation of $f(S)\cup\mathbb{T}$ into $g(S)\cup\mathbb{T}$ also defines a deformation of $f^{\prime}$ into $g^{\prime}$ by filling-preserving moves. \end{proof}
\section{Filling pairs.\label{SECTION FillingPairs}}
Let $\Sigma_{1}$ and $\Sigma_{2}$ be two filling Dehn surfaces of $M$. Assume for simplicity that both are regular.
If we are not given more information about $\Sigma_{1}$ and $\Sigma_{2}$, we do not know how they are related to each other. The only thing we can say, if $M$ is not $S^{3}$, is that they must have nonempty intersection.
\begin{definition} \label{DEF Fillingpair}We say that $\Sigma_{1}$ and $\Sigma_{2}$ form a filling pair in $M$ if their union $\Sigma_{1}\cup\Sigma_{2}$ is also a regular filling Dehn surface of $M$. \end{definition}
In particular, if $\Sigma_{1}$ and $\Sigma_{2}$ form a filling pair in $M$, they intersect transversely.
If $\Sigma_{1}$ and $\Sigma_{2}$ are a filling pair in $M$, then $\Sigma_{2}$ induces a cell decomposition on the closure of each region of $M-\Sigma_{1}$, and vice versa. Because both $\Sigma_{1}$ and $\Sigma_{2}$ are regular, all these induced cell decompositions are also regular. If $R_{1}$ is a region of $M-\Sigma_{1}$, then we say that $\Sigma_{2}$ \textit{shells} $R_{1}$ if $\Sigma_{2}$ induces a shellable cell decomposition of the 3-ball $cl(R_{1})$. We say that $\Sigma_{2}$ \textit{shells} $\Sigma_{1}$ if $\Sigma_{2}$ shells each region of $M-\Sigma_{1}$.
\begin{definition} \label{DEF MutuallyShellable}Let $\Sigma_{1}$ and $\Sigma_{2}$ form a filling pair in $M$. We say that $\Sigma_{1}$ and $\Sigma_{2}$ are mutually shellable if $\Sigma_{1}$ shells $\Sigma_{2}$ and $\Sigma_{2}$ shells $\Sigma_{1}$. \end{definition}
The following result is proved in detail in \cite{RHomotopies}.
\begin{proposition} \label{PROPfillinghomotopictoMutuallyShellable}Let $\Sigma_{1},\Sigma_{2}$ be regular filling Dehn surfaces of $M$ that intersect transversely. If $f_{1}:S_{1}\rightarrow M$ parametrizes $\Sigma_{1}$, then $f_{1}$ is filling homotopic to an immersion $f_{1}^{\prime}:S_{1}\rightarrow M$ such that $\Sigma_{1}^{\prime}:=f_{1}^{\prime}(S_{1})$ and $\Sigma_{2}$ form a mutually shellable filling pair in $M$. \end{proposition}
\begin{proof} [Sketch of the Proof]Let $f_{2}:S_{2}\rightarrow M$ be a parametrization of $\Sigma_{2}$, and let $T$ be a good triangulation of $M$ with respect to $f_{1}$ and $f_{2}$ (Definition \ref{DEF GoodTriangulations}). Then, $T$ shells every region of $M-\Sigma_{1}$ and every region of $M-\Sigma_{2}$. The union $\Sigma_{1}\cup\Sigma_{2}\cup\mathbb{T}$ is a regular filling Dehn surface of $M$ by Proposition \ref{PROPinflatetriangulationsFILLS}. Take a $T$-growth $f_{1}^{\prime}$ of $f_{1}$, and put $\Sigma_{1}^{\prime}=f_{1}^{\prime}(S_{1})$. We make the spiral pipings that transform $\Sigma_{1}\cup\mathbb{T}$ into $\Sigma_{1}^{\prime}$ small enough that they do not intersect $\Sigma_{2}$. Because regularity is preserved by spiral pipings, it is not difficult to see that $\Sigma_{1}^{\prime}\cup\Sigma_{2}$ is a regular filling Dehn surface of $M$. It is also easy to see that $\Sigma_{2}$ induces a shellable cell decomposition on every region of $M-\Sigma_{1}^{\prime}$, using that $\Sigma_{2}$ is a subcomplex of $T$ and the construction of $\mathbb{T}$. The non-trivial part is to check that $\Sigma_{1}^{\prime}$ induces a shellable cell decomposition on every region of $M-\Sigma_{2}$. This is done in detail in \cite{RHomotopies}, and it is parallel to the proof of Key Lemma 2 above. If $R_{2}$ is a region of $M-\Sigma_{2}$, the first thing to check is that $\mathbb{T}$ induces a shellable cell decomposition on $cl(R_{2})$. This is done following the proof of Lemma \ref{LEMABing} in \cite{Bing}, using that the restriction to $cl(R_{2})$ of the triangulation $T$ is simplicially collapsible. After this, it is seen that the presence of $\Sigma_{1}$ does not alter this shellability property. Finally, the presence of the spiral pipings might affect the shelling in some cases. In \cite{RHomotopies} it is explained how this can occur, and how we can locally modify $f_{1}^{\prime}$ by filling-preserving moves until $f_{1}^{\prime}$ satisfies the statement of Proposition \ref{PROPfillinghomotopictoMutuallyShellable}. \end{proof}
\section{Simultaneous growings.\label{SECTION Growing}}
The last application of shellability is the following.
Assume that $\Sigma_{1},\Sigma_{2}$ are two regular filling Dehn surfaces of $M$ and that there are two points $P,Q$ where $\Sigma_{1}$ and $\Sigma_{2}$ intersect as in Figure \ref{fig23a}. We denote by $\Sigma_{1}\#\Sigma_{2}$ the Dehn surface of $M$ that arises from piping $\Sigma_{1}$ with $\Sigma_{2}$ near $P,Q$ as in Figure \ref{fig23b}. We also assume that the points $P,Q$ have the property that $\Sigma_{1}\#\Sigma_{2}$ is another filling Dehn surface of $M$. Let $f:S_{1}\rightarrow M,g:S_{2}\rightarrow M$ be parametrizations of $\Sigma_{1},\Sigma_{2}$ respectively. Consider the two small disks $\delta_{1},\delta_{2}$ of $S_{1}$ and $S_{2}$ respectively whose respective images by $f$ and $g$ disappear after the piping. In this situation we can construct a parametrization $f\#g:S_{1}\#S_{2}\rightarrow M$ of $\Sigma_{1}\#\Sigma_{2}$ ``coming'' from $f,g$, where the surface $S_{1}\#S_{2}$ is the result of identifying $S_{1}-\delta_{1}$ and $S_{2}-\delta_{2}$ along the boundaries of $\delta_{1}$ and $\delta_{2}$. We can also assume that the immersion $f\#g$ agrees with $f$ over $S_{1}-\delta_{1}$ and that $f\#g$ agrees with $g$ over $S_{2}-\delta_{2}$.
\begin{figure}\label{fig23a}
\label{fig23b}
\end{figure}
Let $K_{1},K_{2},T$ be triangulations of $S_{1},S_{2},M$ that make $f,g$ and $f\#g$ simplicial, and assume that $T$ shells every region of $M-\Sigma_{1}$. Consider a $T$-growth $f^{\prime}$ of $f$ as in Proposition \ref{PROPshellableimpliesfillinghomotopictoinflated}, such that $f^{\prime}$ agrees with $f$ in all of $S_{1}$ except in two small disks. We can take these two disks of $S_{1}$ where $f^{\prime}$ ``disagrees'' with $f$ such that their respective images by $f$ are far away from the region shown in Figure 23 (that is, they do not affect the piping between $\Sigma_{1}$ and $\Sigma_{2}$). In this situation, we can also consider the ``piped immersion'' $f^{\prime}\#g:S_{1}\#S_{2}\rightarrow M$ that agrees with $f^{\prime}$ in $S_{1}-\delta_{1}$ and with $g$ in $S_{2}-\delta_{2}$, as the result of pasting $f^{\prime}$ and $g$ by means of the piping in exactly the same way as $f$ was pasted with $g$ in $f\#g$.
\begin{remark} \label{RemarkexistPQ}If $\Sigma_{1},\Sigma_{2}$ are the surfaces $\Sigma_{1}^{\prime},\Sigma_{2}$ that result from the proof of the previous Proposition, then there always exists such a pair of points $P,Q$ as considered here. \end{remark}
We know that $f^{\prime}$ is filling homotopic to $f$ because it is a $T$-growth of $f$, but we also have:
\begin{lemma} [Key Lemma 3]\label{Key Lemma3}If $\Sigma_{2}$ shells $\Sigma_{1}$, then we can choose $f^{\prime}$ such that $f^{\prime}\#g$ is a $T$-growth of $f\#g$. \end{lemma}
In particular, if $\Sigma_{2}$ shells $\Sigma_{1}$, then we can choose $f^{\prime}$ such that $f^{\prime}\#g$ is filling homotopic to $f\#g$.
This Lemma is also proved in detail in \cite{RHomotopies}. The required property that $\Sigma_{2}$ shells $\Sigma_{1}$ implies that the growing of $f$ into $f^{\prime}$ in the proof of Proposition \ref{PROPshellableimpliesfillinghomotopictoinflated} can be adapted to $\Sigma_{2}$ in such a way that \textit{the growing from }$f$\textit{\ into }$f^{\prime}$\textit{\ defines simultaneously a growing from }$f\#g$ \textit{\ into} $f^{\prime}\#g$ when we introduce $\Sigma_{2}$.
\section{Proof of Theorem \ref{MAINtheorem}.\label{SECTION ProofofMainTheorem}}
With these tools we can now sketch the proof of Theorem \ref{MAINtheorem}.
\begin{proof} [Sketch of the Proof of Theorem \ref{MAINtheorem}]We are given a pair $\Sigma_{1},\Sigma_{2}$ of nulhomotopic filling Dehn spheres of $M$ and two parametrizations $f,g$ of them, respectively. We introduce the following notation: we take two different 2-spheres $S_{1},S_{2}$ and consider $S_{i}$ to be the domain of $\Sigma_{i}$ for $i=1,2$. In particular, $\Sigma_{1}=f(S_{1})$ and $\Sigma_{2}=g(S_{2})$.
Modifying $f$ if necessary by an ambient isotopy of $M$ we can assume that $\Sigma_{1}$ and $\Sigma_{2}$ have nonempty transverse intersection.
By Proposition \ref{PROPfillinghomotopictoMutuallyShellable} and Remark \ref{RemarkexistPQ}, we can assume that $\Sigma_{1},\Sigma_{2}$ form a mutually shellable filling pair of spheres of $M$ and that there are two points $P,Q$ of $\Sigma_{1}\cup\Sigma_{2}$ where $\Sigma_{1}$ and $\Sigma_{2}$ intersect as in Figure \ref{fig23a} in section \ref{SECTION Growing}.
\begin{figure}\label{fig24a}
\label{fig24b}
\end{figure}
Consider the filling Dehn surface $\Sigma_{1}\#\Sigma_{2}$ and the parametrization $f\#g$ as in section \ref{SECTION Growing}. We also consider the disks $\delta_{1}\subset S_{1}$ and $\delta_{2}\subset S_{2}$ as in section \ref{SECTION Growing}. We denote $f\#g$ by $h$ for simplicity.

\textsl{$h$ is filling homotopic to $f$.}

Consider a small standardly embedded 2-sphere $\Sigma_{2}^{\ast}$ and a parametrization $g_{\ast}:S_{2}\rightarrow M$ of $\Sigma_{2}^{\ast}$ as in Figure \ref{fig24b}. This sphere shares with $\Sigma_{2}$ a 2-disk $\bar{D}$ containing $g(\delta_{2})$ in its interior, and the immersions $g$ and $g_{\ast}$ agree over $\tilde{D}:=g_{\ast}^{-1}(\bar{D})$.
By the Key Lemma 1, we can deform $g$ into $g_{\ast}$ by a finite sequence of transverse pushing disks leaving $\tilde{D}$ fixed. Let $(D_{1},B_{1} ),...,(D_{k},B_{k})$ be this sequence of pushing disks, and let $g=g_{0} ,g_{1},...,g_{k}=g_{\ast}:S_{2}\rightarrow M$ be the sequence of transverse immersions such that $g_{i}$ is obtained from $g_{i-1}$ by the pushing disk $(D_{i},B_{i})$.
\begin{figure}\label{fig25a}
\label{fig25b}
\end{figure}
Slightly modifying $\Sigma_{1}$ and the piping between $\Sigma_{1}$ and $\Sigma_{2}$ by an ambient isotopy of $M$ if necessary, we can assume that all these pushing disks are also transverse to $\Sigma_{1}$ and $\Sigma_{1}\#\Sigma_{2}$. Because the pushing disks $(D_{i},B_{i})$ leave $\tilde{D}$ fixed, we can think of these pushing disks as acting over the immersion $h=f\#g$ instead of over $g$, and we can consider the sequence of transverse immersions $h=h_{0},h_{1},...,h_{k}:S_{1}\#S_{2}\rightarrow M$ such that $h_{i}$ is obtained from $h_{i-1}$ by the pushing disk $(D_{i},B_{i})$. Note that $h_{k}(S_{1}\#S_{2})=\Sigma_{1}\#\Sigma_{2}^{\ast}$, where $\Sigma_{1}\#\Sigma_{2}^{\ast}$ is obtained by piping $\Sigma_{1}$ with $\Sigma_{2}^{\ast}$ exactly in the same way as $\Sigma_{1}$ is piped with $\Sigma_{2}$, and then a final transverse pushing disk $(D_{k+1},B_{k+1})$ transforms $h_{k}$ into $f$. Thus, we can assume that \textit{there is a finite sequence of transverse pushing disks leaving }$S_{1}-\delta_{1}$\textit{\ fixed\ that transform }$h=f\#g$\textit{\ into }$f$.
\begin{figure}\label{fig26a}
\label{fig26b}
\end{figure}
Take a good triangulation $T$ of $M$ with respect to $f,g,g_{1},...,g_{k} ,h,h_{1},...,h_{k}$ (see Theorem \ref{THMsmoothtriangulations exist} and Definition \ref{DEF GoodTriangulations}).
The triangulation $T$ shells $f$ because $f$ is a filling immersion, so consider a $T$-growth $f^{\prime}$ of $f$ such that the pipings of $\Sigma_{1}$ with the components of $T$ do not affect $\Sigma_{2}$ nor the piping between $\Sigma_{1}$ and $\Sigma_{2}$. Because $\Sigma_{1}$ and $\Sigma_{2}$ form a filling pair, in particular we have that $\Sigma_{2}$ shells $\Sigma_{1}$. By Key Lemma 3, we can take $f^{\prime}$ such that it also defines a $T$-growth $h^{\prime}:=f^{\prime}\#g$ of $h=f\#g$.
Consider now the sequence of immersions $h^{\prime}=h_{0}^{\prime} ,h_{1}^{\prime},...,h_{k}^{\prime},h_{k+1}^{\prime}=f^{\prime}$ such that $h_{i}^{\prime}$ is obtained from $h_{i-1}^{\prime}$ by the pushing disk $(D_{i},B_{i})$, for $i=1,...,k+1$.
Note that by construction, each $h_{i}^{\prime}$ is a $T$-inflating of $h_{i}$. Because of the choice of $T$, for each $i=1,...,k+1$ the triangulation $T$ restricted to the pushing ball $B_{i}$ collapses simplicially into $g(D_{i})=h^{\prime}(D_{i})$. By Key Lemma 2 this implies that $h_{i}^{\prime}$ is filling homotopic to $h_{i-1}^{\prime}$ for each $i=1,...,k+1$.
\begin{figure}\label{fig27a}
\label{fig27b}
\end{figure}
Summing up, we have that $h$ is filling homotopic to $h^{\prime}$ because $h^{\prime}$ is a $T$-growth of $h$, $h^{\prime}$ is filling homotopic to $f^{\prime}=h_{k+1}^{\prime}$ by repeated applications of Key Lemma 2 to the pushing disks $(D_{i},B_{i})$ for $i=1,...,k+1$, and $f^{\prime}$ is filling homotopic to $f$ because $f^{\prime}$ is a $T$-growth of $f$. Therefore, $h$ is filling homotopic to $f$.
By the same arguments, we have that $h$ is filling homotopic to $g$ and thus $f$ and $g$ are filling homotopic.
\end{proof}
\section{Diagrams.\label{SECTION Diagrams}}
Let $\Sigma\subset M$ be a Dehn surface in $M$, and let $f:S\rightarrow M$ be a parametrization of $\Sigma$. As we have pointed out in section \ref{SECTION Introduction}, the singular set $S(f)$, together with the information about how its points are identified by $f$, is what we call the Johansson diagram of $\Sigma$. We will give a more detailed definition of the Johansson diagram. This new definition is equivalent to the definition given in \cite{Papa}. We assume now, for simplicity, that both $S$ and $M$ are orientable.
Let $\bar{\gamma}:S^{1}\rightarrow M$ be a parametrization of a double curve of $\Sigma$. Because both $S$ and $M$ are orientable, the inverse image by $f$ of $\bar{\gamma}(S^{1})$ is the union of two different closed curves in $S(f)$. There are exactly two different immersions $\gamma_{1},\gamma _{2}:S^{1}\rightarrow S$ such that $f\circ\gamma_{1}=f\circ\gamma_{2} =\bar{\gamma}$. In this situation, we say that $\gamma_{1}$ and $\gamma_{2}$ are \textit{lifted curves} of $\bar{\gamma}$ under $f$ and that they are \textit{sisters} under $f$.
A \textit{complete parametrization} of the singularity set $\bar{S}(\Sigma)$ of $\Sigma$ is a set $\mathcal{\bar{D}}=\left\{ \bar{\alpha}_{1},\bar{\alpha }_{2},...,\bar{\alpha}_{m}\right\} $ of immersions from $S^{1}$ into $M$ such that: (i) each $\bar{\alpha}_{i}$ parametrizes a double curve of $\Sigma$; (ii) $\bar{\alpha}_{i}(S^{1})\neq\bar{\alpha}_{j}(S^{1})$ if $i\neq j$; and (iii) $\bar{S}(\Sigma)=\underset{i=1}{\overset{m}{\cup}}\bar{\alpha}_{i} (S^{1})$. If $\mathcal{\bar{D}}$ is a complete parametrization of $\bar {S}(\Sigma)$ and we denote by $\mathcal{D}$ the set of all lifted curves of the curves of $\mathcal{\bar{D}}$, the map $\tau:\mathcal{D}\rightarrow \mathcal{D}$ that assigns to each curve of $\mathcal{D}$ its sister curve under $f$ defines a free involution of $\mathcal{D}$. The pair $(\mathcal{D} ,\tau)$ contains all the information about the singular set $S(f)$ and about how the points of $S(f)$ are identified by the map $f$: two different points $A,B\in S$ verify $f(A)=f(B)$ if and only if there is a parametrized curve $\alpha\in\mathcal{D}$ and a $z\in S^{1}$ with $A=\alpha(z)$ and $B=\tau \alpha(z)$.
The pair $(\mathcal{D},\tau)$ of the previous paragraph is the model we will presently use for defining an (\textit{abstract}) \textit{diagram}. We have seen that every Dehn surface has an associated Johansson diagram. Thus, we can define an \textit{abstract diagram} in a surface $S$ as a collection $\mathcal{D}$ of closed curves in $S$ together with a free involution $\tau:\mathcal{D}\rightarrow\mathcal{D}$, such that the curves of $\mathcal{D}$ can be coherently identified by $\tau$. The natural question now is whether this diagram $(\mathcal{D},\tau)$ is the Johansson diagram coming from a transverse immersion $f:S\rightarrow M$ of $S$ into some orientable 3-manifold $M$. If this occurs, we say that the (abstract) diagram $(\mathcal{D},\tau)$ in the surface $S$ is \textit{realizable }(compare \cite{Papa}) and that the immersion $f$ \textit{realizes} the diagram $(\mathcal{D},\tau)$.
\begin{figure}\label{fig28a}
\label{fig28b}
\end{figure}
The first condition that the curves of $\mathcal{D}$ must satisfy is that they intersect transversely as in Figure \ref{fig28b} at some points of $S$ which are the \textit{double points} of the diagram $(\mathcal{D},\tau)$. We define that two different points $A,B\in S$ are \textit{related} by the diagram $(\mathcal{D},\tau)$ if there is a curve $\alpha\in\mathcal{D}$ and a $z\in S^{1}$ with $A=\alpha(z)$ and $B=\tau\alpha(z)$. With this notation, each double point $A$ of the diagram will be related with two points $B,C$ of the surface $S$. If $(\mathcal{D},\tau)$ is realizable, these two points $B,C$ must be different and they must also be related by the diagram (see Figure \ref{fig28b}). Thus, the double points of the diagram must be arranged in \textit{triplets} of pairwise related points (the diagram is \textit{riveted} in the notation of \cite{Carter}). If $f:S\rightarrow M$ realizes the diagram, each of these triplets composes the inverse image by $f$ of a triple point of $f$. In Figure 29 we have given names to the double points of the diagram in such a way that related points are denoted by the same name. We consider two diagrams on $S$ as equivalent if they are related by a homeomorphism of $S$ or by a reparametrization of the curves of the diagram.
The following is a survey of the main result of \cite{Johansson2} about the realizability of diagrams. We will denote the diagram $(\mathcal{D},\tau)$ simply by $\mathcal{D}$.
\begin{figure}\label{fig29a}
\label{fig29b}
\end{figure}
Assume that we are given an abstract diagram $\mathcal{D}$ on the surface $S$ (Figure 29). For each $\alpha\in\mathcal{D}$, we consider two \textit{neighbouring curves} $\lambda,\lambda^{\prime}$ that run parallel to $\alpha$ and such that $\lambda$ and $\lambda^{\prime}$ lie on different sides of $\alpha$ (Figure 29). We say that the two neighbouring curves of the same curve of the diagram are \textit{opposite} neighbouring curves of the diagram. The neighbouring curves of the diagram can be taken such that they only intersect near the double points of the diagram and exactly as depicted in Figures 29 and \ref{fig30}. We call the intersection points of the neighbouring curves with the curves of the diagram \textit{neighbouring points} of the diagram. With these assumptions, there appear four neighbouring points around each double point of the diagram (Figure \ref{fig30}). Consider two related double points $A,B$ of the diagram. Because they are related, there is a curve $\alpha\in\mathcal{D}$ and a $z\in S^{1}$ with $A=\alpha(z)$ and $B=\tau\alpha(z)$. If we orient the curves $\alpha,\tau\alpha$ using the standard orientation of $S^{1}$, then near $A$ the curve $\alpha$ passes through the points $A_{1},A,A_{2}$ in this order, where $A_{1},A_{2}$ are neighbouring points of the diagram. In the same way, near $B$ the curve $\tau\alpha$ passes through the points $B_{1},B,B_{2}$ in this order, where $B_{1},B_{2}$ are neighbouring points of the diagram. We assume that the neighbouring curves are so chosen that in this situation $A_{i}$ is related by the diagram with $B_{i}$ for $i=1,2$ (see Figure \ref{fig30}).
Once we have drawn the neighbouring curves of the diagram as in the previous paragraph, we give some definitions. If two neighbouring curves $\lambda,\mu$ pass through related neighbouring points, as the curves $\lambda$ and $\mu$ of Figure \ref{fig30}, we say that $\lambda,\mu$ are \textit{elementary related}. If we orient all the curves of the diagram using the standard orientation of $S^{1}$ and if we consider the surface $S$ oriented, for a curve $\alpha\in \mathcal{D}$ we say that the neighbouring curve of $\alpha$ lying on the left-hand side of $\alpha$ is \textit{elementary G-related} with the neighbouring curve of $\tau\alpha$ lying on the right-hand side of $\tau \alpha$, and equivalently, that the neighbouring curve of $\alpha$ lying on the right-hand side of $\alpha$ is elementary G-related with the neighbouring curve of $\tau\alpha$ lying on the left-hand side of $\tau\alpha$.
\begin{definition} \label{DEF-G-Classes}\cite{Johansson2}Two neighbouring curves $\lambda,\mu$ of the diagram $\mathcal{D}$ are in the same G-class if there exists a finite sequence $\lambda=\lambda_{0},\lambda_{1},...,\lambda_{k}=\mu$ of neighbouring curves of the diagram such that $\lambda_{i-1}$ is elementary related or elementary G-related with $\lambda_{i}$ for $i=1,...,k $. \end{definition}
\begin{figure}\label{fig30}
\end{figure}
In the diagrams of Figures \ref{fig29a} and \ref{fig29b} we have drawn the neighbouring curves belonging to the same G-class in the same way. From the construction of G-classes, the following can be checked without difficulty.
\begin{lemma} \label{LeMMA G-Classes-singularity set}If $\mathcal{D}$ is a diagram in $S$ and $f:S\rightarrow M$ realizes $\mathcal{D}$, then the number of G-classes of $\mathcal{D}$ is twice the number of connected components of the singularity set $\bar{S}(f)$. \end{lemma}
The following Theorem appears in \cite{Johansson2}.
\begin{theorem} \label{THM Johansson G-Clases}\cite{Johansson2}A diagram $\mathcal{D}$ in the orientable surface $S$ is realizable by a transverse immersion $f:S\rightarrow M$ of $S$ into an orientable 3-manifold $M$ if and only if there are no opposite neighbouring curves of the diagram in the same G-class. \end{theorem}
This Theorem gives an easy method for checking realizability on a wide class of diagrams. An analogous result is given in \cite{Carter} for diagrams with no closed components in surfaces with boundary.
Though Theorem \ref{THM Johansson G-Clases} was stated in \cite{Johansson2} for diagrams in the 2-disk without singular boundary points, as is pointed out in \cite{Papa}, the proof can be extended directly to the case stated here. More precisely, Theorem \ref{THM Johansson G-Clases} is also true if we remove from $S$ a finite number of open disks not touching the diagram.
The key for proving Theorem \ref{THM Johansson G-Clases} is \textit{2-sidedness}. Every immersion $f:S\rightarrow M$ with both $S$ and $M$ orientable is \textit{2-sided}, and this 2-sidedness is reflected in the neighbouring curves of the diagram. An immersion $f:S\rightarrow M$ is 2-sided if there exists an immersion $F:S\times\left[ -1,1\right] \rightarrow M$ with $F(X,0)=f(X)$ for every $X\in S$. Put $\Sigma=f(S)$. If $f$ is 2-sided and transverse, we can choose $F$ as close to $f$ as we want, such that in a neighbourhood of a double curve of $\Sigma$ the image of $F$ looks like Figure \ref{fig31}. In that Figure, we can see that the \textit{lower sheet} $\Sigma^{-}=F(S\times\left\{ -1\right\} )$ and the \textit{upper sheet} $\Sigma^{+}=F(S\times\left\{ 1\right\} )$ intersect $\Sigma$ in some curves that behave exactly as the images by $f$ of the neighbouring curves of the Johansson diagram $\mathcal{D}$ of $f$. We can assume without loss of generality that in this case, the neighbouring curves of the diagram compose exactly the inverse image $f^{-1}(\Sigma^{-}\cup\Sigma^{+})$. The following Proposition is proved in \cite{Johansson2}.
\vspace {10pt} \begin{figure}\label{fig31}
\end{figure}
\begin{proposition} \label{PROPsameG-class-sameSheet}If two neighbouring curves of $\mathcal{D}$ are in the same G-class, then their images by $f$ must be contained in the same sheet $\Sigma^{-}$ or $\Sigma^{+}$. \end{proposition}
This implies that if $\mathcal{D}$ is realizable by a 2-sided immersion, there cannot be two opposite neighbouring curves in the same G-class.
On the other hand, if no two opposite neighbouring curves of the abstract diagram $\mathcal{D}$ on $S$ are in the same G-class, we can make an identification $\sim$ on the thickened surface $S\times\left[ -1,1\right] $ compatible with the diagram such that neighbourhoods of sister curves are identified as in Figure \ref{fig31}. The quotient $\hat{M}(\mathcal{D})=S\times\left[ -1,1\right] /\sim$ is a 3-manifold with boundary and it satisfies: (i) the canonical projection $\pi:S\times\left[ -1,1\right] \rightarrow\hat{M}(\mathcal{D})$ is an immersion; (ii) if we take the inclusion $j:S\rightarrow S\times\left[ -1,1\right] $ given by $j(X)=(X,0)$, then $\pi\circ j$ is a transverse immersion realizing $\mathcal{D}$; and (iii) $\hat{M}(\mathcal{D})$ is orientable. See \cite{Johansson1} and \cite{Johansson2} for more details.
Going back to the immersion $f$, if it is a filling immersion, the singularity set $\bar{S}(f)$ must be connected and, by Lemma \ref{LeMMA G-Classes-singularity set}, this implies that the Johansson diagram $\mathcal{D}$ of $f$ has only two G-classes of neighbouring curves. In this case, the manifold with boundary $\hat{M}(\mathcal{D})$ constructed from $\mathcal{D}$ as in the previous paragraph \textit{is uniquely determined by }$\mathcal{D}$ and it is homeomorphic to a regular neighbourhood of the filling Dehn surface $\Sigma\subset M$. Because $f$ is a filling immersion, the boundary of $\hat{M}(\mathcal{D})$ must be a union of 2-spheres. Pasting a 3-ball to $\hat{M}(\mathcal{D})$ along each boundary component we obtain a closed 3-manifold $M(\mathcal{D})$ homeomorphic to $M$. This is the way to reconstruct a 3-manifold $M$ from a Johansson representation of $M$.
\begin{figure}\label{fig32a}
\label{fig32b}
\end{figure}
Now assume that we are given the realizable diagram $\mathcal{D}$ in the orientable surface $S$ and that we want to know whether it is the Johansson diagram of a filling Dehn sphere of some 3-manifold. First of all, the diagram $\mathcal{D}$ must \textit{fill} the surface $S$ (that is, $S-\mathcal{D}$ must be a disjoint union of open 2-disks, and $\mathcal{D}-\left\{ \text{double points of }\mathcal{D}\right\} $ must be a disjoint union of open intervals), and in particular the curves of $\mathcal{D}$ must compose a connected graph on $S$. By Lemma \ref{LeMMA G-Classes-singularity set} this implies that $\mathcal{D}$ has exactly two opposite G-classes of neighbouring curves (because $\mathcal{D}$ is realizable). As in the previous paragraph, the construction of $\hat{M}(\mathcal{D})$ is uniquely determined by the diagram and we have that $\mathcal{D}$ is the Johansson diagram of a filling Dehn sphere of some 3-manifold $M$ if and only if $\partial\hat{M}(\mathcal{D})$ is a collection of 2-spheres. If this occurs, we say that $\mathcal{D}$ is a \textit{filling diagram} and pasting a 3-ball to $\hat{M}(\mathcal{D})$ along each boundary component of $\hat{M}(\mathcal{D})$ we obtain the required closed 3-manifold $M(\mathcal{D})$ that is also uniquely determined by the diagram $\mathcal{D}$. The construction of $\hat{M}(\mathcal{D})$ can be carried out algorithmically. The diagrams of Figures \ref{fig29a}, \ref{fig29b} and \ref{fig32a} are all examples of (realizable) filling diagrams. The diagram of Figure \ref{fig29a} appears in the original paper \cite{Johansson1} and it is a Johansson representation of the 3-sphere. Its corresponding filling Dehn sphere is \textit{Johansson's sphere} (see Fig. 8 of \cite{A.Shima2}). The diagram of Figure \ref{fig29b} represents $S^{2}\times S^{1}$. The diagram of Figure \ref{fig32a} is a diagram of a filling Dehn torus $\Sigma_{0}$ with only one triple point in a Euclidean 3-manifold $M$. This Euclidean manifold coincides with the Seifert manifold $M(S_{333})=(Oo0\mid-1;(3,1),(3,1),(3,1))$ (see \cite[p. 155]{MONTLIbro}), and it is the result of identifying the faces of a solid cube in pairs as in Figure \ref{fig32b}. The filling Dehn torus $\Sigma_{0}$ is the image in $M$ of the boundary of the cube under this identification.
Figure 33 depicts how the Haken moves for immersions (except finger move $0$) are reflected in the Johansson diagrams. These compose the \textit{diagram moves}, and we denote them by the same names as the corresponding moves of immersions. If we perform a diagram move in a filling diagram, the move is \textit{filling-preserving} if the resulting diagram is again a filling diagram.
\begin{figure}\label{fig33a}
\label{fig33b}
\end{figure} \addtocounter {figure}{-1} \begin{figure}\label{fig33c}
\label{fig33d}
\label{fig33e}
\label{fig33f}
\end{figure}
Let $\mathcal{D}$ be a filling diagram on the surface $S$, and let $i:\hat{M}(\mathcal{D})\hookrightarrow M(\mathcal{D})$ be the inclusion map. Denote simply by $f$ the immersion $i\circ\pi\circ j:S\rightarrow M(\mathcal{D})$ that realizes $\mathcal{D}$, and put $\Sigma=f(S)$.
If we perform a filling-preserving diagram move in $\mathcal{D}$, this move will come from a filling-preserving move of $f$, and thus the new diagram $\mathcal{D}^{\prime}$ we obtain satisfies $M(\mathcal{D}^{\prime})=M(\mathcal{D})$. As with Haken moves, a finger move $\pm$1 or $\pm$2 in the filling diagram $\mathcal{D}$ is always filling-preserving, while a saddle move on $\mathcal{D}$ may or may not be filling-preserving.
The neighbouring curves of the diagram help us to perform the saddle move and the finger move 1.
A saddle move can be performed in the diagram $\mathcal{D}$ every time we have two arcs connecting related points of the diagram as in Figure \ref{fig33a}. The two neighbouring curves of $\mathcal{D}$ that intersect any of the two arcs must belong to the same G-class. If we have such a pair of arcs, because $\mathcal{D}$ is a filling diagram, (the image by $f$ of) these arcs must bound a 2-gon $w$ in $M(\mathcal{D})-\Sigma$ as in Figure \ref{fig33b}, and we can perform a saddle move on $f$ by pushing along $w$ either of the two sheets of $\Sigma$ that bound $w$. This saddle move on $f$ is reflected in the saddle move of the diagram $\mathcal{D}$ as depicted in Figure \ref{fig33a}.
If we perform a finger move +1 on the diagram $\mathcal{D}$ there appears a new pair $\nu,\tau\nu$ of sister curves of the diagram. The diagram $\mathcal{D}$ tells us how we must identify the new double points (labeled $1$ and $2$ in Figure \ref{fig33c}) that appear in the new diagram $\mathcal{D}^{\prime}$, but there is some ambiguity (which does not occur for finger moves 2) because there are two ways of identifying $\nu$ with $\tau\nu$ (for a given orientation of $\nu$ there are two possible orientations of $\tau\nu$). This ambiguity disappears when we draw the neighbouring curves of the diagram $\mathcal{D}^{\prime}$, using that related neighbouring points of the diagram must lie on neighbouring curves of the same G-class.
We say that a filling diagram $\mathcal{D}$ on a surface $S$ is \textit{nulhomotopic} if the immersion $f=i\circ\pi\circ j$ as above is nulhomotopic. The diagram of Figure \ref{fig29a} is nulhomotopic (every diagram representing $S^{3}$ must be nulhomotopic), while the diagram of Figure \ref{fig29b} is not nulhomotopic.
The following result is a Corollary of Theorem \ref{MAINtheorem}.
\begin{corollary} \label{CORtoMAINtheorem}Two nulhomotopic filling diagrams on $S^{2}$ represent the same 3-manifold if and only if they are related by a finite sequence of filling-preserving moves. \end{corollary}
\section{Duplication.\label{SECTION Duplication}}
It is possible to obtain algorithmically a nulhomotopic Johansson representation of $M$ from \textit{any} Johansson representation $\mathcal{D}$ of $M$, nulhomotopic or not. We will call this process \textit{duplication} of diagrams, and it can be done using Johansson's construction of $\hat{M}(\mathcal{D})$ as follows.
Let $f:S^{2}\rightarrow M$ be a filling immersion, and put $\Sigma=f(S^{2})$. Take a thickening $F:S^{2}\times\left[ -1,1\right] \rightarrow M$ of $f$ as in the previous section, such that near a double curve of $\Sigma$ the image of $F$ intersects itself as in Figure \ref{fig31}, and consider the upper sheet $\Sigma^{+}=F(S^{2}\times\left\{ 1\right\} )$ and the lower sheet $\Sigma^{-}=F(S^{2}\times\left\{ -1\right\} )$, which are two filling Dehn spheres of $M$ parallel to $\Sigma$ on both sides of $\Sigma$. The Johansson diagram $\mathcal{D}$ of $f$ has two G-classes of neighbouring curves. We can take the neighbouring curves of $\mathcal{D}$ such that their images by $f$ compose the intersection of $\Sigma$ with $\Sigma^{-}\cup\Sigma^{+}$. We call the G-class of neighbouring curves of $\mathcal{D}$ whose image by $f$ is contained in the upper (lower) sheet $\Sigma^{+}$ ($\Sigma^{-}$) the \textit{upper} (\textit{lower}) G-class.
\begin{figure}\label{fig34a}
\label{fig34b}
\end{figure}
The Dehn spheres $\Sigma,\Sigma^{+}$ form a filling pair of spheres in $M$. Near a triple point $P$ of $\Sigma$ there will be eight triple points of the union $\Sigma\cup\Sigma^{+}$ as in Figure \ref{fig34a}. In some situations the Dehn sphere $\Sigma\#\Sigma^{+}$ that we obtain by piping $\Sigma$ with $\Sigma^{+}$ near $P$ as in Figure \ref{fig34b} is a filling Dehn sphere of $M$. Assume that this is the case. This filling Dehn sphere $\Sigma\#\Sigma^{+}$ is the image by $F$ of the boundary of $S^{2}\times\left[ 0,1\right] $ with a small cylinder connecting $S^{2}\times\left\{ 0\right\} $ with $S^{2}\times\left\{ 1\right\} $ removed, and thus it is nulhomotopic.
\begin{figure}\label{fig35a}
\label{fig35b}
\end{figure}
The Johansson diagram of $\Sigma\#\Sigma^{+}$ can be obtained algorithmically from the Johansson diagram $\mathcal{D}$ of $\Sigma$. Let $f^{+} :S^{2}\rightarrow M$ be the parametrization of $\Sigma^{+}$ given by $f^{+}(X)=F(X,1)$.
First, remember that the upper G-class of neighbouring curves of $\mathcal{D} $ composes the inverse image by $f$ of $\Sigma\cap\Sigma^{+}$. A second observation is that the Johansson diagram of $f^{+}$ is a copy $\mathcal{D} ^{+}$ of $\mathcal{D}$. The third observation is the following
\begin{remark} \label{REMARKparallelDehnSpheres}The relative position of $\Sigma$ with respect to $\Sigma^{+}$ is exactly the same as the relative position of $\Sigma^{-}$ with respect to $\Sigma$. \end{remark}
This Remark implies that if we call the G-classes of $\mathcal{D}^{+}$ by the same names as their respective copies in $\mathcal{D}$, \textit{the lower G-class of }$\mathcal{D}^{+}$\textit{\ composes the inverse image by }$f^{+}$\textit{\ of }$\Sigma\cap\Sigma^{+}$.
The Johansson diagram $\mathcal{D}\#\mathcal{D}^{+}$ of $\Sigma\#\Sigma^{+} $ is what we call a \textit{duplicate} of $\mathcal{D}$, and it can be obtained from $\mathcal{D}$ and $\mathcal{D}^{+}$ as indicated in Figure 35. In Figure \ref{fig35a} we depict how the cube of Figure \ref{fig34a} is seen in the Johansson diagrams of $\Sigma$ (grey) and $\Sigma^{+}$ (white). In both diagrams we draw the lower G-class and the upper G-class similarly. As an example we apply this construction to the diagram of Figure \ref{fig29b} in Figure 36.
It is not always possible to find a triple point $P$ of $\Sigma$ such that, piping $\Sigma$ with $\Sigma^{+}$ near $P$ as in Figure \ref{fig34b}, the resulting Dehn sphere $\Sigma\#\Sigma^{+}$ fills $M$, but \textit{we can always deform }$\Sigma$\textit{\ by filling-preserving moves} to obtain another filling Dehn sphere $\Sigma^{\prime}$ with a triple point where the (filling-preserving) duplication is possible. This deformation can be done very easily: for example, if $P$ is one of the two triple points that appear after a finger move +1, then duplication is possible at $P$.
\begin{figure}\label{fig36a}
\label{fig36b}
\label{fig36c}
\end{figure}
Duplication allows us to give a more general version of Corollary \ref{CORtoMAINtheorem}:
\begin{theorem} \label{THEOREMDuplicates}Two filling diagrams on $S^{2}$ represent the same 3-manifold if and only if their duplicates are related by a finite sequence of filling-preserving moves. \end{theorem}
\section{Miscellany.\label{SECTION Miscelany}}
\subsection{Invariants of 3-manifolds.\label{SUBSECTION Invariants}}
The first immediate application of Corollary \ref{CORtoMAINtheorem} is the search for invariants of 3-manifolds. If we could assign to each nulhomotopic diagram on $S^{2}$ an object which remains invariant under filling-preserving diagram moves, then this object defines a 3-manifold invariant. If $\varphi$ denotes such an invariant, for computing $\varphi$ for a given manifold $M$ we would need a nulhomotopic Johansson representation of $M$. If we have an arbitrary Johansson representation $\mathcal{D}$ of $M$ and we do not know whether it is nulhomotopic, duplicating $\mathcal{D}$ we will be able to compute $\varphi$ from $\mathcal{D}$. Nevertheless, duplication produces very complicated diagrams (the number of triple points of a duplication of $\mathcal{D}$ is eight times the number of triple points of $\mathcal{D}$ minus $2$), and for this reason it would be interesting to know how to decide if a given filling diagram in $S^{2}$ is nulhomotopic or not. This is an open problem. An algorithm to obtain a nulhomotopic Johansson representation of $M$ from any Heegaard diagram of $M$ is indicated in \cite{Montesinos}. A simpler algorithm is studied in detail in \cite{Anewproof}.
\subsection{The diagram group.\label{SUBSECTION The diagram Group}}
Let $\mathcal{D}$ be a realizable diagram on $S^{2}$, and let $f:S^{2} \rightarrow M$ be a transverse immersion parametrizing $\mathcal{D}$. There is an easy way for obtaining a presentation of the fundamental group of $\Sigma:=f(S^{2})$ in terms of the diagram $\mathcal{D}$. If $\mathcal{D} =\left\{ \alpha_{1},...,\alpha_{n}\right\} $, then we define the \textit{diagram group} \[
\pi\left( \mathcal{D}\right) =\left| \alpha_{1},...,\alpha_{n}:\alpha _{1}\cdot\tau\alpha_{1}=...=\alpha_{n}\cdot\tau\alpha_{n}=r_{1}=...=r_{k}
=1\right| \text{ ,} \] where the relators $r_{1},...,r_{k}$ are given by the triplets of double points of $\mathcal{D}$. If $P_{1},...,P_{k}$ are the triple points of $\Sigma$, and $P_{j}$ is the triple point $P$ of Figure \ref{fig28a}, which is reflected in the triplet of $\mathcal{D}$ of Figure \ref{fig28b}, then the associated relator is \[ r_{j}=\alpha\beta\gamma\text{ .} \]
It can be proved that $\pi_{1}\left( \Sigma\right) $ is isomorphic to the diagram group \cite{Tesis} and thus, if $\Sigma$ fills $M$ the diagram group gives a presentation of the fundamental group of $M$. This presentation is due to W. Haken (see Problem 3.98 of \cite{KirbyProblems}), and we have not seen it in printed form.
With this presentation, the following (unpublished) theorem of W. Haken can be proved (see also \cite{Haken1}). Let $\mathcal{D}$ be a connected realizable diagram in $S^{2}$ with only two double curves $\alpha,\tau\alpha$; then both are simple or both are non-simple.
\begin{theorem} \label{THeoremDtwocurvesSimplyConnectedorZ3}If $\alpha$ and $\tau\alpha$ are simple, then $\pi\left( \mathcal{D}\right) \simeq\mathbb{Z}_{3}$, and if both are non-simple, then $\pi\left( \mathcal{D}\right) \simeq1$. \end{theorem}
\subsection{Checking fillingness.\label{SUBSECTION Checking fillingness}}
As we have said, testing whether a realizable diagram $\mathcal{D}$ is a filling diagram amounts to checking whether $\partial\hat{M}(\mathcal{D})$ is a collection of 2-spheres. Though the complete construction of the manifold with boundary $\hat{M}(\mathcal{D})$ from the diagram $\mathcal{D}$ can be made in an algorithmic way using Johansson's construction, it is interesting to have faster methods for checking fillingness. The following result gives us a method for saving time in this process. It can be proved easily using Euler characteristic techniques.
\begin{lemma} \label{LEMMADfills if P+2 boundary com}A realizable diagram $\mathcal{D}$ on the genus $g$ surface $S$ is a filling diagram if and only if it fills $S$ and $\partial\hat{M}(\mathcal{D})$ has $p+\chi(S)$ connected components, where $p$ is the number of triplets (of pairwise related double points) of $\mathcal{D}$. \end{lemma}
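For example, in the case $S=S^{2}$, which is the case of main interest in this paper, we have $\chi(S^{2})=2$, so a realizable diagram with $p$ triplets that fills $S^{2}$ is a filling diagram if and only if $\partial\hat{M}(\mathcal{D})$ has exactly $p+2$ connected components.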
The diagram group can also help us in checking fillingness. In Lemma 4.9 of \cite{Hempel}, it is proved that if a 3-manifold with boundary $\hat{M}$ has a boundary component which is not a 2-sphere, then $\hat{M}$ has a double cover. Thus,
\begin{lemma} \label{LEMMANosubgroupOfindexTWOfills}If $\mathcal{D}$ is a realizable connected diagram in $S^{2}$ and the diagram group $\pi\left( \mathcal{D} \right) $ has no subgroup of index $2$, then $\mathcal{D}$ is a filling diagram. \end{lemma}
This Lemma, together with Theorem \ref{THeoremDtwocurvesSimplyConnectedorZ3} gives the following.
\begin{corollary} \label{CORDIagramsWithtwoCurvesFill}If $\mathcal{D}$ is a realizable connected diagram in $S^{2}$ with only two curves, then it is a filling diagram.
\end{corollary}
\subsection{\label{SUBSECTION Eversion}Filling eversion.}
Theorem \ref{MAINtheorem} applies not only to filling Dehn spheres but also to their parametrizations. Let $f:S^{2}\rightarrow M$ be a filling immersion, and consider also the immersion $g:S^{2}\rightarrow M$ given by $g=f\circ a$, where $a$ now denotes the antipodal map of $S^{2}$. In this situation, the fact that $f(S^{2})$ is filling homotopic to $g(S^{2})$ is trivial because they are the same Dehn sphere of $M$. Theorem \ref{MAINtheorem} asserts that $f$ is filling homotopic to $g$, which is now a non-trivial fact. This is a filling version of the problem of the eversion of the 2-sphere (see \cite{Morin-Petit}). In \cite{Max-Banchoff} it is proved that every eversion of the 2-sphere has at least one quadruple point (compare also \cite{NowikTahl}). Using this, it can be seen that any parametrization $f$ of Johansson's sphere and its antipodal parametrization $g$ cannot be taken into one another by using only finger moves 1 and filling-preserving saddle moves. This means that finger move 2 cannot be dispensed with in the statement of Theorem \ref{MAINtheorem}.
\subsection{The non-orientable case.\label{SUBSECTION Non-orientable}}
In our discussion about diagrams in section \ref{SECTION Diagrams} we have assumed that surfaces and 3-manifolds are orientable. If $f:S\rightarrow M$ is a transverse immersion and $S$ or $M$ (or both) are non-orientable, then the inverse image by $f$ of a double curve $\bar{\alpha}$ of $f$ may be a single closed curve $\alpha$ in $S$, such that $\alpha$ is a 2-fold covering of $\bar{\alpha}$ (see \cite{Johansson1}). For this reason, in this general case we need a more general definition of abstract diagram. The one given in section \ref{SECTION Diagrams} can be adapted to this general case by allowing a \textit{non-free} involution $\tau$ and requiring that the curves $\alpha$ of the diagram with $\tau\alpha=\alpha$ commute with the antipodal map of $S^{1}$. Another fact is that in the general case the immersion $f$ might be \textit{1-sided}, and thus the proof of Theorem \ref{THM Johansson G-Clases} breaks down.
Johansson proves in \cite{Johansson1} a Theorem characterizing diagrams in $S^{2}$ which are realizable in 3-manifolds (orientable or not). It is an interesting problem to generalize this last theorem of Johansson to cover diagrams in any surface and immersions in general 3-manifolds (orientable or not). This is certainly not very difficult but it is an intermediate step to generalize our theory of filling immersions to cover general Dehn surfaces (orientable or not) immersed in general 3-manifolds (orientable or not). This is an open program.
\subsection{A question of R. Fenn.\label{SUBSECTION Fenn's question}}
The following question was posed to us by R. Fenn:
\textit{Do filling Dehn surfaces in }$M$\textit{\ lift to embeddings in }$M\times\left[ 0,1\right] $\textit{?}
We do not know the complete answer to this question. In \cite{Giller} (see also \cite{Carter-Saito}) an algorithm is given for deciding whether a Dehn surface $\Sigma$ in $\mathbb{R}^{3}$ lifts to an embedding in $\mathbb{R}^{4}$, in terms of the Johansson diagram of $\Sigma$. In the same paper an example is given of a Dehn sphere $\Sigma_{1}$ in $\mathbb{R}^{3}$ that does not lift to an embedding in $\mathbb{R}^{4}$. The Johansson diagram of $\Sigma_{1}$ has only two non-simple curves, and by Corollary \ref{CORDIagramsWithtwoCurvesFill} $\Sigma_{1}$ is a filling Dehn sphere of $S^{3}$. On the other hand, Johansson's example of Figure \ref{fig29a} represents a liftable (to an embedding in $\mathbb{R}^{4}$) filling Dehn sphere of $S^{3}$ (see also \cite{Giller}). Thus in $S^{3}$ there are liftable and non-liftable filling Dehn spheres. A generalization to general 3-manifolds of the mentioned result of \cite{Giller} could be applied to give a complete answer to Fenn's question.
\subsection{The triple point spectrum.\label{SUBSECTION Triplepoint Spectrum}}
The minimal number of triple points of filling Dehn surfaces of a 3-manifold $M$ satisfying some particular property can in some cases be a topological invariant of $M$. We define the \textit{triple point number} $t(M)$ of a closed orientable 3-manifold $M$ as the minimal number of triple points of all its filling Dehn surfaces and the \textit{genus }$g$\textit{\ triple point number} $t_{g}(M)$ of $M$ as the minimal number of triple points of all its genus $g$ filling Dehn surfaces. The ordered collection $\left( t_{0}(M),t_{1}(M),t_{2}(M),...\right) $ of all the genus $g$ triple point numbers of the 3-manifold $M$ for all $g\geq0$ is what we call the \textit{triple point spectrum} $\frak{T}(M)$ of $M$. We can make similar definitions imposing topological restrictions on the filling Dehn surfaces considered. For example, we can define the \textit{nulhomotopic triple point number} of $M$ as the minimal number of triple points of all its nulhomotopic filling Dehn surfaces, and in a similar way the \textit{nulhomotopic genus }$g$\textit{\ triple point number} or the \textit{nulhomotopic triple point spectrum }can be defined. All of them are topological invariants of the 3-manifold and give a measure of the\textit{\ complexity} of the manifold in the same way as the Heegaard genus, for example. If we have a filling Dehn surface $\Sigma$ in a 3-manifold, using pipings like that of Figure \ref{fig23b} we can perhaps reduce the number of triple points of $\Sigma$, at the cost of increasing the genus of the filling Dehn surface. So there is some relation between the different genus $g$ triple point numbers that it would be interesting to clarify.
Any Dehn sphere in a closed orientable 3-manifold has an even number of triple points (\cite{Haken1}, p. 105). This is not the case for genus $g>0$ Dehn surfaces, as can be seen in the example given by Figure \ref{fig32a}. This means that if we want a set of moves for relating \textit{all} Dehn surfaces (of any genus) of any 3-manifold, the Homma-Nagase moves introduced here, together with pipings, do not suffice because all of them are operations that preserve the parity of the number of triple points.
We define that a genus $g$ filling Dehn surface $\Sigma$ of a 3-manifold $M$ is \textit{minimal} if there is no other genus $g$ filling Dehn surface of $M$ with fewer triple points than $\Sigma$. Minimal filling Dehn surfaces, in particular minimal filling Dehn spheres, should have interesting properties, and their classification is another interesting problem. The classification of minimal Dehn spheres has been solved for $S^{3}$ in \cite{A.Shima2}. In that work, A. Shima gives, in a different context, six examples of Dehn spheres in $S^{3}$ with only 2 triple points. Three of these six examples fill $S^{3}$ (one of them is Johansson's sphere of Figure \ref{fig29a}) and they are minimal because, as we have said, any filling Dehn sphere must have at least 2 triple points. It can be deduced from the main theorem of \cite{A.Shima2} that these three examples are the only possible minimal filling Dehn spheres in $S^{3}$.
Finally, we want to introduce one last definition. We say that a filling Dehn surface $\Sigma$ in a 3-manifold $M$ is \textit{irreducible} if the only allowable filling-preserving moves on $\Sigma$ are finger moves +1 or +2. That is, $\Sigma$ is irreducible if any Dehn surface $\Sigma^{\prime}$ which can be obtained by performing a filling-preserving move on $\Sigma$ has more triple points than $\Sigma$. Johansson's sphere is not irreducible, while Example 1.3 of \cite{A.Shima2} is irreducible. This means that minimality does not imply irreducibility. We are also interested in the converse question: are there examples of non-minimal irreducible filling Dehn surfaces?
\end{document} |
\begin{document}
\title{Fast Density Estimation for Density-based Clustering Methods}
\author{ Difei~Cheng\thanks{D. Cheng, R. Xu and R. Jin are with Academy of Mathematics and Systems Science,
Chinese Academy of Sciences, Beijing 100190, China and School of Mathematical Sciences,
University of Chinese Academy of Sciences, Beijing 100049, China}, {\it Student~Member,~IEEE}, Ruihang~Xu, {\it Student~Member,~IEEE}, Bo Zhang\thanks{B. Zhang is with LSEC and Academy of Mathematics and Systems Science, Chinese Academy of Sciences, Beijing 100190, China and School of Mathematical Sciences, University of Chinese Academy of Sciences, Beijing 100049, China (email: [email protected]) \bf Corresponding author: Bo Zhang}, {\it Member, IEEE,}\\ and Ruinan~Jin, {\it Student~Member,~IEEE}, }
\maketitle
\begin{abstract} Density-based clustering algorithms are widely used for discovering clusters in pattern recognition and machine learning. They can deal with non-hyperspherical clusters and are robust to outliers. However, the runtime of density-based algorithms is heavily dominated by neighborhood finding and density estimation, which are time-consuming. Meanwhile, the traditional acceleration methods using indexing techniques such as KD trees may not be effective in dealing with high-dimensional data. To address these issues, this paper proposes a fast range query algorithm, called Fast Principal Component Analysis Pruning (FPCAP), with the help of the fast principal component analysis technique in conjunction with geometric information provided by the principal attributes of the data. FPCAP can deal with high-dimensional data and can be easily applied to density-based methods to prune unnecessary distance calculations in neighborhood finding and density estimation. As an application in density-based clustering methods, FPCAP is combined with the Density Based Spatial Clustering of Applications with Noise (DBSCAN) algorithm, and an improved DBSCAN (called IDBSCAN) is then obtained. IDBSCAN preserves the advantages of DBSCAN and, meanwhile, greatly reduces the computation of redundant distances. Experiments on seven benchmark datasets demonstrate that the proposed algorithm improves the computational efficiency significantly. \end{abstract}
\begin{IEEEkeywords} Density-based clustering, Principal component analysis, Pruning. \end{IEEEkeywords}
\IEEEpeerreviewmaketitle
\section{Introduction}\label{sec:introduction}
\IEEEPARstart{C}{lustering} aims to partition a set of data objects into several clusters so that objects in each cluster keep a high degree of similarity, and thus it has been widely used in data mining \cite{berkhin2006survey}, vector quantization \cite{coates2011importance}, dimension reduction \cite{boutsidis2014randomized} and manifold learning \cite{canas2012learning}.
Density-based clustering is one of the most important clustering methods. It is based on density estimation of data points and defines clusters as dense regions separated by low-density regions. It does not need to know the number of clusters, and meanwhile, it has the ability to discover clusters with arbitrary shapes and is robust to outliers. So far, many density-based methods have been proposed, such as DBSCAN \cite{ester1996density}, the Ordering Points To Identify the Clustering Structure (OPTICS) \cite{ankerst1999optics}, Clustering by Fast Search and Find of Density Peaks (CFSFDP) \cite{rodriguez2014clustering} and Mean-Shift Clustering \cite{cheng1995mean,anand2013semi}.
DBSCAN is probably the most prominent density-based clustering algorithm so far. It is based on the key idea that for each core data point of a cluster its neighborhood of a given radius $\varepsilon$ has to contain at least a minimum number of data points ($\textrm{MinPts}$). However, DBSCAN has some disadvantages. First, the parameters $\varepsilon$ and $\textrm{MinPts}$ have a significant influence on the clustering results and are difficult to choose. OPTICS \cite{ankerst1999optics} was then proposed to overcome this difficulty by creating an augmented ordering of the database representing a density-based clustering structure which includes the information of the clustering results obtained by DBSCAN corresponding to a broad range of parameter settings. Secondly, the time complexity of DBSCAN is $O(n^2)$. Thus many improved methods have been proposed to accelerate DBSCAN, which can be roughly divided into two categories: sampling-based improvements and partition-based improvements. Sampling-based methods \cite{chen2021block,borah2004improved,liu2006fast,jang2019dbscan++,chen2018fast} try to reduce the number of range queries by skipping some queries from specific points in the process of finding neighbors and calculating the density of each point, whilst partition-based methods \cite{mahran2008using,gunawan2013faster,gan2017hardness} use grid partitions to divide the data into several groups to process separately. When finding neighbors, most of the above improved DBSCAN algorithms speed up DBSCAN by reducing the number of range queries. But some sampling-based methods, such as those in \cite{chen2021block} and \cite{chen2018fast}, still need to calculate the ${\varepsilon}/2$-neighborhoods and $2\varepsilon$-neighborhoods many times. Thus it is important to reduce the range query time during neighborhood finding. Traditionally, indexing techniques such as KD-tree \cite{bentley1977complexity}, Cover tree \cite{beygelzimer2006cover}, quadtree-like hierarchical tree \cite{beygelzimer2006cover} and R tree \cite{guttman1984r} are used to reduce the range query time in these sampling-based algorithms. However, the construction of the tree structures is complex, and indexing techniques such as the KD tree have great difficulty dealing with high-dimensional data sets.
Motivated by the work in \cite{lai2010fast} which applies the Fast Principal Component Analysis (Fast-PCA) technique \cite{sharma2007fast} to accelerate the global $k$-means algorithm \cite{likas2003global}, we propose a fast range query method (called FPCAP), based on the Fast-PCA technique in conjunction with certain geometric information provided by the principal attributes of the data. FPCAP can greatly reduce the range query time by reducing the computation of redundant distances during neighborhood finding and density estimation, and meanwhile, it avoids any complex data structures which are needed to embed the data points themselves and are not easy to implement. Further, FPCAP can deal with high-dimensional data sets. In experiments, FPCAP is compared with the KD tree indexing technique. As an application in density-based clustering methods, FPCAP is applied to DBSCAN to obtain an improved DBSCAN algorithm which is called IDBSCAN. IDBSCAN is compared with the original DBSCAN algorithm as well as the original DBSCAN algorithm with the KD tree indexing technique. The experimental results on seven clustering benchmark datasets illustrate that both FPCAP and IDBSCAN outperform the other compared algorithms in computational efficiency. In addition, IDBSCAN is an exact DBSCAN algorithm which produces the same results as DBSCAN.
The remaining part of the paper is organized as follows. In Section \ref{sec2}, we briefly introduce some related work, the DBSCAN algorithm, the Fast-PCA algorithm and the KD-tree algorithm. Our proposed algorithm is presented in Section \ref{sec3}. Experimental results are provided in Section \ref{sec4}, and conclusions are given in Section \ref{sec5}.
\section{Related Work}\label{sec2}
\subsection{The DBSCAN algorithm}
DBSCAN \cite{ester1996density} has two important parameters $\varepsilon$ and $\textrm{MinPts}$. A point $x$ is called a core point if the number of points within the $\varepsilon$-neighborhood of $x$ is more than $\textrm{MinPts}$. A point $y$ is called directly density-reachable from a core point $x$ if $y$ is in the $\varepsilon$-neighborhood of $x$. A point $y$ is called density-reachable from a core point $x$ if there is a series of points $y_1,y_2,...,y_n$ with $y=y_n$, $x=y_1$ and for any pair $y_i,y_{i+1}$ we have that $y_{i+1}$ is directly density-reachable from $y_i$. Two points are density-connected if they both are density-reachable from the same core point. A cluster $C$ in DBSCAN satisfies the following two conditions: \begin{itemize} \item Maximality: any point which is density-reachable from a core point in cluster $C$ is also in cluster $C$. \item Connectivity: any two points in cluster $C$ are density-connected. \end{itemize}
Details of DBSCAN are described in Algorithm \ref{alg:DBSCAN}. \begin{algorithm}
\caption{The DBSCAN algorithm}\label{alg:DBSCAN}
\hspace*{0.02in}{\bf Input:} $D=\{x_{1},x_{2},\ldots,x_{n}\}$, $\varepsilon$, $\textrm{MinPts}$ \\
\hspace*{0.02in}{\bf Output:} $classifications$
\begin{algorithmic}[1]
\STATE Initialize $classifications$ to an $n$-dimensional vector with all its components being Unclassified;
$seed=\emptyset$; $cluster\_{id}=1$; $ID=1$; $id=1$
\STATE Calculate
$N_{\varepsilon}(x_{id})=\{x\in D|d(x,x_{id})<\varepsilon\}$, where $x_{id}\in D$ and $d(x,y)$
denotes the distance between two points.
\STATE If $|N_{\varepsilon}(x_{id})|<\textrm{MinPts}$ and $x_{id}$'s classification is Unclassified, then set $classifications(id)$ to Noise,
where $|D|$ denotes the cardinality of the set $D$;
otherwise, for every point $y$ whose classification is Unclassified or Noise
in $N_{\varepsilon}(x_{id})$, we set
\begin{eqnarray*}
classifications(p):=cluster\_{id},
\end{eqnarray*}
where $p$ is the index of $y$, and if $y$'s classification was Unclassified, update $seed=seed\cup\{y\}$.
\STATE If $seed\neq\emptyset$, update $seed=seed\setminus\{x_{id}\}$
\STATE If $seed\neq\emptyset$, choose a point $y$ in $seed$, update $id=index\;of\;y$ and go back
to Step 2. Otherwise, if $classifications(id)=cluster\_{id}$, update $cluster\_{id}=cluster\_{id}+1$
\STATE Update $ID=ID+1$, $id=ID$. If $classifications(id)$ is Unclassified, go back to Step 2;
otherwise, go back to Step 6 until $ID=n$.
\end{algorithmic} \end{algorithm}
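For illustration only, the following Python sketch follows the logic of Algorithm \ref{alg:DBSCAN} with a brute-force range query (the function names \texttt{region\_query} and \texttt{dbscan} are ours and are not part of the original algorithm); noise points are labelled $-1$ and cluster labels start at $1$.
\begin{verbatim}
import numpy as np

def region_query(X, i, eps):
    # brute-force epsilon-neighborhood of point i (returns indices)
    return np.where(np.linalg.norm(X - X[i], axis=1) < eps)[0]

def dbscan(X, eps, min_pts):
    UNCLASSIFIED, NOISE = 0, -1
    labels = np.full(len(X), UNCLASSIFIED, dtype=int)
    cluster_id = 1
    for i in range(len(X)):
        if labels[i] != UNCLASSIFIED:
            continue
        neighbors = region_query(X, i, eps)
        if len(neighbors) < min_pts:
            labels[i] = NOISE              # may later become a border point
            continue
        seeds = []                         # i is a core point: start a cluster
        for q in neighbors:
            if labels[q] == UNCLASSIFIED and q != i:
                seeds.append(q)
            if labels[q] in (UNCLASSIFIED, NOISE):
                labels[q] = cluster_id
        while seeds:                       # expand the cluster from the seeds
            j = seeds.pop()
            j_neighbors = region_query(X, j, eps)
            if len(j_neighbors) >= min_pts:
                for q in j_neighbors:
                    if labels[q] == UNCLASSIFIED:
                        seeds.append(q)
                    if labels[q] in (UNCLASSIFIED, NOISE):
                        labels[q] = cluster_id
        cluster_id += 1
    return labels
\end{verbatim}
Replacing \texttt{region\_query} by a faster range query (for instance a KD-tree, or the FPCAP procedure of Section \ref{sec3}) leaves the clustering logic unchanged.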
\subsection{Fast-PCA}
Fast-PCA \cite{sharma2007fast} is designed to find $h$ leading eigenvectors by using a fixed-point algorithm \cite{hyvarinen1997fast}. Its computational cost is much less than that of the eigenvalue decomposition (EVD) based PCA. Algorithm \ref{alg:fpca} presents details of Fast-PCA.
\begin{algorithm}
\caption{Fast-PCA}\label{alg:fpca}
{\bf Input:} Data $X=[X_{1},\ldots,X_{h}]$ of size $n\times h$, where $X_{i}$ is the $i$th feature
with size $n\times 1$ and $\sum X_{i}=0$, that is, $X$ is assumed to be zero-centered, and the desired
number $h$ of leading eigenvectors. \\
{\bf Output:} projection of data, $Z$, with size $n\times h$.\\
\vspace*{-0.2in}
\begin{algorithmic}[1]
\STATE Compute the covariance matrix $\sum_{X}=X^{T}X$, and set $p=1$
\STATE Randomly initialize the eigenvector $\phi_{p}$ of size $h\times 1$
\STATE Update $\phi_{p}$ as $\phi_{p}\gets\sum_{X}\phi_p$
\STATE Do the Gram-Schmidt orthogonalization process for $\phi_p$:
\begin{equation}
\phi_p\gets\phi_p - \sum_{j=1}^{p-1}(\phi_p^{T}\phi_j)\phi_j
\end{equation}
\STATE Normalize $\phi_p$ by dividing it by its norm:
\begin{equation}
\phi_p\gets\phi_p/\|\phi_p\|
\end{equation}
\STATE If $\phi_p$ has not converged, go back to Step 3. (The Fast-PCA algorithm for the $p$-th
basis vector converges when the new and old values $\phi_{p}$ point in the same direction,
i.e. $\left(\phi_p^+\right)^T\phi_p\approx 1$, where $\phi_p^+$ is the new value of $\phi_p$).
\STATE Set $p=p+1$ and go back to Step 2 if $p\le h$
\STATE Get the orthogonal projection matrix: $\Phi=[\phi_1,\phi_2,...\phi_{h}]$.
The columns of the projection matrix are sorted by the descending order of
the corresponding eigenvalues.
\STATE Get the projection of data: $Z=X\Phi$
\end{algorithmic} \end{algorithm}
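As an illustration, Algorithm \ref{alg:fpca} can be transcribed into Python roughly as follows (a sketch of ours; the data matrix is assumed to be zero-centered with one sample per row, and the covariance is taken as $X^{T}X$):
\begin{verbatim}
import numpy as np

def fast_pca(X, h, tol=1e-10, max_iter=1000):
    # X: zero-centered data, one sample per row; h: number of leading eigenvectors
    n, d = X.shape
    cov = X.T @ X                        # covariance matrix (up to a 1/n factor)
    Phi = np.zeros((d, h))
    for p in range(h):
        phi = np.random.randn(d)
        phi /= np.linalg.norm(phi)
        for _ in range(max_iter):
            new = cov @ phi                           # fixed-point update
            new -= Phi[:, :p] @ (Phi[:, :p].T @ new)  # Gram-Schmidt step
            new /= np.linalg.norm(new)
            converged = abs(new @ phi) > 1.0 - tol    # same direction?
            phi = new
            if converged:
                break
        Phi[:, p] = phi
    return X @ Phi, Phi                  # projection Z and projection matrix
\end{verbatim}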
\subsection{KD-tree}
KD-tree was introduced by Bentley \cite{bentley1977complexity,bentley1975multidimensional} as a binary tree that stores $k$-dimensional data. As a famous space-partitioning data structure used to organize points in a $k$-dimensional space, the KD-tree subdivides data like a binary tree at each recursive level, but it cycles through $k$ keys across the levels of the tree, in contrast to a binary tree, which uses a single key. For example, to build a KD-tree from three-dimensional points in the $(x,y,z)$ coordinates, the key coordinate would be chosen cyclically from $x,y,z$ for successive levels of the KD-tree. An often-used scheme for cycling the keys chooses the coordinate that has the widest dispersion or largest variance as the key for a particular level of recursion, and the splitting node is positioned at the spatial median of the chosen coordinate. As an acceleration structure, the KD-tree has been used in a variety of applications, including range queries for fixed-radius near neighbors and nearest-neighbor searching. For example, the KD-tree indexing technique is frequently used for the range query in the process of finding fixed-radius near neighbors or calculating densities in density-based clustering. When the KD-tree is used for range queries, it first locates the smallest sample subspace according to the judgment method of a binary tree and then backtracks through the parent nodes. When the distance between the target point and a parent node is less than the threshold, the search enters the other subspace of that parent node.
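For concreteness, this kind of fixed-radius range query can be sketched in Python with the KD-tree implementation available in SciPy (an illustration of ours; the data set and radius below are arbitrary):
\begin{verbatim}
import numpy as np
from scipy.spatial import cKDTree

rng = np.random.default_rng(0)
X = rng.random((1000, 3))              # 1000 points in 3 dimensions
tree = cKDTree(X)                      # build the KD-tree once

eps = 0.1
nbrs_of_first = tree.query_ball_point(X[0], r=eps)  # indices within eps of X[0]
all_nbrs = tree.query_ball_point(X, r=eps)          # one index list per point
densities = np.array([len(nb) for nb in all_nbrs])  # |N_eps(x)| for each point
\end{verbatim}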
\section{The proposed algorithm}\label{sec3}
\subsection{Fast-PCA pruning}
For a given data set $D$ of $n$ points, it is time-consuming to find neighbors for each point by simply comparing the distances between the point and all other $n-1$ points in the data set, which has a time complexity of $O(n^{2}h)$, where $h$ is the dimension of the data. An indexing technique can be used to accelerate the above process of finding neighbors, but such techniques have difficulty with high-dimensional data. To address this issue, we propose a fast range query algorithm that reduces redundant distance calculations and improves the computational efficiency, based on Fast-PCA in conjunction with geometric information provided by the principal attributes of the data.
Let $D=\{x_{1},x_{2},\ldots,x_{n}\}$ be a set of data points, where $x_{i}$ is an $h$-dimensional vector and can be represented as $x_{i}=\sum_{j=1}^{h}x_{i,j}e_j$ with the $e_j$'s being the $h$-dimensional orthonormal basis vectors.
Take $X=[x_{1}^{T},x_{2}^{T},\ldots,x_{n}^{T}]$ in Algorithm \ref{alg:fpca} and denote by $\phi_{i}$ the eigenvector corresponding to the $i$th largest eigenvalue, $i=1,\ldots,h$, obtained by Algorithm \ref{alg:fpca}. Then $x_{i}$ can be rewritten as $x_{i}=\sum_{j=1}^{h}z_{i,j}\phi_{j}$, where $z_{i,j}=\langle x_i,\phi_j\rangle$. In fact, $Z=(z_{i,j})_{n\times h}$ is exactly the projection of the data obtained by Algorithm \ref{alg:fpca}. For any $x_i\in D$ define \begin{eqnarray}\label{x-split} x_{i}:=\hat{z}_{i}+\tilde{z}_{i}, \end{eqnarray} where $\hat{z}_{i}=\sum_{j=1}^{h_1}z_{i,j}\phi_{j}$, $\tilde{z}_{i}=\sum_{j=h_{1}+1}^{h}z_{i,j}\phi_{j}$ and $h_1$ is an integer less than $h$ to be determined later. Then \begin{eqnarray}\nonumber
&&\|x_{i}-x_{j}\|^2\\ \nonumber &&\;=\langle x_{i}-x_{j},x_{i}-x_{j}\rangle\\ \nonumber &&\;=\langle(\hat{z}_{i}+\tilde{z}_{i})-(\hat{z}_{j}+\tilde{z}_{j}),(\hat{z}_{i} +\tilde{z}_{i})-(\hat{z}_{j}+\tilde{z}_{j})\rangle\\ \nonumber &&\;=\langle \hat{z}_{i}-\hat{z}_{j},\hat{z}_{i}-\hat{z}_{j}\rangle +\langle\tilde{z}_{i}-\tilde{z}_{j},\tilde{z}_{i}-\tilde{z}_{j}\rangle\\ \nonumber
&&\;=\|\hat{z}_{i}-\hat{z}_{j}\|^2+\|\tilde{z}_{i}-\tilde{z}_{j}\|^2\\ \label{eq:1}
&&\;\ge\|\hat{z}_{i}-\hat{z}_{j}\|^{2}
+\left|\|\tilde{z}_i\|-\|\tilde{z}_j\|\right|^2, \end{eqnarray} where the last inequality follows from the reverse triangle inequality (a consequence of the Cauchy-Schwarz inequality).
The following results follow easily from \eqref{eq:1}.
\begin{theorem}\label{t:1} Given two points $x_{i},x_{j}\in D$ with the form (\ref{x-split}),
$(i)$ if $\|\hat{z}_i-\hat{z}_j\|>\varepsilon$ then $x_i\notin N_{\varepsilon}(x_j)$, where $N_{\varepsilon}(x_j)$ is an $\varepsilon$-neighborhood of $x_j$;
$(ii)$ if $\|\hat{z}_{i}-\hat{z}_{j}\|^2+\left|\|\tilde{z}_i\|-\|\tilde{z}_j\|\right|^2>\varepsilon^2$ then $x_{i}\notin N_{\varepsilon}(x_{j})$. \end{theorem}
By Theorem \ref{t:1} (i), if $\|\hat{z}_i-\hat{z}_j\|>\varepsilon$ then it is known that $x_i$ is not in the
$\varepsilon$-neighborhood of $x_j$ without calculating the distance between $x_i$ and $x_j$. The time complexity of calculating the distances $\|x_{i}-x_{j}\|$ over all pairs of points $x_i,x_j\in D$
is $O(n^{2}h)$, whilst that of calculating $\|\hat{z}_i-\hat{z}_j\|$ over all pairs of points $\hat{z}_i,\hat{z}_j$ corresponding to the points $x_i,x_j\in D$ is $O(n^{2}h_1)$. Since $\hat{z}_i$ and $\hat{z}_j$ are the projections of $x_i$ and $x_j$ on the space spanned by the eigenvectors $\phi_1,\ldots,\phi_{h_1}$ corresponding to the largest $h_1$ eigenvalues and $h_1$ is usually much smaller than $h$, the calculation of $\|\hat{z}_i-\hat{z}_j\|$ is faster than that of
$\|x_{i}-x_{j}\|$. In fact, the value of $h_1$ can be determined as the smallest integer $d$ such that the following inequality is satisfied for a given real number $p$: \begin{eqnarray}\label{eq:6} \frac{\sum_{i=1}^{d}\lambda_{i}}{\sum_{j=1}^{h}\lambda_{j}}\ge p, \end{eqnarray} where $\lambda_{i}$ is the $i$th largest eigenvalue.
In the experiments conducted in Section \ref{sec4}, $p$ can be taken as $0.7,0.8,0.9$ or $0.99$, and in such cases, $h_1$ is smaller than $h/2$ for most of the data sets.
This means that Theorem \ref{t:1} (i) can be used to exclude the data points $x_i$'s that are not in the $\varepsilon$-neighborhood of $x_j$ with a lower computational cost by only verifying whether or not $\|\hat{z}_i-\hat{z}_j\|>\varepsilon$ for their projections $\hat{z}_i$ and $\hat{z}_j$ on the space spanned by the eigenvectors $\phi_1,\ldots,\phi_{h_1}$ corresponding to the largest $h_1$ eigenvalues.
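A small sketch of how $h_1$ can be obtained from \eqref{eq:6}, assuming the eigenvalues produced alongside Algorithm \ref{alg:fpca} are available (the function name is ours):
\begin{verbatim}
import numpy as np

def choose_h1(eigenvalues, p=0.8):
    """Smallest d such that the d largest eigenvalues explain a fraction >= p
    of the total variance (the criterion for h_1 in the text)."""
    lam = np.sort(np.asarray(eigenvalues, dtype=float))[::-1]   # descending
    ratios = np.cumsum(lam) / np.sum(lam)
    return int(np.searchsorted(ratios, p) + 1)

# e.g. choose_h1([5.0, 2.0, 1.0, 0.5, 0.3, 0.2], p=0.8) returns 3 (8/9 >= 0.8)
\end{verbatim}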
On the other hand, in the case when $\|\hat{z}_i-\hat{z}_j\|\le\varepsilon$, we cannot yet decide whether $x_i$ is in the $\varepsilon$-neighborhood of $x_j$. However, this can often be settled with Theorem \ref{t:1} (ii)
by further calculating $\left|\|\tilde{z_i}\|-\|\tilde{z_j}\|\right|^2$ with a much lower cost. In fact, since \begin{eqnarray*}\label{eq:2}
\|\tilde{z}_{i}\|^2&=&\langle\tilde{z}_{i},\tilde{z}_{i}\rangle =\langle x_{i}-\hat{z}_{i},x_{i}-\hat{z}_{i}\rangle\\ &=&\langle x_{i},x_{i}\rangle+\langle\hat{z}_{i},\hat{z}_{i}\rangle-2\langle x_{i},\hat{z}_{i}\rangle\\ &=&\langle x_{i},x_{i}\rangle-\langle\hat{z}_{i},\hat{z}_{i}\rangle\\
&=&\|x_{i}\|^{2}-\|\hat{z}_{i}\|^{2}, \end{eqnarray*} we have \begin{eqnarray}\label{eq:3}
\|\tilde{z}_{i}\|=\left(\|x_{i}\|^{2}-\|\hat{z}_{i}\|^2\right)^{1/2}. \end{eqnarray}
This means that all the norms $\|\tilde{z}_{i}\|$, $i=1,\ldots,n$, can be calculated with $O(nh)$ time complexity and stored in advance, so that each difference $|\|\tilde{z}_i\|-\|\tilde{z}_j\||$ is then available at negligible extra cost.
By the discussions above, Theorem \ref{t:1} can be used to exclude the data points $x_i$ that are not in the $\varepsilon$-neighborhood of $x_j$ with time complexity $O(n^{2}h_1)+O(nh)$, which is lower than $O(n^{2}h)$ since, in general, $h_1\ll h$. However, if the conditions in Theorem \ref{t:1} (i) and (ii) are not satisfied, then we need to calculate the distance between $x_i$ and $x_j$ to see if $x_i$ is in the $\varepsilon$-neighborhood of $x_j$.
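A sketch of the two pruning tests of Theorem \ref{t:1} is given below; it assumes the projection matrix $Z$ from Algorithm \ref{alg:fpca}, and the helper name \texttt{excluded\_by\_theorem1} is ours.
\begin{verbatim}
import numpy as np

def excluded_by_theorem1(x_i, x_j, z_i, z_j, h1, eps):
    """True if x_i can be excluded from the eps-neighborhood of x_j using only
    the leading h1 projection coordinates (conditions (i) and (ii))."""
    # condition (i): distance of the leading projections already exceeds eps
    d_hat_sq = np.sum((z_i[:h1] - z_j[:h1]) ** 2)
    if d_hat_sq > eps ** 2:
        return True
    # condition (ii): add the gap of the residual norms, each computed as
    # sqrt(||x||^2 - ||z_hat||^2) as in the text
    res_i = np.sqrt(max(np.dot(x_i, x_i) - np.sum(z_i[:h1] ** 2), 0.0))
    res_j = np.sqrt(max(np.dot(x_j, x_j) - np.sum(z_j[:h1] ** 2), 0.0))
    return d_hat_sq + (res_i - res_j) ** 2 > eps ** 2
\end{verbatim}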
For a single range query, Theorem \ref{t:1} helps to reduce the time complexity of the neighborhood finding process from $O(n^2h)$ to $O(n^{2}h_1)+O(nh)$. This is a good improvement when $h=O(n)$ and $h_1\ll h$. However, if $n$ is large enough, the $O(n^2)$ time complexity is still quite high, and so it is necessary to further reduce the number of accesses during range query, that is, to apply Theorem \ref{t:1} to only a small part of the other $n-1$ points when finding the $\varepsilon$-neighborhood of each point. This can be done by introducing a reference point, as discussed below.
Suppose $z_{p_i,1}$ is the projection of $x_{p_i}\in D$ in the first principal component $\phi_1$ obtained by Algorithm \ref{alg:fpca}, $i=1,\ldots,n$, and $z_{p_1,1},z_{p_2,1},\ldots,z_{p_n,1}$
are arranged in decreasing order. If $|z_{p_i,1}-z_{p_j,1}|>\varepsilon$ for some positive integers $i,j$ with $j\le i\le n$, then we have $|z_{p_j,1}-z_{p_k,1}|>\varepsilon$
for any integer $k$ with $i\leq k\leq n$ and so, by Theorem \ref{t:1} (i), $x_{p_k}\notin N_{\varepsilon}(x_{p_j})$ for $i\leq k\leq n$.
As a result, we do not need to consider the points $x_{p_k}$ for $i\leq k\leq n$ when we do the range query for $x_{p_j}$, that is, when we search for the $\varepsilon$-neighborhood of $x_{p_j}$. Hence, the number of accesses in the range query is reduced. Further, the calculation of the distances between $x_{p_k}$ and $x_{p_j}$ can be pruned in one batch for all integers $k$ with $i\leq k\leq n$.
In the above process, only the projection on the first principal component $\phi_1$ is considered for each data point. Unfortunately, this pruning can be weak: the projections $z_{i,1},z_{j,1}$ of certain data points $x_i,x_j\in D$ on the first principal component $\phi_1$ may differ by less than $\varepsilon$ even though the distance between the points $x_i,x_j$ is actually quite large, especially when the dimension $h$ of the data points is high. A natural way to address this issue is to consider projections on the first $n_a$ principal components $\phi_1,\phi_2,\ldots,\phi_{n_a}$ for the data points with an integer $n_a\ge1$, as illustrated in Figure \ref{fig:why2d}. However, it is not easy to extend the process discussed in the preceding paragraph from the projections on the first principal component to those on the first several principal components. We will do this by introducing a reference point.
\begin{figure}
\caption{An example of the ``overcrowding effect'' and of the choice of reference points}
\label{fig:why2d}
\end{figure}
Note that Figure \ref{fig:why2d} (a) presents the projections on the first two principal components of the data points represented by the blue points, whilst Figure \ref{fig:why2d} (b) shows the projections on the first principal component of these data points. From Figure \ref{fig:why2d} it is clear that the projections on the first two principal components of the data points are very well separated (see Figure \ref{fig:why2d} (a)), but the projections on the first principal component of some data points are very close or even almost coincide (see Figure \ref{fig:why2d} (b)).
For an integer $n_a\ge1$ let $q=\sum_{j=1}^{n_a}q_{j}\phi_j$ be a reference point. Hereafter, for convenience, we call such $q$ an $n_a$-dimensional point and write $q=(q_1,\ldots,q_{n_a})$. Set \begin{eqnarray}\label{eq:4.1} \hat{z_{i}}=z_{i}^{0}+z_{i}^{1}, \end{eqnarray} where $z_i^0=\sum_{k=1}^{n_a}z_{i,k}\phi_k$ and $z_i^1=\sum_{k=n_a+1}^{h_1}z_{i,k}\phi_k$. Then \begin{eqnarray}\label{eq:4.2}
\|\hat{z_{i}}-\hat{z_{j}}\|^{2}=\|z_{i}^{0}-z_{j}^{0}\|^{2}+\|z_{i}^{1}-z_{j}^{1}\|^{2} \end{eqnarray} with \begin{eqnarray}\nonumber
\|z_i^0-z_j^{0}\|&=&\|z_{i}^0-q+q-z_{j}^0\|\\ \label{eq:4.3}
&\ge&\left|\|z_i^{0}-q\|-\|z_{j}^{0}-q\|\right|=|d_i-d_j|, \end{eqnarray} where \begin{eqnarray}\label{eq:5}
d_i:=\|q-z_i^0\|,\;\;\;i=1,\ldots,n. \end{eqnarray}
Rearrange $d_1,\ldots,d_n$ in decreasing order as $d_{p_{1}},d_{p_{2}},\ldots,d_{p_{n}}$ and set $d=[d_{p_{1}},d_{p_{2}},\ldots,d_{p_{n}}]$. Then we have the following result which can be used to prune unnecessary distance calculations for a batch of points at once.
\begin{theorem}\label{c:1}
For any two different points $x_{p_{m}},x_{p_{k}}\in D$ with $0<m<k\leq n$, if $|d_{p_m}-d_{p_k}|>\varepsilon$ then $\{x_{p_l}:\,k\le l\le n\}\cap N_{\varepsilon}(x_{p_m})=\emptyset$ and $\{x_{p_l}:\,0<l\le m\}\cap N_{\varepsilon}(x_{p_k})=\emptyset$. \end{theorem}
\begin{proof} By \eqref{eq:1}, \eqref{eq:4.1}, \eqref{eq:4.2}, \eqref{eq:4.3} and \eqref{eq:5} it follows that for $n\ge l\ge k$ we have \begin{eqnarray*}
\|x_{p_l}-x_{p_m}\|&\ge&\|\hat{z}_{p_l}-\hat{z}_{p_m}\|\ge\|z_{p_l}^0-z_{p_m}^0\|\\
&\ge& |d_{p_l}-d_{p_m}|\ge|d_{p_k}-d_{p_m}|>\varepsilon. \end{eqnarray*}
This implies that $x_{p_l}\notin N_{\varepsilon}(x_{p_{m}})$. The case $0<l\le m$ can be proved similarly. \end{proof}
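A sketch of the batch pruning enabled by Theorem \ref{c:1}: once the distances $d_{p_1}\ge\dots\ge d_{p_n}$ to the reference point have been computed and sorted, the candidate indices for a query point form a contiguous window that can be found by binary search (the function name below is ours).
\begin{verbatim}
import numpy as np

def candidate_window(d_sorted_desc, m, eps):
    """Indices l (in the sorted order) that are NOT pruned by the
    reference-point bound when querying the eps-neighborhood of the m-th
    point, i.e. |d[l]-d[m]| <= eps.  d_sorted_desc is sorted decreasingly."""
    asc = d_sorted_desc[::-1]                 # ascending view for searchsorted
    n = len(asc)
    lo_asc = np.searchsorted(asc, d_sorted_desc[m] - eps, side="left")
    hi_asc = np.searchsorted(asc, d_sorted_desc[m] + eps, side="right")
    lo, hi = n - hi_asc, n - lo_asc           # back to the descending order
    return lo, hi                             # candidates are indices in [lo, hi)
\end{verbatim}
Only the points whose sorted index falls into this window need to be examined further with the tests of Theorem \ref{t:1} or an exact distance calculation.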
Figure \ref{fig:why2d} (a) illustrates the significance of choosing a two-dimensional reference point in pruning unnecessary distance calculations. Suppose we want to search for the $\varepsilon$-neighborhood of the point whose projection on the first two principal components $\phi_1,\phi_2$ is $p$. The annular region associated with $p$ is given by
\begin{eqnarray*} R_p=\{z=\sum_{j=1}^{2}z_j\phi_j\;:\;d-\varepsilon\le\|z-q\|\le d+\varepsilon\},
\end{eqnarray*} where $d=\|p-q\|$ and $q$ is a two-dimensional reference point defined as $q=\sum_{j=1}^{2}q_j\phi_j$.
By Theorem \ref{c:1}, there is no need to access any of the points whose projections on $\phi_1,\phi_2$ lie outside the annular region $R_p$ in the process of finding the $\varepsilon$-neighborhood of $p$.
\begin{figure}
\caption{The main idea of the neighborhood finding process. }
\label{fig:diagram2}
\end{figure}
Based on Theorems \ref{t:1} and \ref{c:1}, we propose a Fast-PCA Pruning (FPCAP) algorithm to accelerate the range query for $x_{p_m}\in D$ in neighborhood finding. The main idea of the FPCAP algorithm is shown in Figure \ref{fig:diagram2}, and the detailed algorithm is given in Algorithm \ref{alg:rq}. Algorithm \ref{alg:rq} can be roughly divided into four stages. Stage I excludes in one batch all the points lying beyond $x_{p_{m+t}}$ in the sorted order, that is, Stage I prunes redundant distance calculations in batches. Stages II and III exclude the point examined in the current iteration if the corresponding condition in Algorithm \ref{alg:rq} holds.
In Stage IV, we have to calculate the distance between $x_{p_{m}}$ and $x_{p_{m+t}}$ to see if $x_{p_{m+t}}$ is in the neighborhood of $x_{p_m}$: \begin{eqnarray*}
d_{p_{m},p_{m+t}}=\|x_{p_m}-x_{p_{m+t}}\|. \end{eqnarray*} When we do range query for $x_{p_k}$, the parameter $\rm{step}=1$ or $-1$ in Algorithm \ref{alg:rq}
means searching for neighbors of $x_{p_k}$ in the set $\{x_{p_j}\,|\,k<j\leq n\}$ or $\{x_{p_j}\,|\,0<j<k\}$, respectively.
\begin{algorithm} \caption{FPCAP}\label{alg:rq} \hspace*{0.02in} {\bf Input:} raw data matrix $X=[x_1,x_2,\ldots,x_n]^T$ of size $n\times h$, projection data matrix $Z=[z_1,z_2,\ldots,z_n]^T$ of size $n\times h$, candidate point $x_{p_{m}}$, reference point $q$, $\varepsilon$, $\rm{step}$, $h_1$, $t=0$, $h'=1$, $\rm{Diff}=0$. \\ \hspace*{0.02in} {\bf Output:} $\varepsilon$-neighborhood $N_{\varepsilon}(x_{p_{m}})$ of $x_{p_{m}}$.\\
\begin{algorithmic}[1]
\STATE Calculate $d_i=\|q-z_i^0\|$ for $i=1,\ldots,n$, and rearrange them in decreasing order as
$d_{p_1},d_{p_2},\ldots,d_{p_n}$.
\STATE $t=t+\rm{step}$
\STATE If $m+t>n$ or $m+t<1$, stop.
\STATE Stage I: Calculate
\begin{eqnarray*}
d=|d_{p_{m}}-d_{p_{m+t}}|.
\end{eqnarray*}
If $d>\varepsilon$, stop.
\STATE Stage II: Calculate
\begin{eqnarray*}
\rm{Diff}:=\rm{Diff}+(z_{p_{m},h'}-z_{p_{m+t},h'})^2.
\end{eqnarray*}
If $\rm{Diff}>\varepsilon^{2}$, go to Step 2.
\STATE Update $h'=h'+1$. If $h'\leq h_{1}$, go back to Step 5.
\STATE Stage III: Calculate
\begin{eqnarray*}
&&\|\tilde{z}_{p_{m}}\|=(\|x_{p_{m}}\|^2-\|\hat{z}_{p_{m}}\|^2)^{1/2},\\
&&\|\tilde{z}_{p_{m+t}}\|=(\|x_{p_{m+t}}\|^2-\|\hat{z}_{p_{m+t}}\|^2)^{1/2}.
\end{eqnarray*}
If $\rm{Diff}+(\|\tilde{z}_{p_m}\|-\|\tilde{z}_{p_{m+t}}\|)^2>\varepsilon^2$, go to Step 2.
\STATE Stage IV: Calculate
\begin{eqnarray*}
d_{p_{m},p_{m+t}}=\|x_{p_{m}}-x_{p_{m+t}}\|.
\end{eqnarray*}
If $d_{p_{m},p_{m+t}}\le\varepsilon$, update
\begin{eqnarray*}
N_{\varepsilon}(x_{p_{m}})=N_{\varepsilon}(x_{p_{m}})\cup \{x_{p_{m+t}}\}
\end{eqnarray*}
and go to Step 2.
\end{algorithmic} \end{algorithm}
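For illustration, a compact NumPy sketch of the staged range query of Algorithm \ref{alg:rq} for a single query point is given below. All names are ours, and some details are simplified (e.g.\ the coordinate-wise early exit of Stage II is replaced by a single sum); it is a sketch of the idea rather than the authors' implementation.
\begin{verbatim}
import numpy as np

def fpcap_range_query(X, Z, d_ref, order, m, eps, h1):
    """eps-neighborhood of the point with sorted index m.
    X: raw data (n x h); Z: Fast-PCA projections (n x h);
    d_ref: distances of the leading projections to the reference point q;
    order: indices of the points sorted by decreasing d_ref."""
    n = len(order)
    d_sorted = d_ref[order]
    xm, zm = X[order[m]], Z[order[m]]

    def residual_norm(i):
        # ||z~_i|| = sqrt(||x_i||^2 - ||z_hat_i||^2)
        return np.sqrt(max(np.dot(X[i], X[i]) - np.sum(Z[i, :h1] ** 2), 0.0))

    neighbours = []
    for step in (+1, -1):
        t = step
        while 0 <= m + t < n:
            k = order[m + t]
            # Stage I: batch pruning via the reference-point distances
            if abs(d_sorted[m] - d_sorted[m + t]) > eps:
                break
            # Stage II: squared distance of the leading h1 coordinates
            diff = np.sum((zm[:h1] - Z[k, :h1]) ** 2)
            if diff > eps ** 2:
                t += step
                continue
            # Stage III: add the gap of the residual norms
            if diff + (residual_norm(order[m]) - residual_norm(k)) ** 2 > eps ** 2:
                t += step
                continue
            # Stage IV: exact distance in the original space
            if np.linalg.norm(xm - X[k]) <= eps:
                neighbours.append(k)
            t += step
    return neighbours
\end{verbatim}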
The reference point $q$ in Algorithm \ref{alg:rq} needs to be given in advance, and its choice is essential for the effectiveness of the algorithm. In this paper, the $n_{a}$-dimensional reference point $q$ ($n_a\ge1$) is chosen as \begin{eqnarray}\label{refp} q=(Z_{min,n_a},\ldots,Z_{min,n_a})=Z_{min,n_a}\sum_{j=1}^{n_a}\phi_j, \end{eqnarray} where \begin{eqnarray*}
Z_{min,n_a}:=\min\{z_{i,j}:\;1\le i\le n,\;1\le j\le n_a\} \end{eqnarray*} and $Z=(z_{i,j})_{n\times h}$ is the projection of the data obtained by Algorithm \ref{alg:fpca}, that is, $x_i=\sum_{j=1}^{h}z_{i,j}\phi_{j}$ for $x_i\in D$ with $z_{i,j}=\langle x_i,\phi_j\rangle$, $i=1,\ldots,n,\;j=1,\ldots,h$ (see the beginning of this section). Thus
\begin{eqnarray}\nonumber d_i&=&\|q-z_i^0\|\\ \label{ref-d}
&=&\left(\sum_{j=1}^{n_a}|z_{i,j}-Z_{min,n_a}|^2\right)^{1/2},\;i=1,\ldots,n. \end{eqnarray}
Note that the special case when $n_a=1$ is considered in \cite{lai2010fast} in a different context where the Fast-PCA technique \cite{sharma2007fast} was applied to accelerate the global $k$-means algorithm proposed in \cite{likas2003global}. In this case the one-dimensional reference point
is $q=\min_{1\le i\le n}z_{i,1}$, where $z_{i,1}$ is the projection value of $x_i$ along the first principal component $\phi_1$, that is, $z_{i,1}=\langle x_i,\phi_1\rangle$, and so $d_i=|z_{i,1}-q|$, $i=1,\ldots,n$.
\begin{figure}
\caption{Illustration of pruning process in Stages I and II in Algorithm \ref{alg:rq} }
\label{fig:d31}
\end{figure}
Figure \ref{fig:d31} illustrates the pruning process in Stages I and II of Algorithm \ref{alg:rq} on the D31 data set, a two-dimensional synthetic data set with $3100$ points and $31$ clusters. Figure \ref{fig:d31} (a) shows the clustering results of the DBSCAN algorithm with $\varepsilon=1.32$ and $\textrm{MinPts}=68$. In Figure \ref{fig:d31} (b), the green and red lines represent the directions of the first and second principal components, respectively. The distances between the big green point $p$ and the two red lines are both $\varepsilon$. When we search for the $\varepsilon$-neighborhood of $p$, the orange points and the purple points represent the data points pruned in Stages I and II of Algorithm \ref{alg:rq}, while the blue points are the ones whose distances to $p$ must be calculated. The results show that only a small number of points really require a distance calculation to $p$. In other words, Algorithm \ref{alg:rq} can prune unnecessary distance calculations effectively when finding neighbors and estimating densities.
\subsection{An improved DBSCAN (IDBSCAN) algorithm}
The proposed FPCAP algorithm can be used together with a density-based clustering algorithm to get an improved density-based clustering algorithm. Figure \ref{fig:diagram} shows the framework of the proposed FPCAP algorithm combined with a density-based clustering algorithm which consists of the initialization stage (Stage I) and the clustering stage (Stage II). The main part of the initialization stage is fast principal component analysis. A new representation of the $h$-dimensional raw data $D=\{x_1,x_{2},\ldots,x_{n}\}$ can be obtained from this stage with $Z=(z_{p_i,j})_{n\times h}$ being the projection of data obtained by Algorithm \ref{alg:fpca}. The clustering stage is the combination of the density-based clustering algorithm and the FPCAP algorithm. The final clustering results are obtained after this stage.
\begin{figure*}
\caption{The framework of the proposed FPCAP algorithm combined with a density-based clustering algorithm}
\label{fig:diagram}
\end{figure*}
As an example of the above framework, in this subsection, the FPCAP algorithm is applied in the neighborhood-finding process of the DBSCAN algorithm to get an improved DBSCAN (IDBSCAN) algorithm. The detailed IDBSCAN algorithm is given in Algorithm \ref{alg:fDBSCAN}.
\begin{algorithm} \caption{IDBSCAN}\label{alg:fDBSCAN} \hspace*{0.02in} {\bf Input:} raw data matrix $X=[x_1,x_2,\ldots,x_n]^T$ of size $n\times h$, $\varepsilon$, $\textrm{MinPts}$, $p$\\ \hspace*{0.02in} {\bf Output:} the clustering result \begin{algorithmic}[1]
\STATE Get the projection data matrix $Z=[z_1,z_2,\ldots,z_n]^T$ of size $n\times h$
by Algorithm \ref{alg:fpca}
\STATE Choose the reference point $q$ as in (\ref{refp})
\STATE Run Algorithm \ref{alg:DBSCAN} (DBSCAN) with Algorithm \ref{alg:rq} (FPCAP)
used in the neighborhood-finding process
\STATE Return the clustering result \end{algorithmic} \end{algorithm}
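To indicate how FPCAP plugs into the clustering stage, here is a schematic DBSCAN main loop in which the neighborhood search is passed in as a function (\texttt{region\_query} would be the FPCAP range query); this is a sketch of the combination, not the authors' implementation.
\begin{verbatim}
import numpy as np

def dbscan(n, eps, min_pts, region_query):
    """Generic DBSCAN driver: region_query(i) must return the indices of the
    eps-neighborhood of point i (e.g. computed by the FPCAP range query)."""
    UNVISITED, NOISE = -2, -1
    labels = np.full(n, UNVISITED)
    cluster = 0
    for i in range(n):
        if labels[i] != UNVISITED:
            continue
        neigh = region_query(i)
        if len(neigh) < min_pts:
            labels[i] = NOISE
            continue
        labels[i] = cluster
        seeds = list(neigh)
        while seeds:
            j = seeds.pop()
            if labels[j] == NOISE:
                labels[j] = cluster              # border point
            if labels[j] != UNVISITED:
                continue
            labels[j] = cluster
            neigh_j = region_query(j)
            if len(neigh_j) >= min_pts:          # j is a core point: expand
                seeds.extend(neigh_j)
        cluster += 1
    return labels
\end{verbatim}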
\subsubsection{Correctness analysis}
In view of the indeterminacy in DBSCAN-like methods, border points may be assigned to different clusters when the data points are processed in different orders by DBSCAN and IDBSCAN. However, if the processing order of the data points is assumed to be the same in both algorithms,
then IDBSCAN and DBSCAN produce the same result. Indeed, by Theorems \ref{t:1} and \ref{c:1} (see also Figure \ref{fig:diagram2}) the conditions in Stages I, II and III in Algorithm \ref{alg:rq} are only used to certify that $x_{p_{m+t}}\notin N_{\varepsilon}(x_{p_{m}})$, and if these conditions are not satisfied then the distance between the original points $x_{p_{m+t}}$ and $x_{p_{m}}$ is calculated to decide whether $x_{p_{m+t}}\in N_{\varepsilon}(x_{p_{m}})$ (see Stage IV in Algorithm \ref{alg:rq}).
Hence, IDBSCAN and DBSCAN obtain exactly the same $\varepsilon$-neighborhoods. Since the process of IDBSCAN is the same as that of DBSCAN except for the method of finding $\varepsilon$-neighborhoods, IDBSCAN improves the efficiency of DBSCAN without losing correctness under the assumption that the processing order of the data points is the same. Even without this assumption, DBSCAN and IDBSCAN still produce the same clustering result when restricted to the core points.
\subsubsection{Complexity analysis}
We focus on the time complexity analysis about the distance calculation which dominates the runtime of the range query in FPCAP (Algorithm \ref{alg:rq}).
Denote by $n_1$ the total number of distance calculations pruned in Stage I of Algorithm \ref{alg:rq}, and by $n_2$ the total number of distance calculations pruned in Stages II and III of Algorithm \ref{alg:rq}. Thus, when searching for the $\varepsilon$-neighborhood of $x_{p_j}$ in Algorithm \ref{alg:rq}, if the condition $|d_{p_j}-d_{p_k}|>\varepsilon$ is satisfied for some $j<k\leq n$ then $n_1$ is increased by $n-(k-1)$, and
if the conditions in Theorem \ref{t:1} are satisfied for $x_{p_j}$ and $x_{p_k}$ then $n_2$ is increased by $1$. Define $n_{0}$ to be the average number of distance computations in Stage IV. Then \begin{eqnarray}\label{n-split} n_{0}:=(n-1)-\frac{n_{1}}{n}-\frac{n_{2}}{n}. \end{eqnarray} The complexity of the calculations in \eqref{eq:5} and \eqref{eq:3} is $O(nn_{a})$ and $O(nh)$, respectively. The complexity of Stages II and IV is $O(n(n-{n_1}/{n})h_1)$ and $O(n(n-{n_1}/{n}-{n_2}/{n})(h-h_1))$, respectively. As a result, the whole time complexity of FPCAP (Algorithm \ref{alg:rq}) is $O(nn_{a}+n(n-{n_1}/{n})h_1+n(n-{n_1}/{n}-{n_2}/{n})(h-h_1)+nh)$. In the case when $n-n_1/n=O(1)$, the complexity is $O(nh)$, which is a very good improvement. Taking the experiments conducted on the subset of the Reactionnetwork data set ($n=20000$) in Section \ref{sec4} as an example: for $n_{a}=1$ and $\varepsilon=1000, 10000, 20000, 30000$, we have ${n_1}/{n}=19615, 17125, 14706, 12387$ and $n_{0}=110, 147, 452, 1162$, respectively. When $\varepsilon$ is small, we thus have $n-{n_1}/{n}\ll n$ and $n_0\ll n$. In addition, when $p=0.8$, $h_1=h/7$, which is much smaller than $h$. Note that FPCAP (Algorithm \ref{alg:rq}) also includes sorting the distances from the data points to the reference point, whose complexity is $O(n\log n)$, but whose runtime is very short compared to the overall runtime of FPCAP (Algorithm \ref{alg:rq}).
\section{Experimental Results}\label{sec4}
\subsection{Data sets}
We now conduct several experiments on four real-world data sets and three synthetic data sets to illustrate the efficiency of the FPCAP and IDBSCAN algorithms. Clickstream is a $12$-dimensional real-world data set with $165474$ points containing clickstream information from online stores offering clothing for pregnant women. Household is a $7$-dimensional real-world data set with $2049280$ points, including all attributes except the temporal columns date and time. Mocap is a $36$-dimensional real-world data set with $78095$ points, containing $5$ types of hand postures from $12$ users. ReactionNetwork is the KEGG Metabolic Reaction Network (Undirected) data set of $65554$ points in $28$ dimensions.
Dim-set consists of three synthetic data sets, Dim6, Dim10 and Dim15, with Gaussian clusters and $4051$, $6751$ and $10126$ points, respectively. The above real-world data sets and synthetic data sets can be downloaded from the UCI Machine Learning repository\footnote{\url{http://archive.ics.uci.edu/ml/}} and the clustering datasets website\footnote{\url{http://cs.joensuu.fi/sipu/datasets/}}, respectively. Duplicated data points and points with missing coordinates are deleted. Each attribute of the data is normalized to $[0,100000]$.
\subsection{Algorithms}
The following algorithms are used in the experiments: \begin{itemize} \item FPCAP$_1$: Fast-PCA pruning initialized with the one-dimensional reference point $q$ given
by \eqref{refp} with $n_a=1$, \item FPCAP$_2$: Fast-PCA pruning initialized with the two-dimensional reference point $q$ given
by \eqref{refp} with $n_a=2$, \item KD tree: KD tree indexing technique on raw data, \item KD tree with PCA: KD tree indexing technique on projected data obtained with Fast-PCA
(Algorithm \ref{alg:fpca}). \end{itemize}
All the experiments are conducted on a single PC with a 2.9\,GHz Intel Core i7 CPU (16 cores) and 32\,GB RAM.
\subsection{Experimental results}
\subsubsection{Experiment 1: Effect of the choice of the reference point}
\begin{figure}
\caption{The number of pruning against the dimensionality $n_a$ of the reference point $q$ on seven data sets}
\label{fig:distance_na}
\end{figure}
In the initialization process of FPCAP, the choice of the reference point is very important and needs to be determined first. Experiment 1 is conducted to compare the effect on the pruning result of the dimensionality $n_a$ of the reference point $q$.
The value of $n_1$ appearing in \eqref{n-split}, i.e.\ the total number of distance calculations pruned in Stage I of Algorithm \ref{alg:rq}, determines the total number of accesses during range query, that is, the total number of distance calculations in Stage II of Algorithm \ref{alg:rq}, which dominates the runtime of Algorithm \ref{alg:fDBSCAN}. Therefore, Experiment 1 evaluates the initialization method in terms of the dimensionality $n_a$ of the reference point $q$, based on $n_1$.
Figure \ref{fig:distance_na} presents the number of distance calculations pruned in Stage I of Algorithm \ref{alg:rq} against the dimensionality $n_a$ of the reference point $q$ for different values of the neighborhood parameter $\varepsilon$ on the three synthetic data sets, Dim6, Dim10, Dim15, and on subsets of the four real-world data sets, Clickstream, Household, Mocap and Reactionnetwork, with each subset containing $20000$ data points. From Figure \ref{fig:distance_na} we have the following observations:
1) Initializing with a two-dimensional reference point (i.e., $n_a=2$) outperforms the other cases with $n_a\not=2$ on most of the data sets used in the experiments, such as Dim6, Dim10, Dim15, Clickstream and Household, for different choices of $\varepsilon$; this means that the initialization with $n_a=2$ can effectively reduce the number of accesses during range query; 2) Initializing with a one-dimensional reference point (i.e., $n_a=1$) outperforms the other cases with $n_a>1$ on Mocap and Reactionnetwork for different choices of $\varepsilon$. Based on the above observations, we use the cases $n_a=1,2$ to initialize FPCAP (i.e., FPCAP$_1$ and FPCAP$_2$) in the remaining experiments.
\subsubsection{Experiment 2: Effect of the neighborhood radius $\varepsilon$ in range query}
\begin{figure}
\caption{The runtime of the four algorithms on the seven data sets with different $\varepsilon$ }
\label{fig:runtime_eps}
\end{figure}
\begin{figure}
\caption{The number of distance calculations on the seven data sets with different $\varepsilon$ }
\label{fig:distance_eps}
\end{figure}
Experiment 2 compares the four algorithms, FPCAP$_1$, FPCAP$_2$, KD tree and KD tree with PCA, on the same seven data sets as used in Experiment 1 for different $\varepsilon$, in terms of their runtime and $n_{0}$ (i.e., the total number of distance calculations). Note that the runtime of the four algorithms was recorded as the average over $10$ duplicate tests to reduce randomness. Figure \ref{fig:runtime_eps} presents the runtime of the four algorithms against $\varepsilon$. The results show that FPCAP$_1$ and FPCAP$_2$ greatly outperform both KD tree and KD tree with PCA on the three synthetic data sets and on three real-world data sets, Household, Mocap and Reactionnetwork. On Clickstream, FPCAP$_1$, FPCAP$_2$ and KD tree with PCA outperform KD tree, with FPCAP$_2$ having the best performance among the four algorithms, KD tree with PCA performing better than FPCAP$_1$ when $\varepsilon<2$, and FPCAP$_1$ performing better than KD tree with PCA when $\varepsilon>2$. Further, it is seen from Figure \ref{fig:runtime_eps} that the performance of FPCAP$_1$ and FPCAP$_2$ is similar on the seven data sets, with FPCAP$_2$ performing slightly better than FPCAP$_1$ on the three synthetic data sets and on two real-world data sets, Clickstream and Household, and FPCAP$_1$ performing slightly better than FPCAP$_2$ on Mocap and Reactionnetwork. Furthermore, Figure \ref{fig:runtime_eps} shows that the runtime of KD tree and KD tree with PCA increases much faster than that of FPCAP$_1$ and FPCAP$_2$ as $\varepsilon$ increases. Figure \ref{fig:distance_eps} presents the value of $n_0$ (i.e., the total number of distance calculations) of the pruning algorithms, KD tree, KD tree with PCA and FPCAP, against $\varepsilon$. The results in Figure \ref{fig:distance_eps} illustrate that, compared to the KD-tree indexing technique, FPCAP prunes more distance calculations.
\subsubsection{Experiment 3: Effect of the cardinality (data sample size) on the runtime}
\begin{figure}
\caption{
The runtime of the four algorithms against the cardinality (or data sample size) on the four real-world data sets. The unit of the sample size is ten thousand. }
\label{runtime_n}
\end{figure}
Experiment 3 was conducted to compare the runtime of the four algorithms, FPCAP$_1$, FPCAP$_2$, KD tree and KD tree with PCA, on the subsets of the four real-world data sets, Clickstream, Household, Mocap and Reactionnetwork, for different cardinality (or data sample size).
Figure \ref{runtime_n} presents the runtime of FPCAP$_1$, FPCAP$_2$, KD tree and KD tree with PCA against the data sample size $n$ on the subsets of the four real-world data sets. The results in Figure \ref{runtime_n} show that FPCAP (FPCAP$_1$ and FPCAP$_2$) significantly outperforms the KD tree indexing techniques (KD tree and KD tree with PCA) as $n$ increases. It is further seen from Figure \ref{runtime_n} that the runtime of KD tree and KD tree with PCA increases much faster than that of FPCAP$_1$ and FPCAP$_2$ as $n$ increases.
\subsubsection{Experiment 4: Performance of FPCAP in combination with DBSCAN}
In this experiment we examine the performance of the Fast-PCA pruning algorithm, FPCAP, in combination with DBSCAN. We do this by comparing it with the naive neighborhood-finding approach and with the KD tree indexing technique, each in combination with the DBSCAN algorithm. The five algorithms compared are as follows:
\begin{itemize}
\item DBSCAN: the original DBSCAN algorithm (Algorithm \ref{alg:DBSCAN}) without pruning in the neighborhood-finding process; \item DBSCAN$_1$: DBSCAN (Algorithm \ref{alg:DBSCAN}) with the KD tree indexing technique on the projected data obtained through Fast-PCA (Algorithm \ref{alg:fpca}); \item DBSCAN$_2$: DBSCAN (Algorithm \ref{alg:DBSCAN}) with the KD tree indexing technique on raw data; \item IDBSCAN$_1$: IDBSCAN (Algorithm \ref{alg:fDBSCAN}) with $n_a=1$; \item IDBSCAN$_2$: IDBSCAN (Algorithm \ref{alg:fDBSCAN}) with $n_a=2$. \end{itemize}
We first compare the runtime of the five algorithms for different $\varepsilon$, $\textrm{MinPts}$ and $p$. Table \ref{tab:p} shows their runtime against $p$.
From Table \ref{tab:p} it is seen that (1) IDBSCAN has the best performance on all seven data sets in terms of runtime; (2) the KD tree indexing technique combined with Fast-PCA (DBSCAN$_1$) outperforms the original KD tree indexing technique (DBSCAN$_2$) on all four real-world data sets, while the two perform similarly on the three synthetic data sets Dim6, Dim10 and Dim15; (3) for large $\varepsilon$, DBSCAN with the KD tree indexing techniques (DBSCAN$_1$ and DBSCAN$_2$) needs a much longer runtime than the original DBSCAN algorithm on the three synthetic data sets Dim6, Dim10 and Dim15; (4) although the parameter $p$ varies from $0.7$ to $0.99$, the runtime of IDBSCAN$_1$ and IDBSCAN$_2$ does not change much, meaning that the runtime of IDBSCAN$_1$ and IDBSCAN$_2$ is not sensitive to the choice of the parameter $p$. In addition, Figure \ref{fig:initialization}, which presents the total runtime of performing Fast-PCA and of the KD tree construction, illustrates that the time used in initialization, that is, the runtime of Fast-PCA, is negligible compared with that of IDBSCAN, and that the runtime of Fast-PCA is much smaller than that of the KD tree construction.
\begin{table*} \centering \begin{tabular}{cccccccc} \toprule Data sets& [MinPts, $\varepsilon$] & DBSCAN & DBSCAN$_1$ & DBSCAN$_2$ & $\text{p}\ (h_1)$ & IDBSCAN$_1$ & IDBSCAN$_2$ \\ \midrule \multirow{6}{*} {Clickstream} &\multirow{3}{*}{[10, 15000]} & \multirow{3}{*}{22.09} & \multirow{3}{*}{6.45} & \multirow{3}{*}{11.33} & 80\% (6)& 7.77&\textbf{5.83} \\
& & & & &90\% (7)& 8.13& \textbf{5.97} \\
& & & & &99\% (9)& 7.82& \textbf{5.96} \\ \cmidrule{2-8} &\multirow{3}{*}{[20, 30000]} & \multirow{3}{*}{22.50} &\multirow{3}{*}{18.33} &\multirow{3}{*}{27.31} & 80\% (6)& 11.72& \textbf{11.18} \\
& & & & &90\% (7)& 11.74&\textbf{11.13} \\
& & & & &99\% (9)& 11.67&\textbf{11.11} \\ \cmidrule{1-8} \multirow{6}{*} {Household} &\multirow{3}{*}{[5, 1000]} & \multirow{3}{*}{19.88} & \multirow{3}{*}{3.47} &\multirow{3}{*}{6.66} &80\% (2)& 2.73& \textbf{1.59} \\
& & & & &90\% (4)& 2.76&\textbf{1.61} \\
& & & & &99\% (6)& 2.80&\textbf{1.63} \\ \cmidrule{2-8} &\multirow{3}{*}{[10, 3000]} & \multirow{3}{*}{19.98} & \multirow{3}{*}{14.45}& \multirow{3}{*}{20.60} & 80\% (2)& 7.59&\textbf{4.72} \\
& & & & &90\% (4)& 7.51&\textbf{4.89} \\
& & & & &99\% (6)& 7.48& \textbf{4.89} \\ \cmidrule{1-8} \multirow{6}{*} {Dim6} &\multirow{3}{*}{[3, 1500]} & \multirow{3}{*}{0.82} &\multirow{3}{*}{0.78} &\multirow{3}{*}{0.80} & 70\% (3)& 0.14& \textbf{0.13} \\
& & & & &90\% (4)& \textbf{0.13}& \textbf{0.13}\\
& & & & &99\% (6)& 0.15& \textbf{0.13}\\ \cmidrule{2-8} &\multirow{3}{*}{[5, 5000]} & \multirow{3}{*}{1.04} & \multirow{3}{*}{3.15}& \multirow{3}{*}{3.15} & 70\% (3)& 0.48& \textbf{0.43}\\
& & & & &90\% (4)& 0.48& \textbf{0.44} \\
& & & & &99\% (6)& 0.47& \textbf{0.41} \\ \cmidrule{1-8} \multirow{6}{*} {Dim10} & \multirow{3}{*}{[3, 2000]} & \multirow{3}{*}{2.52} &\multirow{3}{*}{2.62} &\multirow{3}{*}{2.48} & 80\% (3)& \textbf{0.34}& 0.36\\
& & & & &90\% (5)& \textbf{0.34}& 0.35\\
& & & & &99\% (8)& \textbf{0.34}& 0.35\\ \cmidrule{2-8} &\multirow{3}{*}{[5, 6000]} & \multirow{3}{*}{2.89} & \multirow{3}{*}{7.43}& \multirow{3}{*}{7.45} & 80\% (3)& 0.98& \textbf{0.97}\\
& & & & &90\% (5)& 1.00&\textbf{0.96}\\
& & & & &99\% (8)& 1.01&\textbf{0.97}\\ \cmidrule{1-8} \multirow{6}{*} {Dim15} & \multirow{3}{*}{[3, 3000]} & \multirow{3}{*}{6.34} &\multirow{3}{*}{5.65} &\multirow{3}{*}{5.28} & 80\% (4)& 1.01& \textbf{0.99}\\
& & & & &90\% (5)& 1.03& \textbf{0.96} \\
& & & & &99\% (8)& 1.03& \textbf{0.96} \\ \cmidrule{2-8} &\multirow{3}{*}{[5, 9000]} & \multirow{3}{*}{7.14} & \multirow{3}{*}{16.34}& \multirow{3}{*}{16.11} & 80\% (4)& 2.77&\textbf{2.63} \\
& & & & &90\% (5)& 2.75& \textbf{2.63}\\
& & & & &99\% (8)& 2.69& \textbf{2.61}\\ \cmidrule{1-8} \multirow{6}{*} {Mocap} & \multirow{3}{*}{[5, 6000]} & \multirow{3}{*}{35.40} &\multirow{3}{*}{5.56} &\multirow{3}{*}{9.92} & 80\% (15)& \textbf{3.19}& 4.14\\
& & & & &90\% (21)& \textbf{3.30}& 4.07 \\
& & & & &99\% (31)& \textbf{3.29}& 4.17\\ \cmidrule{2-8} &\multirow{3}{*}{[10, 14000]} & \multirow{3}{*}{35.85} & \multirow{3}{*}{23.18}& \multirow{3}{*}{34.92} & 80\% (15)& \textbf{7.03}&9.21 \\
& & & & &90\% (21)& \textbf{6.84}& 9.18\\
& & & & &99\% (31)& \textbf{6.98}&9.22\\ \cmidrule{1-8} \multirow{6}{*} {Reactionnetwork} & \multirow{3}{*}{[3, 5000]} & \multirow{3}{*}{31.40} &\multirow{3}{*}{9.37} &\multirow{3}{*}{10.95} & 80\% (4)& \textbf{2.79}& 3.97\\
& & & & &90\% (6)& \textbf{2.80}& 3.93 \\
& & & & &99\% (12)& \textbf{2.75}& 3.97 \\ \cmidrule{2-8} &\multirow{3}{*}{[5, 10000]} & \multirow{3}{*}{30.94} & \multirow{3}{*}{18.55}& \multirow{3}{*}{21.08} & 80\% (4)& \textbf{4.92}&7.30 \\
& & & & &90\% (6)& \textbf{4.89}& 7.08\\
& & & & &99\% (12)& \textbf{4.96}& 7.12\\ \bottomrule
\end{tabular} \caption{Runtime (in seconds) of the five compared algorithms on the seven benchmark data sets with different $p$.}\label{tab:p} \end{table*}
\begin{figure}
\caption{The total runtime of performing Fast-PCA and the KD tree construction, where KD tree construction$_1$ stands for the construction of the KD tree on raw data and KD tree construction$_2$ represents the construction of the KD tree on the projected data obtained by Fast-PCA (Algorithm \ref{alg:fpca}).}
\label{fig:initialization}
\end{figure}
\subsection{Comprehensive analysis: Range query in high dimensions}
\begin{figure}
\caption{Range query in high dimensions}
\label{fig:case}
\end{figure}
\begin{theorem}\label{t:0}
Suppose the sample points $\{x_1,\ldots, x_n\}$ are uniformly distributed in a $d$-dimensional hypercube $S$ with side length $r$. Denote by $R_{d,e}(x)$ the expected fraction of the sample points captured by a $d$-dimensional ball of radius $r_{0}$ centered at $x\in S$, where $e=r_0/r$. If $0\leq e\leq {1/2}$ then it follows that for any $x\in S$, \begin{eqnarray*} L_{d,e}\le R_{d,e}(x)\leq U_{d,e}, \end{eqnarray*} where $L_{d,e}=V_d(1)e^{d}2^{-d}$, $U_{d,e}=V_d(1)e^d$ and $V_{d}(r)={\pi^{d/2}r^d}/\Gamma((d/2)+1)$ is the volume of a ball of radius $r$, with the Gamma function $\Gamma(t)$ for $t>0$ satisfying $\Gamma(t+1)=t\Gamma(t)$ and $\Gamma(1/2)=\sqrt{\pi}$. \end{theorem}
\begin{proof} Denote by $W_d(r)$ the volume of the hypercube with side length $r$. In the case when the ball of radius $r_0$ centered at $x\in S$ is entirely contained in the hypercube with side length $r$, as seen in Figure \ref{fig:case} (a), we have $R_{d,e}(x)=V_d(r_0)/W_d(r)=V_d(1)e^d$; since the volume of the intersection of the ball with $S$ never exceeds $V_d(r_0)$, this value is an upper bound of $R_{d,e}(x)$ for all $x\in S$. In the case when the center $x\in S$ of the ball is located at one of the vertices of the hypercube, as seen in Figure \ref{fig:case} (b), we have $R_{d,e}(x)=V_d(r_0)/[W_d(r)2^d]=V_d(1)e^d2^{-d}$. This value is also a lower bound of $R_{d,e}(x)$ for all $x\in S$: since $e\le 1/2$, for every $x\in S$ one can choose in each coordinate direction the half-axis along which the ball stays inside $S$, so at least one closed orthant of the ball, of volume $V_d(r_0)2^{-d}$, is contained in $S$. \end{proof}
Figure \ref{fig:highdimension} presents the relationship between the radius of the ball and the upper bound $U_{d,e}$ of $R_{d,e}(x)$ given in Theorem \ref{t:0}. The results illustrate the sparsity of high-dimensional data in the sense that, for data uniformly distributed in a $d$-dimensional hypercube $S$ of side length $r$, the number of data points contained in a ball of radius $r_0$ centered at $x\in S$ decreases rapidly as $d$ increases when $r_0/r\le1/2$. For example, for sample points uniformly distributed in a $d$-dimensional hypercube $S$ of side length $r$, if $d=20$ then the ball of radius $r/2$ centered at $x\in S$ contains, in expectation, only a fraction of about $2.46\times 10^{-8}$ of the sample points, and even fewer for larger $d$. Consider the process of the DBSCAN algorithm. Due to the sparsity of high-dimensional data, the search radius of the range query has to increase with the dimension $d$. For the KD tree indexing technique, the larger the search radius is, the more sample points need to be accessed in the process of range query, thus reducing the efficiency of the range query. Therefore, for the range query method combined with the KD tree indexing technique, when the search radius is bigger than $r/2$, at least $50$ percent of the sample points need to be accessed in each range query.
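A quick numerical check of the upper bound $U_{d,e}=V_d(1)e^d$ (our own sketch) reproduces the values quoted in Figure \ref{fig:highdimension} for $e=1/2$:
\begin{verbatim}
import math

def U(d, e):
    """Upper bound U_{d,e} = V_d(1) * e^d with V_d(1) = pi^{d/2}/Gamma(d/2+1)."""
    return math.pi ** (d / 2) / math.gamma(d / 2 + 1) * e ** d

for d in (1, 2, 3, 5, 10, 20):
    print(d, U(d, 0.5))
# approximately 1, 0.7854, 0.5236, 0.1645, 0.00249, 2.46e-08
\end{verbatim}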
\begin{figure}
\caption{The upper bound $U_{d,e}$ against $e$ for dimension $d=1,2,3,5,10,20$, where $U_{d,e}$ and $e$ are defined in Theorem \ref{t:0}. Note that
when $e=1/2$, $U_{d,e}=1,0.7854,0.5236,0.1645,0.00249,2.46\times 10^{-8}$, corresponding to $d=1,2,3,5,10,20$, respectively. }
\label{fig:highdimension}
\end{figure}
\section{Conclusion}\label{sec5}
In order to accelerate the range query process in density-based methods, which are among the most popular clustering methods and have wide applications, we proposed a fast range query algorithm (called FPCAP) based on fast principal component analysis, which prunes unnecessary distance calculations in the range search process. By combining FPCAP with DBSCAN, we obtained an improved DBSCAN (called IDBSCAN) algorithm. Experimental results on real-world and synthetic data sets demonstrate that both FPCAP and IDBSCAN improve the computational efficiency and outperform the other compared methods. FPCAP can also be combined with other density-based clustering methods to improve their efficiency.
\ifCLASSOPTIONcaptionsoff
\fi
\begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{bio-chengdifei}}] {Difei~Cheng} received the BSc degree in mathematics and applied mathematics from Shandong University, Jinan, China, in 2017. He is currently pursuing his PhD degree in machine learning and pattern recognition with the Institute of Applied Mathematics, Academy of Mathematics and Systems Science, Chinese Academy of Sciences, Beijing, China.
His current research interests include clustering, unsupervised feature learning, manifold learning, metric learning and deep learning. \end{IEEEbiography}
\begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{bio-xuruihang}}] {Ruihang~Xu} received the BSc degree in mathematics and statistics from Xidian University, Xi'an, China, in 2018. He is currently pursuing his PhD degree in machine learning with the Institute of Applied Mathematics, Academy of Mathematics and Systems Science, Chinese Academy of Sciences, Beijing, China. His current research interests include deep learning and image processing. \end{IEEEbiography}
\begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{bio-zhangbo}}] {Bo~Zhang} (M'10) received the BSc degree in mathematics from Shandong University, Jinan, China, the MSc degree in mathematics from Xi'an Jiaotong University, Xi'an, China, and the PhD degree in applied mathematics from the University of Strathclyde, Glasgow, UK, in 1983, 1985, and 1992, respectively.
After being a postdoc at Keele University, UK, and a Research Fellow at Brunel University, UK, from 1992 to 1997, he joined Coventry University, Coventry, UK, in 1997, as a Senior Lecturer, where he was promoted to Reader in Applied Mathematics in 2000 and to Professor of Applied Mathematics in 2003. He is currently a Professor with the Institute of Applied Mathematics, Academy of Mathematics and Systems Science, Chinese Academy of Sciences, Beijing, China. His current research interests include direct and inverse scattering problems, radar and sonar imaging, machine learning, and data mining. He is currently an Associate Editor of the IEEE TRANSACTIONS ON CYBERNETICS and of Applicable Analysis. \end{IEEEbiography}
\begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{bio-jinruinan}}] {Ruinan~Jin} received the BSc degree in marine engine engineering from Wuhan University of Technology, China, in 2017. He is currently pursuing the MSc degree in machine learning and pattern recognition with the Academy of Mathematics and Systems Science, Chinese Academy of Sciences, Beijing, China. His current research interests include stochastic optimization, unsupervised feature learning, deep learning theory, and causal discovery. \end{IEEEbiography}
\end{document} |
\begin{document}
\title{Central limit theorems for the radial spanning tree}
\date{}
\renewcommand{\fnsymbol{footnote}}{\fnsymbol{footnote}}
\author{Matthias Schulte\footnotemark[1]\;\, and Christoph Th\"ale\footnotemark[2]\,}
\footnotetext[1]{Institute of Stochastics, Karlsruhe Institute of Technology,
Germany, [email protected]}
\footnotetext[2]{Faculty of Mathematics, Ruhr University Bochum, Germany, [email protected]}
\maketitle
\begin{abstract} Consider a homogeneous Poisson point process in a compact convex set in $d$-dimensional Euclidean space which has interior points and contains the origin. The radial spanning tree is constructed by connecting each point of the Poisson point process with its nearest neighbour that is closer to the origin. For increasing intensity of the underlying Poisson point process the paper provides expectation and variance asymptotics as well as central limit theorems with rates of convergence for a class of edge functionals including the total edge length.
\\ {\bf Keywords}. {Central limit theorem, directed spanning forest, Poisson point process, radial spanning tree, random graph.}\\ {\bf MSC}. Primary 60D05; Secondary 60F05, 60G55. \end{abstract}
\section{Introduction and results}
Random graphs for which the relative position of their vertices in space determines the presence of edges have found considerable attention in the probability and combinatorics literature during the last decades,\ cf.\ \cite{BaBla,FranceschettiMester,Haenggi,Penrose}. Among the most popular models are the nearest-neighbour graph and the random geometric (or Gilbert) graph. Geometric random graphs with a tree structure have attracted particular interest. For example, the minimal spanning tree has been studied intensively in stochastic optimization, cf.\ \cite{Steele,Yukich}. Bhatt and Roy \cite{BhattRoy} have proposed a model of a geometric random graph with a tree structure, the so-called minimal directed spanning tree, and in \cite{BaBo} another model has been introduced by Baccelli and Bordenave. This so-called radial spanning tree is also of interest in the area of communication networks, as discussed in \cite{BaBo,Bordenave}.
To define the radial spanning tree formally, let $W\subset\R^d$, $d\geq 2$, be a compact convex set with $d$-dimensional Lebesgue measure $\lambda_d(W)>0$ which contains the origin $0$, and let $\eta_t$ be a Poisson point process in $W$ whose intensity measure is a multiple $t\geq 1$ of the Lebesgue measure restricted to $W$, see Section \ref{sec:Preparations} for more details. For a point $x\in\eta_t$ we denote by $n(x,\eta_t)$ the nearest neighbour of $x$ in $\eta_t\cup\{0\}$ which is closer to the origin than $x$, i.e., for which $\|n(x,\eta_t)\|\leq\|x\|$ and $\|x-n(x,\eta_t)\|\leq \|x-y\|$ for all $y\in\eta_t\cap (B^d(0,\|x\|)\setminus\{x\})$, where $\|\cdot\|$ stands for the Euclidean norm and $B^d(z,r)$ is the $d$-dimensional closed ball with centre $z\in\R^d$ and radius $r\geq 0$. In what follows, we call $n(x,\eta_t)$ the radial nearest neighbour of $x$. The radial spanning tree $\RST(\eta_t)$ with respect to $\eta_t$ rooted at the origin $0$ is the random tree in which each point $x\in\eta_t$ is connected by an edge to its radial nearest neighbour, see Figure \ref{fig1} and Figure \ref{fig3d} for pictures in dimensions $2$ and $3$.
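For illustration, the construction can be simulated directly; the figures below were produced with the freely available \texttt{R}-package \texttt{spatgraphs}, whereas the following short Python sketch (ours, with arbitrary parameter choices) only serves to make the definition concrete.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
t, d = 300.0, 2                              # intensity and dimension
n = rng.poisson(t)                           # Poisson number of points in [0,1]^d
pts = rng.random((n, d))

# radial spanning tree: connect each point to its nearest neighbour among the
# points (including the root 0) that are at most as far from the origin
radii = np.linalg.norm(pts, axis=1)
edges, total_length = [], 0.0
for i in range(n):
    cand = np.vstack([pts[radii <= radii[i]], np.zeros(d)])
    cand = cand[np.any(cand != pts[i], axis=1)]      # remove the point itself
    dists = np.linalg.norm(cand - pts[i], axis=1)
    j = int(np.argmin(dists))
    edges.append((pts[i], cand[j]))
    total_length += dists[j]                 # contributes to the total edge length
\end{verbatim}
The variable \texttt{total\_length} then corresponds to the edge-length functional $\mathcal{L}_t^{(1)}$ introduced below.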
In the original paper \cite{BaBo}, the radial spanning tree was constructed with respect to a stationary Poisson point process in $\R^d$, and properties dealing with individual edges or vertices such as edge lengths and degree distributions as well as the behaviour of semi-infinite paths were considered. Moreover, spatial averages of edge lengths within $W$ have been investigated, especially when $W$ is a ball with increasing radius. Semi-infinite paths and the number of infinite subtrees of the root were further studied by Baccelli, Coupier and Tran \cite{BacCoupierTran}, while Bordenave \cite{Bordenave} considered a closely related navigation problem. However, several questions related to cumulative functionals such as the total edge length remained open. For example, the question of a central limit theorem for the total edge length of the radial spanning tree within a convex observation window $W$ has been brought up by Penrose and Wade \cite{PenroseWade} and is still one of the prominent open problems. Note that for such a central limit theorem the set-up in which the intensity goes to infinity and the set $W$ is kept fixed is equivalent to the situation in which $W$ increases to $\R^d$ for fixed intensity. The main difficulty in proving a central limit theorem is that the usual existing techniques are not (at least not directly) applicable, see \cite{PenroseWade} for a more detailed discussion. One reason for this is the lack of spatial homogeneity of the construction as a result of the observation that the geometry of the set of possible radial nearest neighbours of a given point changes with the distance of the point to the origin. The main contribution of our paper is a central limit theorem together with an optimal rate of convergence. Its proof relies on a recent Berry-Esseen bound for the normal approximation of Poisson functionals from Last, Peccati and Schulte \cite{LPS}, which because of its geometric flavour is particularly well suited for models arising in stochastic geometry. The major technical problem in order to prove a central limit theorem is to control appropriately the asymptotic behaviour of the variance. This sophisticated issue is settled in our text on the basis of a recent non-degeneracy condition also taken from \cite{LPS}.
Although a central limit theorem for the radial spanning tree is an open problem, we remark that central limit theorems for edge-length functionals of the minimal spanning tree of a random point set have been obtained by Kesten and Lee \cite{KestenLee} and later also by Chatterjee and Sen \cite{ChatterjeeSan}, Penrose \cite{PenroseCLT} and Penrose and Yukich \cite{PenroseYukich}. Moreover, Penrose and Wade \cite{PenroseWadeCLT} have shown a central limit theorem for the total edge length of the minimal directed spanning tree.
\medspace
\begin{figure}
\caption{\small Simulations of radial spanning trees in the unit square with $t=50$ (left) and $t=300$ (right). They have been produced with the freely available \texttt{R}-package \texttt{spatgraphs}.}
\label{fig1}
\end{figure}
\begin{figure}
\caption{\small A simulation of a radial spanning tree in the unit cube with $t=500$. It has been produced with the freely available \texttt{R}-package \texttt{spatgraphs}.}
\label{fig3d}
\end{figure}
In order to state our main results formally, we use the notation $\ell(x,\eta_t):=\|x-n(x,\eta_t)\|$ and define the edge-length functionals \begin{equation}\label{eq:LtaDef} \cL_t^{(a)} := \sum_{x\in\eta_t} \ell(x,\eta_t)^a\,, \qquad a\geq 0\,, \quad t\geq 1\,, \end{equation} of $\RST(\eta_t)$ (the assumption that $a\geq 0$ is discussed in Remark \ref{rem:a>=0} below). Note that $\cL_t^{(0)}$ is just the number of vertices of $\RST(\eta_t)$, while $\cL_t^{(1)}$ is its total edge length. Our first result provides expectation and variance asymptotics for $\cL_t^{(a)}$. To state it, denote by $\kappa_d$ the volume of the $d$-dimensional unit ball and by $\Gamma(\,\cdot\,)$ the usual Gamma function. An explicit representation of the constant $v_a$ in the next theorem will be derived in Lemma \ref{lem:ExistenceVarianceAsymptotics} below.
\begin{theorem}\label{thm:Variance} Let $a\geq 0$. Then \begin{equation}\label{eq:ExpectationAsymptotics} \lim_{t\to\infty}t^{a/d-1}\,\BE[\cL_t^{(a)}] = \left({2\over\kappa_d}\right)^{a/d}\Gamma\left(1+{a\over d}\right)\,\lambda_d(W) \end{equation} and there exists a constant $v_a\in(0,\infty)$ only depending on $a$ and $d$ such that \begin{equation}\label{eq:VarianceAsymptotics} \lim_{t\to\infty}t^{2a/d-1}\,\BV[\cL_t^{(a)}] = v_a\,\lambda_d(W)\,. \end{equation} \end{theorem}
We turn now to the central limit theorem. Our next result in particular ensures that, after suitable centering and rescaling, the functionals $\cL_t^{(a)}$ converge in distribution to a standard Gaussian random variable, as $t\to\infty$.
\begin{theorem}\label{thm:CLT} Let $a\geq 0$ and let $Z$ be a standard Gaussian random variable. Then there is a constant $C\in(0,\infty)$ only depending on $W$, the parameter $a$ and the space dimension $d$ such that $$
\sup_{s\in\R}\Big| \BP\Bigg(\frac{\cL_t^{(a)}-\BE[\cL_t^{(a)}]}{\sqrt{\BV[\cL_t^{(a)}]}}\leq s\Bigg) - \BP(Z \leq s)\Big| \leq C\, t^{-1/2}\,, \qquad t\geq 1\,. $$ \end{theorem}
For $a=1$ and $a=0$ Theorem \ref{thm:CLT} says that the total edge length and the number of edges satisfy a central limit theorem, as $t\to\infty$. While for $a>0$ the result is non-trivial, a central limit theorem in the case $a=0$ is immediate from the following observations. Namely, there is a canonical one-to-one correspondence between the points of $\eta_t$ and the edges in the radial spanning tree. Moreover, the number of points of $\eta_t$ is a Poisson-distributed random variable $\eta_t(W)$ with mean (and variance) $t\lambda_d(W)$, and it is well-known from the classical central limit theorem that -- under suitable normalization -- such a random variable is well approximated by a standard Gaussian random variable. Since for this situation the rate $t^{-1/2}$ is known to be optimal, the rate of convergence in Theorem \ref{thm:CLT} can in general not be improved.
The radial spanning tree is closely related to another geometric random graph, namely the directed spanning forest, which has also been introduced in \cite{BaBo}. The directed spanning forest $\operatorname{DSF}(\eta_t)$ with respect to a direction $e\in\mathbb{S}^{d-1}$ is constructed in the following way. For $x\in\R^d$ let $H_{x,e}$ be the half-space $H_{x,e}:=\{y\in\R^d: \langle e, y-x\rangle\leq 0\}$. We now take the points of $\eta_t$ as vertices of $\operatorname{DSF}(\eta_t)$ and connect each point $x\in\eta_t$ with its closest neighbour $\hat{n}(x,\eta_t)$ in $\eta_t\cap (H_{x,e}\setminus\{x\})$. If there is no such point, we put $\hat{n}(x,\eta_t)=\emptyset$. This means that we look for the neighbour of a vertex of the directed spanning forest always in the same direction, whereas this direction changes according to the relative position to the origin in the case of the radial spanning tree. The directed spanning forest can be regarded as a local approximation of the radial spanning tree at distances far away from the origin.
As for the radial spanning tree we define $\ell_e(x,\eta_t):=\|x-\hat{n}(x,\eta_t)\|$ if $\hat{n}(x,\eta_t)\neq\emptyset$ and $\ell_e(x,\eta_t):=0$ if $\hat{n}(x,\eta_t)=\emptyset$, and $$ \widehat{\mathcal{L}}_{t}^{(a)} := \sum_{x\in\eta_t} \ell_e(x,\eta_t)^a\,, \qquad a\geq 0\,, \quad t\geq 1\,. $$ In order to avoid boundary effects it is convenient to replace the Poisson point process $\eta_t$ in $W$ by a unit-intensity stationary Poisson point process $\eta$ in $\R^d$. For this set-up we introduce the functionals \begin{equation}\label{eq:widehatLW} \widehat{\mathcal{L}}_W^{(a)} := \sum_{x\in\eta\cap W} \ell_e(x,\eta)^a\,, \quad a\geq 0\,. \end{equation}
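The directed spanning forest admits an equally short simulation sketch (again ours, with a fixed number of points rather than a Poisson number, and an arbitrary direction $e$):
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)
pts = rng.random((500, 2))                   # points in the unit square
e = np.array([1.0, 0.0])                     # reference direction

# connect each x to its nearest neighbour y with <e, y - x> <= 0, if any
lengths = []
for x in pts:
    behind = pts[(pts - x) @ e <= 0.0]
    behind = behind[np.any(behind != x, axis=1)]     # remove x itself
    if len(behind) > 0:
        lengths.append(np.min(np.linalg.norm(behind - x, axis=1)))

total_length = sum(lengths)                  # analogue of the total edge length
\end{verbatim}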
Let us recall that a forest is a family of one or more disjoint trees, while a tree is an undirected and simple graph without cycles. Although with strictly positive probability $\operatorname{DSF}(\eta_t)$ is a union of more than one disjoint tree, Coupier and Tran have shown in \cite{CoupierTran} that in the directed spanning forest $\operatorname{DSF}(\eta)$ of a stationary Poisson point process $\eta$ \textit{in the plane} any two paths eventually coalesce, implying that $\operatorname{DSF}(\eta)$ is almost surely a tree. Resembling the intersection behaviour of Brownian motions, where the intersection of any finite number of independent Brownian motions in $\R^d$ with different starting points is almost surely non-empty if and only if $d=2$, cf.\ \cite[Theorem 9.3 (b)]{MP}, it seems that a similar property is no longer true for dimensions $d\geq 3$. Note that in contrast to these dimension-sensitive results, our expectation and variance asymptotics and our central limit theorem hold for any space dimension $d\geq 2$.
A key idea of the proof of Theorem \ref{thm:Variance} is to show that, for $a\geq 0$, $$ \lim_{t\to\infty} t^{a/d-1}\BE[\mathcal{L}_t^{(a)}]=\BE[\widehat{\mathcal{L}}_W^{(a)}] $$ and \begin{equation}\label{eq:EVIntro} \lim_{t\to\infty} t^{2a/d-1}\BV[\mathcal{L}_t^{(a)}]=\lim_{r\to\infty}\frac{\BV[\widehat{\mathcal{L}}_{B^d(0,r)}^{(a)}]}{\kappa_d r^d} \lambda_d(W)\,. \end{equation} In other words this means that the expectation and the variance of $\cL_t^{(a)}$ can be approximated by those of $\widehat{\cL}_W^{(a)}$ or $\widehat{\cL}_{B^d(0,r)}^{(a)}$, which are much easier to study because of translation invariance. We shall use a recent non-degeneracy criterion from \cite{LPS} to show that the right-hand side in \eqref{eq:EVIntro} is bounded away from zero, which implies then the same property also for the edge-length functionals of the radial spanning tree. To prove Theorem \ref{thm:CLT} we use again recent findings from \cite{LPS}. These are reviewed in Section \ref{sec:Preparations} together with some other background material. In Section \ref{sec:Variance} we establish Theorem \ref{thm:Variance} before proving Theorem \ref{thm:CLT} in the final Section \ref{sec:ProofCLT}.
\section{Preliminaries}\label{sec:Preparations}
\paragraph{Notation.} Let $\mathcal{B}(\R^d)$ be the Borel $\sigma$-field on $\R^d$ and let $\lambda_d$ be the Lebesgue measure on $\R^d$. For a set $A\in\mathcal{B}(\R^d)$ we denote by $\interior(A)$ the interior of $A$ and by $\lambda_d|_A$ the restriction of $\lambda_d$ to $A$. For $z\in\R^d$ and $r\geq 0$, $B^d(z,r)$ stands for the closed ball with centre $z$ and radius $r$, and $B^d$ is the $d$-dimensional closed unit ball, whose volume is given by $\kappa_d:=\lambda_d(B^d)$. By $\mathbb{S}^{d-1}$ we denote the unit sphere in $\R^d$.
\paragraph{Poisson point processes.} Let $\bN$ be the set of $\sigma$-finite counting measures on $\R^d$ and let it be equipped with the $\sigma$-field that is generated by all maps $\bN\ni \mu\mapsto\mu(A)$, $A\in\mathcal{B}(\R^d)$. For a $\sigma$-finite non-atomic measure $\mu$ on $\R^d$ a Poisson point process $\eta$ with intensity measure $\mu$ is a random element in $\bN$ such that \begin{itemize} \item for all $n\in\N$ and pairwise disjoint sets $A_1,\ldots,A_n\in\mathcal{B}(\R^d)$ the random variables $\eta(A_1),\hdots,\eta(A_n)$ are independent, \item for $A\in\mathcal{B}(\R^d)$ with $\mu(A)\in(0,\infty)$, $\eta(A)$ is Poisson distributed with parameter $\mu(A)$. \end{itemize}
We say that $\eta$ is stationary with intensity $t>0$ if $\mu=t\lambda_d$, implying that $\eta$ has the same distribution as the translated point process $\eta+z$ for all $z\in\R^d$. By abuse of terminology we also refer to $t$ as the intensity if the measure $\mu$ has the form $t\lambda_d|_W$ for some (typically compact) subset $W\subset\R^d$.
A Poisson point process $\eta$ can, with probability one, be represented as $$ \eta = \sum_{n=1}^m \delta_{x_n}, \quad x_1,x_2,\hdots\in \R^d, \qquad m\in\N\cup\{0,\infty\}\,, $$ where $\delta_x$ stands for the unit-mass Dirac measure concentrated at $x\in\R^d$. In particular, if $\mu(\R^d)<\infty$, $\eta$ can be written in the form of the distributional identity $$ \eta=\sum_{n=1}^{M} \delta_{X_n} $$ with i.i.d.\ random points $(X_n)_{n\in\N}$ that are distributed according to $\mu(\cdot)/\mu(\R^d)$ and a Poisson distributed random variable $M$ with mean $\mu(\R^d)$ that is independent of $(X_n)_{n\in\N}$.
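The latter representation is also the standard way to simulate a Poisson point process with a finite intensity measure. The following toy Python sketch (not part of the paper; for simplicity it takes $W=[0,1]^d$ with intensity measure $t\lambda_d|_W$, and the function name is ours) implements exactly this two-step procedure.
\begin{verbatim}
import numpy as np

def sample_poisson_process(t, d, rng=None):
    # Simulate eta_t with intensity measure t * lambda_d restricted to [0,1]^d:
    # draw M ~ Poisson(mu(W)) = Poisson(t), then M i.i.d. points with law mu/mu(W).
    rng = np.random.default_rng() if rng is None else rng
    m = rng.poisson(t)
    return rng.uniform(size=(m, d))
\end{verbatim}
Feeding such a sample into the sketch of the directed spanning forest above (with, say, $e=(0,\dots,0,-1)$) produces one realization of $\operatorname{DSF}(\eta_t)$.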
As usual in the theory of point processes, we may identify $\eta$ with its support and write, by slight abuse of notation, $x\in\eta$ whenever $x$ is charged by the random measure $\eta$. Moreover, we write $\eta\cap A$ for the restriction of $\eta$ to a subset $A\in\mathcal{B}(\R^d)$. Although we interpreted $\eta$ as a random set in the introduction, we prefer from now on to regard $\eta$ as a random measure.
To deal with functionals of $\eta$, the so-called multivariate Mecke formula will turn out to be useful for us. It states that, for $k\in\N$ and a non-negative measurable function $f: (\R^d)^k\times \bN\to\R$, \begin{equation}\label{eq:MeckeMulti} \begin{split} \BE\Big[\sum_{(x_1,\hdots,x_k)\in\eta_{\neq}^k} &f(x_1,\hdots,x_k,\eta)\Big] \\ &= \int_{(\R^d)^k}\BE[f(x_1,\hdots,x_k,\eta+\delta_{x_1}+\hdots+\delta_{x_k})]\,\mu^k(\dint(x_1,\hdots,x_k))\,, \end{split} \end{equation} where $\eta_{\neq}^k$ stands for the set of all $k$-tuples of distinct points of $\eta$ and the integration on the right-hand side is with respect to the $k$-fold product measure of $\mu$ (see Theorem 1.15 in \cite{SWNewPers}). It is a remarkable fact that \eqref{eq:MeckeMulti} for $k=1$ also characterizes the Poisson point process $\eta$.
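As a simple illustration of \eqref{eq:MeckeMulti} in the case $k=1$ (not needed in the sequel), the choice $f(x,\nu)={\bf 1}\{x\in A\}$ for $A\in\mathcal{B}(\R^d)$ recovers the identity $\BE[\eta(A)]=\mu(A)$, while the choice $f(x,\nu)={\bf 1}\{x\in A,\,\nu(B^d(x,r))=1\}$ with $r>0$ shows that the expected number of points of $\eta$ in $A$ having no further point of $\eta$ within distance $r$ equals $\int_A \exp(-\mu(B^d(x,r)))\,\mu(\dint x)$.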
\paragraph{Variance asymptotics and central limit theorems for Poisson functionals.}
By a Poisson functional we mean a random variable $F$ which only depends on a Poisson point process $\eta$. Every Poisson functional $F$ can be written as $F=f(\eta)$ almost surely with a measurable function $f:\bN\to\R$, called a representative of $F$. For a Poisson functional $F$ with representative $f$ and $z\in \R^d$, the first-order difference operator is defined by $$ D_zF := f(\eta+\delta_z) - f(\eta)\,. $$ In other words, $D_zF$ measures the effect on $F$ of adding a point $z$ to $\eta$. Moreover, for $z_1,z_2\in \R^d$, the second-order difference operator is given by \begin{align*} D_{z_1,z_2}^2F &:= D_{z_1}(D_{z_2}F) =f(\eta+\delta_{z_1}+\delta_{z_2})-f(\eta+\delta_{z_1})-f(\eta+\delta_{z_2})+f(\eta)\,. \end{align*} The difference operators play a crucial role in the following result, which is the key tool to establish our central limit theorem, see \cite[Proposition 1.3]{LPS}.
\begin{proposition}\label{prop:LPS}
Let $(F_t)_{t\geq 1}$ be a family of Poisson functionals depending on the Poisson point processes $(\eta_t)_{t\geq 1}$ with intensity measures $t\lambda_d|_W$ for $t\geq 1$, where $W\subset\R^d$ is a compact convex set with interior points. Suppose that there are constants $c_1,c_2\in(0,\infty)$ such that \begin{equation}\label{eq:MomentsDifferenceOperator}
\BE[|D_zF_t|^5]\leq c_1 \quad \text{and}\quad\BE[|D_{z_1,z_2}^2F_t|^5]\leq c_2\,, \qquad z,z_1,z_2\in W\,, \quad t\geq 1\,. \end{equation} Further assume that $t^{-1}\BV[F_t]\geq v$, $t\geq t_0$, for some $v,t_0\in(0,\infty)$ and that \begin{equation}\label{eq:SupDifferenceOperator} \sup_{z_1\in W,\,t\geq 1} t\int_{W}\BP(D_{z_1,z_2}^2F_t\neq 0)^{1/20}\,\dint z_2<\infty\,, \end{equation} and let $Z$ be a standard Gaussian random variable. Then there is a constant $C\in(0,\infty)$ such that \begin{equation}\label{eq:BoundLPS}
\sup_{s\in\R} \Big| \BP\Big(\frac{F_t-\BE[F_t]}{\sqrt{\BV[F_t]}}\leq s \Big) -\BP(Z\leq s) \Big| \leq C\, t^{-1/2}\,, \qquad t\geq 1\,. \end{equation} \end{proposition}
The proof of Proposition \ref{prop:LPS} given in \cite{LPS} is based on a combination of Stein's method for normal approximation, the Malliavin calculus of variations and recent findings concerning the Ornstein-Uhlenbeck semigroup on the Poisson space around the so-called Mehler formula. Clearly, \eqref{eq:BoundLPS} implies that $(F_t-\BE[F_t])/\sqrt{\BV[F_t]}$ converges in distribution to a standard Gaussian random variable, as $t\to\infty$, but it also provides a rate of convergence with respect to the so-called Kolmogorov distance. A similar bound is also available for the Wasserstein distance, but in order to keep the result transparent, we restrict ourselves here to the more prominent and (as we think) more natural Kolmogorov distance.
One problem to overcome when one wishes to apply Proposition \ref{prop:LPS} is to show that the limiting variance of the family $(F_t)_{t\geq 1}$ of Poisson functionals is not degenerate in the sense that $t^{-1}\BV[F_t]$ is uniformly bounded away from zero for sufficiently large $t$. As discussed in the introduction we will relate the variance of $\mathcal{L}_t^{(a)}$, as $t\to\infty$, to the variance of $\widehat{\mathcal{L}}_{B^d(0,r)}^{(a)}$, as $r\to\infty$. A key tool to show that $\BV[\widehat{\mathcal{L}}_{W}^{(a)}]\geq v_a \lambda_d(W)$ with a constant $v_a\in(0,\infty)$ is the following result, which is a version of Theorem 5.2 in \cite{LPS}.
\begin{proposition}\label{prop:LPSvariance} Let $\eta$ be a stationary Poisson point process in $\R^d$ with intensity measure $\lambda_d$ and let $F$ be a square-integrable Poisson functional depending on $\eta$ with representative $f:\bN\to\R$. Assume that there exist $k\in\N$, sets $I_1,I_2\subset\{1,\hdots,k\}$ with $I_1\cup I_2=\{1,\hdots,k\}$, a constant $c\in(0,\infty)$ and bounded sets $W_0,A_1,\hdots,A_k\subset\R^d$ with strictly positive Lebesgue measure such that $$
\big| \BE\big[ f(\eta+\delta_x+\sum_{i\in I_1}\delta_{x+y_i}) - f(\eta+\delta_x+\sum_{i\in I_2}\delta_{x+y_i}) \big] \big| \geq c $$ for $x\in W_0$ and $y_i\in A_i$, $i\in\{1,\hdots,k\}$. Then, there is a constant $v\in(0,\infty)$ only depending on $c$, $k$ and $A_1,\hdots,A_k$ such that $$ \BV[F]\geq v \, \lambda_d(W_0)\,. $$ \end{proposition}
\begin{proof} We define $U=\{(x_1,\hdots,x_{k+1})\in(\R^d)^{k+1}: x_1\in W_0, x_{i+1}\in x_1+A_i, i\in\{1,\hdots,k\}\}$. It follows from Theorem 5.2 in \cite{LPS} that \begin{equation}\label{eq:BoundVarianceLPS}
\BV[F] \geq \frac{c^2}{4^{k+2}(k+1)!} \min_{\emptyset \neq J\subset \{1,\hdots,k+1\}} \inf_{\substack{V\subset U,\\ \lambda_d^{k+1}(V)\geq \lambda_d^{k+1}(U)/2^{k+2}}} \lambda_d^{|J|}(\Pi_J(V))\,, \end{equation}
where $\Pi_J(\cdot)$ stands for the projection onto the components whose indices belong to $J$ and $|J|$ denotes the cardinality of $J$. We have that \begin{equation}\label{eq:ZwischenrechnungVarianz1} \begin{split} \lambda_d^{k+1}(U) & = \int_{W_0}\int_{(\R^d)^k} {\bf 1}\{ x_{i+1}\in x_1+A_i, i\in\{1,\hdots,k\}\} \, \dint(x_2,\hdots,x_{k+1}) \, \dint x_1\\ & = \lambda_d(W_0) \prod_{i=1}^k \lambda_d(A_i)\,. \end{split} \end{equation} Since $A_1,\hdots,A_k$ are bounded, there is a constant $R\in(0,\infty)$ such that, for $(x_1,\hdots,x_{k+1})\in U$, $$
\|x_i-x_j\|\leq R\,, \quad i,j\in\{1,\hdots,k+1\}\,. $$ Let $V\subset U$ be such that $\lambda_d^{k+1}(V)\geq \lambda_d^{k+1}(U)/2^{k+2}$ and fix $\emptyset\neq J\subset \{1,\hdots,k+1\}$. We write $x_J$ (resp.\ $x_{J^C}$) for the vector consisting of all variables from $x_1,\ldots,x_{k+1}$ with index in $J$ (resp.\ $J^C$). Then, we have that \begin{align*}
\lambda_d^{k+1}(V) & \leq \int_{(\R^d)^{k+1}} {\bf 1}\{x_J\in\Pi_J(V)\} {\bf 1}\{(x_J,x_{J^C})\in U\} \, \dint(x_1,\hdots,x_{k+1})\\
& \leq (\kappa_d R^d)^{|J^C|} \int_{(\R^d)^{|J|}} {\bf 1}\{ x_J\in\Pi_J(V)\} \, \dint x_J = (\kappa_d R^d)^{|J^C|}\,\lambda_d^{|J|}(\Pi_J(V))\,. \end{align*} Because of $\lambda_d^{k+1}(V)\geq \lambda_d^{k+1}(U)/2^{k+2}$ and \eqref{eq:ZwischenrechnungVarianz1}, this implies that $$
\lambda_d^{|J|}(\Pi_{J}(V))\geq \frac{\lambda_d^{k+1}(V)}{(\kappa_d R^d)^{|J^C|}} \geq \frac{ \prod_{i=1}^k \lambda_d(A_i)}{ 2^{k+2}(\kappa_d R^d)^{|J^C|}} \, \lambda_d(W_0)\,. $$ Together with \eqref{eq:BoundVarianceLPS}, this concludes the proof. \end{proof}
\section{Proof of Theorem \ref{thm:Variance}}\label{sec:Variance}
We prepare the proof of Theorem \ref{thm:Variance} by collecting some properties of the Poisson functionals we are interested in. Recall that the edge-length functional $\mathcal{L}_t^{(a)}$ defined at \eqref{eq:LtaDef} depends on the Poisson point process $\eta_t$ with intensity measure $t\lambda_d|_W$ for $t\geq 1$. It has representative $$ \mathcal{L}^{(a)}(\operatorname{RST}(\xi)) = \sum_{x\in\xi} \ell(x,\xi)^a\,, \qquad \xi\in\bN\,, $$
where $\ell(x,\xi)=\|x-n(x,\xi)\|$ is the distance between $x$ and its radial nearest neighbour $n(x,\xi)$ in $\xi+\delta_0$. The functional $\ell(\,\cdot\,,\,\cdot\,)$ is monotone and homogeneous in the sense that \begin{equation}\label{eq:monotone} \ell(x,\xi_1+\xi_2+\delta_x)\leq \ell(x,\xi_1+\delta_x)\,, \qquad x\in\R^d\,,\quad \xi_1,\xi_2\in\bN\,, \end{equation} and \begin{equation}\label{eq:homogene} \ell(sx,s\xi)=s \ell(x,\xi)\,, \qquad x\in\R^d\,, \quad \xi\in\bN\,, \quad s>0\,, \end{equation} where $s\xi$ is the counting measure $\sum_{x\in\xi}\delta_{sx}$ we obtain by multiplying each point of $\xi$ with $s$.
The random variable $\widehat{\mathcal{L}}^{(a)}_W$ defined at \eqref{eq:widehatLW} depends on a stationary Poisson point process $\eta$ with intensity one. A representative of $\widehat{\mathcal{L}}^{(a)}_W$ is given by $$ \widehat{\mathcal{L}}^{(a)}_W(\operatorname{DSF}(\xi))=\sum_{x\in\xi\cap W} \ell_e(x,\xi)^a\,, \qquad \xi\in\bN\,, $$
where $\ell_e(x,\xi)=\|x-\hat{n}(x,\xi)\|$ is the distance between $x$ and its closest neighbour in $(\xi-\delta_x)\cap H_{x,e}$. Similarly to $\ell(\,\cdot\,,\,\cdot\,)$, the functional $\ell_e(\,\cdot\,,\,\cdot\,)$ is monotone in that \begin{equation}\label{eq:monotoneelle} \ell_e(x,\xi_1+\xi_2+\delta_x)\leq \ell_e(x,\xi_1+\delta_x)\,, \qquad x\in\R^d, \quad \xi_1,\xi_2\in\bN\,. \end{equation}
We make use of two auxiliary results in the proof of Theorem \ref{thm:Variance}, which play a crucial role in Section \ref{sec:ProofCLT} as well.
\begin{lemma}\label{lem:KonstanteAlphad} There is a constant $\alpha_W\in(0,\infty)$ only depending on $W$ and $d$ such that $$
\lambda_d(B^d(x,u)\cap B^d(0,\|x\|)\cap W)\geq\alpha_W\lambda_d(B^d(x,u))=\alpha_W\kappa_d u^d $$
for all $x\in W$ and $0\leq u\leq \|x\|$. \end{lemma} \begin{proof}
Let $x\in W$ and $0\leq u\leq \|x\|$ be given and define $\hat{x}:=(1-u/(2\|x\|))x$. Since $\|x-\hat{x}\|=u/2$ and $\|\hat{x}\|=\|x\|-u/2$, $B^d(\hat{x},u/2)$ is contained in $B^d(x,u)$ and in $B^d(0,\|x\|)$ so that \begin{align}\label{eq:InclusionSets}
B^d(x,u)\cap B^d(0,\|x\|)\cap W \supset B^d(\hat{x},u/2)\cap W\,. \end{align} It follows from the proof of Lemma 2.5 in \cite{LastPenrose2013} that there is a constant $\gamma_W\in(0,\infty)$ only depending on $W$ and the dimension $d$ such that $$ \lambda_d(B^d(y,r)\cap W)\geq \gamma_W \kappa_d r^d $$
for all $y\in W$ and $0\leq r \leq \max_{z_1,z_2\in W}\|z_1-z_2\|$. Together with \eqref{eq:InclusionSets}, we obtain that $$
\lambda_d(B^d(x,u)\cap B^d(0,\|x\|)\cap W) \geq \lambda_d(B^d(\hat{x},u/2)\cap W)\geq 2^{-d}\gamma_W \kappa_d u^d\,, $$ and the choice $\alpha_W:=2^{-d}\gamma_W$ concludes the proof. \end{proof}
The previous lemma allows us to derive exponential tails for the distribution of $\ell(x,\eta_t+\delta_x)$ as well as bounds for the moments.
\begin{lemma}\label{lem:ExpTail} For all $t\geq 1$, $x\in W$ and $u\geq 0$ one has that \begin{equation}\label{eq:BoundTailell} \BP(\ell(x,\eta_t+\delta_x)\geq u) \leq \exp(-t\alpha_W\kappa_d u^d)\,, \end{equation} where $\alpha_W$ is the constant from Lemma \ref{lem:KonstanteAlphad}. Furthermore, for each $a\geq0$ there is a constant $c_a\in(0,\infty)$ only depending on $a$, $d$ and $\alpha_W$ such that \begin{equation}\label{eq:BoundMomentsell} t^{a/d} \,\BE[\ell(x,\eta_t+\delta_x)^{a}]\leq c_a\,, \qquad x\in W\,, \quad t\geq 1\,. \end{equation} \end{lemma} \begin{proof}
Since $\ell(x,\eta_t+\delta_x)$ is at most $\|x\|$, \eqref{eq:BoundTailell} is obviously true if $u>\|x\|$. For $0\leq u \leq \|x\|$ we have that \begin{align*}
\BP(\ell(x,\eta_t+\delta_{x})\geq u) & = \BP(\eta_t(B^d(x,u)\cap B^d(0,\|x\|)\cap W)=0)\\
&=\exp(-t\lambda_d(B^d(x,u)\cap B^d(0,\|x\|)\cap W))\\ &\leq \exp(-t\alpha_W\kappa_du^d)\,, \end{align*} where we have used that $\eta_t$ is a Poisson point process and Lemma \ref{lem:KonstanteAlphad}. For fixed $a>0$ the previous inequality implies that \begin{equation}\label{eq:XXXRRRTTT} \begin{split} t^{a/d}\, \BE[\ell(x,\eta_t+\delta_x)^{a}] & = \int_0^\infty \BP(t^{a/d}\ell(x,\eta_t+\delta_x)^{a} \geq u) \, \dint u\\ & \leq \int_0^\infty \exp(-\alpha_W\kappa_d u^{d/a}) \, \dint u\\ & = \frac{a}{d (\alpha_W\kappa_d)^{a/d}}\int_0^\infty v^{a/d-1} \exp(-v) \, \dint v\\ & = \frac{1}{(\alpha_W \kappa_d)^{a/d}}\Gamma\Big(1+\frac{a}{d}\Big)=:c_a \end{split} \end{equation} for all $t\geq 1$. Finally, if $a=0$, \eqref{eq:BoundMomentsell} is obviously true with $c_0=1$. \end{proof}
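As a concrete illustration of \eqref{eq:BoundMomentsell} (included only for orientation), in the planar case $d=2$ and for $a=1$ the computation in \eqref{eq:XXXRRRTTT} yields $c_1=\Gamma(3/2)/(\alpha_W\kappa_2)^{1/2}=1/(2\sqrt{\alpha_W})$, since $\kappa_2=\pi$ and $\Gamma(3/2)=\sqrt{\pi}/2$.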
\begin{remark}\label{rem:a>=0}\rm In Theorem \ref{thm:Variance} and Theorem \ref{thm:CLT} we assume that $a\geq 0$. One reason for this is the first equality in \eqref{eq:XXXRRRTTT}, which is false in case that $a<0$. A similar relation is also used for the variance asymptotics studied below. \end{remark}
The proof of Theorem \ref{thm:Variance} is divided into several steps. We start with the expectation asymptotics, thereby generalizing the approach in \cite{BaBo} from the planar case to higher dimensions.
\begin{proof}[Proof of \eqref{eq:ExpectationAsymptotics}] First, consider the case $a=0$. Here, we have that $$t^{a/d-1}\,\BE[\cL_t^{(a)}]=t^{-1}\,\BE[\eta_t(W)]=\lambda_d(W)\,.$$ Next, suppose that $a>0$. We use the Mecke formula \eqref{eq:MeckeMulti} to see that $$ t^{a/d-1}\,\BE[\cL_t^{(a)}] =\int_W t^{a/d}\,\BE[\ell(x,\eta_t+\delta_x)^a]\,\dint x\,. $$ By computing the expectation in the integral, we obtain that \begin{equation}\label{eq:ExpectAsymp1} t^{a/d-1}\,\BE[\cL_t^{(a)}] = a\int_W \int_0^\infty u^{a-1}\BP(t^{1/d}\ell(x,\eta_t+\delta_{x})\geq u)\,\dint u \,\dint x\,, \end{equation} where \begin{align}\label{eq:WESExpectAsymp}
\BP(t^{1/d}\ell(x,\eta_t+\delta_{x})\geq u) =& \exp(-t\lambda_d(B^d(x,t^{-1/d}u)\cap B^d(0,\|x\|)\cap W))\,, \quad u\geq 0\,, \end{align}
since $\eta_t$ is a Poisson point process with intensity measure $t\lambda_d|_W$. Since, as a consequence of Lemma \ref{lem:ExpTail}, $$ \BP(t^{1/d}\ell(x,\eta_t+\delta_{x})\geq u) \leq \exp(-\alpha_W\kappa_du^d) $$ and $$ a \int_W \int_0^\infty u^{a-1} \exp\left(-\alpha_W\kappa_d u^{d}\right)\,\dint u \, \dint x<\infty\,, $$ we can apply the dominated convergence theorem to \eqref{eq:ExpectAsymp1} and obtain that $$ \lim_{t\to\infty}t^{a/d-1}\,\BE[\cL_t^{(a)}] = a \int_W\int_0^\infty u^{a-1} \lim_{t\to\infty}\BP(t^{1/d}\ell(x,\eta_t+\delta_{x})\geq u)\,\dint u\,\dint x $$ with the probability $\BP(t^{1/d}\ell(x,\eta_t+\delta_{x})\geq u)$ given by \eqref{eq:WESExpectAsymp}. For all $x\in\interior{(W)}$ we have that $$
\lim_{t\to\infty} t\lambda_d(B^d(x,t^{-1/d}u)\cap B^d(0,\|x\|)\cap W)=\frac{\kappa_d}{2}u^d $$ and, consequently, $$ \lim_{t\to\infty} \BP(t^{1/d}\ell(x,\eta_t+\delta_{x})\geq u) = \exp\Big(-{1\over 2}\kappa_du^{d}\Big)\,. $$ Summarizing, we find that \begin{align*} \lim_{t\to\infty}t^{a/d-1}\,\BE[\cL_t^{(a)}] &= \lambda_d(W)\,a\,\int_0^\infty u^{a-1}\,\exp\Big(-{1\over 2}\kappa_du^{d}\Big)\,\dint u\\ & = \left({2\over\kappa_d}\right)^{a/d}\Gamma\Big(1+{a\over d}\Big)\,\lambda_d(W)\,. \end{align*} This completes the proof of \eqref{eq:ExpectationAsymptotics}. \end{proof}
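To make the limit constant explicit in a concrete case (this is just arithmetic based on the formula above, not an additional result), take $d=2$ and $a=1$: then $(2/\kappa_2)^{1/2}\Gamma(3/2)=(2/\pi)^{1/2}\sqrt{\pi}/2=1/\sqrt{2}$, so that $\BE[\cL_t^{(1)}]\sim\lambda_2(W)\sqrt{t/2}$, as $t\to\infty$.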
Recall from the definition of the edge-length functionals $\widehat{\mathcal{L}}_W^{(a)}$ for the directed spanning forest that $\ell_e(x,\eta+\delta_x)$ is the distance from $x$ to the closest point of the unit-intensity stationary Poisson point process $\eta$ that is contained in the half-space $H_{x,e}$. A computation similar to that in the proof of \eqref{eq:ExpectationAsymptotics} shows that, for $a>0$ and $x\in\R^d$, \begin{equation}\label{eq:Expectationellea} \begin{split} \BE[\ell_e(x,\eta+\delta_x)^a] & = \BE[\ell_e(0,\eta+\delta_0)^a] = a \int_0^\infty u^{a-1}\BP(\ell_e(0,\eta+\delta_0)\geq u) \, \dint u\\
& = a \int_0^\infty u^{a-1}\exp(-\kappa_d u^d/2) \, \dint u = \left({2\over\kappa_d}\right)^{a/d}\Gamma\left(1+{a\over d}\right)\,, \end{split} \end{equation} whence, by the Mecke formula \eqref{eq:MeckeMulti}, $$ \lim_{t\to\infty} t^{a/d-1}\, \BE[\mathcal{L}^{(a)}_t] = \BE[\widehat{\mathcal{L}}_W^{(a)}]\,. $$
Our next goal is to establish the existence of the variance limit. Positivity of the limiting variance is postponed to Lemma \ref{lem:VarianceStrictlyPositive} below.
\begin{lemma}\label{lem:ExistenceVarianceAsymptotics} For any $a\geq 0$ the limit $\lim\limits_{t\to\infty}t^{2a/d-1}\BV[\cL_t^{(a)}]$ exists and equals $v_a\,\lambda_d(W)$ with a constant $v_a\in[0,\infty)$ given by \begin{align*} v_a & =\int_{\R^d} \BE[\ell_e(0,\eta+\delta_0+\delta_z)^a \ell_e(z,\eta+\delta_0+\delta_z)^a] - \BE[\ell_e(0,\eta+\delta_0)^a ] \, \BE[\ell_e(z,\eta+\delta_z)^a ] \, \dint z\\ & \qquad\qquad\qquad + \BE[\ell_e(0,\eta+\delta_0)^{2a}]\,, \end{align*} where $e\in\mathbb{S}^{d-1}$ is some fixed direction. \end{lemma}
\begin{proof} For $a=0$, $t^{2a/d-1}\BV[\cL_t^{(a)}]=t^{-1}\BV[\eta_t(W)]=t^{-1}\BE[\eta_t(W)]=\lambda_d(W)$ since $\eta_t(W)$ is a Poisson random variable with mean $t\lambda_d(W)$. Hence, $v_0=1$ and we can and will from now on restrict to the case $a>0$, where we re-write the variance of $\mathcal{L}_t^{(a)}$ as $$ \BV[\mathcal{L}^{(a)}_t] = \BE\Big[\sum_{(x,y)\in\eta^2_{t,\neq}} \ell(x,\eta_t)^a \ell(y,\eta_t)^a\Big]-\Big(\BE\Big[\sum_{x\in\eta_t} \ell(x,\eta_t)^{a}\,\Big]\Big)^2+\BE\Big[\sum_{x\in\eta_t} \ell(x,\eta_t)^{2a}\Big]\,. $$ Now, we use the multivariate Mecke equation \eqref{eq:MeckeMulti} to deduce that $$t^{2a/d-1}\BV[\cL_t^{(a)}] = T_1(t)+T_2(t)$$ with $T_1(t)$ and $T_2(t)$ given by \begin{align*} T_1(t) &:= t\int_W\int_W t^{2a/d} \, \BE[\ell(x,\eta_t+\delta_x+\delta_y)^a \, \ell(y,\eta_t+\delta_x+\delta_y)^a]\\ &\qquad\qquad\qquad\qquad -t^{2a/d}\,\BE[\ell(x,\eta_t+\delta_x)^a] \, \BE[\ell(y,\eta_t+\delta_y)^a]\,\dint y \, \dint x\,,\\ T_2(t) &:= \int_W t^{2a/d}\,\BE[\ell(x,\eta_t+\delta_x)^{2a}]\,\dint x\,. \end{align*} It follows from $T_2(t)=t^{2a/d-1}\BE[\mathcal{L}^{(2a)}_t]$, the expectation asymptotics \eqref{eq:ExpectationAsymptotics} and \eqref{eq:Expectationellea} that \begin{equation}\label{eq:limit3} \lim_{t\to\infty} T_2(t) = \lim_{t\to\infty} t^{2a/d-1}\BE[\mathcal{L}^{(2a)}_t] = \BE[\ell_e(0,\eta+\delta_0)^{2a}] \lambda_d(W)\,. \end{equation} Using the substitution $y=x+t^{-1/d}z$, we re-write $T_{1}(t)$ as \begin{equation}\label{eq:T1} \begin{split} T_{1}(t) & = \int_W\int_{\R^d} {\bf 1}\{x+t^{-1/d}z\in W\}\\ & \hskip 1.5cm \times \Big(t^{2a/d} \, \BE[\ell(x,\eta_t+\delta_x+\delta_{x+t^{-1/d}z})^a \ell(x+t^{-1/d}z,\eta_t+\delta_x+\delta_{x+t^{-1/d}z})^a]\\ & \hskip 2.15cm -t^{2a/d} \, \BE[\ell(x,\eta_t+\delta_x)^a]\,\BE[\ell(x+t^{-1/d}z,\eta_t+\delta_{x+t^{-1/d}z})^a]\Big) \,\dint z \, \dint x\,. \end{split} \end{equation} Let $A$ be the union of the events $$
A_1=\{\eta_t(B^d(x,t^{-1/d}\|z\|/2)\cap B^d(0,\|x\|))=0\} $$ and $$
A_2=\{\eta_t(B^d(x+t^{-1/d}z,t^{-1/d}\|z\|/2)\cap B^d(0,\|x+t^{-1/d}z\|))=0\}\,. $$ Since the expectation factorizes on the complement $A^C$ of $A$ by independence, we have that \begin{align*} & \BE[\ell(x,\eta_t+\delta_x+\delta_{x+t^{-1/d}z})^a \, \ell(x+t^{-1/d}z,\eta_t+\delta_x+\delta_{x+t^{-1/d}z})^a\,{\bf 1}_{A^C}]\\ &= \BE[\ell(x,\eta_t+\delta_x)^a\,{\bf 1}_{A_1^C}] \, \BE[\ell(x+t^{-1/d}z,\eta_t+\delta_{x+t^{-1/d}z})^a\,{\bf 1}_{A_2^C}]\,. \end{align*} It follows from Lemma \ref{lem:KonstanteAlphad} that $$
\BP(A_1)\leq \exp(-\alpha_W\kappa_d\|z\|^d/2^d)\,, \qquad \BP(A_2)\leq \exp(-\alpha_W\kappa_d\|z\|^d/2^d) $$ and hence $$
\BP(A) \leq 2 \exp(-\alpha_W\kappa_d\|z\|^d/2^d)\,. $$ Thus, using \eqref{eq:monotone}, repeatedly the Cauchy-Schwarz inequality and Lemma \ref{lem:ExpTail}, we see that the integrand of $T_{1}(t)$ is bounded in absolute value from above by \begin{align*} & t^{2a/d} \, \BE[\ell(x,\eta_t+\delta_x+\delta_{x+t^{-1/d}z})^a \, \ell(x+t^{-1/d}z,\eta_t+\delta_x+\delta_{x+t^{-1/d}z})^a\,{\bf 1}_{A}]\\ & \quad + t^{2a/d} \, \BE[\ell(x,\eta_t+\delta_x)^a\,{\bf 1}_{A_1}] \, \BE[\ell(x+t^{-1/d}z,\eta_t+\delta_{x+t^{-1/d}z})^a]\\ & \quad + t^{2a/d} \, \BE[\ell(x,\eta_t+\delta_x)^a] \, \BE[\ell(x+t^{-1/d}z,\eta_t+\delta_{x+t^{-1/d}z})^a\,{\bf 1}_{A_2}]\\ & \leq \BE[t^{4a/d}\ell(x,\eta_t+\delta_x)^{4a}]^{1/4} \, \BE[t^{4a/d}\ell(x+t^{-1/d}z,\eta_t+\delta_{x+t^{-1/d}z})^{4a}]^{1/4}\\
& \quad\quad \times\sqrt{2} \exp(-\alpha_W\kappa_d\|z\|^d/2^{d+1})\\ & \quad +2\BE[t^{2a/d}\ell(x,\eta_t+\delta_x)^{2a}]^{1/2} \, \BE[t^{2a/d}\ell(x+t^{-1/d}z,\eta_t+\delta_{x+t^{-1/d}z})^{2a}]^{1/2} \\
& \quad\quad \times\exp(-\alpha_W\kappa_d\|z\|^d/2^{d+1})\\
& \leq (\sqrt{2c_{4a}}+2c_{2a})\exp(-\alpha_W \kappa_d \|z\|^d/2^{d+1})\,. \end{align*} As a consequence, the integrand in \eqref{eq:T1} is bounded by an integrable function. We can thus apply the dominated convergence theorem and obtain that \begin{align*} \lim_{t\to\infty} T_{1}(t) \!& =\!\! \int_W\int_{\R^d} \lim_{t\to\infty} {\bf 1}\{x+t^{-1/d}z\in W\}\\ & \hskip 2.1cm \times\Big(t^{2a/d}\,\BE[\ell(x,\eta_t+\delta_x+\delta_{x+t^{-1/d}z})^a \, \ell(x+t^{-1/d}z,\eta_t+\delta_x+\delta_{x+t^{-1/d}z})^a]\\ & \hskip 2.75cm -t^{2a/d}\,\BE[\ell(x,\eta_t+\delta_x)^a]\,\BE[\ell(x+t^{-1/d}z,\eta_t+\delta_{x+t^{-1/d}z})^a]\Big) \,\dint z \, \dint x\,. \end{align*}
Using the homogeneity relation \eqref{eq:homogene} and the fact that $t^{1/d}\eta_t$ has the same distribution as the restriction $(t^{1/d}x+\eta)|_{t^{1/d}W}$ of the translated Poisson point process $t^{1/d}x+\eta$ to $t^{1/d}W$, we see that \begin{align*} &t^{2a/d}\,\BE[\ell(x,\eta_t+\delta_x+\delta_{x+t^{-1/d}z})^a \, \ell(x+t^{-1/d}z,\eta_t+\delta_x+\delta_{x+t^{-1/d}z})^a]\\ & \qquad -t^{2a/d}\,\BE[\ell(x,\eta_t+\delta_x)^a] \, \BE[\ell(x+t^{-1/d}z,\eta_t+\delta_{x+t^{-1/d}z})^a]\\ & = \BE[\ell(t^{1/d}x,t^{1/d}\eta_t+\delta_{t^{1/d}x}+\delta_{t^{1/d}x+z})^a \, \ell(t^{1/d}x+z,t^{1/d}\eta_t+\delta_{t^{1/d}x}+\delta_{t^{1/d}x+z})^a]\\ & \qquad - \BE[\ell(t^{1/d}x,t^{1/d}\eta_t+\delta_{t^{1/d}x})^a] \, \BE[\ell(t^{1/d}x+z,t^{1/d}\eta_t+\delta_{t^{1/d}x+z})^a]\\
& = \BE[\ell(t^{1/d}x,(t^{1/d}x+\eta)|_{t^{1/d}W}+\delta_{t^{1/d}x}+\delta_{t^{1/d}x+z})^a\\
& \qquad\qquad\qquad \times\ell(t^{1/d}x+z,(t^{1/d}x+\eta)|_{t^{1/d}W}+\delta_{t^{1/d}x}+\delta_{t^{1/d}x+z})^a]\\
& \qquad - \BE[\ell(t^{1/d}x,(t^{1/d}x+\eta)|_{t^{1/d}W}+\delta_{t^{1/d}x})^a]\,\BE[\ell(t^{1/d}x+z,(t^{1/d}x+\eta)|_{t^{1/d}W}+\delta_{t^{1/d}x+z})^a]\,. \end{align*} It is not hard to verify that, for all $x\in\interior{(W)}$ with $x\neq0$ and $z\in\R^d$, the relations \begin{align}
& \lim_{t\to\infty} \ell(t^{1/d}x,(t^{1/d}x+\eta)|_{t^{1/d}W}+\delta_{t^{1/d}x}+\delta_{t^{1/d}x+z}) = \ell_{x/\|x\|}(0,\eta+\delta_{0}+\delta_{z}) \label{eq:limit1}\\
& \lim_{t\to\infty} \ell(t^{1/d}x+z,(t^{1/d}x+\eta)|_{t^{1/d}W}+\delta_{t^{1/d}x}+\delta_{t^{1/d}x+z}) = \ell_{x/\|x\|}(z,\eta+\delta_{0}+\delta_{z}) \\
& \lim_{t\to\infty} \ell(t^{1/d}x,(t^{1/d}x+\eta)|_{t^{1/d}W}+\delta_{t^{1/d}x}) = \ell_{x/\|x\|}(0,\eta+\delta_0) \\
& \lim_{t\to\infty} \ell(t^{1/d}x+z,(t^{1/d}x+\eta)|_{t^{1/d}W}+\delta_{t^{1/d}x+z}) = \ell_{x/\|x\|}(z,\eta+\delta_z)\label{eq:limit2} \end{align} hold with probability one. In order to apply the dominated convergence theorem again, we have to find integrable majorants for the terms on the left-hand sides of \eqref{eq:limit1}--\eqref{eq:limit2}. First note that by the monotonicity relation \eqref{eq:monotone} we have that $$
\ell(t^{1/d}x,(t^{1/d}x+\eta)|_{t^{1/d}W}+\delta_{t^{1/d}x}+\delta_{t^{1/d}x+z})\leq \ell(t^{1/d}x,(t^{1/d}x+\eta)|_{t^{1/d}W}+\delta_{t^{1/d}x}) $$ and \begin{align*}
&\ell(t^{1/d}x+z,(t^{1/d}x+\eta)|_{t^{1/d}W}+\delta_{t^{1/d}x}+\delta_{t^{1/d}x+z})\\
&\qquad\qquad \leq \ell(t^{1/d}x+z,(t^{1/d}x+\eta)|_{t^{1/d}W}+\delta_{t^{1/d}x+z})\,. \end{align*}
For each $x\in\interior{(W)}$ we can construct a cone $K_x$ such that, for $t\geq 1$, a point $y\in t^{1/d} x+K_x$ is also in $t^{1/d}W$ if $\|y-t^{1/d}x\|\leq t^{1/d}\|x\|/2$. This can be done by choosing a point $p_x$ on the line segment between $x$ and the origin $0$ that is sufficiently close to $0$. Since this will always be an interior point of $W$, we find a ball $B^d(p_x,r_x)$ of a certain radius $r_x>0$ and centred at $p_x$, which is completely contained in $W$ and generates $K_x$ together with $x$. That is, $K_x$ is the smallest cone with apex at the origin such that $x+K_x$ contains $B^d(p_x,r_x)$. Then, \begin{equation}\label{eq:BoundStationary1}
\ell(t^{1/d}x,(t^{1/d}x+\eta)|_{t^{1/d}W}+\delta_{t^{1/d}x})\leq 2\min_{y\in\eta\cap K_x}\|y\|\,. \end{equation}
If the opening angle of the cone $K_x$ is sufficiently small, we have that $\|y\|\leq \|t^{1/d}x+z\|$ if $y\in t^{1/d}x +K_x$ and $\|y-t^{1/d}x\|\geq 2\|z\|$. Thus, it holds that \begin{equation}\label{eq:BoundStationary2}
\ell(t^{1/d}x+z,(t^{1/d}x+\eta)|_{t^{1/d}W}+\delta_{t^{1/d}x+z}) \leq 2 \min_{y\in\eta\cap K_x, \|y\|\geq 2\|z\|}\|y\|\,. \end{equation} It follows from the fact that $\eta$ is a stationary Poisson point process with intensity one that $$
\BP(\min_{y\in\eta\cap K_x, \|y\|\geq 2\|z\|}\|y\|\geq u) = \exp\big(-\lambda_d(K_x\cap B^d(0,1))(u^d-2^d\|z\|^d)\big) $$
for $u\geq 2\|z\|$. Hence, the right-hand sides in \eqref{eq:BoundStationary1} and \eqref{eq:BoundStationary2} are integrable majorants for the expressions on the left-hand sides in \eqref{eq:limit1}--\eqref{eq:limit2}. Thus, we obtain by the dominated convergence theorem that \begin{align*} & \lim_{t\to\infty} {\bf 1}\{x+t^{-1/d}z\in W\} t^{2a/d}\Big(\BE[\ell(x,\eta_t+\delta_x+\delta_{x+t^{-1/d}z})^a \ell(x+t^{-1/d}z,\eta_t+\delta_x+\delta_{x+t^{-1/d}z})^a]\\ & \hskip 5.5cm -\BE[\ell(x,\eta_t+\delta_x)^a]\,\BE[\ell(x+t^{-1/d}z,\eta_t+\delta_{x+t^{-1/d}z})^a]\Big)\\
& = \BE[\ell_{x/\|x\|}(0,\eta+\delta_0+\delta_z)^a \, \ell_{x/\|x\|}(z,\eta+\delta_0+\delta_z)^a] \\
&\qquad\qquad\qquad\qquad- \BE[\ell_{x/\|x\|}(0,\eta+\delta_0)^a]\,\BE[\ell_{x/\|x\|}(z,\eta+\delta_{z})^a]\,. \end{align*} Together with the fact that the expectations in the previous term are independent of the choice of $x\in\R^d$, we see that \begin{align*} \lim_{t\to\infty}T_1(t) = \lambda_d(W) \int_{\R^d} & \BE[\ell_e(0,\eta+\delta_0+\delta_z)^a \, \ell_e(z,\eta+\delta_0+\delta_z)^a]\\
&\qquad\qquad - \BE[\ell_e(0,\eta+\delta_0)^a]\,\BE[\ell_e(z,\eta+\delta_{z})^a] \, \dint z\,, \end{align*} which together with \eqref{eq:limit3} concludes the proof. \end{proof}
After having established the existence of the variance limit in \eqref{eq:VarianceAsymptotics}, we need to show that it is bounded away from zero. As a first step, the next lemma relates the asymptotic variance of $\mathcal{L}_t^{(a)}$, as $t\to\infty$, to that of $\widehat{\mathcal{L}}_{B^d(0,r)}^{(a)}$, as $r\to\infty$. Recall \eqref{eq:widehatLW} for the definition of $\widehat{\mathcal{L}}_{B^d(0,r)}^{(a)}$ and note that $\widehat{\mathcal{L}}_{B^d(0,r)}^{(a)}$ depends on a direction $e\in\mathbb{S}^{d-1}$, which is suppressed in our notation.
\begin{lemma}\label{lem:VarianceDSF} For $a\geq 0$, $$ \lim\limits_{r\to\infty} \frac{\BV[\widehat{\mathcal{L}}_{B^d(0,r)}^{(a)}]}{\kappa_d r^d}=v_a\,, $$ where $v_a$ is the constant from Lemma \ref{lem:ExistenceVarianceAsymptotics}. \end{lemma}
\begin{proof} For $a=0$, $\widehat{\mathcal{L}}_{B^d(0,r)}^{(a)}$ is a Poisson random variable with mean $\lambda_d(B^d(0,r))=\kappa_dr^d$ and the statement is thus satisfied with $v_0=1$. So, we may assume that $a>0$. It follows from the multivariate Mecke equation \eqref{eq:MeckeMulti} and the same argument as at the beginning of the proof of Lemma \ref{lem:ExistenceVarianceAsymptotics} that $$ \BV[\widehat{\cL}_{B^d(0,r)}^{(a)}] = T_{1,r}+T_{2,r} $$ with $T_{1,r}$ and $T_{2,r}$ given by \begin{align*} T_{1,r} &:= \int_{B^d(0,r)}\int_{\mathbb{R}^d} {\bf 1}\{y\in B^d(0,r)\} \Big(\BE[\ell_e(x,\eta+\delta_x+\delta_y)^a \, \ell_e(y,\eta+\delta_x+\delta_y)^a]\\ &\qquad\qquad\qquad\qquad\qquad\qquad\qquad -\BE[\ell_e(x,\eta+\delta_x)^a]\,\BE[\ell_e(y,\eta+\delta_y)^a]\Big) \,\dint y \, \dint x\,,\\ T_{2,r} &:= \int_{B^d(0,r)} \BE[\ell_e(x,\eta+\delta_x)^{2a}]\,\dint x\,. \end{align*} By the translation invariance of $\ell_e$ and $\eta$ we have that \begin{align*} T_{1,r} &:= \int_{B^d(0,r)}\int_{\mathbb{R}^d} {\bf 1}\{y-x\in B^d(-x,r)\}\\ & \hskip 2.5cm \Big(\BE[\ell_e(0,\eta+\delta_0+\delta_{y-x})^a \, \ell_e(y-x,\eta+\delta_0+\delta_{y-x})^a]\\ & \hskip 2.75cm -\BE[\ell_e(0,\eta+\delta_{0})^a]\,\BE[\ell_e(y-x,\eta+\delta_{y-x})^a]\Big) \,\dint y \, \dint x \allowdisplaybreaks\\ &= \int_{B^d(0,r)}\int_{\mathbb{R}^d} {\bf 1}\{y\in B^d(-x,r)\} \Big(\BE[\ell_e(0,\eta+\delta_0+\delta_y)^a \, \ell_e(y,\eta+\delta_0+\delta_y)^a]\\ & \hskip 6cm -\BE[\ell_e(0,\eta+\delta_0)^a]\,\BE[\ell_e(y,\eta+\delta_y)^a]\Big) \,\dint y \, \dint x \allowdisplaybreaks\\ &= r^d \int_{B^d(0,1)}\int_{\mathbb{R}^d} {\bf 1}\{y\in B^d(-rx,r)\} \Big(\BE[\ell_e(0,\eta+\delta_0+\delta_y)^a \, \ell_e(y,\eta+\delta_0+\delta_y)^a]\\ & \hskip 6.5cm -\BE[\ell_e(0,\eta+\delta_0)^a]\,\BE[\ell_e(y,\eta+\delta_y)^a]\Big) \,\dint y \, \dint x\,. \end{align*} Let $y\in\R^d$ and let $A_1$ and $A_2$ be the events
$$A_1:=\{\eta(B^d(0,\|y\|/2))=0\}\qquad\text{and}\qquad A_2:=\{\eta(B^d(y,\|y\|/2))=0\}\,,$$ respectively. On $(A_1\cup A_2)^C$ the expectation factorizes so that \begin{align*} &\BE[\ell_e(0,\eta+\delta_0+\delta_y)^a \, \ell_e(y,\eta+\delta_0+\delta_y)^a {\bf 1}_{(A_1\cup A_2)^C}] \\ &\qquad\qquad= \BE[\ell_e(0,\eta+\delta_0)^a {\bf 1}_{A_1^C}]\,\BE[\ell_e(y,\eta+\delta_y)^a {\bf 1}_{A_2^C}]\,. \end{align*} Together with the Cauchy-Schwarz inequality and \eqref{eq:monotoneelle}, this implies that \begin{align*}
& \big|\BE[\ell_e(0,\eta+\delta_0+\delta_y)^a \, \ell_e(y,\eta+\delta_0+\delta_y)^a] -\BE[\ell_e(0,\eta+\delta_0)^a]\,\BE[\ell_e(y,\eta+\delta_y)^a]\big|\\ & \leq \BE[\ell_e(0,\eta+\delta_0+\delta_y)^a \, \ell_e(y,\eta+\delta_0+\delta_y)^a {\bf 1}_{(A_1\cup A_2)}]\\ & \quad +\BE[\ell_e(0,\eta+\delta_0)^a {\bf 1}_{A_1}]\,\BE[\ell_e(y,\eta+\delta_y)^a]+\BE[\ell_e(0,\eta+\delta_0)^a]\,\BE[\ell_e(y,\eta+\delta_y)^a {\bf 1}_{A_2}]\\ & \leq \BE[\ell_e(0,\eta+\delta_0)^{4a}]^{1/4} \, \BE[\ell_e(y,\eta+\delta_y)^{4a}]^{1/4} \, \BP(A_1\cup A_2)^{1/2}\\ & \quad +\BE[\ell_e(0,\eta+\delta_0)^{2a}]^{1/2} \, \BE[\ell_e(y,\eta+\delta_y)^{2a}]^{1/2} \, (\BP(A_1)^{1/2}+\BP(A_2)^{1/2})\,. \end{align*} Since $\BP(\ell_e(0,\eta+\delta_0)\geq u) = \exp(-\kappa_d u^d/2)$ for $u\geq 0$, all moments of $\ell_e(0,\eta+\delta_0)$ are finite. Moreover, we have that $$
\BP(A_1)=\BP(A_2)=\exp(-\kappa_d \|y\|^d/2^d) \quad \text{ and } \quad \BP(A_1\cup A_2)\leq 2\exp(-\kappa_d \|y\|^d/2^d)\,. $$ Hence, there is a constant $C_a\in(0,\infty)$ such that \begin{align*}
& \big|\BE[\ell_e(0,\eta+\delta_0+\delta_y)^a \, \ell_e(y,\eta+\delta_0+\delta_y)^a] -\BE[\ell_e(0,\eta+\delta_0)^a]\,\BE[\ell_e(y,\eta+\delta_y)^a]\big|\\
&\qquad \leq C_a \exp(-\kappa_d \|y\|^d/2^{d+1})\,. \end{align*} This allows us to apply the dominated convergence theorem, which yields \begin{align*} \lim_{r\to\infty} \frac{T_{1,r}}{\kappa_d r^d} &= \int_{\R^d} \BE[\ell_e(0,\eta+\delta_0+\delta_y)^a \ell_e(y,\eta+\delta_0+\delta_y)^a]\\ &\qquad\qquad\qquad - \BE[\ell_e(0,\eta+\delta_0)^a ] \, \BE[\ell_e(y,\eta+\delta_y)^a ] \, \dint y\,. \end{align*} On the other hand we have that $$T_{2,r}=\kappa_d r^d\,\BE[\ell_e(0,\eta+\delta_0)^{2a}]\,.$$ This completes the proof. \end{proof}
Lemma \ref{lem:ExistenceVarianceAsymptotics} and Lemma \ref{lem:VarianceDSF} show that the suitably normalized functionals $\cL_t^{(a)}$ and $\widehat{\cL}_{B^d(0,r)}^{(a)}$ have the same limiting variance $v_a$, as $t\to\infty$ or $r\to\infty$, respectively. To complete the proof of Theorem \ref{thm:Variance} it remains to show that $v_a$ is strictly positive. Our preceding observation shows that for this purpose it suffices to analyse the directed spanning forest ${\rm DSF}(\eta)$ of a stationary Poisson point process $\eta$ in $\R^d$ with intensity $1$. Compared to the radial spanning tree and as discussed already in the introduction, the advantage of this model is that its construction is homogeneous in space, as it is based on the notion of half-spaces. This makes it much easier to analyse. Recall the definition \eqref{eq:widehatLW} of $\widehat{\cL}_W^{(a)}$ and note that it depends on a previously chosen direction $e\in\mathbb{S}^{d-1}$.
\begin{lemma}\label{lem:VarianceStrictlyPositive} For all $e\in\mathbb{S}^{d-1}$ and $a\geq 0$ one has that $\BV[\widehat{\cL}^{(a)}_W]\geq \hat{v}_{a} \lambda_d(W)$ with a constant $\hat{v}_a\in(0,\infty)$ only depending on $a$, $d$ and $$ r_W:=\sup\{u\in(0,1]: \lambda_d(\{x\in W: B^d(x,\max\{2,\sqrt{d}\}u)\subset W\})\geq \lambda_d(W)/2\}\,. $$ In particular, the constants $v_a$, $a\geq 0$, defined in Lemma \ref{lem:ExistenceVarianceAsymptotics} are strictly positive. \end{lemma} \begin{proof} Since $v_0=1$ as observed above, we restrict from now on to the case $a>0$. Without loss of generality, we can assume that $e=(0,-1,0,\hdots,0)$. We define $W_0=\{x\in W: B^d(x,\max\{2,\sqrt{d}\}r_W)\subset W\}$.
For technical reasons, we have to distinguish two cases and start with the situation that $a\geq 1$. Let us define $$ z_1:=(-1/2,-\sqrt{3}/2,0,\hdots,0)\,,\ z_2:=(1/2,-\sqrt{3}/2,0,\hdots,0)\,,\ z_3:=(0,-\sqrt{3}/2,0,\hdots,0)\,. $$ The points $0$, $z_1$ and $z_2$ form an equilateral triangle with side-length one, and $z_3$ is the midpoint between $z_1$ and $z_2$, see Figure \ref{fig:Variance}. Because of $2 > 2 (1/2)^a + (\sqrt{3}/2)^a$ we have that $$
\ell_e(z_1,\delta_{0}+\delta_{z_1}+\delta_{z_2})^a + \ell_e(z_2,\delta_{0}+\delta_{z_1}+\delta_{z_2})^a > c+\|z_1-z_3\|^a+ \|z_2-z_3\|^a + \|z_3\|^a $$ with a constant $c\in(0,\infty)$. By continuity, there is a constant $r_0\in(0,r_W)$ such that $$
\ell_e(y_1,\delta_{0}+\delta_{y_1}+\delta_{y_2})^a + \ell_e(y_2,\delta_{0}+\delta_{y_1}+\delta_{y_2})^a > \frac{c}{2}+\|y_1-y_3\|^a+ \|y_2-y_3\|^a + \|y_3\|^a $$ for all $y_1\in B^d(z_1,r_0)\cap H_{z_1,e}^C$, $y_2\in B^d(z_2,r_0)\cap H_{z_2,e}^C$ and $y_3\in B^d(z_3,r_0)\cap H_{z_3,e}$. Moreover, we choose constants $\tilde{c}\in(0,\infty)$ and $s\in(0,1)$ such that \begin{equation}\label{eq:ExpectationDifferencePoints} -\frac{c}{2} \exp(-4^d \kappa_d s^d)+ \Big({\sqrt{3}\over 2}+1\Big)^a (1-\exp(-4^d \kappa_d s^d)) \leq - \tilde{c}\,. \end{equation} In the following let $x\in W_0$ and $y_1,y_2,y_3$ be as above. If $\eta(B^d(x,4s))=0$, we have that \begin{align*} & \widehat{\mathcal{L}}^{(a)}_W\Big(\operatorname{DSF}(\eta+\delta_x+\sum_{i=1}^3\delta_{x+s y_i})\Big) - \widehat{\mathcal{L}}^{(a)}_W\Big(\operatorname{DSF}(\eta+\delta_{x}+\sum_{i=1}^2\delta_{x+s y_i})\Big)\\
& \leq s^a \Big(\|y_1-y_3\|^a+\|y_2-y_3\|^a+\|y_3\|^a-\ell_e(y_1,\delta_{0}+\delta_{y_1}+\delta_{y_2})^a -\ell_e(y_2,\delta_{0}+\delta_{y_1}+\delta_{y_2})^a\Big)\\ & \leq - \frac{c s^a}{2}\,. \end{align*} On the other hand, it holds that \begin{align*} & \widehat{\mathcal{L}}^{(a)}_W\Big(\operatorname{DSF}(\eta+\delta_x+\sum_{i=1}^3 \delta_{x+sy_i})\Big) - \widehat{\mathcal{L}}^{(a)}_W\Big(\operatorname{DSF}(\eta+\delta_x+\sum_{i=1}^2\delta_{x+sy_i})\Big)\\ &\qquad \leq \ell_e(x+sy_3,\eta+\delta_x+\sum_{i=1}^3 \delta_{x+sy_i})\\ &\qquad \leq s^a \Big({\sqrt{3}\over 2}+1\Big)^a\, . \end{align*} Denote by $A$ the event that $\eta(B^d(x,4s))=0$. Then, combining the previous two inequalities with the fact that $\mathbb{P}(A)=\exp(-4^d\kappa_ds^d)$ and \eqref{eq:ExpectationDifferencePoints}, we see that \begin{align*} &\BE\Big[ \widehat{\mathcal{L}}^{(a)}_W\Big(\operatorname{DSF}(\eta+\delta_x+\sum_{i=1}^3 \delta_{x+y_i})\Big) - \widehat{\mathcal{L}}^{(a)}_W\Big(\operatorname{DSF}(\eta+\delta_x+\sum_{i=1}^2 \delta_{x+y_i})\Big)\Big] \\ &=\BE\Big[ \Big(\widehat{\mathcal{L}}^{(a)}_W\Big(\operatorname{DSF}(\eta+\delta_x+\sum_{i=1}^3 \delta_{x+y_i})\Big) - \widehat{\mathcal{L}}^{(a)}_W\Big(\operatorname{DSF}(\eta+\delta_x+\sum_{i=1}^2 \delta_{x+y_i})\Big)\Big){\bf 1}_A\\ &\qquad +\Big(\widehat{\mathcal{L}}^{(a)}_W\Big(\operatorname{DSF}(\eta+\delta_x+\sum_{i=1}^3 \delta_{x+y_i})\Big) - \widehat{\mathcal{L}}^{(a)}_W\Big(\operatorname{DSF}(\eta+\delta_x+\sum_{i=1}^2 \delta_{x+y_i})\Big)\Big){\bf 1}_{A^C}\Big]\\
&\leq -{\frac{cs^a}{2}}\,\exp(-4^d\kappa_ds^d)+s^a\Big({\sqrt{3}\over 2}+1\Big)^a\,(1-\exp(-4^d\kappa_ds^d))\\ &\leq -\tilde{c}s^a \end{align*} for $x\in W_0$ and $y_1\in B^d(sz_1,sr_0)\cap H_{sz_1,e}^C$, $y_2\in B^d(sz_2,sr_0)\cap H_{sz_2,e}^C$, $y_3\in B^d(sz_3,sr_0)\cap H_{sz_3,e}$. Now, Proposition \ref{prop:LPSvariance} concludes the proof for $a\geq 1$.
\begin{figure}
\caption{Illustration of the constructions in the proof of Lemma \ref{lem:VarianceStrictlyPositive} for the planar case if $a\geq 1$ (left) and $0< a<1$ (right).}
\label{fig:Variance}
\end{figure}
Next, we consider the case that $0< a <1$. Let $N\in\N$ be a sufficiently large integer (a lower bound for $N$ will be stated later). We denote the points of the grid $$ G:=\Big\{ \frac{r_W}{N+1}(i_1,\hdots,i_d)-(0,r_W,0,\hdots,0): i_1,\hdots,i_d=1,\hdots,N\Big\} $$ by $z_1,\hdots,z_{N^d}$, see Figure \ref{fig:Variance}. Let $x\in W_0$ and $y_i\in B^d(z_i,r_W/(4N+4))$ for $i=1,\hdots,N^d$. By construction $y_1,\hdots,y_{N^d}$ are included in the $d$-dimensional hypercube $Q:=[0,r_W]\times [-r_W,0]\times [0,r_W]\times\hdots\times [0,r_W]$ so that $x+y_i\in W$ for all $i\in\{1,\hdots,N^d\}$. In the following, we derive a lower bound for $$
\Big| \BE\Big[\widehat{\mathcal{L}}_W^{(a)}\Big(\operatorname{DSF}(\eta+\delta_x+\sum_{i=1}^{N^d}\delta_{x+y_i})\Big) - \widehat{\mathcal{L}}_W^{(a)}\Big(\operatorname{DSF}(\eta+\delta_x)\Big) \Big] \Big|\,. $$ Adding the points $x+y_1,\hdots,x+y_{N^d}$ to the point process $\eta+\delta_x$ generates new edges. If $\eta(x+Q)=0$, then $$ \ell_e(x+y_i,\eta+\delta_x+\sum_{j=1}^{N^d}\delta_{x+y_j})\geq \frac{r_W}{2(N+1)}\,, \qquad i=1,\hdots,N^d\,, $$ where the lower bound is just half of the mesh size of the grid $G$. Consequently, the expectation of the contribution of the new points is at least $$ \exp(-r_W^d) N^d \frac{r_W^a}{2^a(N+1)^a}\,. $$ On the other hand, inserting the additional points $x+y_1,\hdots,x+y_{N^d}$ can also shorten the edges belonging to some points of $\eta\cap W$, whereas the edge associated with $x$ is not affected. By the triangle inequality and the assumption $0< a<1$, we see that, for $z\in\eta\cap W$ with $\hat{n}(z,\eta+\delta_x+\sum_{i=1}^{N^d} \delta_{x+y_i})\in\{x+y_1,\hdots,x+y_{N^d}\}$, \begin{align*} \ell_e(z,\eta+\delta_x+\sum_{i=1}^{N^d}\delta_{x+y_i})^a -\ell_e(z,\eta+\delta_x)^a
& \geq \Big\|z-\hat{n}(z,\eta+\delta_x+\sum_{i=1}^{N^d}\delta_{x+y_i})\Big\|^a-\|z-x\|^a\\
& \geq -\Big\|\hat{n}(z,\eta+\delta_x+\sum_{i=1}^{N^d}\delta_{x+y_i})-x\Big\|^a\,, \end{align*}
where we have used that $x\in H_{z,e}$ and that $\|z-x\|\geq \|z-\hat{n}(z,\eta+\delta_x+\sum_{i=1}^{N^d} \delta_{x+y_i})\|$. Since $\|x+y_i-x\|\leq \sqrt{d}\, r_W$ for $i=1,\hdots,N^d$, this means that $$ 0\geq \ell_e(z,\eta+\delta_x+\sum_{i=1}^{N^d}\delta_{x+y_i})^a-\ell_e(z,\eta+\delta_x)^a \geq -(\sqrt{d}\,r_W)^a\,, \quad z\in\eta+\delta_x\,. $$ So, it remains to bound the expectation of the number of points of $\eta\cap W$ whose edges are affected by adding $x+y_1,\hdots,x+y_{N^d}$, i.e., $$ R:=\BE\Big[\sum_{z\in \eta\cap W} {\bf 1}\{\ell_e(z,\eta+\delta_x+\sum_{i=1}^{N^d}\delta_{x+y_i}) < \ell_e(z,\eta+\delta_x) \}\Big]\,. $$ Using the multivariate Mecke formula \eqref{eq:MeckeMulti} and denoting by $B_Q$ the smallest ball circumscribing the hypercube $Q$, we obtain that $$
R \leq \kappa_d (\sqrt{d}\,r_W/2)^d + \int_{B_Q^C} \BP(\ell_e(z,\eta+\delta_z)\geq \inf_{u\in B_Q}\|u-z\|) \, \dint z\,. $$ Transformation into spherical coordinates yields that $$ R \leq \kappa_d (\sqrt{d}r_W/2)^d + d\kappa_d \int_{\sqrt{d}r_W/2}^\infty \exp(-\kappa_d (r-\sqrt{d}\,r_W/2)^d/2) \, r^{d-1} \, \dint r=: C_R\,. $$ Consequently, we have that \begin{align*} & \BE\Big[\widehat{\mathcal{L}}^{(a)}_W\Big(\operatorname{DSF}(\eta+\delta_x+\sum_{i=1}^{N^d}\delta_{x+y_i})\Big) - \widehat{\mathcal{L}}_W^{(a)}\Big(\operatorname{DSF}(\eta+\delta_x)\Big) \Big]\\
&\qquad \geq \exp(-r_W^d) N^d \frac{r_W^a}{2^a(N+1)^a} - C_R (\sqrt{d}\,r_W)^a\,. \end{align*} We now choose $N$ such that the right-hand side is larger than $1$ (note that this is always possible). Summarizing, we have shown that $$
\BE\Big[\widehat{\mathcal{L}}^{(a)}_W\Big(\operatorname{DSF}(\eta+\delta_x+\sum_{i=1}^{N^d}\delta_{x+y_i})\Big) - \widehat{\mathcal{L}}_W^{(a)}\Big(\operatorname{DSF}(\eta+\delta_x)\Big) \Big]\geq 1 $$ for $x\in W_0$ and $y_i\in B^d(z_i,r_W/(4N+4))$, $i=1,\hdots,N^d$. Now, the assertion for $0< a<1$ follows from Proposition \ref{prop:LPSvariance}.
It remains to transfer the result to the variances of the edge-length functionals $\cL_t^{(a)}$ of the radial spanning tree. Since there is a constant $r_0\in(0,\infty)$ such that $r_{B^d(0,r)}=1$ for all $r\geq r_0$, we have that $\BV[\widehat{\mathcal{L}}^{(a)}_{B^d(0,r)}]\geq \hat{v}_a \kappa_dr^d$ with the same constant $\hat{v}_a\in(0,\infty)$ for all $r\geq r_0$. On the other hand, Lemma \ref{lem:VarianceDSF} implies that, for $a\geq 0$, $$ v_a=\lim_{r\to\infty}\frac{\BV[\widehat{\mathcal{L}}^{(a)}_{B^d(0,r)}]}{\kappa_d r^d} \geq \hat{v}_a>0\,. $$ This completes the proof. \end{proof}
\section{Proof of Theorem \ref{thm:CLT}} \label{sec:ProofCLT}
Our aim is to apply Proposition \ref{prop:LPS}. In view of the variance asymptotics provided in Theorem \ref{thm:Variance}, it remains to investigate the first and the second-order difference operator of $t^{a/d}\cL_t^{(a)}$.
\begin{lemma}\label{lem:FirstOrderDifferenceOperator} For any $a\geq 0$ there are constants $C_{1},C_{2}\in(0,\infty)$ only depending on $W$, $a$ and $d$ such that $$
\BE[|D_z(t^{a/d}\cL_t^{(a)})|^5]\leq C_{1} \qquad\text{and}\qquad\BE[|D_{z_1,z_2}^2(t^{a/d}\cL_t^{(a)})|^5]\leq C_{2} $$ for $z,z_1,z_2\in W$ and $t\geq 1$. \end{lemma} \begin{proof} If $a=0$, we have $\cL_t^{(a)}=\eta_t(W)$ so that the statements are obviously true with $C_1=C_2=1$, for example. For $a> 0$, $\xi\in\bN$ and $z\in W$ we have that $$
|D_z\cL^{(a)}(\RST(\xi))|\leq \ell(z,\xi+\delta_z)^a+\sum_{x\in\xi}{\bf 1}\{\ell(x,\xi)\geq\|z-x\|\} \ \max_{\stackrel{x\in\xi}{\ell(x,\xi)\geq\|z-x\|}} \ell(x,\xi)^a\,. $$ Consequently, it follows from Jensen's inequality that \begin{align*}
\BE[|D_z(t^{a/d}\cL_t^{(a)})|^5]&\leq 16 t^{5a/d}\,\BE[\ell(z,\eta_t+\delta_z)^{5a}]\\
& \quad +16\BE\Big[\Big(\sum_{x\in\eta_t}{\bf 1}\{\ell(x,\eta_t)\geq\|z-x\|\}\Big)^5\Big(\max_{x\in\eta_t\atop\ell(x,\eta_t)\geq\|z-x\|} t^{a/d}\ell(x,\eta_t)^a\Big)^5\Big]\,. \end{align*} Lemma \ref{lem:ExpTail} implies that $t^{5a/d}\BE[\ell(z,\eta_t+\delta_z)^{5a}]\leq c_{5a}$ for $z\in W$ and $t\geq 1$. For the second term, the Cauchy-Schwarz inequality yields the bound \begin{align}\label{eq:10thMomentEstimate}
\Big(\BE\Big[\Big(\sum_{x\in\eta_t}{\bf 1}\{\ell(x,\eta_t)\geq\|z-x\|\}\Big)^{10}\Big]\,\Big)^{1/2}\;\Big(\BE[\max_{x\in\eta_t\atop\ell(x,\eta_t)\geq\|z-x\|}t^{10a/d}\ell(x,\eta_t)^{10a}]\,\Big)^{1/2} \,. \end{align}
The $10$th moment of $\sum_{x\in\eta_t}{\bf 1}\{\ell(x,\eta_t)\geq\|z-x\|\}$ can be expressed as a linear combination of terms of the form $$
\BE\Big[\sum_{(x_1,\ldots,x_k)\in\eta_{t,\neq}^k}{\bf 1}\{\ell(x_i,\eta_t)\geq\|x_i-z\|,\,i=1,\ldots,k\}\Big] $$ with $k\in\{1,\ldots,10\}$. Applying the multivariate Mecke formula \eqref{eq:MeckeMulti}, the monotonicity relation \eqref{eq:monotone}, H\"older's inequality and Lemma \ref{lem:ExpTail} yields, for each such $k$, \begin{align*}
&\BE\Big[\sum_{(x_1,\ldots,x_k)\in\eta_{t,\neq}^k}{\bf 1}\{\ell(x_i,\eta_t)\geq\|x_i-z\|,\,i=1,\ldots,k\}\Big]\\
&=t^k\int_{W^k}\BP\Big(\ell(x_i,\eta_t+\sum_{i=1}^k\delta_{x_i})\geq\|x_i-z\|\,,i=1,\ldots,k\Big)\,\dint(x_1,\ldots,x_k)\\
&\leq t^k\int_{W^k}\BP\big(\ell(x_i,\eta_t+\delta_{x_i})\geq\|x_i-z\|\,,i=1,\ldots,k\big)\,\dint(x_1,\ldots,x_k)\\
&\leq t^k\int_{W^k}\prod_{i=1}^k\BP\big(\ell(x_i,\eta_t+\delta_{x_i})\geq\|x_i-z\|\big)^{1/k}\,\dint(x_1,\ldots,x_k)\\
&\leq \Big(t\int_W \exp(-t\alpha_W\kappa_d\|x-z\|^d/k) \,\dint x\Big)^k\,. \end{align*} Introducing spherical coordinates and replacing $W$ by $\R^d$, we see that \begin{align*}
& t\int_{W} \exp(-t\alpha_W\kappa_d\|x-z\|^d/k) \, \dint x \leq td\kappa_d\int_0^\infty \exp(-t\alpha_W\kappa_dr^d/k) \, r^{d-1}\,\dint r = \frac{k}{\alpha_W}\,. \end{align*}
This implies that $\BE\big[\big(\sum_{x\in\eta_t}{\bf 1}\{\ell(x,\eta_t)\geq\|z-x\|\}\big)^{10}\big]$ is uniformly bounded for $z\in W$ and $t\geq 1$. Moreover, for $u\geq 0$ we have that \begin{align*}
& \BP\big(t^{a/d}\max_{\stackrel{x\in\eta_t}{\ell(x,\eta_t)\geq\|z-x\|}} \ell(x,\eta_t)^a\geq u\big)\\
& = \BP(\exists x\in\eta_t: \ell(x,\eta_t)\geq t^{-1/d}u^{1/a}\,,\ell(x,\eta_t)\geq \|z-x\|)\\
& \leq \BE\Big[\sum_{x\in\eta_t}{\bf 1}\{\ell(x,\eta_t)\geq\max\{t^{-1/d}u^{1/a},\|z-x\|\}\}\Big] \allowdisplaybreaks\\
& = t \int_{W} \BP\big(\ell(x,\eta_t+\delta_x)\geq\max\{t^{-1/d}u^{1/a},\|z-x\|\}\big) \, \dint x\\
& \leq t\int_{\R^d} \exp(-t\alpha_W\kappa_d\max\{t^{-1/d}u^{1/a},\|z-x\|\}^d) \, \dint x\\
& = \kappa_du^{d/a} \exp(-\alpha_W\kappa_du^{d/a}) + t\int_{\R^d\setminus B^d(z,t^{-1/d}u^{1/a})} \exp(-t\alpha_W\kappa_d\|z-x\|^d) \, \dint x \allowdisplaybreaks\\ & = \kappa_du^{d/a} \exp(-\alpha_W\kappa_du^{d/a}) + d\kappa_dt \int_{t^{-1/d}u^{1/a}}^\infty \exp(-t\alpha_W\kappa_d r^d) \, r^{d-1} \, \dint r\\ & = \kappa_du^{d/a} \exp(-\alpha_W\kappa_du^{d/a}) + \frac{1}{\alpha_W} \exp(-\alpha_W\kappa_d u^{d/a}) \,, \end{align*}
where we have used the Mecke formula \eqref{eq:MeckeMulti}, Lemma \ref{lem:ExpTail} and a transformation into spherical coordinates. This exponential tail behaviour implies that the second factor of the product in \eqref{eq:10thMomentEstimate} is uniformly bounded for all $t\geq 1$. Altogether, we see that $\BE[|D_z(t^{a/d}\cL_t^{(a)})|^5]\leq C_{1}$ for all $z\in W$ and $t\geq 1$ with a constant $C_{1}\in(0,\infty)$ only depending on $W$, $a$ and $d$.
For the second-order difference operator we have that, for $z_1,z_2\in W$, \begin{equation}\label{eq:DecompositionSecondDifferenceOperatorMoment} \begin{split}
\BE[|D_{z_1,z_2}^2(t^{a/d}\cL_t^{(a)})|^5] &\leq 16 t^{5a/d}\,\BE[|D_{z_1}\cL^{(a)}(\operatorname{RST}(\eta_t))|^5]\\
&\qquad +16 t^{5a/d}\,\BE[|D_{z_1}\cL^{(a)}(\operatorname{RST}(\eta_t+\delta_{z_2}))|^5]\,. \end{split} \end{equation} In view of the argument for the first-order difference operator above, it only remains to consider the second term. For $z_1,z_2\in W$ and $\xi\in\bN$ we have that \begin{align*}
&\big|D_{z_1}(t^{a/d}\cL^{(a)}(\operatorname{RST}(\xi+\delta_{z_2})))\big| \leq t^{a/d}\,\ell(z_1,\xi+\delta_{z_1}+\delta_{z_2})^a\\
&\qquad+\Big(\sum_{x\in\xi+\delta_{z_2}}{\bf 1}\{\ell(x,\xi+\delta_{z_2})\geq\|z_1-x\|\}\Big)\Big(\max_{\stackrel{x\in\xi+\delta_{z_2}}{\ell(x,\xi+\delta_{z_2})\geq\|z_1-x\|}}t^{a/d}\ell(x,\xi+\delta_{z_2})^a\Big)\,. \end{align*} Using the monotonicity relation \eqref{eq:monotone}, we find that \begin{align*} t^{a/d}\,\ell(z_1,\xi+\delta_{z_1}+\delta_{z_2})^a &\leq t^{a/d}\,\ell(z_1,\xi+\delta_{z_1})^a\,,\\
\sum_{x\in\xi+\delta_{z_2}}{\bf 1}\{\ell(x,\xi+\delta_{z_2})\geq\|z_1-x\|\} &\leq 1+\sum_{x\in\xi}{\bf 1}\{\ell(x,\xi)\geq\|z_1-x\|\} \end{align*} and
$$\max_{\stackrel{x\in\xi+\delta_{z_2}}{\ell(x,\xi+\delta_{z_2})\geq\|z_1-x\|}}t^{a/d}\,\ell(x,\xi+\delta_{z_2})^a \leq t^{a/d}\,\ell(z_2,\xi+\delta_{z_2})^a+\max_{\stackrel{x\in\xi}{\ell(x,\xi)\geq\|z_1-x\|}}t^{a/d}\,\ell(x,\xi)^a\,.$$
This implies that for $\xi=\eta_t$ the $5$th and the $10$th moment of these expressions are uniformly bounded in $t$ by the same arguments as above. In view of \eqref{eq:DecompositionSecondDifferenceOperatorMoment} this shows that $\BE[|D_{z_1,z_2}^2(t^{a/d}\cL_t^{(a)})|^5]\leq C_{2}$ for all $z_1,z_2\in W$ and $t\geq 1$ with a constant $C_{2}\in(0,\infty)$ only depending on $W$, $a$ and $d$. \end{proof}
While Lemma \ref{lem:FirstOrderDifferenceOperator} shows that assumption \eqref{eq:MomentsDifferenceOperator} in Proposition \ref{prop:LPS} is satisfied, the following result puts us in the position to verify assumption \eqref{eq:SupDifferenceOperator} there as well.
\begin{lemma}\label{lem:SecondOrderDifferenceOperator} For $a\geq 0$, $z_1,z_2\in W$ and $t\geq 1$, $$
\BP(D_{z_1,z_2}^2\cL_t^{(a)}\neq 0) \leq (2+2/\alpha_W)\,\exp(-t\alpha_W\kappa_d\|z_1-z_2\|^d/2^{d}) $$ with $\alpha_W$ as in Lemma \ref{lem:KonstanteAlphad}. \end{lemma}
\begin{proof} Since $D^2_{z_1,z_2}\cL_t^{(a)}=0$ for $a=0$, we can restrict ourselves to $a>0$ in the following. First notice that $$ D_{z_1,z_2}^2\cL_t^{(a)} = \sum_{y\in\eta_t}D_{z_1,z_2}^2 \big(\ell(y,\eta_t)^a\big) + D_{z_1}\big(\ell(z_2,\eta_t+\delta_{z_2})^a\big) + D_{z_2}\big(\ell(z_1,\eta_t+\delta_{z_1})^a\big) \,. $$ To have $D_{z_1,z_2}^2\cL_t^{(a)}\neq 0$, at least one of the above terms has to be non-zero. Moreover, for $x,z\in W$ and $\xi\in\bN$, $D_z(\ell(x,\xi+\delta_x)^a)\neq 0$ and $D_z\ell(x,\xi+\delta_x)\neq 0$ are equivalent. Thus, \begin{align*} \BP(D_{z_1,z_2}^2\cL_t^{(a)}\neq 0) & \leq \BP(\exists y\in\eta_t: D^2_{z_1,z_2}\big(\ell(y,\eta_t)^a\big) \neq 0)\\ & \qquad +\BP(D_{z_1}\big(\ell(z_2,\eta_t+\delta_{z_2})^a\big)\neq 0)+\BP(D_{z_2}\big(\ell(z_1,\eta_t+\delta_{z_1})^a\big)\neq 0)\\ & = \BP(\exists y\in\eta_t: D^2_{z_1,z_2}\ell(y,\eta_t) \neq 0)\\ & \qquad +\BP(D_{z_1}\ell(z_2,\eta_t+\delta_{z_2})\neq 0)+\BP(D_{z_2}\ell(z_1,\eta_t+\delta_{z_1})\neq 0)\,. \end{align*}
Since $D_{z_1}\ell(z_2,\eta_t+\delta_{z_2})\neq 0$ requires that $\ell(z_2,\eta_t+\delta_{z_2})\geq \|z_1-z_2\|$, application of Lemma \ref{lem:ExpTail} yields $$
\BP(D_{z_1}\ell(z_2,\eta_t+\delta_{z_2})\neq 0) \leq \BP(\ell(z_2,\eta_t+\delta_{z_2})\geq \|z_1-z_2\|) \leq \exp(-t\alpha_W\kappa_d\|z_1-z_2\|^d) $$ and similarly $$
\BP(D_{z_2}\ell(z_1,\eta_t+\delta_{z_1})\neq 0) \leq \exp(-t\alpha_W\kappa_d\|z_1-z_2\|^d)\,. $$
Using Mecke's formula \eqref{eq:MeckeMulti}, the fact that $D^2_{z_1,z_2}\ell(y,\eta_t+\delta_y)\neq 0$ implies $\ell(y,\eta_t+\delta_y)\geq \max\{\|y-z_1\|,\|y-z_2\|\}$ and once again Lemma \ref{lem:ExpTail}, we finally conclude that \begin{align*}
\BP(\exists y\in\eta_t: D^2_{z_1,z_2}\ell(y,\eta_t)\neq 0) & \leq \BE\Big[\sum_{y\in\eta_t} {\bf 1}\{ D^2_{z_1,z_2}\ell(y,\eta_t)\neq 0 \}\Big]\\ & = t\int_W \BP(D^2_{z_1,z_2}\ell(y,\eta_t+\delta_y)\neq 0) \, \dint y\\
& \leq t\int_W \BP(\ell(y,\eta_t+\delta_y)\geq \max\{\|y-z_1\|,\|y-z_2\|\}) \, \dint y \\
&\leq t\int_W \exp(-t\alpha_W\kappa_d\max\{\|y-z_1\|,\|y-z_2\|\}^d) \, \dint y \allowdisplaybreaks\\
&\leq t\int_{\R^d\setminus B^d(z_1,\|z_1-z_2\|/2)}\exp(-t\alpha_W\kappa_d\|y-z_1\|^d) \,\dint y\\
&\hspace{1.5cm}+t\int_{\R^d\setminus B^d(z_2,\|z_1-z_2\|/2)} \exp(-t\alpha_W\kappa_d\|y-z_2\|^d) \,\dint y\\
&\leq \frac{2}{\alpha_W} \exp(-t\alpha_W\kappa_d\|z_1-z_2\|^d/2^d)\,. \end{align*} Consequently, $$
\BP(D_{z_1,z_2}^2\cL_t^{(a)}\neq 0) \leq (2+2/\alpha_W) \, \exp(-t\alpha_W\kappa_d\|z_1-z_2\|^d/2^{d})\,, $$ which proves the claim. \end{proof}
After these preparations, we can now use Proposition \ref{prop:LPS} to prove Theorem \ref{thm:CLT}.
\begin{proof}[Proof of Theorem \ref{thm:CLT}] It follows from Lemma \ref{lem:SecondOrderDifferenceOperator} by using spherical coordinates that \begin{align*} & \sup_{z_1\in W,\, t\geq 1}t\int_{W}\BP(D_{z_1,z_2}^2(t^{a/d}\cL_t^{(a)})\neq 0)^{1/20}\,\dint z_2\\
& \leq (2 + 2/\alpha_W)\sup_{z_1\in W,\, t\geq 1}t\int_{\R^d} \exp\big(-t\alpha_W\kappa_d\|z_1-z_2\|^d/(20\cdot 2^{d})\big) \,\dint z_2\\
& \leq (2 + 2/\alpha_W) \sup_{t\geq 1}\,t\int_{\R^d} \exp\big(-t\alpha_W\kappa_d\|z\|^d/(20\cdot2^d)\big) \,\dint z \\ &= 20\cdot 2^d (2/\alpha_W+2/\alpha_W^2)\,, \end{align*} which is finite. Moreover, Lemma \ref{lem:FirstOrderDifferenceOperator} shows that there are constants $C_{1},C_{2}\in(0,\infty)$ only depending on $W$, $a$ and $d$ such that $$
\BE[|D_z(t^{a/d}\cL_t^{(a)})|^5]\leq C_{1}\qquad\text{and}\qquad\BE[|D_{z_1,z_2}^2(t^{a/d}\cL_t^{(a)})|^5]\leq C_{2} $$ for $z,z_1,z_2\in W$ and $t\geq 1$. Finally, Lemma \ref{lem:ExistenceVarianceAsymptotics} and Lemma \ref{lem:VarianceStrictlyPositive} ensure the existence of constants $v_a, t_0\in(0,\infty)$ depending on $W$, $a$ and $d$ such that $t^{-1}\BV[t^{a/d}\cL_t^{(a)}]\geq v_a/2$ for all $t\geq t_0$. Consequently, an application of Proposition \ref{prop:LPS} completes the proof of Theorem \ref{thm:CLT}. \end{proof}
\end{document} |
\begin{document}
\title{Multisymplectic 3-forms on 7-dimensional manifolds}
\begin{abstract} A 3-form $\omega\in\Lambda^3\mathbb R^{7\ast}$ is called multisymplectic if it satisfies a natural non-degeneracy requirement. It is well known that there are 8 orbits (or types) of multisymplectic 3-forms on $\mathbb R^7$ under the canonical action of $\mathrm{GL}(7,\mathbb R)$ and that two types are open. This leads to 8 types of global multisymplectic 3-forms on 7-dimensional manifolds without boundary. The existence of a global multisymplectic 3-form of a fixed type is a classical problem in differential topology which is equivalent to the existence of a certain $G$-structure. The open types are the most interesting cases as they are equivalent to a $\mathrm G_2$- and a $\tilde{\mathrm{G}}_2$-structure, respectively. The existence of these two structures is a well known and solved problem. In this article we solve (under some convenient assumptions) the problem of the existence of multisymplectic 3-forms of the remaining types. \end{abstract}
\section{Introduction} Put $\mathrm V:=\mathbb R^7$. There are finitely many orbits of the canonical action of $\mathrm{GL}(\mathrm V)$ on $\Lambda^3\mathrm V^\ast$. We will also call the orbits types. A linear isomorphism $\Phi:\mathbb R^7\rightarrow\mathrm W$ induces a map $\Phi^\ast:\Lambda^3\mathrm W^\ast\rightarrow\Lambda^3\mathbb R^{7\ast}$. For $\omega\in\Lambda^3\mathrm W^\ast$, the type of $\Phi^\ast\omega$ does not depend on the choice of the linear isomorphism $\Phi$ and thus we can define the type of any skew-symmetric 3-form on any 7-dimensional real vector space.
A 3-form $\omega\in\Lambda^3\mathrm V^\ast$ is called \textit{multisymplectic} if the insertion map \begin{equation} \mathrm V\rightarrow\Lambda^2\mathrm V^\ast, \ v\mapsto i_v\omega:=\omega(v,-,-) \end{equation} is injective. There are (see \cite{Dj} and \cite{W}) eight types of multisymplectic 3-forms, two of which are open.
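To illustrate the definition (this example is ours and we do not determine its type here), let $\alpha_1,\dots,\alpha_7$ be a basis of $\mathrm V^\ast$ with dual basis $v_1,\dots,v_7$ of $\mathrm V$. The decomposable form $\alpha_1\wedge\alpha_2\wedge\alpha_3$ is not multisymplectic, since $i_{v_7}(\alpha_1\wedge\alpha_2\wedge\alpha_3)=0$. On the other hand, the form $\omega=\alpha_1\wedge\alpha_2\wedge\alpha_3+\alpha_4\wedge\alpha_5\wedge\alpha_6+\alpha_1\wedge\alpha_4\wedge\alpha_7$ is multisymplectic: writing $v=\sum_i c_iv_i$, the 2-form $i_v\omega$ expands in nine pairwise distinct basis 2-forms $\alpha_i\wedge\alpha_j$ with coefficients $\pm c_i$, each index $i\in\{1,\dots,7\}$ occurring at least once, so $i_v\omega=0$ forces $v=0$.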
Let $\Omega$ be a global 3-form on a 7-dimensional manifold $N$ without boundary and $i\in\{1,\dots,8\}$. We call $\Omega$ a \textit{multisymplectic 3-form of algebraic type} $i$ if, for each $x\in N$, $\Omega_x$ is a multisymplectic 3-form of type $i$. The existence of such a 3-form is a classical problem in differential topology: if $\mathrm O_i$ is the stabilizer of a fixed multisymplectic 3-form $\omega_i\in\Lambda^3\mathrm V^\ast$ of algebraic type $i$, then $N$ admits a multisymplectic 3-form of algebraic type $i$ if, and only if, it has an $\mathrm O_i$-structure. The groups $\mathrm O_i$ were studied in \cite{BV} where they were given as semi-direct products of some well known Lie groups.
By the Cartan-Iwasawa-Malcev theorem (see \cite[Theorem 1.2]{Bo}), a connected Lie group $\mathrm H$ has a maximal compact subgroup and any two such subgroups are conjugate. Let us fix one such subgroup and denote it by $\mathrm K$. By Cartan's result, the group $\mathrm H$ has the homotopy type of $\mathrm K$ and, by a standard argument from obstruction theory, any $\mathrm H$-principal bundle reduces to a $\mathrm K$-principal bundle. Hence, the first goal is (see Section \ref{section max compact subgroups}) to find a maximal compact subgroup $\mathrm K_i$ of each group $\mathrm O_i$. Then we solve (see Section \ref{section global forms}) the problem of the existence of a multisymplectic 3-form of algebraic type $i$ on a closed 7-manifold. The problem is not solved completely as for some types we assume that the underlying manifold is orientable or simply-connected.
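A standard example of this reduction principle (recalled only for orientation) is the case $\mathrm H=\mathrm{GL}(n,\mathbb R)$ with maximal compact subgroup $\mathrm O(n)$: every $\mathrm{GL}(n,\mathbb R)$-principal bundle, in particular the frame bundle of an $n$-manifold, reduces to an $\mathrm O(n)$-principal bundle, which corresponds to the choice of a Riemannian metric.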
The most interesting and well known cases are types 8 and 5 as $\mathrm O_8=\mathrm G_2$ and $\mathrm O_5=\tilde{\mathrm{G}}_2$. The existence problem for a $\mathrm G_2$-structure was solved in \cite{G} and that for a $\tilde{\mathrm{G}}_2$-structure in \cite{Le}.
Let us summarize the main results of this article in a single theorem. See Section \ref{section spin char class} for the definition of the characteristic classes $q(N)$ and $q(N;\ell)$.
\begin{thm}\label{metatheorem} Let $N$ be a closed and connected 7-manifold. \begin{enumerate}
\item Suppose that $N$ is orientable, spin$^c$ and that there are $e,f\in H^2(N,\mathbb Z)$ such that
\begin{equation*}
w_2(N)=\rho_2(e+f)\ \ \mathrm{and}\ \ q(N;e+f)=-ef,
\end{equation*} then $N$ admits a multisymplectic 3-form of algebraic type 1.
If $N$ is simply-connected, then the assumptions are also necessary.
\item Suppose that $N$ is orientable, spin and that there are $e,f\in H^2(N,\mathbb Z)$ such that $$-q(N)=e^2+f^2+3ef,$$ then $N$ admits a multisymplectic 3-form of algebraic type 2.
If $N$ is simply-connected, then the assumptions are also necessary.
\item $N$ admits a multisymplectic 3-form of algebraic type $3$ if, and only if, $N$ is orientable and spin$^c$.
\item Suppose that $N$ is orientable, spin and there is $u\in H^4(N,\mathbb Z)$ such that $$q(N)=-4u,$$ then $N$ admits a multisymplectic 3-form of algebraic type 4.
On the other hand, if $N$ admits a multisymplectic 3-form of algebraic type $4$, then $N$ is orientable and spin.
\item $N$ admits a multisymplectic 3-form of algebraic type $i=5,6,7,8$ if, and only if, $N$ is orientable and spin. \end{enumerate} \end{thm}
\textit{Acknowledgement and dedication}. The author is grateful to Michael C. Crabb for pointing out several mistakes in the original article and for considerably simplifying many arguments in the present work. I would also like to thank J. Van\v zura, M. \v Cadek and the anonymous referee for several valuable comments and suggestions.
I would like to dedicate this article to M. Doubek who drew the author's attention to this subject and who passed away in a car accident at the age of 33.
\subsection{Notation}\label{section notation} We will use the following notation:
\begin{tabular}{rl} $1_n:=$& identity $n\times n$ matrix,\\ $[v_1,\dots,v_i]:=$& the linear span of vectors $v_1,\dots,v_i$,\\ $\alpha_{i_1i_2\dots i_\ell}:=$&$\alpha_{i_1}\wedge\alpha_{i_2}\wedge\dots\wedge\alpha_{i_\ell}$,\\ $M(k,\mathbb R):=$& the algebra of $k\times k$ real matrices. \end{tabular}
Let $\mathrm V,\mathrm W$ be real vector spaces, $\mathrm V^\ast$ be the dual vector space to $\mathrm V$, $\mathrm{End}(\mathrm V)$ be the algebra of linear endomorphisms of $\mathrm V$, $\mathrm{GL}(\mathrm V)$ be the group of linear automorphisms of $\mathrm V$. Suppose that $\mathrm W'$ is a vector subspace of $\mathrm W$,\ \ $A$ is a subalgebra of $\mathrm{End}(\mathrm V)$,\ \ $\omega\in\otimes^i\mathrm W^\ast$,\ \ $\varphi\in\mathrm{GL}(\mathrm W)$,\ \ $H\subset\mathrm{GL}(\mathrm W)$, \ \ $ \mathcal{B}=\{v_1,\dots,v_n\}$ is a basis of $\mathrm V$ and $\Phi:\mathrm V\rightarrow\mathrm W$ is a linear isomorphism. Then we put \begin{align} &\Phi^\ast\varphi:=\Phi^{-1}\circ\varphi\circ\Phi\in\mathrm{GL}(\mathrm V)\ \ \mathrm{ and} \ \ \varphi^\ast H:=\{\varphi^\ast h:\ h\in H\},\label{notation pullback of map}\\ &\Phi^\ast\omega\in\otimes^i\mathrm V^\ast,\ \Phi^\ast\omega(v_1,\dots,v_i):=\omega(\Phi(v_1),\dots,\Phi(v_i)), \ v_1,\dots,v_i\in\mathrm V,\\\label{notation pullback of tensor} &\varphi.\omega:=(\varphi^{-1})^\ast\omega,\\ &\mathrm{Stab}(\omega):=\{\varphi\in\mathrm{GL}(\mathrm W):\ \varphi.\omega=\omega\},\label{notation stabilizer of tensor}\\ &\mathrm{Stab}(\mathrm W'):=\{\varphi\in\mathrm{GL}(\mathrm W):\ \varphi(\mathrm W')\subset\mathrm W'\},\label{notation stabilizer of subspace}\\ &\mathrm{GL}_A(\mathrm V):=\{\varphi\in\mathrm{GL}(\mathrm V):\ \forall a\in A,\ \varphi\circ a=a\circ\varphi\},\label{notation equivariant maps}\\ &\phi_{\mathcal{B}}:=(a_{ij})\in M(n,\mathbb R),\ \phi\in\mathrm{End}(\mathrm V),\ \phi(v_j)=\sum_i a_{ij}v_i,\\ &\triangle^k(\omega):=\{w\in\mathrm W:\ (i_w\omega)^{\wedge^k}=0\}\label{notation isotropy set}. \end{align} It is easy to see that $\varphi^\ast\mathrm{Stab}(\omega)=\mathrm{Stab}(\varphi^\ast\omega)$.
From now on, $\mathcal{B}_{st}=\{e_1,\dots,e_\ell\}$ will denote the standard basis of $\mathbb R^\ell,\ \ell\ge1$. If $f_1,\dots,f_\ell\in\mathbb R^\ell$, then $(f_1|\dots|f_\ell)$ denotes the matrix with $i$-th column equal to $f_i$. We put $E_{ij}:=(0|\dots|e_i|\dots|0)\in M(\ell,\mathbb R)$ where $e_i$ is the $j$-th column. For brevity we put $\mathrm V:=\mathbb R^7$ and $\triangle^j_i:=\triangle^j(\omega_i)$ where $\omega_i$ is the fixed multisymplectic 3-form of type $i$ from \cite{BV}. It follows that $\triangle^j_i$ is an $\mathrm O_i$-invariant subset.
\section{$\ast$-Algebras, some subgroups of $\mathrm G_2,\tilde\mathrm G_2$ and semi-direct products of Lie groups} In this Section we will review some well known theory, set notation and prove some elementary facts about maximal compact subgroups of semi-direct products of Lie groups.
\subsection{$\ast$-algebras}\label{section algebras}
A $\ast$-\textit{algebra} is a pair $(A,\ast)$ where: \begin{itemize}
\item $A$ is a real algebra\footnote{We do not assume that $A$ is associative.} with a unit $1$ and
\item $\ast:A\rightarrow A$ is an involution which satisfies: $$\ast 1=1,\ \ast(a.b)=(\ast b).(\ast a), \ a,b\in A.$$ \end{itemize} We put $\bar a:=\ast(a)$, $\Re A:=\{a\in A:\ \bar a=a\}$ and $\Im A:=\{a\in A:\ \bar a=-a\}$. Then $A=\Re A\oplus\Im A$ and we denote by $\Re(a)$ and $\Im(a)$ the projection of $a\in A$ onto $\Re A$ and $\Im A$, respectively.
Suppose that $\Re A=\mathbb R.1$. Then $B_A(a,b):=\Re(a\bar b),\ a,b\in A$ is a \textit{standard bilinear form} on $A$ and $Q_A(a):=B_A(a,a)$ is a \textit{standard quadratic form}. Finally, we define a multi-linear form on $\Im A$: \begin{equation}\label{tautological form on A} \omega_A(a,b,c):=B_A(a.b,c),\ a,b,c\in\Im A. \end{equation}
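To fix ideas (this example is not needed later), take $A=\mathbb H$ with the usual quaternionic conjugation. Then $\Re\mathbb H=\mathbb R.1$, the form $B_\mathbb H(a,b)=\Re(a\bar b)$ is the standard Euclidean inner product on $\mathbb H\cong\mathbb R^4$ and, since
\begin{equation*}
\omega_\mathbb H(i,j,k)=B_\mathbb H(i.j,k)=\Re(k.\bar k)=\Re(-k^2)=1,
\end{equation*}
the 3-form $\omega_\mathbb H$ is the standard volume form on $\Im\mathbb H\cong\mathbb R^3$.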
We will consider two products on the vector space $A\oplus A$, namely \begin{equation}\label{CD multiplication} (a,b)\cdot(c,d)=(ac-\bar d b,b\bar c+da) \end{equation} and \begin{equation}\label{alternative multiplication}
(a,b)\tilde\cdot(c,d)=(ac+\bar d b,b\bar c+da). \end{equation} Then $A_2:=(A\oplus A,\cdot)$ and $\tilde A_2:=(A\oplus A,\tilde\cdot)$ are real algebras with a unit $(1,0)$. It is clear that \begin{equation}\label{conjugation} \star:A\oplus A\rightarrow A\oplus A,\ \star{(a,b)}=(\bar a,-b) \end{equation} is an involution.
Then (see \cite[Section 1.13]{Y}) also $\mathfrak{CD}(A):=(A_2,\star)$ and $\widetilde{\CD}(A):=(\tilde A_2,\star)$ are $\ast$-algebras and $\mathfrak{CD}(A)$ is the \textit{Cayley-Dickson algebra associated to} $(A,\ast)$.
It is well known that $\mathbb C=\mathfrak{CD}(\mathbb R),$ the algebra of quaternions $\mathbb H=\mathfrak{CD}(\mathbb C)$ and the algebra of octonions $\mathbb O=\mathfrak{CD}(\mathbb H)$.
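As a quick sanity check of (\ref{CD multiplication}) in the simplest case $A=\mathbb R$ (with the trivial involution), the product reads
\begin{equation*}
(a,b)\cdot(c,d)=(ac-db,\ bc+da),
\end{equation*}
which is exactly the multiplication of $a+bi$ and $c+di$ in $\mathbb C$, and the involution (\ref{conjugation}) becomes the complex conjugation.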
The algebra of pseudo-quaternions ${\tilde{\mathbb H}}=\widetilde{\CD}(\mathbb C)$ is isomorphic to $(\mathrm M(2,\mathbb R),\ast)$ where \begin{equation}\label{involution on PQ} \ast\left( \begin{array}{cc} a&b\\ c&d\\ \end{array} \right)= \left( \begin{array}{cc} d&-b\\ -c&a\\ \end{array} \right). \end{equation} The standard quadratic form is the determinant. In particular, the signature of $Q_{{\tilde{\mathbb H}}}$ is $(2,2)$.
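For the reader's convenience, here is the short computation behind the last two statements: for $A=\left(\begin{array}{cc}a&b\\c&d\end{array}\right)$ the involution (\ref{involution on PQ}) gives
\begin{equation*}
A.\bar A=\left(\begin{array}{cc}a&b\\c&d\end{array}\right)\left(\begin{array}{cc}d&-b\\-c&a\end{array}\right)=(ad-bc).1_2,
\end{equation*}
so $Q_{{\tilde{\mathbb H}}}(A)=\Re(A.\bar A)=\det A$, and the determinant is a quadratic form of signature $(2,2)$ on $M(2,\mathbb R)$.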
We will use the following conventions. We put \begin{equation}\label{pseudo-quaternions} \ \tilde I:=\left( \begin{array}{cc} 0&-1\\ 1&0\\ \end{array} \right),\
\tilde J:= \left( \begin{array}{cc} 0&1\\ 1&0\\ \end{array} \right), \ \tilde K:= \left( \begin{array}{cc} 1&0\\ 0&-1\\ \end{array} \right). \end{equation} Be aware that ${\tilde I}.{\tilde J}=-{\tilde K}$.
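Indeed (a direct check of the sign, included for convenience),
\begin{equation*}
{\tilde I}.{\tilde J}=\left(\begin{array}{cc}0&-1\\1&0\end{array}\right)\left(\begin{array}{cc}0&1\\1&0\end{array}\right)=\left(\begin{array}{cc}-1&0\\0&1\end{array}\right)=-{\tilde K},
\end{equation*}
in contrast to the identity $i.j=k$ in $\mathbb H$.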
The algebra of pseudo-octonions is ${\tilde{\mathbb O}}=\mathfrak{CD}({\tilde{\mathbb H}})\cong \widetilde{\CD}(\mathbb H)$, see \cite[Section 1.13]{Y}.
\subsection{Subgroups of $\mathrm G_2$ and $\tilde\mathrm G_2$}\label{section subgroups}
We have $\mathrm G_2=\{\varphi\in\mathrm{GL}(\mathbb O)|\ \forall a,b\in\mathbb O: \varphi(a.b)=\varphi(a).\varphi(b)\}$. It is well known that $\mathrm G_2\subset\mathrm{Stab}(1)\cap\mathrm{Stab}(\Im\mathbb O)$. Hence, it is more natural to view $\mathrm G_2$ as a subgroup of $\mathrm{GL}(\Im\mathbb O)$. It is well known that $\omega_\mathbb O\in\Lambda^3\Im\mathbb O^\ast$ and $\mathrm G_2=\mathrm{Stab}(\omega_{\mathbb O})$. If we replace in the definition of $\mathrm G_2$ the algebra $\mathbb O$ by ${\tilde{\mathbb O}}$, then we get the group $\tilde{\mathrm{G}}_2$. As above, $\omega_{\tilde{\mathbb O}}\in\Lambda^3\Im{\tilde{\mathbb O}}^\ast$ and $\tilde{\mathrm{G}}_2=\mathrm{Stab}(\omega_{\tilde{\mathbb O}})$. We also view $\tilde{\mathrm{G}}_2$ as a subgroup of $\mathrm{GL}(\Im{\tilde{\mathbb O}})$.
Let $\mathrm{Sp}(1)$ be the group of unit quaternions. We define \begin{equation}\label{SO434} \phi_{p,q}\in\mathrm{GL}(\Im\mathbb H\oplus\mathbb H),\ \phi_{p,q}(a,b):=(p.a.\bar p,q.b.\bar p),\ p,q\in\mathrm{Sp}(1). \end{equation} It is well known (see \cite[Proposition 1.20.1]{Y}) that $\mathrm{SO}(4)_{3,4}:=\{\phi_{p,q}:\ p,q\in\mathrm{Sp}(1)\}$ is a subgroup of $\mathrm G_2$. If we view ${\tilde{\mathbb O}}$ as $\widetilde{\CD}(\mathbb H)$, then it is shown in \cite[Section 1.13]{Y} that $\mathrm{SO}(4)_{3,4}\subset\tilde{\mathrm{G}}_2$.
We put \begin{equation*} \mathrm{\widetilde{Sp}}(1)_{\pm}:=\{A\in{\tilde{\mathbb H}}: Q_{{\tilde{\mathbb H}}}(A)=\pm1\}\ \ \mathrm{and}\ \ \mathrm{\widetilde{Sp}}(1):=\{A\in{\tilde{\mathbb H}}: Q_{{\tilde{\mathbb H}}}(A)=1\}. \end{equation*} Let $A,B\in\mathrm{\widetilde{Sp}}(1)_\pm$ and consider \begin{equation}\label{SO2234} \tilde\phi_{A,B}\in\mathrm{GL}(\Im{\tilde{\mathbb H}}\oplus{\tilde{\mathbb H}}),\ \tilde\phi_{A,B}(X,Y)=Q_{{\tilde{\mathbb H}}}(B).(A.X.\bar A,B.Y.\bar A). \end{equation}
It can be verified directly that $\mathrm{SO}(2,2)_{3,4}:=\{\tilde\phi_{A,B}|\ A,B\in\mathrm{\widetilde{Sp}}(1)_\pm:\ Q_{{\tilde{\mathbb H}}}(A)=Q_{{\tilde{\mathbb H}}}(B)\}$ is a subgroup of $\tilde{\mathrm{G}}_2$. This uses the same computation which shows that $\mathrm{SO}(4)_{3,4}\subset\mathrm G_2$ together with the identity $A^{-1}=Q_{{\tilde{\mathbb H}}}(A).\bar A,\ A\in\mathrm{\widetilde{Sp}}(1)_\pm$.
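Let us spell out this identity (a one-line check added for convenience): by (\ref{involution on PQ}), $\bar A$ is the adjugate matrix of $A$, so $A.\bar A=\det(A).1_2=Q_{{\tilde{\mathbb H}}}(A).1_2$; since $Q_{{\tilde{\mathbb H}}}(A)=\pm1$ for $A\in\mathrm{\widetilde{Sp}}(1)_\pm$, we get $A^{-1}=Q_{{\tilde{\mathbb H}}}(A).\bar A$.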
On the other hand, the group $\widetilde\mathrm{SO}(2,2)_{3,4}:=\{\tilde\phi_{A,B}|\ A,B\in\mathrm{\widetilde{Sp}}(1)_\pm\}$ is not contained in $\tilde{\mathrm{G}}_2$. Nevertheless, we still have the following.
\begin{lemma}\label{lemma invariance of omega 5} Let $X\in\Im{\tilde{\mathbb H}},\ Y,Z\in{\tilde{\mathbb H}}$ and $\tilde\phi_{A,B}\in\widetilde\mathrm{SO}(2,2)_{3,4}$. Then \begin{equation} \tilde\phi_{A,B}.\omega_{\tilde{\mathbb O}}((X,0),(0,Y),(0,Z)) =\omega_{\tilde{\mathbb O}}((X,0),(0,Y),(0,Z)). \end{equation} \end{lemma}
\begin{proof} The right hand side is \begin{eqnarray}\label{help I} &&\omega_{\tilde{\mathbb O}}((X,0),(0,Y),(0,Z))=B_{\tilde{\mathbb O}}((X,0).(0,Y),(0,Z))=\Re((0,Y.X).\overline{(0,Z)})\nonumber\\ &&\ \ =\Re((0,Y.X).(0,-Z))=\Re(\bar Z.Y.X,0)=\Re(\bar Z.Y.X). \end{eqnarray} The left hand side is \begin{eqnarray*} &&\omega_{\tilde{\mathbb O}}(Q_{{\tilde{\mathbb H}}}(B)(\bar A.X.A,0),Q_{{\tilde{\mathbb H}}}(A)(0,\bar B.Y. A),Q_{{\tilde{\mathbb H}}}(A)(0,\bar B.Z.A))\\ &&=Q_{{\tilde{\mathbb H}}}(B)\Re(\bar A.\bar Z .B.\bar B.Y.A.\bar A.X.A)=\Re(\bar Z .Y.X). \end{eqnarray*} \end{proof}
The restriction map $\phi_{p,q}\mapsto\phi_{p,q}|_{\Im\mathbb H}$ induces a homomorphism $\pi:\mathrm{SO}(4)_{3,4}\rightarrow\mathrm{SO}(\Im\mathbb H)$. It is well known that the homomorphism is surjective.
If $A,B\in\mathrm{\widetilde{Sp}}(1)_\pm$ and $X\in\Im{\tilde{\mathbb H}}$, then $Q_{{\tilde{\mathbb H}}}(Q_{{\tilde{\mathbb H}}}(B) A.X.\bar A)=Q_{\tilde{\mathbb H}}(X)$. It follows that the restriction map $\tilde\phi_{A,B}\mapsto\tilde\phi_{A,B}|_{\Im{\tilde{\mathbb H}}}$ induces a map $\tilde\pi:\widetilde\mathrm{SO}(2,2)_{3,4}\rightarrow\mathrm O(\Im{\tilde{\mathbb H}})$.
\begin{lemma}\label{lemma split ses of groups} There are split short exact sequences of Lie groups \begin{align}\label{split ses with SO432}
&0\rightarrow\mathrm{Sp}(1)\rightarrow\mathrm{SO}(4)_{3,4}\xrightarrow{\pi}\mathrm{SO}(\Im\mathbb H)\ra0\ \ \mathrm{and}\\
& 0\rightarrow\mathrm{\widetilde{Sp}}(1)\rightarrow\widetilde\mathrm{SO}(2,2)_{3,4}\xrightarrow{\tilde\pi}\mathrm O(\Im{\tilde{\mathbb H}})\ra0\label{split ses with O2234} \end{align} where $\pi$ and $\tilde\pi$ are defined above. The second short exact sequence contains a split short exact sequence \begin{equation} 0\rightarrow\mathrm{\widetilde{Sp}}(1)\rightarrow\mathrm{SO}(2,2)_{3,4}\rightarrow\mathrm{SO}(\Im{\tilde{\mathbb H}})\ra0 \label{split SO2234}. \end{equation} \end{lemma} \begin{proof}
We have $\ker\pi=\{\phi_{\pm1,q}: q\in\mathrm{Sp}(1)\}$. Now $\phi_{p,q}=\phi_{p',q'}$ if, and only if $p=p',q=q'$ or $p=-p',q=-q'$. It follows that $\ker\pi=\{\phi_{1,q}:\ q\in\mathrm{Sp}(1)\}\cong\mathrm{Sp}(1)$. It remains to show that the short exact sequence is split. For this, consider the group $\mathrm{SO}(3)\cong\{\phi_{p,p}:\ p\in\mathrm{Sp}(1)\}$. Then $\pi|_{\mathrm{SO}(3)}$ induces an isomorphism $\mathrm{SO}(3)\rightarrow\mathrm{SO}(\Im\mathbb H)$. The inverse of this map is a splitting of $\pi$.
It is easy to see that $\tilde\phi_{A,B}=\tilde\phi_{A',B'}$ if, and only if $A=A',B'=B$ or $A=-A',B=-B'$. We have $\ker\tilde\pi=\{\tilde\phi_{\pm1,B}:\ B\in\mathrm{\widetilde{Sp}}(1)\}=\{\tilde\phi_{1,B}:\ B\in\mathrm{\widetilde{Sp}}(1)\}\cong\mathrm{\widetilde{Sp}}(1)$. It is well known that any element of $\mathrm O(\Im{\tilde{\mathbb H}})$ is of the form $X\mapsto \pm A.X.\bar A$ for some $A\in\mathrm{\widetilde{Sp}}(1)_\pm$. Then $\tilde\pi$ is surjective and, arguing as above, it is easy to see that the sequence splits.
The last claim is easy to verify. \end{proof}
\subsection{Maximal compact subgroup of semi-direct product}\label{section semi-direct products} A semi-direct product of Lie groups is a short exact sequence \begin{equation}
0\rightarrow\mathrm H_1\rightarrow\mathrm H\xrightarrow{\pi}\mathrm H_2\ra0 \end{equation} of Lie groups which splits, i.e. there is a homomorphism $\iota:\mathrm H_2\rightarrow\mathrm H$ such that $\pi\circ\iota=Id_{\mathrm H_2}$. In this way, we can view $\mathrm H_2$ as a subgroup of $\mathrm H$ and we will do this without further comment. We write $\mathrm H=\mathrm H_1\rtimes\mathrm H_2$.
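A familiar example, recorded here only as an illustration, is the Euclidean group $\mathrm E(n)$: the short exact sequence $0\rightarrow\mathbb R^n\rightarrow\mathrm E(n)\rightarrow\mathrm O(n)\ra0$ splits via the inclusion of $\mathrm O(n)$ as the stabilizer of the origin, so $\mathrm E(n)=\mathbb R^n\rtimes\mathrm O(n)$. Lemma \ref{lemma max compact in product I} below then recovers the well known fact that $\mathrm O(n)$ is a maximal compact subgroup of $\mathrm E(n)$.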
\begin{lemma}\label{lemma max compact in product} Let $\mathrm G$ be a Lie group with a compact subgroup $\mathrm K$. Suppose that $\mathrm H_1,\mathrm H_2$ are closed subgroups of $\mathrm G$ such that the group generated by $\mathrm H_1,\mathrm H_2$ is a semi-direct product $\mathrm H_1\rtimes\mathrm H_2$. Assume that $\mathrm K_i:=\mathrm K\cap\mathrm H_i,\ i=1,2$ is a maximal compact subgroup of $\mathrm H_i$.
Then the group generated by $\mathrm K_1,\mathrm K_2$ is a semi-direct product $\mathrm K_1\rtimes\mathrm K_2$ and this is a maximal compact subgroup of $\mathrm H_1\rtimes\mathrm H_2$. \end{lemma} \begin{proof} Let $k_i\in\mathrm K_i,\ i=1,2$ be arbitrary. Then $k_2^{-1}k_1k_2\in\mathrm H_1\cap\mathrm K=\mathrm K_1$ and so the first claim follows. The group $\mathrm K_1\rtimes\mathrm K_2$ is homeomorphic to $\mathrm K_1\times\mathrm K_2$ and so it is compact. It remains to show that it is maximal among all compact subgroups of $\mathrm H_1\rtimes\mathrm H_2$.
Let $\pi:\mathrm H_1\rtimes\mathrm H_2\rightarrow\mathrm H_2$ be the canonical projection and $\iota:\mathrm H_2\rightarrow\mathrm H_1\rtimes\mathrm H_2$ be the inclusion. Put $\bar h:=\iota\circ\pi(h),\ h\in\mathrm H_1\rtimes\mathrm H_2$. Let $\mathrm L$ be a compact group such that $\mathrm K_1\rtimes\mathrm K_2\subset\mathrm L\subset\mathrm H_1\rtimes\mathrm H_2$. Then in particular, $\mathrm K_i\subset\mathrm L,\ i=1,2$. It is clearly enough to show that for each $\ell\in\mathrm L:\ \ell.\bar \ell^{-1}\in\mathrm K_1$ and $\bar \ell\in\mathrm K_2$.
The group $\pi(\mathrm L)$ is a compact subgroup of $\mathrm H_2$ which contains $\mathrm K_2$. By the maximality of $\mathrm K_2$, $\pi(\mathrm L)=\mathrm K_2$. Thus $\bar \ell\in\iota(\pi(\mathrm L))=\iota(\mathrm K_2)=\mathrm K_2$.
It is clear that $\ell.\bar \ell^{-1}\in\ker\pi=\mathrm H_1$. As $\bar \ell\in\mathrm K_2\subset\mathrm L$, we have that $\ell.\bar \ell^{-1}\in\mathrm L$ and thus $\ell.\bar \ell^{-1}\in\mathrm L\cap\mathrm H_1$. Now $\mathrm L\cap\mathrm H_1$ is a compact subgroup of $\mathrm H_1$ which contains $\mathrm K_1$. Again by the maximality of $\mathrm K_1$, $\mathrm K_1=\mathrm L\cap\mathrm H_1$ and the proof is complete. \end{proof}
\begin{lemma}\label{lemma max compact in product I} Let $\mathrm H_1\rtimes\mathrm H_2$ be a semi-direct product of Lie groups. Assume that the trivial group $\{e\}$ is a maximal compact subgroup of $\mathrm H_i$ and that $\mathrm K$ is a maximal compact subgroup of $\mathrm H_j$ where $i,j\in\{1,2\},\ i\ne j$. Then $\mathrm K$ is a maximal compact subgroup of $\mathrm H_1\rtimes\mathrm H_2$. \end{lemma} \begin{proof} As $\mathrm H_1\rtimes\mathrm H_2$ is diffeomorphic to $\mathrm H_1\times\mathrm H_2$, the groups $\mathrm H_i,\ i=1,2$ are closed in the semi-direct product. The group $\mathrm K$ is a compact subgroup of $\mathrm H_1\rtimes\mathrm H_2$. By the assumptions, $\mathrm K\cap\mathrm H_i$ is a maximal compact subgroup of $\mathrm H_i,\ i=1,2$. The claim follows from Lemma \ref{lemma max compact in product} \end{proof}
\begin{lemma}\label{lemma max compact in upper trian} Let $\mathrm L$ be a subgroup of $\mathrm{GL}(n,\mathbb R)$. Suppose that for each $A\in\mathrm L$ the matrix $A-1_n$ is strictly upper triangular. Then $\{1_n\}$ is a maximal compact subgroup of $\mathrm L$. \end{lemma} \begin{proof}
Let $\mathrm K$ be a maximal compact subgroup of $\mathrm L$ and $B$ be a $\mathrm K$-invariant inner product on $\mathbb R^n$. Let $\{f_1,\dots,f_n\}$ be a $B$-orthonormal basis that is obtained by applying the Gram-Schmidt algorithm to the standard basis of $\mathbb R^n$. Let $F=(f_1|f_2|\dots|f_n)$ be the corresponding matrix. Then $F^{-1}.\mathrm K.F\subset\mathrm O(n)$. On the other hand, $F$ and $F^{-1}$ are upper triangular matrices with positive numbers on the diagonal. Thus, for each $A\in\mathrm K$ the matrix $F^{-1}.A.F$ is upper triangular and its diagonal coefficients are equal to 1. Being also orthogonal, $F^{-1}.A.F=1_n$ and hence $A=1_n$. \end{proof}
\section{Maximal compact subgroups}\label{section max compact subgroups} This Section is organized as follows. Each type is treated in its own section. We first recall some results about $\mathrm O_i:=\mathrm{Stab}(\omega_i)$ from \cite{BV} where $\omega_i$ is a fixed representative of the type $i=1,\dots,8$. Then we use these results to find a maximal compact subgroup of $\mathrm O_i$. We separate the summary part from the genuine research part of this article by \textemdash\ x \textemdash.
\subsection{Type 8}\label{section type 8} A representative chosen in \cite[Section Type 8.]{BV} is \begin{equation}\label{omega8} \omega_8=\alpha_{123}+\alpha_{145}-\alpha_{167}+\alpha_{246}+\alpha_{257}+\alpha_{347}-\alpha_{356}. \end{equation} It is well known that $\mathrm O_8=\mathrm G_2$.
\begin{center}
\textemdash\ x \textemdash \end{center}
In Section \ref{section type 7} we will need the following observation. Recall (see Section \ref{section algebras}) that $\mathbb O=\mathfrak{CD}(\mathbb H)$ and the definition (\ref{tautological form on A}) of the tautological 3-form $\omega_\mathbb O$. Let $\Phi_8:\mathrm V\rightarrow\Im\mathbb O$ be the linear isomorphism that maps the standard basis of $\mathrm V$ to the standard basis \begin{equation} \label{standard basis of O} \{(i,0),(j,0),(k,0),(0,1),(0,i),(0,j),(0,k)\} \end{equation} of $\Im\mathbb O$ where $\{1,i,j,k\}$ is the standard basis of $\mathbb H$.
\begin{lemma}\label{lemma type 8} $\omega_8=\Phi_8^\ast\omega_\mathbb O$. \end{lemma} \begin{proof} This is a straightforward computation which uses (\ref{help I}) together with $\omega_\mathbb O((i,0),(j,0),(k,0))=\Re((k,0).\overline{(k,0)})=\Re(-k^2)=\Re(1)=1.$ \end{proof}
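As an additional illustration (one more term, included for the reader's convenience): since $\Phi_8$ sends $e_1,e_4,e_5$ to $(i,0),(0,1),(0,i)$, the coefficient of $\alpha_{145}$ in (\ref{omega8}) corresponds to
\begin{equation*}
\omega_\mathbb O((i,0),(0,1),(0,i))=\Re(\bar i.1.i)=\Re(-i^2)=1,
\end{equation*}
where we again used the formula $\omega_\mathbb O((X,0),(0,Y),(0,Z))=\Re(\bar Z.Y.X)$ from (\ref{help I}).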
\subsection{Type 7}\label{section type 7} Let us first recall some facts from \cite[Section Type 7.]{BV}. A representative is \begin{equation} \omega_7=\alpha_{125}+\alpha_{136}+\alpha_{147}+\alpha_{237}-\alpha_{246}+\alpha_{345}. \end{equation} Then $\mathrm V_3:=\triangle_7^3=[e_5,e_6,e_7]$ and we put $\mathrm W_4:=\mathrm V/\mathrm V_3$. The 3-form $\omega_7$ induces a bilinear form\footnote{Here $v=(c_1,\dots,c_7),\ w=(d_1,\dots,d_7)$} $B(v,w)=c_1d_1+c_2d_2+c_3d_3+c_4d_4$ on $\mathrm V$. The form $B$ descends to a positive definite bilinear form $B_4$ on $\mathrm W_4$. The associated quadratic form is denoted by $Q_4$. We have\footnote{Recall notation from (\ref{notation stabilizer of subspace}). In particular, $\mathrm{Stab}([B])$ is the stabilizer of the \textbf{line} spanned by $B$.} $\mathrm O_7\subset\mathrm{Stab}(\mathrm V_3)\cap\mathrm{Stab}([B])$.
The insertion map $v\mapsto i_v\omega_7$ induces a linear map $\lambda:\mathrm V_3\rightarrow\Lambda^2\mathrm W_4$. We put $\sigma_i:=\lambda(e_{i+4}),\ i=1,2,3$. As $B_4$ is non-degenerate, there are unique $E,F,G\in\mathrm{End}(\mathrm W_4)$ such that \begin{equation}\label{type 7 quaternionic triple} \sigma_1=B_4\circ E,\ \sigma_2=B_4\circ F,\ \sigma_3=B_4\circ G \end{equation} where $(B_4\circ A)(u,v):=B_4(A(u),v),\ A\in\mathrm{End}(\mathrm W_4),\ u,v\in\mathrm W_4$. The endomorphisms satisfy $E^2= F^2=-Id_{W_4}$ and $E F=-F E=G$. Hence, $\mathrm W_4$ is a 1-dimensional $\mathbb H$-vector space.
The scalar product $B_4$ induces a scalar product on $\Lambda^2\mathrm W_4^\ast$. As $\lambda$ is injective, $B_4$ induces a scalar product $B_3$ on $\mathrm V_3$ and one finds that $\{e_5,e_6,e_7\}$ is an orthonormal basis.
The homomorphism $\rho:\mathrm O_7\rightarrow\mathrm{GL}(\mathrm V_3),\ \varphi\mapsto \varphi|_{\mathrm V_3}$ induces a split short exact sequence $0\rightarrow\mathrm K\rightarrow\mathrm O_7\rightarrow\mathrm{CSO}(Q_3)\ra0$. Any $\varphi\in\mathrm O_7$ induces an endomorphism $\tilde\varphi$ of $\mathrm W_4$ which preserves $Q_4$ up to scale. If $\varphi\in\mathrm K$, then $\tilde\varphi\in\mathrm{GL}_\mathbb H(\mathrm W_4)$ and $\tilde\varphi\in\mathrm{Stab}(B_4)$. This induces another split short exact sequence $0\rightarrow\mathrm L\rightarrow\mathrm K\rightarrow\mathrm{Sp}(1)\ra0$. The group $\mathrm L$ is isomorphic to the Lie group $\mathbb R^8$. Notice that $ \ell_{\mathcal{B}_{st}}=\left( \begin{matrix} 1_4&0\\ \ast&1_3\\ \end{matrix} \right)$ for every $\ell\in\mathrm L$. All together, $\mathrm O_7$ is isomorphic to a semi-direct product \begin{equation}\label{semi-direct product type 7}
(\mathbb R^8\rtimes\mathrm{Sp}(1))\rtimes\mathrm{CSO}(3) \end{equation} as claimed in \cite[10. Proposition]{BV}.
\begin{center}
\textemdash\ x \textemdash \end{center}
We can now proceed. We put $$f_1:=-e_6,\ f_2:=e_7,\ f_3:=e_5, \ f_4:=-e_1,\ f_5:=e_3,\ f_6:=-e_4,\ f_7:=-e_2$$ and $\mathcal{B}_7:=\{f_1,\dots,f_7\}$. Let $\mathcal{B}_7^\ast=\{\beta_1,\dots,\beta_7\}$ be a dual basis. Then \begin{equation} \omega_7=\beta_{145}-\beta_{167}+\beta_{246}+\beta_{257}+\beta_{347}-\beta_{356}. \end{equation}
Consider the linear isomorphism $\Phi_7:\mathrm V\rightarrow \Im\mathbb O$ which maps the basis $\mathcal{B}_7$ to the standard basis (\ref{standard basis of O}) of $\Im\mathbb O$. Recall from Section \ref{section subgroups} that $\mathrm{SO}(4)_{3,4}$ is a subgroup of $\mathrm{GL}(\Im\mathbb O)$. Thus, $\mathrm K_7:=\Phi_7^\ast\mathrm{SO}(4)_{3,4}$ is a subgroup of $\mathrm{GL}(\mathrm V)$. Here we use the notation set in (\ref{notation pullback of map}).
\begin{lemma}\label{lemma inclusion type 7} The subspaces $\mathrm V_3$ and $\mathrm V_4:=[f_4,\dots,f_7]$ are $\mathrm K_7$-invariant and $\mathrm K_7\subset\mathrm O_7$. \end{lemma} \begin{proof} Since $\mathrm V_3=\Phi_7^{-1}(\Im\mathbb H)$ and $\mathrm V_4=\Phi_7^{-1}(\mathbb H)$, the first claim follows from the definition (\ref{SO434}).
By Lemma \ref{lemma type 8}, $\omega_7=\Phi_7^\ast\omega_\mathbb O-\beta_{123}$. As $\mathrm{SO}(4)_{3,4}\subset\mathrm G_2$, we see that $\mathrm K_7\subset\Phi_7^\ast\mathrm G_2=\mathrm{Stab}(\Phi_7^\ast\omega_\mathbb O)$. From (\ref{SO434}) it is also clear that $\mathrm K_7\subset\mathrm{SO}(B_3)\subset\mathrm{Stab}(\beta_{123})$. Thus $\mathrm K_7\subset\mathrm O_7$. \end{proof}
Put \begin{equation}\label{group R+} \mathrm{R}^+:=\bigg\{
\varphi\in\mathrm{GL}(\mathrm V)\bigg| \ \varphi_{\mathcal{B}_7}= \left( \begin{matrix} \lambda^{-2}.1_3&0\\ 0&\lambda.1_4\\
\end{matrix} \right),\ \lambda>0 \bigg\}. \end{equation} It is clear that $\mathrm{R}^+$ is a subgroup of $\mathrm{GL}(\mathrm V)$ and that $\mathrm{R}^+\subset\mathrm O_7$. From Lemma \ref{lemma inclusion type 7} it follows that $\mathrm K_7$ commutes with $\mathrm{R}^+$. It is easy to see that $k.\ell.k^{-1}\in\mathrm L$ whenever $\ell\in\mathrm L,\ k\in\mathrm K_7\times\mathrm{R}^+$. Hence, the group generated by $\mathrm L,\mathrm K_7$ and $\mathrm{R}^+$ is isomorphic to $\mathrm L\rtimes(\mathrm K_7\times\mathrm{R}^+)$. This is a subgroup of $\mathrm O_7$.
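The inclusion $\mathrm{R}^+\subset\mathrm O_7$ claimed above can be checked in two lines (added for the reader's convenience): for $\varphi\in\mathrm{R}^+$ with parameter $\lambda$ we have $\varphi.\beta_i=\lambda^{2}\beta_i$ for $i=1,2,3$ and $\varphi.\beta_j=\lambda^{-1}\beta_j$ for $j=4,\dots,7$. Every summand of $\omega_7$ written in the basis $\mathcal{B}_7^\ast$ contains exactly one index from $\{1,2,3\}$ and two indices from $\{4,\dots,7\}$, hence it is multiplied by $\lambda^{2}.\lambda^{-1}.\lambda^{-1}=1$ and so $\varphi.\omega_7=\omega_7$.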
\begin{lemma}\label{lemma semi-direct product type 7} $\mathrm O_7=\mathrm L\rtimes(\mathrm K_7\times\mathrm{R}^+)$. \end{lemma} \begin{proof} Let $v\in\mathrm V_3$. The map $\Phi_7$ restricts to an isomorphism $\mathrm V_3\rightarrow\Im\mathbb H$ and so there is a unique $a\in\Im\mathbb H$ so that $\Phi_7(v)=(a,0)$. The composition $\mathrm V\xrightarrow{\Phi_7}\Im\mathbb O\rightarrow\mathbb H$ where the second map is the canonical projection descends to an isomorphism $\underline\Phi_7:\mathrm W_4\rightarrow\mathbb H$. Notice that $\underline\Phi_7^\ast B_\mathbb H=B_4$. We know that there is a unique $A\in\mathrm{End}(\mathrm W_4)$ such that $\lambda(v)=B_4\circ A$. We will now show that $A$ corresponds to the multiplication by $a$ on the right.
Let $w_1,w_2\in\mathrm W_4$ and put $\ b=\underline\Phi_7(w_1),\ c=\underline\Phi_7(w_2)$. Then \begin{eqnarray*} &&\lambda(v)(w_1,w_2)=\omega_\mathbb O((a,0),(0,b),(0,c))=\Re(\bar cba)=\Re(ba \bar c)=B_\mathbb H(b.a,c)\\ &&=B_4(\underline\Phi_7^\ast(r_a)(w_1),w_2) \end{eqnarray*} where $r_a:\mathbb H\rightarrow\mathbb H$ is the right-action by $a$. We see that $A=\underline\Phi_7^\ast(r_a)$.
Recall from Lemma \ref{lemma split ses of groups} that $\pi:\mathrm{SO}(4)_{3,4}\rightarrow\mathrm{SO}(\Im\mathbb H),\ \varphi\mapsto\varphi|_{\Im\mathbb H}$ induces the split short exact sequence (\ref{split ses with SO432}) where $\mathrm{Sp}(1)=\{\phi_{1,q}:\ q\in\mathrm{Sp}(1)\}$ and that $\phi_{1,q}(a,b)=(a,q.b)$. Now it is easy to see that $\mathrm K_7\times\mathrm{R}^+$ is a splitting of the subgroup $\mathrm{Sp}(1)\rtimes\mathrm{CSO}(3)$ from (\ref{semi-direct product type 7}). The claim then follows from the definition of $\mathrm L$. \end{proof}
\begin{thm} \label{thm max compact type 7} $\mathrm K_7$ is a maximal compact subgroup of $\mathrm O_7$. \end{thm} \begin{proof} By Lemma \ref{lemma max compact in upper trian}, $\{1_7\}$ is a maximal compact subgroup of $\mathrm L$. It is also clear that $\{1_7\}$ is a maximal compact subgroup of $\mathrm{R}^+$. The claim follows from Lemma \ref{lemma max compact in product I}. \end{proof}
\subsection{Type 5}\label{section type 5} A representative chosen in \cite[Section type 5]{BV} is
\begin{eqnarray}\label{type 5} \omega_5=\alpha_{123}-\alpha_{145}+\alpha_{167}+\alpha_{246}+\alpha_{257}+\alpha_{347}-\alpha_{356}. \end{eqnarray} It is well known that $\mathrm O_5=\tilde{\mathrm{G}}_2$. \begin{center} \textemdash\ x \textemdash \end{center}
We will later need two more representatives of type 5. Recall from Section \ref{section algebras} that ${\tilde{\mathbb O}}=\widetilde{\CD}(\mathbb H)$ and recall the definition of $\omega_{\tilde{\mathbb O}}$. Let $\Phi_5:\mathrm V\rightarrow\Im{\tilde{\mathbb O}}$ be the linear isomorphism that maps the standard basis of $\mathrm V$ to the basis (\ref{standard basis of O}) of $\Im{\tilde{\mathbb O}}$. From (\ref{help I}) and (\ref{alternative multiplication}) it follows at once that $\Phi_5^\ast\omega_{\tilde{\mathbb O}}=2\alpha_{123}-\omega_8$, i.e.
\begin{eqnarray}\label{type 5.I} \Phi^\ast_5\omega_{\tilde{\mathbb O}}=\alpha_{123}-\alpha_{145}+\alpha_{167}-\alpha_{246}-\alpha_{257}-\alpha_{347}+\alpha_{356}. \end{eqnarray} Notice that $\varphi.\omega_5=\Phi^\ast_5\omega_{\tilde{\mathbb O}}$ where $\varphi\in\mathrm{GL}(\mathrm V)$ is determined by $\varphi(e_i)=e_i, \ 1\le i\le 5$ and $\varphi(e_j)=-e_j,\ j =6,7$.
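To see this (a term-by-term check, spelled out for convenience), note that $\varphi^{-1}=\varphi$, so $\varphi.\alpha_i=\alpha_i$ for $i\le5$ and $\varphi.\alpha_j=-\alpha_j$ for $j=6,7$. A summand $\alpha_{ijk}$ therefore changes sign if, and only if, exactly one of its indices lies in $\{6,7\}$; these are precisely the summands $\alpha_{246},\alpha_{257},\alpha_{347},\alpha_{356}$, which is exactly the difference between (\ref{type 5}) and (\ref{type 5.I}).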
Let us now view ${\tilde{\mathbb O}}$ as $\mathfrak{CD}({\tilde{\mathbb H}})$ and ${\tilde{\mathbb H}}$ as $M(2,\mathbb R)$. Consider the linear isomorphism $\tilde\Phi_5:\mathrm V\rightarrow\Im{\tilde{\mathbb O}}$ that sends the standard basis of $\mathrm V$ to the basis \begin{equation}\label{basis of pO} \{({\tilde I},0),({\tilde J},0),({\tilde K},0),(0,\sqrt2E_{11}),(0,\sqrt2E_{21}),(0,\sqrt2E_{12}),(0,\sqrt2E_{22})\}. \end{equation}
\begin{lemma}\label{lemma type 5 II} \begin{eqnarray} \tilde\Phi_5^\ast\omega_{\tilde{\mathbb O}}=\alpha_{123}+\alpha_{145}+\alpha_{167}-\alpha_{245}+\alpha_{267}+\alpha_{347}-\alpha_{356}. \end{eqnarray} \end{lemma} \begin{proof} Let $\mathrm{tr}$ be the usual trace and \begin{equation} A=\left( \begin{matrix} a_3&a_2-a_1\\ a_1+a_2&-a_3 \end{matrix} \right), \ B=\sqrt2\left( \begin{matrix} b_1&b_3\\ b_2&b_4 \end{matrix} \right),\ C=\sqrt2\left( \begin{matrix} c_1&c_3\\ c_2&c_4 \end{matrix} \right). \end{equation} From (\ref{help I}) follows that \begin{align*} \omega_{\tilde{\mathbb O}}((A,0),(0,B),(0,C))&=\Re(\bar CBA)=\frac{1}{2}\mathrm{tr}(\bar CBA)\\ &=a_1(b_1c_2-b_2c_1+b_3c_4-b_4c_3)\\ &+a_2(-b_1c_2+b_2c_1+b_3c_4-b_4c_3)\\ &+a_3(b_1c_4-b_4c_1-b_2c_3+b_3c_2). \end{align*} As $\omega_{\tilde{\mathbb O}}(({\tilde I},0),({\tilde J},0),({\tilde K},0))=\Re((-{\tilde K},0).\overline{({\tilde K},0)})=\Re({\tilde K}^2)=\Re(1)=1,$ the claim follows.
\end{proof}
\subsection{Type 2}\label{section type 2} Let us first recapitulate \cite[Section Type 2.]{BV}. A representative is \begin{equation*} \omega_2=\alpha_{125}+\alpha_{127}+\alpha_{147}-\alpha_{237}+\alpha_{346}+\alpha_{347}. \end{equation*} We will use a basis $\mathcal{B}_2=\{f_5,f_6,f_7,e_1,e_2,e_3,e_4\}$ of $\mathrm V$ where $$f_5:=e_5+e_6,\ f_6:=-e_5+e_6,\ f_7:=-e_5-e_6+e_7.$$ For a dual basis $\mathcal{B}_2^\ast=\{\beta_1,\ldots,\beta_7\}$ we have $\beta_1=\frac{1}{2}(\alpha_5+\alpha_6+2\alpha_7),\ \beta_2=\frac{1}{2}(-\alpha_5+\alpha_6),\ \beta_3=\alpha_7$ and $\beta_{i+3}=\alpha_i,\ i=1,\dots,4$ and so $$\alpha_i=\beta_{i+3},\ i=1,\dots,4,\ \alpha_5=\beta_1-\beta_2-\beta_3,\ \alpha_6=\beta_1+\beta_2-\beta_3,\ \alpha_7=\beta_3.$$ Then \begin{equation} \omega_2=\beta_{145}+\beta_{167}-\beta_{245}+\beta_{267}+\beta_{347}-\beta_{356}. \end{equation}
The subspace $\mathrm V_3:=[f_5,f_6,f_7]$ is $\mathrm O_2$-invariant and we put $\mathrm W_4:=\mathrm V/\mathrm V_3$. We denote by $B_3$ a symmetric bilinear form on $\mathrm V_3$ with orthogonal basis $\{f_5,f_6,f_7\}$ and $B_3(f_5,f_5)=1,\ B_3(f_6,f_6)=B_3(f_7,f_7)=-1$. The 3-form induces a quadratic form $Q(v)=2(c_1c_4-c_2c_3)$ where $v=(c_1,\dots,c_7)$. The associated symmetric bilinear form $B$ descends to a bilinear form $B_4$ on $\mathrm W_4$. We denote by $Q_3$ and $Q_4$ the quadratic form on $\mathrm V_3$ and $\mathrm W_4$ associated to $B_3$ and $B_4$, respectively. We have $\mathrm O_2\subset\mathrm{Stab}([B_3])\cap\mathrm{Stab}([B])$.
The insertion map $v\mapsto i_v\omega_2$ induces an injective linear map $\lambda:\mathrm V_3\rightarrow\Lambda^2\mathrm W_4$. We put $\sigma_i:=\lambda(f_{4+i}),\ i=1,2,3$. As $B_4$ is non-degenerate, there are unique $E,F,G\in\mathrm{End}(\mathrm W_4)$ such that \begin{eqnarray*} &&\sigma_1= B_4\circ E,\ \sigma_2=B_4\circ F,\ \sigma_3=B_4\circ G \end{eqnarray*} where we use the notation from (\ref{type 7 quaternionic triple}). Then $E^2=-F^2=-Id_{\mathrm W_4}$ and $EF=-FE=G$ and hence, $\mathrm W_4$ is a 1-dimensional free ${\tilde{\mathbb H}}$-module.
There is a split short exact sequence $0\rightarrow\mathrm O_2^+\rightarrow\mathrm O_2\xrightarrow{\det}\mathbb R^\ast\ra0$. The restriction map $\varphi\in\mathrm O^+_2\rightarrow\varphi|_{\mathrm V_3}$ induces another split short exact sequence $0\rightarrow\mathrm K\rightarrow\mathrm O^+_2\rightarrow\mathrm{SO}(Q_3)\ra0$. Next, any $\varphi\in\mathrm O_2$ induces $\tilde\varphi\in\mathrm{GL}(\mathrm W_4)$. If $\varphi\in\mathrm K$, then $\tilde\varphi\in\mathrm{GL}_{\tilde{\mathbb H}}(\mathrm W_4)\cap\mathrm{Stab}(B_4)\cong\widetilde\mathrm{Sp}(1)$. We obtain another split short exact sequence $0\rightarrow\mathrm L\rightarrow\mathrm K\rightarrow\mathrm{\widetilde{Sp}}(1)\ra0$. The group $\mathrm L$ is isomorphic to $\mathbb R^8$. Notice that $\ell_{\mathcal{B}_2}=\left(
\begin{matrix} 1_3&\ast\\ 0&1_4\\
\end{matrix}
\right)$ for every $\ell\in\mathrm L$. All together, $\mathrm O_2$ is isomorphic to a semi-direct product \begin{equation}\label{semi-direct product type 2} ((\mathbb R^8\rtimes\mathrm{\widetilde{Sp}}(1))\rtimes\mathrm{SO}(1,2))\rtimes\mathbb R^\ast \end{equation} as claimed in \cite[5. Proposition]{BV}.
\begin{center}
\textemdash\ x \textemdash \end{center}
We can now continue. Let $\Phi_2:\mathrm V\rightarrow\Im{\tilde{\mathbb O}}$ be the linear isomorphism that sends $\mathcal{B}_2$ to the basis (\ref{basis of pO}). Recall from Section \ref{section subgroups} that $\mathrm{SO}(2,2)_{3,4}\subset\widetilde\mathrm{SO}(2,2)_{3,4}\subset\mathrm{GL}(\Im{\tilde{\mathbb O}})$. Put $\widetilde\mathrm H_2:=\Phi_2^\ast\widetilde\mathrm{SO}(2,2)_{3,4}$ and $\mathrm H_2:=\Phi_2^\ast\mathrm{SO}(2,2)_{3,4}$.
\begin{lemma}\label{lemma inclusion type 2} The subspaces $\mathrm V_3$ and $\mathrm V_4:=[e_1,\dots,e_4]$ are $\widetilde\mathrm H_2$-invariant and $\mathrm H_2\subset\mathrm O_2$. \end{lemma} \begin{proof}
The first claim follows from the fact that $\Phi_2(\mathrm V_3)=\Im{\tilde{\mathbb H}},\ \Phi_2(\mathrm V_4)={\tilde{\mathbb H}}$ are $\widetilde\mathrm{SO}(2,2)_{3,4}$-invariant subspaces. From Lemma \ref{lemma type 5 II} it follows that $\omega_2=\Phi_2^\ast\omega_{{\tilde{\mathbb O}}}-\beta_1\wedge\beta_2\wedge\beta_3$. The second claim is then a consequence of Lemma \ref{lemma invariance of omega 5}. \end{proof}
Let $\mathrm{R}^+$ be the group from (\ref{group R+}). It is easy to see that $\mathrm{R}^+\subset\mathrm O_2$. From Lemma \ref{lemma inclusion type 2} it follows that $\mathrm{R}^+$ commutes with $\widetilde\mathrm H_2$. Using the definition of $\mathrm L$, one can easily check that $g.\ell.g^{-1}\in\mathrm L$ whenever $\ell\in\mathrm L,\ g\in\widetilde\mathrm H_2\times\mathrm{R}^+$. We see that $\mathrm L,\widetilde\mathrm H_2$ and $\mathrm{R}^+$ generate a group $\mathrm L\rtimes(\widetilde\mathrm H_2\times\mathrm{R}^+)$ inside $\mathrm O_2$.
\begin{lemma}\label{lemma semi-direct product type 2} $\mathrm O_2=\mathrm L\rtimes(\widetilde\mathrm H_2\times\mathrm{R}^+)$. \end{lemma} \begin{proof} Let $v\in\mathrm V_3$. The map $\Phi_2$ restricts to an isomorphism $\mathrm V_3\rightarrow\Im{\tilde{\mathbb H}}$ and so there is a unique $a\in\Im{\tilde{\mathbb H}}$ so that $\Phi_2(v)=(a,0)$. The composition $\mathrm V\xrightarrow{\Phi_2}\Im{\tilde{\mathbb O}}\rightarrow{\tilde{\mathbb H}}$ where the second map is the canonical projection descends to an isomorphism $\underline\Phi_2:\mathrm W_4\rightarrow{\tilde{\mathbb H}}$. From the summary given above, there is a unique $A\in\mathrm{End}(\mathrm W_4)$ such that $\lambda(v)=B_4\circ A$. Following the proof of Lemma \ref{lemma semi-direct product type 7}, we find that $A=\underline\Phi_2^\ast(r_a)$.
It is easy to see that $\det:\mathrm L\rtimes(\widetilde\mathrm H_2\times\mathrm{R}^+)\rightarrow\mathbb R^\ast$ is surjective and that $\ker(\det)=\mathrm L\rtimes\mathrm H_2$. Recall from Lemma \ref{lemma split ses of groups} that $\tilde\pi$ induces the split short exact sequence (\ref{split ses with O2234}) where $\mathrm{\widetilde{Sp}}(1)=\{\tilde\phi_{1,B}:\ B\in\mathrm{\widetilde{Sp}}(1)\}$ and $\tilde\phi_{1,B}(X,Y)=(X,B.Y)$. From this it easily follows that $\mathrm H_2$ is a splitting of $\mathrm{\widetilde{Sp}}(1)\rtimes\mathrm{SO}(1,2)$ in (\ref{semi-direct product type 2}). The claim now follows from the definition of $\mathrm L$. \end{proof}
It remains to find a maximal compact subgroup of $\mathrm O_2$. We will for simplicity consider only a maximal connected and compact subgroup. The group $\mathrm{SO}(2)$ is a maximal compact subgroup of $\mathrm{SL}(2,\mathbb R)\cong\mathrm{\widetilde{Sp}}(1)$. From the proof of Lemma \ref{lemma split ses of groups} it follows that the subgroup $\mathrm K^o$ of $\mathrm{SO}(2,2)_{3,4}$ that is generated by $\mathrm{SO}(2)_1:=\{\tilde\phi_{1,B}:\ B\in\mathrm{SO}(2)\}$ and $\mathrm{SO}(2)_2:=\{\tilde\phi_{A,A}:\ A\in\mathrm{SO}(2)\}$ is a semi-direct product $\mathrm K^o=\mathrm{SO}(2)_1\rtimes\mathrm{SO}(2)_2$. As $\mathrm{SO}(2)$ is commutative, the product is direct. Put $\mathrm K^o_2:=\Phi_2^\ast\mathrm K^o$.
\begin{thm}\label{thm max compact type 2} The group $\mathrm K^o_2\cong\mathrm{SO}(2)\times\mathrm{SO}(2)$ is a maximal connected and compact subgroup of $\mathrm O_2$. There is an isomorphism of $\mathrm K_2^o$-modules \begin{equation} \mathrm V\cong\mathbb R\oplus\mathbb C_1\oplus\mathbb C_2\oplus\mathbb C_1\otimes_\mathbb C\mathbb C_2 \end{equation} where we denote by $\mathbb C_i,\ i=1,2$ the standard complex representation of the $i$-th factor of $\mathrm{SO}(2)\times\mathrm{SO}(2)$ on $\mathbb C=\mathbb R^2$. \end{thm} \begin{proof} By Lemma \ref{lemma max compact in upper trian}, $\{1_7\}$ is a maximal compact subgroup of $\mathrm L$. It is also clear that $\{1_7\}$ is a maximal compact subgroup of $\mathrm{R}^+$. Hence, a maximal compact subgroup of $\widetilde\mathrm H_2$ is a maximal compact subgroup of $\mathrm O_2$. From Lemma \ref{lemma max compact in product} it easily follows that $\mathrm K^o$ is a maximal compact subgroup of the connected component of the identity element of $\mathrm{SO}(2,2)_{3,4}$. Hence, $\mathrm K^o_2$ is a maximal compact subgroup of the connected component $\mathrm O_2^o$ of the identity element of $\mathrm O_2$. By the Cartan-Iwasawa-Malcev theorem, any two maximal compact subgroups of $\mathrm O_2^o$ are conjugate and hence isomorphic. The first claim follows and it remains to show the second claim.
Put $$\mathcal{B}_{{\tilde{\mathbb O}}}:=\{({\tilde I},0),({\tilde J},0),(-{\tilde K},0),(0,1),(0,{\tilde I}),(0,{\tilde J}),(0,-{\tilde K})\}.$$ Let $R_t:\mathbb R^2\rightarrow\mathbb R^2$ be the anti-clockwise rotation at angle $t\in\mathbb R$. Then it is straightforward to find that \begin{equation}\label{element of max compact type 2} (\tilde\phi_{1,R_s})_{\mathcal{B}_{{\tilde{\mathbb O}}}}= \left(
\begin{matrix} 1&0&0&0\\ 0&1_2&0&0\\ 0&0&R_{s}&0\\ 0&0&0&R_{s}\\
\end{matrix}
\right),\ (\tilde\phi_{R_t,R_t})_{\mathcal{B}_{{\tilde{\mathbb O}}}}= \left(
\begin{matrix} 1&0&0&0\\ 0&R_{2t}&0&0\\ 0&0&1_2&0\\ 0&0&0&R_{2t}\\
\end{matrix}
\right) \end{equation} and the second claim now easily follows. \end{proof}
\subsection{Type 6}\label{section type 6} A representative is \begin{equation}
\omega_6=\alpha_{127}-\alpha_{136}+ \alpha_{145}+\alpha_{235}+\alpha_{246}. \end{equation} Invariant subspaces are $\triangle_6^2=[e_7]$ and $\triangle_6^3=[e_3,e_4,\dots,e_7]$. We denote them by $\mathrm V_1$ and $\mathrm V_5$, respectively. We put $\mathrm W_2:=\mathrm V/\mathrm V_5,\ \mathrm Z_4=\mathrm V_5/\mathrm V_1$. The 3-form $\omega_6$ induces a quadratic form $Q(v)=c_1^2+c_2^2,\ v=(c_1,\dots,c_7)$. Let $B$ be the associated bilinear form. Then $\mathrm O_6\subset\mathrm{Stab}([B])$. The form $Q$ descends to a regular quadratic form $Q_2$ on $\mathrm W_2$. We denote by $B_2$ the associated bilinear form.
The insertion map $v\mapsto i_v\omega_6|_{V_5}$ induces a monomorphism $\lambda:\mathrm W_2\rightarrow\Lambda^2\mathrm Z_4^\ast$ and we put $\sigma_i:=\lambda(e_i),\ i=1,2$.
Each $\varphi\in\mathrm O_6$ induces $\tilde\varphi\in\mathrm{GL}(\mathrm W_2)$. Since $\tilde\varphi\in\mathrm{Stab}([B_2])$, we get a map $\mu:\mathrm O_6\rightarrow\mathrm{CO}(Q_2)$ and a split short exact sequence $0\rightarrow\mathrm O_6^1\rightarrow\mathrm O_6\xrightarrow{\det\mu}\mathbb R^\ast\ra0$. Then $\mu|_{\mathrm O_6^1}$ induces another split short exact sequence $0\rightarrow\mathrm L\rightarrow\mathrm O_6^1\rightarrow\mathrm{SO}(Q_2)\ra0$. Each $\varphi\in\mathrm O_6$ descends to $\bar\varphi\in\mathrm{GL}(\mathrm Z_4)$ and we get a map $\rho:\mathrm O_6\rightarrow\mathrm{GL}(\mathrm Z_4)$. If $\varphi\in\mathrm L$, then $\bar\varphi.\sigma_i=\sigma_i,\ i=1,2$. It can be shown that $\mathrm{Stab}(\sigma_1)\cap\mathrm{Stab}(\sigma_2)\cong\mathrm{SL}(2,\mathbb C)$ and we get a split short exact sequence $0\rightarrow\mathrm M\rightarrow\mathrm L\rightarrow\mathrm{SL}(2,\mathbb C)\ra0$. Moreover, it can be shown that $\varphi_{\mathcal{B}_{st}}=\left( \begin{matrix} 1_2&0&0\\ \ast&1_4&0\\ \ast&\ast&1\\
\end{matrix} \right)$ for every $\varphi\in\mathrm M$ and that $\mathrm M\cong(\mathbb R^6\rtimes\mathbb R^2)\rtimes\mathbb R^2$. All together, $\mathrm O_6$ is isomorphic to a semi-direct product \begin{equation}\label{semidirect product type 6} ((((\mathbb R^6\rtimes\mathbb R^2)\rtimes\mathbb R^2)\rtimes\mathrm{SL}(2,\mathbb C))\rtimes\mathrm{SO}(2))\rtimes\mathbb R^\ast \end{equation} as claimed in \cite[9. Proposition]{BV}.
\begin{center}
\textemdash\ x \textemdash \end{center}
Let us first relabel the standard basis. We put $$f_1:=e_7,\ f_2:=e_1,\ f_3:=e_2,\ f_4:=e_4,\ f_5:=-e_3,\ f_6:=-e_5,\ f_7:=-e_6.$$ Let $\mathcal{B}_6^\ast=\{\beta_1,\dots,\beta_7\}$ be a dual basis to $\mathcal{B}_6:=\{f_1,f_2,\dots,f_7\}$. Then we have $$\beta_1=\alpha_7,\ \beta_2=\alpha_1,\ \beta_3=\alpha_2,\ \beta_4=\alpha_4,\ \beta_5=-\alpha_3,\ \beta_6=-\alpha_5,\ \beta_7=-\alpha_6.$$ From this one finds that \begin{equation} \omega_6=\beta_{123}-\beta_{246}-\beta_{257}-\beta_{347}+\beta_{356}. \end{equation}
Consider the linear isomorphism $\mathbb C^2\rightarrow\mathbb H,\ (x_1+iy_1, x_2+iy_2)\mapsto(x_1+iy_1+y_2j+x_2k)$. This isomorphism is complex linear with respect to the right multiplication by $i$ on $\mathbb H$. This induces an embedding $\mathrm{SL}(2,\mathbb C)\hookrightarrow\mathrm{GL}(\mathbb H)$. We compose this with the canonical map $\mathrm{GL}(\mathbb H)\hookrightarrow\mathrm{GL}(\Im\mathbb H\oplus\mathbb H),\ \varphi\mapsto Id_{\Im\mathbb H}\oplus\varphi$ so that we can view $\mathrm{SL}(2,\mathbb C)$ as a subgroup of $\mathrm{GL}(\Im\mathbb H\oplus\mathbb H)$.
Put $\widetilde\mathrm U(1):=\{p\in\mathrm{Sp}(1):p.i=\pm i.p\}$. It is easy to see that $\widetilde\mathrm U(1)$ has two connected components $\widetilde\mathrm U(1)_0=\{e^{it}:\ t\in\mathbb R\}$ and $\widetilde\mathrm U(1)_1=\{e^{it}.j:\ t\in\mathbb R\}$. We put $\mathrm K_6':=\{\phi_{p,q}:\ p\in\widetilde\mathrm U(1),\ q\in\mathrm{Sp}(1)\}$. It is easy to see that $\mathrm K_6'$ is a subgroup of $\mathrm{GL}(\Im\mathbb H\oplus\mathbb H)$ and we denote by $\mathrm H_6'$ the group generated by $\mathrm K_6'$ and $\mathrm{SL}(2,\mathbb C)$.
\begin{lemma}\label{lemma split ses with H_6'}
The map $\pi_6:\mathrm H_6'\rightarrow\mathrm{SO}(\Im\mathbb H),\ \pi_6(\varphi)=\varphi|_{\Im\mathbb H}$ induces a split short exact sequence \begin{equation}\label{ses with H_6'} 0\rightarrow\mathrm{SL}(2,\mathbb C)\rightarrow\mathrm H_6'\rightarrow\mathrm O(2)\ra0 \end{equation} where $\mathrm O(2)\subset\mathrm{SO}(\Im\mathbb H)$ is the stabilizer of the line spanned by $i$. The group $\mathrm K_6'$ is a maximal compact subgroup of $\mathrm H'_6$. \end{lemma} \begin{proof} It is easy to see that the image of $\pi_6$ is $\mathrm O(2)$. From Lemma \ref{lemma split ses of groups} it follows that $\ker\pi_6$ is generated by $\mathrm{SL}(2,\mathbb C)$ and by $\mathrm{Sp}(1)=\{\phi_{1,q}:\ q\in\mathrm{Sp}(1)\}$. From (\ref{SO434}) it is clear that $\mathrm{Sp}(1)\subset\mathrm{SL}(2,\mathbb C)$ and hence, $\pi_6$ induces the short exact sequence (\ref{ses with H_6'}). Repeating the proof of Lemma \ref{lemma split ses of groups} shows that this sequence splits. It remains to prove the second claim.
The group $\mathrm K_6'$ is a compact subgroup of $\mathrm H_6'=\mathrm{SL}(2,\mathbb C)\rtimes\mathrm O(2)$. We have that $\mathrm K_6'\cap\mathrm O(2)=\mathrm O(2)$ and that $\mathrm K_6'\cap\mathrm{SL}(2,\mathbb C)=\mathrm{Sp}(1)$ is a maximal compact subgroup of $\mathrm{SL}(2,\mathbb C)$. The claim follows from Lemma \ref{lemma max compact in product}. \end{proof}
Let us view ${\tilde{\mathbb O}}$ as $\widetilde{\CD}(\mathbb H)$. Let $\Phi_6:\mathrm V\rightarrow\Im\mathbb H\oplus\mathbb H$ be the linear isomorphism which maps the basis $\mathcal{B}_6$ to the basis (\ref{standard basis of O}) of $\Im{\tilde{\mathbb O}}$. We put $\mathrm H_6:=\Phi_6^\ast\mathrm H_6'$ and $\mathrm K_6:=\Phi_6^\ast\mathrm K_6'$. These are by definition subgroups of $\mathrm{GL}(\mathrm V)$.
\begin{lemma}\label{lemma inclusion type 6} The subspaces $[f_1],\ [f_2,f_3],\ [f_4,\dots,f_7]$ are $\mathrm H_6$-invariant and $\mathrm H_6\subset\mathrm O_6$. \end{lemma} \begin{proof} Notice that $[f_1]=\Phi_6^{-1}([(i,0)]),\ [f_2,f_3]=\Phi_6^{-1}([(j,0),(k,0)])$ and $[f_4,\dots,f_7]=\Phi_6^{-1}({\tilde{\mathbb H}})$. Hence, the first claim follows from the definition of $\mathrm H_6'$ and Lemma \ref{lemma split ses with H_6'}. Notice that $\mathrm H_6\subset\mathrm{Stab}(\beta_1)\cap\mathrm{Stab}(\beta_{23})$.
The 3-form $\Phi_6^\ast\omega_{{\tilde{\mathbb O}}}$ is obtained from $\Phi_5^\ast\omega_{{\tilde{\mathbb O}}}$ given in (\ref{type 5.I}) by replacing each $\alpha_i$ by $\beta_i,\ i=1,2,\dots,7$. We see that $\omega_6+\beta_1\wedge\gamma_1=\Phi_6^\ast\omega_{{\tilde{\mathbb O}}}$ where $\gamma_1:=-\beta_4\wedge\beta_5+\beta_6\wedge\beta_7$.
The group $\mathrm H_6$ is generated by $\mathrm K_6$ and $\Phi_6^\ast\mathrm{SL}(2,\mathbb C)$. Hence, it is enough to show that $\mathrm K_6\subset\mathrm{Stab}(\omega_6)$ and $\mathrm{SL}(2,\mathbb C)\subset\mathrm{Stab}(\omega_6)$.
Let $\varphi\in\mathrm K_6$. As $\mathrm K_6'\subset\mathrm{SO}(4)_{3,4}\subset\tilde{\mathrm{G}}_2$, it follows that $\mathrm K_6\subset\Phi_6^\ast\tilde{\mathrm{G}}_2=\mathrm{Stab}(\Phi^\ast_6\omega_{{\tilde{\mathbb O}}})$. Hence, it is enough to show $\varphi.\gamma_1=\gamma_1$. We have \begin{eqnarray*} \varphi.(\Phi_6^\ast\omega_{{\tilde{\mathbb O}}})&=&\varphi.(\beta_{123}+\beta_1\wedge\gamma_1-\beta_{246}-\beta_{257}+\beta_{356}-\beta_{347})\\ &=&\beta_{123}+\beta_1\wedge\varphi.\gamma_1+\varphi.(-\beta_{246}-\beta_{257}+\beta_{356}-\beta_{347})\\ &=&\beta_{123}+\beta_1\wedge\varphi.\gamma_1+\beta_2\wedge\gamma_2+\beta_3\wedge\gamma_3 \end{eqnarray*} for some 2-forms $\gamma_2,\gamma_3$ which belong to the subalgebra of $\Lambda^\bullet\mathrm V^\ast$ generated by $\beta_4,\beta_5,\beta_6,\beta_7$. But since $\varphi.(\Phi_6^\ast\omega_{{\tilde{\mathbb O}}})=\Phi_6^\ast\omega_{{\tilde{\mathbb O}}}$, it follows that $\varphi.\gamma_1=\gamma_1$ and $\gamma_2=-\beta_{46}-\beta_{57},\ \gamma_3=\beta_{56}-\beta_{47}$.
As $\mathrm{SL}(2,\mathbb C)$ acts by the identity on $\Im\mathbb H$, we know that $\beta_i,\ i=1,2,3$, are invariant under $\mathrm{SL}(2,\mathbb C)$. The standard complex volume form on $\mathbb C^2$ induces, via the isomorphism $\mathbb C^2\rightarrow\mathbb H$ and the inclusion $\mathbb H\hookrightarrow\Im\mathbb H\oplus\mathbb H$ given above, a complex 2-form $\theta$ on $\Im\mathbb H\oplus\mathbb H$. It is straightforward to verify that $\Phi_6^\ast\theta=-\gamma_3-i\gamma_2$. Hence, $\mathrm{SL}(2,\mathbb C)\subset\mathrm{Stab}(\gamma_2)\cap\mathrm{Stab}(\gamma_3)$. \end{proof}
We will need one more subgroup of $\mathrm{GL}(\mathrm V)$. Put \begin{equation}
\mathrm{R}^+:=\bigg\{\varphi\in\mathrm{GL}(\mathrm V)\bigg|\ \varphi_{\mathcal{B}_6}=\left( \begin{matrix} \lambda^{-2}&0&0\\ 0&\lambda.1_2&0\\ 0&0&\lambda^{-\frac{1}{2}}.1_4\\
\end{matrix} \right),\ \lambda>0 \bigg\}. \end{equation} It is easy to check that $\mathrm{R}^+$ is a subgroup of $\mathrm O_6$. From Lemma \ref{lemma inclusion type 6} it follows that $\mathrm{R}^+$ commutes with $\mathrm H_6$. By the summary given above, $g.m. g^{-1}\in\mathrm M$ whenever $m\in\mathrm M,\ g\in\mathrm H_6\times\mathrm{R}^+$. This shows that $\mathrm O_6$ contains a subgroup $\mathrm M\rtimes(\mathrm H_6\times\mathrm{R}^+)$.
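The inclusion $\mathrm{R}^+\subset\mathrm O_6$ used above can be verified directly (a short check added for convenience): for $\varphi\in\mathrm{R}^+$ with parameter $\lambda$ we have $\varphi.\beta_1=\lambda^{2}\beta_1$, $\varphi.\beta_i=\lambda^{-1}\beta_i$ for $i=2,3$ and $\varphi.\beta_j=\lambda^{\frac{1}{2}}\beta_j$ for $j=4,\dots,7$. The summand $\beta_{123}$ of $\omega_6$ is multiplied by $\lambda^{2}.\lambda^{-1}.\lambda^{-1}=1$, and each of the remaining summands contains one index from $\{2,3\}$ and two indices from $\{4,\dots,7\}$, so it is multiplied by $\lambda^{-1}.\lambda^{\frac{1}{2}}.\lambda^{\frac{1}{2}}=1$. Hence $\varphi.\omega_6=\omega_6$.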
\begin{lemma}\label{lemma semidirect product type 6} $\mathrm O_6=\mathrm M\rtimes(\mathrm H_6\times\mathrm{R}^+)$. \end{lemma} \begin{proof}
Put $\mathrm O_6':=\mathrm M\rtimes(\mathrm H_6\times\mathrm{R}^+)$. Then $\mu|_{\mathrm O_6'}$ induces an epimorphism $\mathrm O_6'\rightarrow\mathrm{CO}(Q_2)$. By Lemma \ref{lemma split ses with H_6'}, we get a split short exact sequence $0\rightarrow\mathrm L'\rightarrow\mathrm O_6'\rightarrow\mathrm{CO}(Q_2)\ra0$ where $\mathrm L'=\mathrm M\rtimes\Phi_6^\ast\mathrm{SL}(2,\mathbb C)$. Then $\rho|_{\mathrm L'}$ induces a split short exact sequence $0\rightarrow\mathrm M\rightarrow\mathrm L'\rightarrow\Phi_6^\ast\mathrm{SL}(2,\mathbb C)\ra0$. By the proof of Lemma \ref{lemma inclusion type 6}, $\Phi_6^\ast\mathrm{SL}(2,\mathbb C)\subset\mathrm{Stab}(\gamma_2)\cap\mathrm{Stab}(\gamma_3)$. It is straightforward to verify that the other inclusion ``$\supset$'' also holds. The 2-form $\gamma_{i},\ i=2,3$, descends to the form $\sigma_{i-1}$ on $\mathrm Z_4$. Thus $\Phi_6^\ast\mathrm{SL}(2,\mathbb C)=\mathrm{Stab}(\sigma_1)\cap\mathrm{Stab}(\sigma_2)$ and so $\Phi_6^\ast\mathrm{SL}(2,\mathbb C)$ is a splitting of the subgroup $\mathrm{SL}(2,\mathbb C)$ from (\ref{semidirect product type 6}). The claim now follows from the definition of $\mathrm M$. \end{proof}
\begin{thm}\label{thm max compact type 6}
$\mathrm K_6$ is a maximal compact subgroup of $\mathrm O_6$. \end{thm} \begin{proof} By Lemma \ref{lemma max compact in upper trian}, $\{1_7\}$ is a maximal compact subgroup of $\mathrm M$. The same is obviously true also for $\mathrm{R}^+$. From Lemma \ref{lemma split ses with H_6'} it follows that $\mathrm K_6$ is a maximal compact subgroup of $\mathrm H_6$. The claim is then a consequence of Lemma \ref{lemma max compact in product I}. \end{proof}
\subsection{Type 3}\label{section type 3} A representative is \begin{equation} \omega_3=\alpha_{123}-\alpha_{167}+\alpha_{145}. \end{equation}
Then $\mathrm V_6:=\triangle_3^2=\triangle_3^3=[e_2,\ldots,e_7]$. This is an $\mathrm O_3$-invariant subspace. We put $\mathrm W_1:=\mathrm V/\mathrm V_6$. The insertion map $v\mapsto i_v\omega_3$ induces a monomorphism $\lambda:\mathrm W_1\rightarrow\Lambda^2\mathrm V_6^\ast$. The image of $\lambda$ is an $\mathrm O_3$-invariant conformally symplectic structure on $\mathrm V_6$. The restriction map $\varphi\mapsto\varphi|_{\mathrm V_6}$ induces a split short exact sequence $0\rightarrow\mathrm K\rightarrow\mathrm O_3\rightarrow\mathrm{CSp}(3,\mathbb R)\ra0$. Moreover, $\mathrm K= \bigg\{\varphi\in\mathrm{GL}(\mathrm V)
\bigg| \ \varphi_{\mathcal{B}_{st}}= \left( \begin{matrix}
1&\ast\\
0&1_6 \end{matrix} \right) \bigg\}.$
\begin{center}
\textemdash\ x \textemdash \end{center}
We put $f_1:=e_1,\ f_2:=e_2,\ f_3:=e_4,\ f_4:=e_6,\ f_5:=e_7,\ f_6:=e_5,\ f_7:=e_3$. Let $\mathcal{B}_3^\ast=\{\beta_1,\dots,\beta_7\}$ be a dual basis to $\mathcal{B}_3:=\{f_1,\dots,f_7\}$. Then \begin{equation}
\omega_3=\beta_{125}+\beta_{136}+\beta_{147}. \end{equation}
Let $\omega$ be the imaginary part of the standard Hermitian form on $\mathbb C^3$. Let $\mathrm{CSp}(\omega):=\mathrm{Stab}([\omega])$ and $\mathrm{Sp}(\omega):=\mathrm{Stab}(\omega)$. The map $\mu:\mathrm{CSp}(\omega)\rightarrow\mathbb R^\ast$ determined by $ \mu(\varphi).\omega=\varphi.\omega$ is a group homomorphism.
Let $C:\mathbb C^3\rightarrow\mathbb C^3$ be the standard conjugation. Then $C.\omega=-\omega$ and so $C\in\mathrm{CSp}(\omega),\ \mu(C)=-1$. Let $\widetilde\mathrm U(3)$ be the group generated by $\mathrm U(3)$ and $C$. It is easy to see that $\widetilde\mathrm U(3)$ has two connected components, the connected component of the identity $\widetilde\mathrm U(3)_0=\mathrm U(3)$ and $\widetilde\mathrm U(3)_1:=\{\varphi\circ C:\ \varphi\in\mathrm U(3)\}$.
\begin{lemma}\label{lemma split ses with CSp} There is a split short exact sequence \begin{equation}
0\rightarrow\mathrm{Sp}(\omega)\rightarrow\mathrm{CSp}(\omega)\xrightarrow{\mu}\mathbb R^\ast\ra0. \end{equation} The group $\widetilde\mathrm U(3)$ is a maximal compact subgroup of $\mathrm{CSp}(\omega)$. \end{lemma} \begin{proof} Consider the subgroup $\mathrm{R}^\ast$ of $\mathrm{CSp}(\omega)$ generated by $C$ and $\{\lambda.Id_{\mathbb C^3}:\ \lambda>0\}$. Then it is easy to see that $\mu$ restricts to an isomorphism $\mathrm{R}^\ast\rightarrow\mathbb R^\ast$. The inverse of this is a splitting of $\mu$.
We have $\{Id_{\mathbb C^3},C\}=\mathrm O(\mathbb C^3)\cap\mathrm{R}^\ast$ and $\mathrm U(3)=\mathrm{Sp}(\omega)\cap\mathrm O(\mathbb C^3)$. As $\mathrm{Sp}(\omega)$ and $\mathrm{R}^\ast$ are closed in $\mathrm{GL}(\mathbb C^3)$, the second claim is a corollary of Lemma \ref{lemma max compact in product}. \end{proof}
For $A\in\mathrm{CSp}(\omega)$ we define $\phi_A\in\mathrm{GL}(\mathbb R\oplus\mathbb C^3),\ \phi_A(x,v)=(\mu(A).x,Av)$. Let \begin{equation} \Phi_3:\mathrm V\rightarrow\mathbb R\oplus\mathbb C^3,\ \ \Phi_3\big(\sum_{i=1}^7 x_if_i\big)=(x_1,(x_2+ix_5,x_3+ix_6,x_4+ix_7)). \end{equation} We put $\mathrm H_3:=\{\Phi_3^\ast(\phi_A):\ A\in\mathrm{CSp}(\omega)\}$ and $\mathrm K_3:=\{\Phi_3^\ast(\phi_A):\ A\in\widetilde\mathrm U(3)\}$.
\begin{lemma}
$\mathrm H_3\subset\mathrm O_3$. \end{lemma} \begin{proof} Let $A\in\mathrm{CSp}(\omega)$ and let $\alpha\in(\mathbb R\oplus\mathbb C^3)^\ast$ be the projection onto the first summand. Then $\alpha\wedge\omega\in\Lambda^3(\mathbb R\oplus\mathbb C^3)^\ast$ and $$\phi_A.(\alpha\wedge\omega)=\mu(A)^{-1}.\alpha\wedge A.\omega=\mu(A)^{-1}.\alpha\wedge\mu(A).\omega=\alpha\wedge\omega.$$ Since $\omega_3=\Phi_3^\ast(\alpha\wedge\omega)$, the claim follows. \end{proof}
It is clear that $\mathrm K$ and $\mathrm H_3$ generate a subgroup $\mathrm K\rtimes\mathrm H_3$ of $\mathrm O_3$.
\begin{lemma}
$\mathrm O_3=\mathrm K\rtimes\mathrm H_3$. \end{lemma} \begin{proof}
The restriction map $\varphi\in\mathrm O_3\mapsto\varphi|_{\mathrm V_6}$ restricts to an isomorphism $\mathrm H_3\rightarrow\mathrm{CSp}(3,\mathbb R)$. This readily proves the claim. \end{proof}
\begin{thm}\label{thm max compact type 3} The group $\mathrm K_3$ is a maximal compact subgroup of $\mathrm O_3$. \end{thm} \begin{proof} From Lemma \ref{lemma max compact in upper trian} follows that $\{1_7\}$ is a maximal compact subgroup of $\mathrm K$. The claim is a consequence of Lemmata \ref{lemma split ses with CSp} and \ref{lemma max compact in product I}. \end{proof}
It is clear that $\mathrm K_3\subset\mathrm{SO}(7)$. We will now show $\mathrm K_3$ can be viewed as a subgroup of $\mathrm{Spin}^c(7)$, i.e. there is an embedding $\mathrm K_3\rightarrow\mathrm{Spin}^c(7)$ such that the composition $\mathrm K_3\rightarrow\mathrm{Spin}^c(7)\rightarrow\mathrm{SO}(7)$ induces the identity on $\mathrm K_3$. Recall that $\mathrm{Spin}^c(n)$ is the quotient of $\mathrm{Spin}(n)\times\mathrm U(1)$ by the subgroup $\{\pm(1,1)\}$ where $\{\pm1\}=\ker\rho_n$ and $\rho_n:\mathrm{Spin}(n)\rightarrow\mathrm{SO}(n)$ is the usual 2:1 covering. We denote the class of $(a,e^{it})$ in the quotient by $\langle a,e^{it}\rangle$. The canonical homomorphism $\rho_n^c:\mathrm{Spin}^c(n)\rightarrow\mathrm{SO}(n)$ is $\langle a,e^{it}\rangle\mapsto\rho_n(a)$.
\begin{lemma}\label{lemma subgroup of Spin^c(7)} $\mathrm K_3$ is a subgroup of $\mathrm{Spin}^c(7)$. \end{lemma} \begin{proof}
$\widetilde\mathrm U(3)$ has two connected components and so the same is true for $\mathrm K_3$. The connected component $\mathrm K_3^o$ of the neutral element of $\mathrm K_3$ is isomorphic to $\mathrm U(3)$. It is well known (see \cite[Section 3.4]{Mo}) that $\mathrm U(3)$ is a subgroup of $\mathrm{Spin}^c(6)$. Using the standard inclusion $\mathrm{Spin}^c(6)\hookrightarrow\mathrm{Spin}^c(7)$, we see that there is a subgroup $\widetilde\mathrm K_3^o$ of $\mathrm{Spin}^c(7)$ such that $\rho_7^c|_{\widetilde\mathrm K_3^o}$ induces an isomorphism of Lie groups $\widetilde\mathrm K_3^o\rightarrow\mathrm K_3^o$. Hence, it remains to show that there is a subgroup $\tilde\mathrm K_3$ of $\mathrm{Spin}^c(7)$ which contains $\widetilde\mathrm K_3^o$ such that $\rho_7^c$ restricts to an isomorphism of Lie groups $\tilde\mathrm K_3\rightarrow\mathrm K_3$.
Notice that $\psi_{C}:=\Phi_3^\ast(\phi_C)\in\mathrm{SO}(7)$ is determined by $f_i\mapsto- f_i,\ i=1,5,6,7$ and $\ f_{j}\mapsto f_j,\ j=2,3,4$. Let $B_{\mathrm V}$ be the standard inner product on $\mathrm V$. Let us view $\mathrm{Spin}(7)$ as the subgroup of the Clifford algebra of $(\mathrm V,B_{\mathrm V})$ that is generated by products of an even number of unit vectors from $\mathrm V$. Put $\alpha:= f_1.f_5.f_6.f_7\in\mathrm{Spin}(7)$. Then $\psi_C=\rho_7(\alpha)$ and $\alpha^2=1$. It is easy to verify that we can take as $\tilde\mathrm K_3$ the subgroup of $\mathrm{Spin}^c(7)$ that is generated by $\widetilde\mathrm K_3^o$ and $\langle\alpha,1\rangle$. \end{proof}
\subsection{Type 4}\label{section type 4} A representative of the orbit is \begin{equation} \omega_4=\alpha_{123}-\alpha_{167}+\alpha_{145}+\alpha_{246}. \end{equation} With respect to the basis $\mathcal{B}_3$ from Section \ref{section type 3} we have that \begin{equation} \omega_4=\beta_{125}+\beta_{136}+\beta_{147}+\beta_{234}. \end{equation}
Put $\mathrm V_3:=[f_5,f_6,f_7],\ \mathrm V_6:=[f_2,f_3,f_4,f_5,f_6,f_7]$. It is claimed in \cite[Section type 3]{BV} that $\triangle^2_4=\mathrm V_3$ and $\triangle_4^3=\mathrm V_6$ and so these are $\mathrm O_4$-invariant subspaces. We put $\mathrm W_1:=\mathrm V/\mathrm V_6, \mathrm W_4:=\mathrm V/\mathrm V_3$ and $\mathrm Z_3:=\mathrm V_6/\mathrm V_3$.
Each $\varphi\in\mathrm O_4$ descends to $\tilde\varphi\in\mathrm{GL}(\mathrm W_1)\cong\mathbb R^\ast$ and $\hat\varphi\in\mathrm{GL}(\mathrm Z_3)$. The homomorphism $\mu:\mathrm O_4\rightarrow\mathrm{GL}(\mathrm W_1),\ \varphi\mapsto\tilde\varphi$ induces a split short exact sequence $0\rightarrow\mathrm O_4^+\rightarrow\mathrm O_4\xrightarrow{\mu}\mathbb R^\ast\ra0$. The 3-form $\omega_4$ induces a volume form on $\mathrm Z_3$. It follows that the image of the homomorphism $\nu:\mathrm O_4\rightarrow\mathrm{GL}(\mathrm Z_3),\ \varphi\mapsto\hat\varphi$ is contained in $\mathrm{SL}(\mathrm Z_3)$. There is a split short exact sequence $0\rightarrow\mathrm K\rightarrow\mathrm O_4^+\rightarrow\mathrm{SL}(\mathrm Z_3)\ra0$. If $\varphi\in\mathrm K$, then $\varphi_{\mathcal{B}_3}= \left( \begin{matrix} 1&0&0\\ \ast&1_3&0\\ \ast&\ast&1_3\\ \end{matrix} \right)$. Moreover, it can be shown that $\mathrm K\cong(((\mathbb R^3\rtimes\mathbb R^6)\rtimes\mathbb R)\rtimes\mathbb R)\rtimes\mathbb R$. Fixing an isomorphism $\mathrm Z_3\rightarrow\mathbb R^3$, we obtain an isomorphism between $\mathrm O_4$ and a semi-direct product \begin{equation}
(((((\mathbb R^3\rtimes\mathbb R^6)\rtimes\mathbb R)\rtimes\mathbb R)\rtimes\mathbb R)\rtimes\mathrm{SL}(3,\mathbb R))\rtimes\mathbb R^\ast \end{equation} as in \cite[7. Proposition]{BV}.
\begin{center}
\textemdash\ x \textemdash \end{center}
Put \begin{equation} \mathrm H_4:=\Bigg\{\varphi\in\mathrm{GL}(\mathrm V): \varphi_{\mathcal{B}_3}=\left(
\begin{matrix} 1&0&0\\ 0&A&0\\ 0&0&(A^T)^{-1}\\
\end{matrix}
\right),\ A\in\mathrm{SL}(3,\mathbb R) \Bigg\}. \end{equation} It is clear that $\mathrm K_4^o:=\mathrm H_4\cap\mathrm{SO}(7)$ is a maximal compact subgroup of $\mathrm H_4$.
\begin{lemma} $\mathrm H_4\subset\mathrm O_4$. \end{lemma} \begin{proof} Notice that $\omega_4=\omega_3+\beta_{234}$ and $\omega_3=\beta_1\wedge(\beta_{25}+\beta_{36}+\beta_{47})$. It is obvious that $\mathrm H_4\subset\mathrm{Stab}(\beta_{234})\cap\mathrm{Stab}(\beta_{1})$ and it is straightforward to verify that $\mathrm H_4\subset\mathrm{Stab}(\beta_{25}+\beta_{36}+\beta_{47})$. \end{proof}
Put \begin{equation}\label{R subgroup of O4} \mathrm{R}^\ast=\Bigg\{\varphi\in\mathrm{GL}(\mathrm V):\ \varphi_{\mathcal{B}_3}=\left(
\begin{matrix} \lambda&0&0\\ 0&1_3&0\\ 0&0&\lambda^{-1}.1_3\\
\end{matrix}
\right),\ \lambda\in\mathbb R^\ast\Bigg\}. \end{equation} It is clear that $\mathrm{R}^\ast\subset\mathrm O_4$ and that $\mathrm{R}^\ast$ commutes with $\mathrm H_4$. For $k\in\mathrm K$ and $g\in\mathrm H_4\times\mathrm{R}^\ast$ one easily checks that $g.k.g^{-1}\in\mathrm K$. This shows that $\mathrm O_4$ contains a subgroup $\mathrm K\rtimes(\mathrm H_4\times\mathrm{R}^\ast)$.
\begin{lemma}
$\mathrm O_4=\mathrm K\rtimes(\mathrm H_4\times\mathrm{R}^\ast)$. \end{lemma} \begin{proof}
This easily follows from the summary given above. \end{proof}
The intersection $\mathrm{R}^\ast\cap\mathrm O(7)$ consists of the two elements corresponding to $\lambda=\pm1$; we denote it by $\mathbb Z_2$.
\begin{thm}\label{thm max compact type 4} The group $\mathrm K_4:=\mathrm K^o_4\times\mathbb Z_2$ is a maximal compact subgroup of $\mathrm O_4$. The group $\mathrm K_4$ is a subgroup of $\mathrm G_2$. \end{thm} \begin{proof} By Lemma \ref{lemma max compact in upper trian}, $\{1_7\}$ is a maximal compact subgroup of $\mathrm K$. Hence, it is (see Lemma \ref{lemma max compact in product I}) enough to show that $\mathrm K_4$ is a maximal compact subgroup of $\mathrm H_4\times\mathrm{R}^\ast$. We have that $\mathrm K_4=(\mathrm H_4\times\mathrm{R}^\ast)\cap\mathrm O(7),\ \mathrm K_4^o=\mathrm H_4\cap\mathrm O(7)$ and $\mathbb Z_2=\mathrm{R}^\ast\cap\mathrm O(7)$. Since $\mathrm H_4$ and $\mathrm{R}^\ast$ are closed subgroups of $\mathrm{GL}(\mathrm V)$ and since $\mathrm K_4^o$ and $\mathbb Z_2$ are maximal compact subgroups of $\mathrm H_4$ and $\mathrm{R}^\ast$, respectively, the claim follows from Lemma \ref{lemma max compact in product}.
To complete the proof, let $\Phi_4:\mathrm V\rightarrow\Im\mathbb H\oplus\mathbb H$ be the linear isomorphism that sends the basis $\mathcal{B}_3$ to the basis $$\{(0,1),(i,0),(j,0),(k,0),(0,i),(0,j),(0,k)\}.$$ Then it is easy to see that $\mathrm K_4=\Phi_4^\ast\{\phi_{p,\pm 1.p}:\ p\in\mathrm{Sp}(1)\}$. Thus, $\mathrm K_4\subset\Phi_4^\ast\mathrm G_2$. \end{proof}
\subsection{Type 1}\label{section type 1} A representative of the orbit is \begin{eqnarray} \omega_1=\alpha_{127}+\alpha_{134}+\alpha_{256}. \end{eqnarray}
We have $\triangle^2_1=\mathrm V^a_3\cup\mathrm V^b_3$ and $\triangle_1^3=\mathrm V_6^a\cup\mathrm V_6^b$ where \begin{equation*}
\mathrm V^a_3:=[e_3,e_4,e_7],\ \mathrm V^b_3:=[e_5,e_6,e_7], \ \mathrm V_6^a=[e_1,e_3,\dots,e_7],\ \mathrm V_6^b=[e_2,\dots,e_7]. \end{equation*} The subspace $\mathrm V_1:=\mathrm V^a_3\cap\mathrm V^b_3$ is $\mathrm O_1$-invariant and we put $\mathrm Z_2^a:=\mathrm V_3^a/\mathrm V_1,\ \mathrm Z^b_2:=\mathrm V^b_3/\mathrm V_1$. Each element $\varphi\in\mathrm O_1$ induces an automorphism $\tilde\varphi$ of $\mathrm Z_2^a\oplus\mathrm Z_2^b$ such that $\tilde\varphi(\mathrm Z_2^a)=\mathrm Z_2^a,\ \tilde\varphi(\mathrm Z_2^b)=\mathrm Z_2^b$ or $\tilde\varphi(\mathrm Z_2^a)=\mathrm Z_2^b,\ \tilde\varphi(\mathrm Z_2^b)=\mathrm Z_2^a$. The map \begin{displaymath}
sg:\mathrm O_1\rightarrow\mathbb Z_2,\qquad sg(\varphi)=\begin{cases} 1&\ \mathrm{if}\ \ \tilde\varphi(\mathrm Z_2^a)=\mathrm Z_2^a,\ \tilde\varphi(\mathrm Z_2^b)=\mathrm Z_2^b,\\ -1&\ \mathrm{if}\ \ \tilde\varphi(\mathrm Z_2^a)=\mathrm Z_2^b,\ \tilde\varphi(\mathrm Z_2^b)=\mathrm Z_2^a,\\ \end{cases} \end{displaymath} is a split group homomorphism. Put $\mathrm O_1^+:=\ker(sg)$. If $\varphi\in\mathrm O_1^+$, then it is natural to view $\tilde\varphi$ as an element of $\mathrm{GL}(\mathrm Z_2^a)\times\mathrm{GL}(\mathrm Z_2^b)$. The map $\varphi\mapsto\tilde\varphi$ induces a split short exact sequence $0\rightarrow\mathrm K\rightarrow\mathrm O_1^+\rightarrow\mathrm{GL}(\mathrm Z_2^a)\times\mathrm{GL}(\mathrm Z_2^b)\ra0$. Moreover, it can be shown that $\varphi_\mathcal{B}= \left( \begin{matrix} 1_2&0&0\\ \ast&1_4&0\\ \ast&\ast&1 \end{matrix} \right)$ for every $\varphi\in\mathrm K$ and that $\mathrm K\cong(H\oplus H)\rtimes\mathbb R^4$ where $H$ is the Heisenberg group of dimension 3. Altogether, $\mathrm O_1$ is isomorphic to a semi-direct product \begin{equation}
(((H\oplus H)\rtimes\mathbb R^4)\rtimes\mathrm{GL}(\mathrm Z_2^a)\times\mathrm{GL}(\mathrm Z_2^b))\rtimes\mathbb Z_2 \end{equation} as in \cite[4. Proposition]{BV}.
\begin{center}
\textemdash\ x \textemdash \end{center}
We can now continue with the proof. Let $A,B\in\mathrm{GL}(2,\mathbb R)$. We define two linear automorphisms of $\mathbb R\oplus\mathbb R\oplus\mathbb R^2\oplus\mathbb R^2\oplus\mathbb R$, namely \begin{align*} &\phi^+_{A,B}(u_1,u_2,v_1,v_2,u_3):=\bigg(\frac{u_1}{\det A},\frac{u_2}{\det B}, A(v_1),B(v_2),\det A.\det B.u_3\bigg)\ \mathrm{and}\\ &\phi^{-}_{A,B}(u_1,u_2,v_1,v_2,u_3):=\bigg(\frac{u_2}{\det B},\frac{u_1}{\det A}, B(v_2),A(v_1),-\det A.\det B.u_3\bigg) \end{align*} where $u_1,u_2,u_3\in\mathbb R,\ v_1,v_2\in\mathbb R^2$. Let $$\Phi_1:\mathbb R^7\rightarrow \mathbb R\oplus\mathbb R\oplus\mathbb R^2\oplus\mathbb R^2\oplus\mathbb R,\ \Phi_1(x_1,\dots,x_7)=(x_1,x_2,(x_3,x_4),(x_5,x_6),x_7)$$ and put $\varphi^\bullet_{A,B}:=\Phi_1^\ast(\phi^\bullet_{A,B})$ where $\bullet=\pm$. It is straightforward to verify that \begin{eqnarray} \varphi^+_{A,B}\circ\varphi^+_{C,D}=\varphi^+_{A.C,B.D},\ \varphi^{-}_{A,B}\circ\varphi^{-}_{C,D}=\varphi^+_{B.C,A.D},\\ \varphi^+_{A,B}\circ\varphi^{-}_{C,D}=\varphi^{-}_{B.C,A.D},\ \varphi^{-}_{A,B}\circ\varphi^{+}_{C,D}=\varphi^{-}_{A.C,B.D} \end{eqnarray}
and so $\mathrm H_1:=\{\varphi^\pm_{A,B}|\ A,B\in\mathrm{GL}(2,\mathbb R)\}$ is a subgroup of $\mathrm{GL}(\mathrm V)$. The following observation is straightforward.
\begin{lemma}\label{lemma inclusion type 1} $\mathrm H_1\subset\mathrm O_1$. \end{lemma}
It is easy to check that $h.k.h^{-1}\in\mathrm K$ whenever $h\in\mathrm H_1,\ k\in\mathrm K$. Hence, $\mathrm O_1$ contains a subgroup $\mathrm K\rtimes\mathrm H_1$.
\begin{lemma}\label{lemma semi-direct product type 1}
$\mathrm O_1=\mathrm K\rtimes\mathrm H_1$. \end{lemma} \begin{proof} It is easy to see that the inclusion $\mathrm H_1\hookrightarrow\mathrm O_1$ is a splitting of the projection $\mathrm O_1\rightarrow(\mathrm{GL}(\mathrm Z_2^a)\times\mathrm{GL}(\mathrm Z_2^b))\rtimes\mathbb Z_2$. The claim follows from the definition of $\mathrm K$. \end{proof}
\begin{thm}\label{thm max compact type 1} A maximal compact subgroup of $\mathrm H_1$ is a maximal compact subgroup of $\mathrm O_1$. \end{thm} \begin{proof} From Lemma \ref{lemma max compact in upper trian} it follows that $\{1_7\}$ is a maximal compact subgroup of $\mathrm K$. The claim is then a consequence of Lemma \ref{lemma max compact in product I}. \end{proof}
\section{Multisymplectic 3-forms of a fixed algebraic type}\label{section global forms} \subsection{Characteristic classes of spin and spin$^c$ vector bundles}\label{section spin char class} We will denote by $\underline\mathbb R^i$ a trivial vector bundle with fiber $\mathbb R^i$. For a real vector bundle $\xi$ we denote by $w_i(\xi), \ p_1(\xi)$ and $e(\xi)$ the $i$-th Stiefel-Whitney class, the first Pontryagin class and the Euler class of $\xi$, respectively. If $\xi$ is a complex vector bundle, then $c_i(\xi)$ is the $i$-th Chern class of $\xi$. We will need (see also \cite[Section 2]{CCV}) two more characteristic classes, one is defined for a spin vector bundle and the other is defined for a spin$^c$ vector bundle.
Suppose that $\xi$ is a spin vector bundle over a base $N$. Then there is a class $q(\xi)\in H^4(N,\mathbb Z)$ which is independent of the choice of a spin structure on $\xi$. We have: \begin{equation}
q(\xi\oplus\xi')=q(\xi)+ q(\xi') \ \ \mathrm{and} \ \ 2q(\xi)=p_1(\xi) \end{equation} if $\xi$ and $\xi'$ are spin. If $\xi$ is a complex bundle, then $\xi$ admits a spin structure if, and only if $c_1(\xi)$ is divisible by two, say $2m=c_1(\xi)$ for some $m\in H^2(N,\mathbb Z)$. Then \begin{equation}
q(\xi)=2m^2-c_2(\xi). \end{equation} If $\xi=TN$, then we put $q(N):=q(TN)$.
A vector bundle $\xi$ admits a spin$^c$ structure if, and only if $w_2(\xi)$ is a reduction of an integral class, say $\rho_2(\ell)=w_2(\xi)$ for some $\ell\in H^2(N,\mathbb Z)$. Then we can define $q(\xi;\ell):=q(\xi-\lambda)\in H^4(N,\mathbb Z)$ where $\lambda$ is a complex line bundle with $c_1(\lambda)=\ell$. The spin$^c$ characteristic class satisfies: \begin{equation} 2q(\xi;\ell)=p_1(\xi)-\ell^2,\ \rho_2(q(\xi;\ell))=w_4(\xi),\ q(\xi;\ell+2m)=q(\xi;\ell)-2\ell m-2m^2 \end{equation} where $m\in H^2(N,\mathbb Z)$. If $\xi$ is a complex vector bundle with $c_1(\xi)=\ell$, then \begin{equation} q(\xi;\ell)=-c_2(\xi). \end{equation} If $\xi=TN$, then we put $q(N;\ell):=q(TN;\ell)$.
The following theorem can be found in \cite{CCS}.
\begin{thm}\label{fundamental thm} Let $N$ be a closed, connected manifold of dimension 7. Consider two orientable 7-dimensional real vector bundles $\xi$ and $\xi'$ over $N$ with $w_2(\xi) = w_2(\xi')=\rho_2(\ell)$, where $\ell \in H^2(N;\mathbb Z)$. Then $\xi$ and $\xi'$ are isomorphic as vector bundles if, and only if $q(\xi;\ell) = q(\xi'; \ell)$. \end{thm}
\subsection{Multisymplectic 3-forms of algebraic type 5,6,7,8} \begin{thm}\label{thm global forms 5,6,7,8} Let $N$ be a closed connected 7-dimensional manifold. Then the following are equivalent: \begin{enumerate} \item $N$ is orientable and spin. \item $N$ admits a multisymplectic 3-form of algebraic type 8. \item $N$ admits a multisymplectic 3-form of algebraic type 5. \item $N$ admits a multisymplectic 3-form of algebraic type 6. \item $N$ admits a multisymplectic 3-form of algebraic type 7. \end{enumerate} \end{thm} \begin{proof} (1)"$\Leftrightarrow$"(2) is proved in \cite{G}.
(2)"$\Leftrightarrow$"(3) is proved in \cite{Le}.
(1)"$\Rightarrow$"(4) and (5). By a result from \cite{Th}, any closed, orientable, spin manifold of dimension 7 admits two everywhere linearly independent vector fields. This gives a reduction from $\mathrm{Spin}(7)$-structure to $\mathrm{Spin}(5)$-structure. Following \cite[Proposition 2.2]{CCS}, an inclusion $$\mathrm{Sp}(1)=\mathrm{Sp}(1)\times 1\hookrightarrow\mathrm{Sp}(1)\times\mathrm{Sp}(1)=\mathrm{Spin}(4)\hookrightarrow\mathrm{Spin}(5)$$ induces isomorphism on homotopy groups $\pi_i$ for $i\le 5$ and an epimorphism on $\pi_6$. Hence, the $\mathrm{Spin}(5)$-structure reduces to a $\mathrm{Spin}(4)$-structure which shows that $N$ admits three everywhere linearly independent vector fields, i.e. $TN\cong\underline\mathbb R^3\oplus\eta$ where $\eta$ is spin. Furthermore, the reduction from $\mathrm{Spin}(4)$ to $\mathrm{Sp}(1)$ means that $\eta$ has a $\mathrm{Sp}(1)$-structure or equivalently, $\eta$ is a 1-dimensional $\mathbb H$-vector bundle. Notice that a composition $$\mathrm{Sp}(1)\hookrightarrow\mathrm{Spin}(5)\hookrightarrow\mathrm{Spin}(7)\xrightarrow{\rho_7}\mathrm{SO}(7)\ \mathrm{is}\footnote{Up to conjugation.}\ \ q\in\mathrm{Sp}(1)\mapsto\phi_{1,q}\in\mathrm{SO}(7).$$ This is an embedding $\mathrm{Sp}(1)\hookrightarrow\mathrm{SO}(7)$. We know from Section \ref{section type 6} that $\mathrm{Sp}(1)\subset\mathrm K_6$ and from Section \ref{section type 7} that $\mathrm{Sp}(1)\subset\mathrm K_7$. We see that the $\mathrm{Sp}(1)$-structure extends to a $\mathrm K_6$ and $\mathrm K_7$-structure.
(4)"$\Rightarrow$" (3) By Section \ref{section type 6}, $\mathrm K_6\subset\Phi_6^\ast\tilde{\mathrm{G}}_2$.
(5)"$\Rightarrow$" (2) By Section \ref{section type 7}, $\mathrm K_7\subset\Phi_7^\ast\mathrm G_2$. \end{proof}
\subsection{Multisymplectic 3-forms of algebraic type 4} \begin{thm}\label{thm gl form 4} Let $N$ be a closed, connected 7-manifold. If $N$ is orientable and spin and there is $u\in H^4(N;\mathbb Z)$ such that $q(N) = -4u$, then $N$ admits a multisymplectic 3-form of algebraic type 4.
If $N$ admits a multisymplectic 3-form of algebraic type 4, then $N$ is orientable and spin. \end{thm} \begin{proof} Recall from Section \ref{section type 4} that the maximal compact, connected subgroup $\mathrm K_4^o$ of $\mathrm O_4$ is isomorphic to $\mathrm{SO}(3)$ and that $\mathrm V\cong\mathbb R\oplus\mathbb R^3\oplus\mathbb R^3$ as a $\mathrm K_4^o$-module where $\mathbb R$ is a trivial module and $\mathbb R^3$ is the standard representation. Alternatively, $\mathrm V\cong\mathbb R\oplus\mathbb C^3$ where $\mathbb C^3=\mathbb R^3\otimes\mathbb C$. We see that $N$ admits a $\mathrm K_4^o$-structure if, and only if there is a 3-dimensional orientable vector bundle $\alpha$ over $N$ such that $TN\cong\underline\mathbb R\oplus\alpha_\mathbb C$ where $\alpha_\mathbb C$ is the complexification of $\alpha$.
Let us assume that $q(N)=-4u$ for some $u\in H^4(N,\mathbb Z)$. We will first show that there is an $\mathbb H$-line bundle $\mu$ such that $-q(\mu)=e(\mu)=u$. For this we follow \cite[Proposition 2.5]{CCS}. The map $e: BSU(2)\rightarrow K(\mathbb Z,4)$ is an isomorphism on $\pi_i,\ i\le 4$ and an epimorphism on $\pi_5$. This implies that there is an $\mathbb H$-line bundle $\eta$ over the 5-skeleton $N^{(5)}$ of $N$ with $e(\eta)=u$. Since $\pi_5(BO(\infty))=\pi_6(BO(\infty))=0$, it follows that the stable bundle $\eta\oplus\underline\mathbb R^3$ extends to $N$. Since any stable vector bundle over $N$ is stably isomorphic to a vector bundle of rank $7$, there is a vector bundle $\xi$ over $N$ of rank 7 with $-q(\xi)=u$ and $w_2(\xi)=w_2(\mu)=0$. By a result from \cite{CS}, the bundle $\xi$ admits a $\mathrm{Spin}(5)$-structure. By the same argument as in the proof of Theorem \ref{thm global forms 5,6,7,8}, this structure reduces to an $\mathrm{Sp}(1)$-structure, i.e. $\xi\cong\mu\oplus\underline\mathbb R^3$ where $\mu$ has an $\mathrm{Sp}(1)$-structure. We have $-q(\mu)=-q(\xi)=u$.
Next we take the associated 3-dimensional bundle $\alpha=\rho_-(\mu)$, see \cite[Proposition 2.1]{CCVa}. From \cite[Lemma 2.4]{CCVa} it follows that $$p_1(\alpha)=p_1(\mu)-2e(\mu)=2q(\mu)-2e(\mu)=-4u=q(N).$$ On the other hand, $p_1(\alpha)=-c_2(\alpha_\mathbb C)=q(\alpha_\mathbb C)$. By Theorem \ref{fundamental thm}, $TN\cong\underline\mathbb R\oplus\alpha_\mathbb C$ and the sufficient condition follows.
By Theorem \ref{thm max compact type 4}, $\mathrm K_4\subset\mathrm G_2$ and so the necessary condition follows from Theorem \ref{thm global forms 5,6,7,8}. \end{proof}
Notice that by \cite[Lemma 2.6]{CD}, $0=w_4(N)=\rho_2(q(N))$ and so for a closed spin manifold $N$ there is always $v\in H^4(N;\mathbb Z)$ such that $q(N) = 2v$.
\subsection{Multisymplectic 3-forms of algebraic type 3}
\begin{thm}\label{global form 3} Let $N$ be a 7-dimensional manifold. Then $N$ admits a multisymplectic 3-form of algebraic type 3 if, and only if $N$ is orientable and spin$^c$. \end{thm} \begin{proof} $"\Leftarrow"$ This is proved in \cite[Theorem 5.7]{D}. $"\Rightarrow"$ By Lemma \ref{lemma subgroup of Spin^c(7)}, $\mathrm K_3$ is a subgroup of $\mathrm{Spin}^c(7)$ and so any $\mathrm K_3$-structure extends to a $\mathrm{Spin}^c(7)$-structure. \end{proof}
\subsection{Multisymplectic 3-forms of algebraic type 2}
\begin{thm}\label{thm gl form 2} Let $N$ be a 7-dimensional closed and connected manifold. If $N$ is orientable and spin and there are $e,f\in H^2(N,\mathbb Z)$ such that $e^2+f^2+3ef=-q(N)$, then $N$ admits a multisymplectic 3-form of algebraic type 2. If $N$ is simply-connected, then this condition is also necessary.
On the other hand, suppose that $N$ is orientable and admits a multisymplectic 3-form of algebraic type 2. Then $N$ is spin. \end{thm} \begin{proof} Let $\alpha$ and $\beta$ be complex line bundles with $c_1(\alpha)=e$ and $c_1(\beta)=f$. Put $\xi:=\underline\mathbb R\oplus\alpha\oplus\beta\oplus\alpha\otimes\beta$. By Theorem \ref{thm max compact type 2}, $\xi$ has a $\mathrm K_2^o$-structure. On the other hand, $w_2(\xi)=0$ and $q(\xi)=-c_2(\xi)=-e^2-f^2-3ef=q(N)$ by assumption. From Theorem \ref{fundamental thm} it follows that $TN\cong\xi$. It is obvious that the condition is necessary if $N$ is simply-connected.
Let us assume that $N$ is orientable and admits an $\mathrm O_2^+$-structure. We have seen in the proof of Lemma \ref{lemma semi-direct product type 2} that $\mathrm O_2^+=\mathrm O_2\cap\mathrm{SL}(\mathrm V)=\mathrm L\rtimes\mathrm H_2$. Moreover, the inclusion $\mathrm H_2\hookrightarrow\mathrm O_2^+$ is a homotopy equivalence and thus any $\mathrm O_2^+$-structure reduces to an $\mathrm H_2$-structure. As $\mathrm H_2\subset\Phi_6^\ast\tilde{\mathrm{G}}_2$, any $\mathrm H_2$-structure extends to a $\tilde{\mathrm{G}}_2$-structure. Theorem \ref{thm global forms 5,6,7,8} implies that $N$ is spin. \end{proof}
\subsection{Multisymplectic 3-forms of algebraic type 1}
\begin{lemma} Let $N$ be a 7-manifold without boundary. Suppose that there are oriented vector bundles $\alpha,\beta$ of rank 2 such that $TN\cong\underline\mathbb R^3\oplus\alpha\oplus\beta$. Then $N$ admits a multisymplectic 3-form of algebraic type 1. If $N$ is simply-connected, then the assumption is also necessary. \end{lemma} \begin{proof} Let us first show that the condition is sufficient. If $TN\cong\underline\mathbb R^3\oplus\alpha\oplus\beta$, then dually $T^\ast N\cong\underline\mathbb R^{3\ast}\oplus\alpha^\ast\oplus\beta^\ast$. Let $\theta_i,\ i=1,2,3$ be differential forms on $N$ which trivialize $\underline\mathbb R^{3\ast}$. Next we choose everywhere non-zero sections $\mu_1$ and $\mu_2$ of $\Lambda^2\alpha^\ast$ and $\Lambda^2\beta^\ast$, respectively. Now it is easy to see that $\theta_1\wedge\theta_2\wedge\theta_3+\theta_1\wedge\mu_1+\theta_2\wedge\mu_2$ is a multisymplectic 3-form of algebraic type 1.
Let us now assume that $\pi_1(N)=1$. Recall from Lemma \ref{lemma semi-direct product type 1} that $\mathrm O_1=\mathrm K\rtimes\mathrm H_1$. As $\mathrm K\cong\mathbb R^{10}$ we know that $\pi_i(\mathrm O_1/\mathrm H_1)=1,\ i\ge0$. From classical obstruction theory (see for example \cite{Th}) it follows that any $\mathrm O_1$-structure reduces to an $\mathrm H_1$-structure. If $N$ is simply-connected, then any $\mathrm H_1$-structure reduces to an $\mathrm H_1^o$-structure where $\mathrm H_1^o$ is the connected component of the identity element of $\mathrm H_1$. Now $\mathrm H_1^o\cong\mathrm{GL}^+(2,\mathbb R)\times\mathrm{GL}^+(2,\mathbb R)$ where $\mathrm{GL}^+(2,\mathbb R)=\{A\in\mathrm{GL}(2,\mathbb R):\ \det(A)>0\}$. As an $\mathrm H_1^o$-module, $\mathrm V\cong\mathbb R^3\oplus\mathbb R^2_1\oplus\mathbb R^2_2$ where $\mathbb R^3$ is a trivial representation and $\mathbb R^2_i$ is the standard representation of the $i$-th factor of $\mathrm H_1^o$. From this the necessary condition easily follows. \end{proof}
We see that a simply-connected manifold $N$ with a global multisymplectic 3-form of algebraic type 1 admits a spin$^c$-structure. Theorem \ref{fundamental thm} implies the following.
\begin{thm}\label{thm gl form 1} Let $N$ be a connected, closed and spin$^c$ 7-manifold. Suppose that there are $e,f\in H^2(N,\mathbb Z)$ such that $\rho_2(e+f)=w_2(N)$ and $-ef=q(N;e+f)$. Then $N$ admits a multisymplectic 3-form of algebraic type 1. If $N$ is simply-connected, then the assumption is also necessary. \end{thm}
\end{document} |
\begin{document}
\title[Encouraging student creativity in mathematics]{Encouraging student creativity in mathematics\\ through 3D design and 3D printing}
\author{Christopher R.\ H.\ Hanusa} \address{Department of Mathematics \\ Queens College (CUNY) \\ 65-30 Kissena Blvd. \\ Flushing, NY 11367\\ United States} \email{[email protected]} \thanks{The author was supported in part by NSF Grant DUE-1928565.}
\subjclass{Primary 97D40, 97N80; Secondary 00A66, 97D60, 97M80, 97P50, 97U70}
\keywords{Wolfram Mathematica, 3D printing, 3D modeling, 3D design, student creativity, design thinking, teaching mathematics, mathematical art, mathematical sculpture, standards-based grading}
\begin{abstract} This is a case study of teaching 3D design and 3D printing in a project-based computing course for undergraduate math majors. This article discusses content organization, implementation, project grading, and includes a personal reflection. There is an emphasis on lessons learned and how to encourage student creativity and artistic expression. An appendix details 3D design techniques in Mathematica. \end{abstract}
\maketitle
\section*{Introduction} As mathematicians we recognize that the research process can be messy and non-linear, that productive failure is a key part of achieving a new result, that creativity and pushing boundaries are necessary to our craft, and that there is beauty in the elegance of mathematical results and visualizations. Now ask yourself: How many of our courses let
\begin{wrapfigure}[16]{R}{0.34\textwidth}
\centering
\includegraphics[angle=270,origin=c,height=0.3\textwidth]{hanusa-student3.jpg}
\begin{minipage}{0.33\textwidth}
\caption{Jenny Xu with her 3D printed ceramic pot.}\end{minipage} \end{wrapfigure}
\noindent students experience these aspects that we find so fundamental in mathematics?
This article describes the project-based Mathematical Computing course at Queens College that aims to give students this experience. In this course, students learn the basics of programming in Wolfram Mathematica and actively apply their skills to design mathematical sculpture and create an interactive app. Section~1 describes the structure of the course, including learning objectives and ensuring student success. Section~2 details the philosophy behind and the implementation of the 3D design and 3D printing module in the course, including how students learn about design thinking and the elements of art. Section~3 discusses the written lab report and the way the project deliverables are graded. Section~4 is a personal reflection on the course. Finally, Appendix~\ref{sec:3DDesign} summarizes key techniques for 3D design in Mathematica.
\section{Course Structure}
\subsection{Student Population} Queens College is located in its namesake borough of Queens in New York City and is one of the 25 campuses of the City University of New York. The student body of Queens College of approximately 18,000 mirrors the population of Queens, the most ethnically diverse county in the United States. Among freshmen, 44\% are immigrants born outside the mainland US, and 34\% are first generation students. Queens College serves as an engine of upward mobility and is proud to offer a high-quality education to students at a fraction of the cost of private institutions.
The Queens College Department of Mathematics has 31 full-time faculty members and many part-time instructors. There are approximately 450 math majors in five specialties: pure mathematics, applied mathematics, data science and statistics, secondary education, and elementary education. The largest cohorts are applied mathematics and secondary education.
The course Mathematical Computing (MATH 250) is offered once per year with 75-minute classes twice a week during a 14-week semester. The course has an enrollment between 15 and 25 students, most of whom are juniors or seniors. This course fulfills the computing requirement for applied mathematics majors and serves as a math elective for all other math majors. The prerequisite for the course is either multivariable calculus or linear algebra, to ensure that students have prior experience in multi-dimensional thinking and visualization. The course has no computer programming prerequisite in order to serve the largest possible audience. At the same time, a handful of enrolled students do have extensive programming experience as they are majoring or minoring in computer science.
\begin{figure}
\caption{The Fall 2018 Mathematical Computing class.}
\end{figure}
\subsection{Course Materials}
The current structure of the course was initiated in Spring 2015. There is no textbook for this course; the instructor provides freely available Mathematica tutorials and accompanying video lectures on his webpage.\footnote{\url{http://qc.edu/~chanusa/courses/250/21/}} The students contribute to an online discussion platform during the semester for pre-class activities; past platforms have included Blackboard, Microsoft Teams, and Campuswire.
The City University of New York has a university-wide site license for Wolfram Mathematica, which means all Queens College campus computers have Mathematica installed, and all students may install Mathematica on their home machines, so software access and cost are not barriers for our students.
Students have access to the Queens College Makerspace, a new space located in the library that houses approximately eight 3D printers, a 3D scanner, a laser cutter, an embroidery machine, a CNC machine, pen plotters, electronic supplies, and a variety of workshop tools. Once students have completed the orientation, they may freely use any of the materials and machines.
Students are required to pay to print their final model through a third-party 3D printing company such as Shapeways. This is a cost of approximately \$20-\$60, depending on the size of the model and the material they choose, and is their only expense for the course.
\subsection{Learning Objectives} \label{sec:objectives}
Our department believes that every math major should gain some programming experience. For the vast majority of students, this class is their first exposure to any mathematical software. So the most fundamental learning objective is that students learn the basics of programming in Mathematica, developing fluency with the key data structures of lists and functions. Students learn how to design, code, run, test, and debug computer programs. Along the way, students are developing good programming techniques, including sectioning and documenting their code. Students also gain skills to become confident and self-sufficient learners through the use of Mathematica's extensive documentation and internet searches.
Another important objective is that students develop mathematical, programming, and problem-solving skills. For example, students gain a deeper understanding of three-dimensional geometry, including coordinate systems, multivariable functions, and three-dimensional objects. From a programming perspective, students learn and apply functional programming, which is new even for experienced programmers. Building on this knowledge, students apply basic problem-solving skills including analyzing problems, modeling a problem as a system of objects, creating algorithms, and implementing solutions in Mathematica.
A final objective is for students to use mathematics and programming for experimentation, as creative tools, and collaboratively. They apply the design process and communicate the decisions made therein, including ideation, artistic principles, prototyping, and revisions. Students advance teamwork skills by collaborating with classmates, discussing and solving problems in a group setting, and practicing giving and receiving constructive feedback. These ideas will be developed further in Section 2.
\subsection{A Day in the Life}
Because this is a project-based course, class time is balanced between content acquisition and project work. The semester breaks down into three modules, each of which culminates in an individual project.
At the beginning of each module, classes are centered around learning the course content. Before each class, students work through a tutorial notebook with an accompanying video lecture, created by the author. Students are encouraged to ask questions about the content through the online discussion board. (A tutorial structure works well given the disparate levels of programming background because the tutorials can be consumed at each student's individual speed.) During class time, students work together in groups on comprehension and challenge questions that reinforce key ideas from the day's tutorial and lecture. During this time the instructor travels from group to group, addressing any questions and, if necessary, gently nudging students in the correct direction. Each class ends with a debriefing of the different ways to approach solving the questions.
As each module progresses, class time turns to project work. Students work on their projects outside class and bring their progress to class for instructor and peer consultation and feedback. As the module nears its end, the instructor spends much of the class period troubleshooting and debugging code and ensuring students are on track to complete their project by the deadlines. Once the students finish their final draft, they complete a peer review with one or more classmates and revise their work before submitting their work for grading.
\begin{figure}
\caption{Students working in groups on their projects.}
\end{figure}
\subsection{Course Content Overview}
The first module of the class teaches students the basics of working with Mathematica, exposes students to the wide capabilities of Mathematica, and helps students develop fluency with lists, list manipulation, and functions. The first project involves creating a tutorial centered around a coherent set of Mathematica commands that each student chooses. The instructor encourages students to choose a topic related to the mathematics they have seen in previous courses or from one of the many subject domains to which Mathematica applies. Past student projects include visualizing graphs of multivariable functions, matrix operations, statistical methods, image processing, machine learning, finance, geography, and sound.
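To give a flavor of the level involved, here is a minimal sketch of the kind of list-and-function exercise students work through early in Module 1. It is illustrative rather than taken from the course materials, and the helper name \texttt{collatzStep} is an invented example.

\begin{verbatim}
(* Define a function and apply it repeatedly -- core Module 1 skills. *)
collatzStep[n_Integer] := If[EvenQ[n], n/2, 3 n + 1]

(* Build the Collatz trajectory of 27, then study the resulting list. *)
trajectory = NestWhileList[collatzStep, 27, # != 1 &];

Length[trajectory]        (* 112 terms, counting the start and the final 1 *)
Max[trajectory]           (* the largest value reached is 9232 *)
ListLinePlot[trajectory]  (* a first taste of visualization *)
\end{verbatim}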
The second module of the class is centered around two- and three-dimensional graphics, 3D design, and the 3D printing process. Students design and 3D print a mathematical sculpture. This is the focus of the remainder of this article.
In the third module of the class students learn about and apply the interactive capabilities of Mathematica. The third project involves creating an interactive program similar to an app you might use on your phone. Students determine and design the entire interface and program the behavior of the interface depending on the user input. The resulting programs always vary widely and use a broad spectrum of Mathematica's visual, audio, and interactive capabilities. Past student projects include an interactive map, a piano simulator, a fractal explorer, trivia guessing games, two-player games like dots and boxes or tic-tac-toe, and action games like pong, snake, or boulder dash.
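Interactive programs in Mathematica are typically built with \texttt{Manipulate}. As a toy illustration (far simpler than the student apps listed above, and not taken from the course materials), the following few lines already produce a small interactive program whose sliders redraw a curve in real time.

\begin{verbatim}
(* A toy interactive app: two frequency sliders and a phase slider
   control a Lissajous curve that is redrawn as the user moves them. *)
Manipulate[
 ParametricPlot[{Sin[a t], Sin[b t + phase]}, {t, 0, 2 Pi},
  PlotRange -> 1.1, Axes -> False],
 {a, 1, 8, 1}, {b, 1, 8, 1}, {phase, 0, Pi}]
\end{verbatim}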
\subsection{Ensuring Student Success}
The most important part of ensuring student success is making sure that each student is working on an individually fruitful and feasible project. It must be fruitful in that the student develops programming and problem solving skills and feels a sense of accomplishment from their work. It must be feasible in that each project must be achievable in the given timeframe with the student's skill set. To that end, the instructor must help each student develop a well-defined project goal of the right scope. This is described in detail in Section~\ref{sec:design}.
During this project work phase, the instructor is simultaneously overseeing 15 to 25 different research projects. For this reason, it is important that the students be able to function mostly independently or with help from their peers. It has been crucial to provide a large amount of scaffolding around every project, including outlining transparent expectations and setting deadlines to keep the students on track. The course webpage provides details about how far along students should be each class period and this is reinforced in class by the instructor. This signals to students who are behind that they need to devote more time to the project and/or come to office hours for extra help.
Because the students have such diverse prior knowledge and backgrounds, the fruitfulness and feasibility of a project will vary from student to student and the grading system must accommodate this disparity. In particular, if project expectations were uniform across all students, this might encourage advanced students to put in minimal work and discourage beginning students. Details about project grading are discussed in depth in Section~\ref{sec:grading}.
\section{The 3D Design Project}
\subsection{Overview} \label{sec:projectoverview}
In the second module of the course, students are tasked with designing and 3D printing a mathematical sculpture. Each student artwork is expected to originate from some mathematical concept. The artwork may visualize an idea from mathematics, or instead use mathematical and computational techniques to recreate a specific object or abstract form of interest.
In contrast to more established computer-aided design programs (such as Solidworks, AutoCAD, or Fusion 360), 3D design in Mathematica relies on the user building their model from base mathematical concepts. To be able to do this, students must refresh their mathematical knowledge of three-dimensional coordinate systems, curves, surfaces, and geometric objects.
The author has created Mathematica tutorials on 2D graphics objects, 3D graphics objects, parametric curves and surfaces, transformations thereof, and how to make each of these types of objects 3D printable. (See Appendix~\ref{sec:3DDesign} for more information.) These ideas are interwoven with discussions of the elements of art, design thinking, and the 3D printing process.
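As a small, hedged illustration of the workflow these tutorials build toward (the torus, the option values, and the file name are illustrative choices rather than the course's prescribed example), a closed parametric surface can be defined directly from its equations and exported as a printable mesh:

\begin{verbatim}
(* A torus built directly from its parametrization. *)
torus = ParametricPlot3D[
   {(3 + Cos[v]) Cos[u], (3 + Cos[v]) Sin[u], Sin[v]},
   {u, 0, 2 Pi}, {v, 0, 2 Pi},
   Mesh -> None, PlotPoints -> 60];  (* more points -> smoother print *)

(* Export a triangulated mesh that a slicer or print service accepts. *)
Export["torus.stl", torus]
\end{verbatim}

Moving from a toy example like this one to a sculpture with adequate wall thickness and a watertight mesh is exactly where the prototyping loop described in the next sections comes in.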
\subsection{Creative Expression and Artistic Intentionality} \label{sec:creativity}
The author cares deeply that students get a chance to use mathematics creatively and learn to think artistically. A few days into this second module, students are prompted to explore examples of mathematical art online, including works by Bathsheba Grossman\footnote{\url{http://www.shapeways.com/shops/bathsheba}}, Henry Segerman\footnote{\url{http://www.shapeways.com/shops/henryseg}}, Laura Taalman\footnote{\url{https://mathgrrl.com/designs/}}, and various artists exhibiting at the Bridges Mathematical Art Conference\footnote{\url{http://gallery.bridgesmathart.org/exhibitions/}}. They are asked to choose a few inspiring artworks, and share and annotate them with a few sentences about why they chose the pieces on the online discussion board before class. Here are a few examples of past student comments:
\begin{myquotation}
``I find the following pieces of artwork interesting and inspiring because I can imagine the pieces being utilized as jewelry as a statement piece. The 3D prints in my option are very unique and striking visually.'' \end{myquotation}
\begin{myquotation}
``This gif unlocked memories I forgot I had. I remembered learning about flowers having symmetry, patterns, and some even follow the Fibonacci sequence. They are mathematical and stunning!'' \end{myquotation}
\begin{myquotation}
``[This sculpture] shows the beauty of the randomness and reorganization of water. This piece is not created using a 3D printer, however, I can see something like this being designed and created on a 3D printer.'' \end{myquotation}
\begin{wrapfigure}[13]{R}{0.31\textwidth}
\centering
\includegraphics[origin=c,width=0.26\textwidth]{hanusa-greco.jpg}
\begin{minipage}{0.29\textwidth}\caption{Professor of Art Matthew Greco.}\end{minipage} \end{wrapfigure}
The next class session the students are in for a treat. A colleague from the art department comes to the classroom and gives a presentation about 3D printing and the artistic elements of sculpture, including the concepts of positive and negative space, symmetry vs. asymmetry, proportion, repetition, contrast, harmony, and movement. After the structured presentation, the class discusses the artistic elements that are present in the mathematical art the students had shared the night before. Because it is often the first time many students have thought about these ideas, they are always engaged and inspired by this discussion.
This is supplemented by handouts on the Elements of Art\footnote{ \url{https://www.getty.edu/education/teachers/building_lessons/formal_analysis.html}} and the Principles of Design\footnote{ \url{https://www.getty.edu/education/teachers/building_lessons/formal_analysis2.html}} from the Getty Museum's education initiative. Once students are given these insights into the world of art and design, they can use this lexicon to express their goals in the creative process. Do they want to aim for a piece that has distinctive negative space? Are they looking for a piece that looks lightweight or instead has gravitas? Through this exposure, students gain the ability to express key aspects of inspiring artwork, become more intentional about how their sculpture develops, and enrich their final reflections.
\subsection{Design Thinking} \label{sec:design}
``3D printing'' sounds like it could be simple$\hdots$ until you try it. The process involves many messy and complex steps between the initial seed of an idea and the final physical piece. Students confront the full 3D printing experience in this class, including working through productive failure at many levels.
Students are introduced to a macro vision of this adventure from start to finish using the ``double diamond'' framework in Figure~\ref{fig:doublediamond}. This breaks down the process into four stages: Discover, Define, Develop, and Deliver. The first diamond is where a vague initial concept is made into a concrete problem specification. The second diamond is where the idealized problem specification meets practicality and feasibility to be physically realized.
\begin{figure}
\caption{The double diamond design model}
\label{fig:doublediamond}
\end{figure}
\subsubsection{Discover Stage}
The process starts with the students receiving the very vague project description given in the first paragraph of Section~\ref{sec:projectoverview}. The students are primed to think artistically after the web browsing and discussion of the elements of art. At that point, students are encouraged to explore, research, and brainstorm to create many possible project directions that touch on mathematical principles or outside interests.
\subsubsection{Define Stage}
This brings us to the widest part of the first diamond and the next stage in the process: problem definition. The many directions generated in the discover stage need to be triaged based on personal preference, time constraints, ambition, and peer feedback.
Students propose initial project ideas over email by a specified deadline; the next class period is spent helping students hone these initial ideas to precise project goals. This requires that the instructor understands Mathematica's capabilities and can envision how the student could complete the project when they propose it. Furthermore, the instructor must informally assess each individual student's level of knowledge to make sure that the steps to completion are not too simple nor too complex.
After multiple rounds of back and forth, the result is a precise problem specification that the student can always refer back to if they need to remind themselves what they are trying to accomplish.
\subsubsection{Develop Stage}
We have reached the point in between the two diamonds and it is time for students to branch out once again. They need to take the precise idea and understand how they are going to realize it. Students are encouraged to sketch their idea on paper first and develop a plan before starting to code. They then develop the necessary coding skills and mathematical knowledge to explore the space of possibilities. They also make use of a prototyping feedback loop to learn what works and doesn't work in the 3D printing world.
This process has been enhanced by the new Queens College Makerspace, first incorporated into this course in the Fall 2021 semester. Students use the 3D printers for ``rapid prototyping'' of their initial designs, which can be printed and critiqued within a day's time.
\begin{figure}
\caption{Students working in the Queens College Makerspace.}
\end{figure}
\subsubsection{Deliver Stage}
\begin{wrapfigure}[11]{R}{0.32\textwidth}
\centering
\includegraphics[origin=c,width=0.3\textwidth]{hanusa-class6.jpg}
\begin{minipage}{0.3\textwidth}\caption{In class peer feedback session.}\end{minipage} \end{wrapfigure}
The exploration, research, and prototyping that happens in the develop stage then needs to be harnessed into a final project. As the tests continue, the students give each other feedback until the finalized model is complete. A peer community is extremely beneficial during this process for giving advice about the Mathematica commands they are using and to help shape the final piece. The class period before the project is due is spent doing a formal peer review. The instructor provides pairs of students with a collection of questions that each student answers about the other student's work. The questions guide students to provide constructive feedback about whether the current model, Mathematica notebook, and report satisfy the project expectations.
After instructor feedback on the grading criteria (see Section~\ref{sec:grading}), students revise their models one more time before sending the final, honed models to be 3D printed through Shapeways to take advantage of the variety of materials in which they can print, including high resolution nylon and resin, steel, cast metals, and full color sandstone.
\subsection{The Culminating Experience}
At the end of the semester, the instructor organizes an art exhibition for students to display their artwork in the Queens College Library. This culminating experience of the semester gives the students an opportunity to take a victory lap and have yet another experience from the art world. The students bring their final 3D printed sculptures and we gather together for every student to give a two-minute artist talk about their piece and the highs and lows of the creation process. It is clear that throughout the process, students have developed agency over their artwork and are proud of the results.
\begin{figure}
\caption{A student presenting work during the art exhibition.}
\end{figure}
\begin{figure}
\caption{Students proudly showing off their final artwork, 3D printed in full color sandstone, white nylon, and gold steel.}
\end{figure}
\begin{wrapfigure}[21]{R}{0.38\textwidth}
\centering
\includegraphics[origin=c,width=0.34\textwidth]{hanusa-exhibit5.jpg}
\begin{minipage}{0.35\textwidth}\caption{An installation of student art at the library.}\end{minipage} \end{wrapfigure}
At the end of the art show, we install the artwork in an art exhibit that stays on display in the library until the end of the next semester. Before the show, the instructor asks each student to provide the name of their artwork and a two-sentence description. This information is used to create professional-looking museum placards that are placed in the display next to the artwork. It is empowering to see one's work in an art exhibit alongside other people's creations, and students enjoy being able to show off their artwork to their friends.
The helpfulness of the Queens College Library staff has made this process a pleasure. It is wonderful to be able to hold the event in the library and for the artwork to be displayed in a public space. The exhibits have become a highlight of campus tours for prospective Queens College students.
\subsection{Project Timeline}
When creating a schedule for this module, it is important to account for the time that it takes to prepare a 3D model for printing, have it printed by the 3D printing company, and receive the model after shipping. This explains why this module is the second of three, and not the final one. Here is a representative timeline for organizing the content in a course that meets twice a week for 14 weeks:
\begin{description}
\item[Day 9] The class wraps up Module 1. Students complete a tutorial on the basics of 2D graphics. Students sign up for a tour of the Queens College Makerspace and create a Shapeways for Education account.
\item[Day 10] Students complete a tutorial on the basics of 3D graphics with an emphasis on functional programming. Students are given a list of sites with mathematical art and asked to share and annotate a couple examples on our online discussion board.
\item[Day 11] Students complete a tutorial on more advanced 3D graphics. On this day Professor Greco talks about 3D printing and the artistic elements of sculpture.
\item[Day 12] Students complete a tutorial on making high quality 3D models and start to brainstorm possible project ideas. We discuss how 3D printing works to ensure the final models will be printable.
\item[Day 13] Students complete a tutorial on 3D design techniques. Students finalize their project specifications.
\item[Day 14] Students see a number of minimal working examples to see how to realize common 3D design tasks and watch a video about advanced techniques in 3D design. Students are actively designing their sculpture inside and outside class.
\item[Day 15] Students are in full project work mode inside and outside class. In class we introduce Ultimaker Cura and rapid prototyping, getting practice using the Makerspace to print a model.
\item[Day 16] Students finalize their model and print a physical prototype for peer review. They complete an initial draft of their lab report (minus discussion of revision process).
\item[Day 17] Students complete an in-class peer review of prototype and draft report, after which they revise their sculpture, notebook, and report.
\item[Day 18] Students submit final prototype to Shapeways for 3D printing in desired material, and submit their final report to the instructor. The class transitions to thinking about Module 3.
\end{description}
Soon after Day 18, the instructor gives feedback on the final report and allows for revisions. Shapeways prints and ships the 3D printed model within the next few weeks. The semester culmination event and art exhibit installation take place during our assigned final exam period.
\section{Project Deliverables and Grading}
At the end of the project, students submit their physical mathematical sculpture, an organized and commented Mathematica notebook that recreates their 3D model, the digital model computer file, and a two-to-three page writeup about the project.
\subsection{The Written Lab Report}
Students are required to write and submit a two-to-three page lab report that is a reflection on the artistic and creative process and documents the student's process throughout the project.
The goal is to give students the opportunity to reflect on how they have developed in their roles as programmers, mathematicians, and artists. The report also serves to ensure that the final artwork is a product of deliberate choices and not simple happenstance or academic dishonesty.
From a programming standpoint, students are asked to share the commands and algorithmic techniques they integrate into the coding of their projects and how they advance their programming skills. They also detail how they took the initiative to learn the necessary commands and techniques in Mathematica. Students then describe the mathematical principles that are at work and how those principles are evident in the finished sculpture. Further, students address how the mathematical principles match their level of mathematical knowledge.
Students are called upon to reflect upon their artistic choices: the inspiration for the project, the aesthetic they were going for, the elements of art they worked to achieve, the reason for their choice of final material, and what the viewer should appreciate about the artwork.
Students are asked to address prompts related to the creation, prototyping, and revision processes: what sort of obstacles they encountered along the way, how they experienced the first prototype they created and how that inspired them to revise the project. Students have to explain how their project evolved over time, including how discussions with their classmates and during the peer review process influenced the ways in which they changed their project. Last, students are to reflect on how they might approach the process differently if they were starting over.
\subsection{Grading the Projects} \label{sec:grading}
In the author's experience, instructors who exclusively grade mathematics exercises take comfort in the idea that their grading role is objective---a mathematical answer is either right or wrong. (The author disagrees with this assessment.) This can lead to discomfort about one's role in grading student artwork because the universe of possible subjects is infinite and the universe of approaches to realizing the final piece is also infinite. In effect, we are called upon to render a value judgment on student work and at the same time we recognize that our interpretation of all parts of the process is subjective. It is also important to recognize that many math students are often uncomfortable with the freedom (and therefore the responsibility) that comes from an exercise that does not have one ``right'' answer.
The author has developed a grading method for the projects in this course that is inspired by his experience in standards-based grading, which he now uses when teaching a non-project-based class like calculus. In standards-based grading, the learning objectives are transparently provided to the students, and regular assessments and reassessments allow students to show their mastery of the material at any point during the semester. Such an approach makes it possible to assess {\it everything} that the instructor values, including so-called ``soft skills'' like the ability to discuss math with peers. This method of grading has been transformative because it changes the dynamic in the class from adversarial to collaborative between students and instructor, and grades become much more aligned with student progress towards learning objectives.
The rubric developed for the 3D design project has been refined over the years to address the course learning objectives in Section~\ref{sec:objectives}. Students are assessed on the following standards. \begin{description} \item[Attentiveness] The student has made active and steady progress and met deadlines. \item[Name and Description] The name and description of the final artwork are precise and concise. \item[Intentionality] The vision of the artwork has been honed over time and has a story behind it. \item[Mathiness] The object is created using mathematical content at the student's level of knowledge. \item[Functional Techniques] The object was developed using Mathematica techniques learned in class. \item[Notebook Organization] The notebook was well organized into sections and with text cells that provide context for the code. \item[Artistic Writeup] The report discusses the student's development as an artist and artistic choices. \item[Technical Writeup] The report discusses the student's technical choices and their development as a both a mathematician and programmer. \item[Revision Writeup] The report discusses the creation, prototyping, and revision processes. \item[Writeup style] The report is well written and follows the format requirements. \end{description}
After the student submits their deliverables, each standard is scored on an E-M-R-N scale (Exemplary, Meets Expectations, Revisions Needed, and Not Assessable) and the instructor provides detailed feedback about each standard. The ten scores are converted to a provisional letter grade as follows.
\begin{tabular}{cp{4in}}
{\bf A} & Earn a score of M or higher on all standards and a score of E on at least eight standards.\\
{\bf B} & Earn a score of M or higher on all standards and a score of E on at least four standards.\\
{\bf C} & Earn a score of M or higher on nine standards and no N scores.\\
{\bf D} & Earn a score of M or higher on eight standards and at most one N score.\\
{\bf F} & Have fewer than eight E or M scores.\\ \end{tabular}
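For concreteness, the conversion in the table above is simple enough to express as a short function. The following is a hypothetical helper written for this article (the function name and the encoding of scores as the strings \texttt{"E"}, \texttt{"M"}, \texttt{"R"}, \texttt{"N"} are assumptions), not the instructor's actual gradebook code.

\begin{verbatim}
(* Convert ten E-M-R-N standard scores to a provisional letter grade. *)
provisionalGrade[scores_List] := Module[{e, m, n},
  e = Count[scores, "E"];         (* exemplary standards        *)
  m = Count[scores, "E" | "M"];   (* standards at M or higher   *)
  n = Count[scores, "N"];         (* not-assessable standards   *)
  Which[
   m == 10 && e >= 8, "A",
   m == 10 && e >= 4, "B",
   m >= 9 && n == 0,  "C",
   m >= 8 && n <= 1,  "D",
   True,              "F"]]

provisionalGrade[{"E","E","E","E","M","E","E","E","M","E"}]  (* -> "A" *)
\end{verbatim}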
Note that a student cannot receive a grade higher than C if there is some aspect of their project that needs revision. Students are given the opportunity to improve their provisional grade by revising their project deliverables and having the instructor re-score the standards. Due to time constraints and instructor mental load, students are limited to one resubmission.
Consistent with Talbert's pillars of alternative grading (see \cite{Talbert}), this grading scheme was developed to provide complete transparency in the expectations for students and how their work will be graded, and by giving students the opportunity for peer and instructor feedback throughout the process with the ability to revise work without penalty.
Students appreciate the transparency in evaluation expectations. Since the expectations align with the class's learning objectives, a student who addresses the items in the rubric is showing that they learned what the instructor wanted them to learn. The revision process benefits both the student and the instructor. The instructor can convey a frank assessment of the student's work and how it meets or does not meet expectations. The student gets a chance to see where their work does not meet expectations and other aspects that could be improved, which reinforces that project development is a process and there is always room for improvement. The student can decide whether it is worth putting in the time to revise their work to improve their standards scores, and in a direct and transparent way, their final project grade.
\section{Course Reflection}
The students and the author enjoy this project immensely; this section includes reflections from both points of view.
\subsection{Student Comments}
Student voices are collected at the end of the semester through a request to write ``a letter to future students'', a curated collection of which is shared with incoming students the following semester. Here are some of their words of wisdom.
\begin{myquotation} ``For a lot of students, this class might be one of the first instances that unravels for them what it is exactly that is so beautiful about mathematics, though things like precision are not particularly stressed, your creative freedom will know no bounds in this class. You should absolutely take advantage of this, the depth at which you're allowed to pursue a particular problem is seldom allowed in other mathematics courses, this versatility is a gift.'' \end{myquotation}
\begin{myquotation} ``Incoming students can expect to be as creative as they want when doing their projects. The professor allows complete creative freedom and that is what made the project fun to complete. It would have been nice to know before hand that I would need to spend the extra hours outside of the classroom to understand the topics. Overall, I have nothing but positive things to say about this class and I would recommend it to anyone who is creative and who wish to show their creative prowess.'' \end{myquotation}
\begin{myquotation} ``The amazing thing is that the professor gives you the tool set to do these projects throughout the course and all you have to do is apply that and he even gives you the freedom to put your own flavor in to the mix. With this you get to truly see the diversity that everyone can come up with through each project.'' \end{myquotation}
\begin{myquotation}
``We completed some major projects. The most exciting of them all was designing three dimensional objects to be sent to a 3D-printing company (Shapeways). I was able to make a piece of jewelry based on the intersection of sine waves and lines that I can sell if I wanted to! There's nothing like holding in your hands and feeling the math you've been drawing on paper or seeing on the computer for your entire life. We've built tutorials. We've built games. We've built economic models. We've done so much with the robust power of Mathematica. This class inspired me so much that I want to keep playing with it and exploring it even into the Summer, long after the course is over.'' \end{myquotation}
\begin{myquotation} ``Do all of the assignments. Play with Mathematica in your spare time. Explore the internet---there are tons of resources, tutorials and communities of users who are already asking questions you probably are searching for the answer to. This is one of the few classes where you'll be challenged to think mathematically, critically, and creatively. This is one of the few classes where you may not know the answer and neither does the professor. This is one of the few classes where you'll learn that it's okay not to have the answer, even in a mathematics class. The professor will work hardest to help you master Mathematica and bring your dreams to live via coding. Hopefully Mathematica will take you to a new place in your journey with mathematics.'' \end{myquotation}
\begin{myquotation} ``Out of everything I've learned, there's one thing that stuck out to me the most. One day I asked the professor a mathematical question which for me was hard, so I asked him for help. He thought about my question and then told me `I don't know.' As bright and intelligent as the professor is, he humanized himself in that moment. It made me feel like I have a shot at this math thing. It was more inspirational than anything. He could have given me an answer to make himself look great and leave me baffled but he didn't.'' \end{myquotation}
\subsection{Instructor Reflection}
It is always a treat to teach this class. While the project descriptions and expectations have converged over time, the course is always different and exciting to teach! Project deliverables vary greatly from student to student and semester to semester because I encourage students to draw from their lived experience and personal interests and because Mathematica's computational power extends to a vast variety of subject areas.
This course is a lot of work to coordinate and supervise. The students are given leeway to explore, experience productive failure, and take ownership of their learning. This is a messy process and often the first time the students encounter this type of learning environment, so as an instructor I need to encourage students and provide technical help if their project choices end up being a bit too ambitious. This means that in its current incarnation this course cannot realistically scale up to serve more students without additional teaching assistants or peer mentors. Relatedly, because I prioritize ensuring that the day-to-day process runs smoothly, I end up falling behind in returning project feedback promptly.
One conscious choice I made in the course construction is that there are three projects in the semester. I think the first project (the Mathematica tutorial) serves its purpose phenomenally---students learn the structure of Mathematica and its basic data types, they explore a topic of interest, they learn how to format a notebook, and they ease into the self-directed and collaborative modes that make the rest of the semester run smoothly. It also introduces the alternative grading scheme and reinforces that learning and personal growth is valued. Students often wish that they had more time for each project, which is impossible if there are three projects. I take the view that the second project is there to give students a taste of what 3D printing can be. If I devoted a whole semester to it, I am sure that the students' skills in 3D printing would improve as well as the quality of the final pieces. However, we would run into issues at the end of the semester with the time it takes for models to be printed and be shipped, and the students would miss out on the distinct yet complementary third project where they develop different programming skills and learn about designing interfaces with human interaction. Often the second and third projects are both rated equally positively by students in terms of preference and how much they felt they learned.
One of the most rewarding points in the class is at the end of the second project. It is such an amazing and empowering experience to hold something in your hand that had only previously existed in your mind and in the computer. When the final models arrive, students can't wait to show me and their classmates; they regularly relate how proud they were to share their sculpture with their friends and family. And I often get feedback that before the class they never expected to be able to create something of their own, yet after the class feel comfortable and confident with that process. With the opening of the Queens College Makerspace, I expect future students to take even more ownership of their learning.
Just as I hope that my students develop their growth mindset in this course, I am continually working to improve my teaching and the curriculum. For example, each semester I modify the standards and grade conversion table as I learn more about the way I assign scores and what I expect from students. In addition, one thing I noticed that had been lacking in previous semesters was that students would proceed linearly from start to finish without exploring a wide space of possible solutions. So I added a requirement in Fall 2021 that students need to modify some parameter or aspect of their project, and reflect on which version they prefer and why. With this prompt, students thought more deeply about why they ended up with their final result and they shared more about their creative process in their reports.
Mathematical Computing is a course intended for math majors who have a certain amount of mathematical background. I wanted to offer non-majors the opportunity to be creative with mathematics and create mathematical art as well. I created a project-based course for non-majors called Mathematical Design with the online graphing calculator Desmos as its medium. I look forward to further developing the curriculum for this class and publishing more about it. Stay tuned!
\section{Acknowledgments}
Many thanks to Leah Wrenn Berman, Emily Dennett, and Patrick Johnson for their advice for improving this article. I would also like to thank Matthew Greco for being a great collaborative partner at Queens College.
\appendix \section{3D design in Mathematica} \label{sec:3DDesign}
In this section the reader can learn multiple techniques to use Mathematica for 3D design. An interactive copy of this notebook is available on the Wolfram Notebook Archive \cite{HanusaMMA}. Additional resources about 3D design in Mathematica are available on the author's webpage\footnote{\url{http://qc.edu/~chanusa/mathematica}}.
The author would like to acknowledge Mathematica's Documentation Center \cite{Mathematica} for its extensive examples that explain the Wolfram Language in great detail, the Mathematica StackExchange community\footnote{\url{https://mathematica.stackexchange.com/}} for their selfless sharing of ideas that have inspired a number of advanced explorations, and Henry Segerman's article \cite{Segerman}, which was very helpful with initial trials and tribulations in 3D modeling in Mathematica. Lastly, many thanks to Jakub Kuczmarski for his {\tt mmaCells} \LaTeX\ package\footnote{\url{https://github.com/jkuczm/mmacells}} that allowed for in-line typesetting of Mathematica code in this article.
\subsection{Basic 3D graphics and exporting} \label{sec:basic3D}
Creating a 3D model file with Mathematica can be exceptionally easy or extremely frustrating. In many cases, creating your 3D file is but a simple call to the {\tt Export} function, where you specify the file extension {\tt .stl} for uncolored models or {\tt .wrl} for colored models. \begin{mmaCell}{Code} Export["file.stl", model]
\end{mmaCell} In general, I like to append the file name to the current notebook's working directory (using the {\tt NotebookDirectory} command) to organize my files.
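For example, here is a minimal sketch of that pattern, where {\tt model} is a hypothetical symbol holding your graphics and the file name is arbitrary:
\begin{mmaCell}{Code} Export[NotebookDirectory[] <> "model.stl", model]
\end{mmaCell}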
The most basic way to create an object in Mathematica is to specify the coordinates and parameters of the 3D primitives such as {\tt Sphere}, {\tt Cylinder}, {\tt Tube}, {\tt Tetrahedron} and wrap them all in a {\tt Graphics3D} command. Happily, starting in Mathematica version 13, these primitives behave well when they are exported. For example, here is the code that builds a model of a basic snowman using spheres, cylinders, a cone, and some rectangular prisms: \noindent \begin{mmaCell}[moredefined={Sphere,Cylinder,Cone}]{Code} snowman = Graphics3D[{
Sphere[{0, 0, 0}, 1],
Sphere[{0, 0, 1.3}, 0.8],
Sphere[{0, 0, 2.4}, 0.6],
Black, Cylinder[{{0, 0, 2.8}, {0, 0, 2.9}}, 0.7],
Cylinder[{{0, 0, 2.9}, {0, 0, 3.5}}, 0.5],
Orange, Cone[{{0, -0.55, 2.4}, {0, -0.9, 2.4}}, 0.1],
Black, Cuboid[{0, -0.75, 1.3} - 0.1 {1, 1, 1},
{0, -0.75, 1.3} + 0.1 {1, 1, 1}],
Cuboid[{0, -0.7, 1.6} - 0.1 {1, 1, 1},
{0, -0.7, 1.6} + 0.1 {1, 1, 1}],
Cuboid[{0, -0.7, 1.0} - 0.1 {1, 1, 1},
{0, -0.7, 1.0} + 0.1 {1, 1, 1}] }] Export[NotebookDirectory[] <> "snowman.stl", snowman]
\end{mmaCell} In each {\tt Sphere} command, you specify the center of the sphere and its radius. In the {\tt Cylinder} and {\tt Cone} commands you are specifying the endpoints of its axis of rotation as well as its radius. In the {\tt Cuboid} command, you specify the opposite corners of the rectangular prisms. The resulting snowman looks like this: \mmaCellGraphics[ig={height=3in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a1-i1.png}
I always suggest that you re-import your STL files to ensure that everything exported correctly.
\begin{mmaCell}{Code} Import[NotebookDirectory[] <> "snowman.stl"]
\end{mmaCell}
\mmaCellGraphics[ig={height=3in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a1-i2.png}
Upon observation, we see that the imported model has no color and that the final result is very faceted because the model was exported with the default options. We can greatly improve the result with the more advanced commands shown in Section~\ref{sec:discretize}; we use these techniques to write updated code to create a high quality, full color model of the snowman in Section~\ref{sec:snowman}.
Mathematica is not currently capable of importing WRL files so any colored models should be opened in a different program or uploaded to a web service such as Sketchfab\footnote{\url{https://sketchfab.com}} to ensure that the model has exported correctly.
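As a minimal sketch (the model and file name here are hypothetical), a colored model is exported with the same {\tt Export} call, only with the {\tt .wrl} extension; the resulting file can then be inspected in one of those external viewers:
\begin{mmaCell}[moredefined={Sphere}]{Code} redBall = Graphics3D[{Red, Sphere[{0, 0, 0}, 1]}];
Export[NotebookDirectory[] <> "redBall.wrl", redBall]
\end{mmaCell}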
\subsection{Curves}
The next level of 3D design complexity uses knowledge of multivariable calculus to create models of curves and surfaces. A 3D model of a vector curve $\mathbf{r}(t)=\langle f(t),g(t),h(t)\rangle$ is made using {\tt ParametricPlot3D} with one input variable. For example, a basic helix can be created using the code
\begin{mmaCell}{Code} ParametricPlot3D[{t/Pi, Cos[t], Sin[t]}, {t, 0, 6 Pi}]
\end{mmaCell}
\mmaCellGraphics[ig={height=1in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a2-i1.png}
However, this is where the precision of an idealized mathematical model runs into the reality of what can be 3D printed. A helix is a one-dimensional curve embedded in three dimensions, so we need to add some thickness to create a printable model. We can do this using a {\tt PlotStyle} option that specifies the radius of a tube that will be swept out along the curve. \begin{mmaCell}[moredefined={Tube}]{Code} helix = ParametricPlot3D[{t/Pi, Cos[t], Sin[t]},
{t, 0, 6 Pi}, PlotStyle -> {Tube[0.1]},
Axes -> False, Boxed -> False, SphericalRegion -> True]; Import[Export[NotebookDirectory[] <> "helix.stl", helix]]
\end{mmaCell}
\mmaCellGraphics[ig={height=1in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a2-i2.png}
The {\tt Axes}, {\tt Boxed}, and {\tt SphericalRegion} options improve visualization in the notebook by removing the default framing around the model and ensuring that when you rotate the model it does not resize the cell.
It is only on re-importing the model that we realize that the result is once again very faceted. We can use the {\tt PlotPoints} option twice to improve the quality of this 3D print. We specify how many points will be sampled along the curve (here 500), as well as how many points will be sampled in the circular cross-section of the tube (here 50). \begin{mmaCell}[moredefined={Tube}]{Code} helixHD = ParametricPlot3D[{t/Pi, Cos[t], Sin[t]},
{t, 0, 6 Pi}, PlotStyle -> {Tube[0.1, PlotPoints -> 50]},
PlotPoints -> 500,
Axes -> False, Boxed -> False, SphericalRegion -> True]; Import[Export[
NotebookDirectory[] <> "helix.HD.stl", helixHD]]
\end{mmaCell} \mmaCellGraphics[ig={height=1in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a2-i3.png} (In your notebook the first {\tt PlotPoints} option will be {\color{red} red}.) The more points specified, the more detailed the print will be; at the same time, this increases computation time and the size of the exported file. The two plots {\tt helix} and {\tt helixHD} look the same in the notebook; it is only upon re-importing the STL models that we can see that the fidelity of the model has greatly improved.
{\tt ParametricPlot3D} also allows you to 3D print a spline specified by a set of control points. The primitive for a spline is a {\tt BSplineCurve}: \begin{mmaCell}[moredefined={Sphere,Thick,BSplineCurve,SplineClosed}]{Code} pts = {{0, 0, 0}, {1, 0, 0}, {0, 1, 0}, {0, 0, 1}}; Graphics3D[{
Blue, Map[Sphere[#, 0.03] &, pts],
Thick, Green, Line[Append[pts, pts[[1]]]],
Red, BSplineCurve[pts, SplineClosed -> True]}, Axes -> True, AxesOrigin -> {0, 0, 0}, Boxed -> False]
\end{mmaCell} \mmaCellGraphics[ig={height=1.5in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a2-i4.png} You can 3D print this curve by first converting the primitive to a parametric function on the domain $[0,1]$ by replacing {\tt BSplineCurve} by {\tt BSplineFunction} and then using {\tt ParametricPlot3D} as above. \begin{mmaCell}[moredefined={BSplineFunction,SplineClosed,Tube}]{Code} ParametricPlot3D[
BSplineFunction[pts, SplineClosed -> True][t],
{t, 0, 1}, PlotStyle -> {Tube[0.02]},
PlotRange -> All, Axes -> False, Boxed -> False]
\end{mmaCell} \mmaCellGraphics[ig={height=1in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a2-i5.png}
\subsection{Surfaces}
There are four ways to create a two-dimensional surface embedded in three dimensions, depending on how it is defined.
When the surface is the graph of a function $f(x,y)$ of two variables, use {\tt Plot3D}. You need to specify the two independent variables and their domains. By default {\tt Plot3D} only outputs the 2D surface; thicken it to make it 3D printable by specifying the {\tt PlotTheme} option to be {\tt "ThickSurface"}.
\begin{mmaCell}[moredefined={PlotTheme}]{Code} Plot3D[Sin[x + y^2], {x, -3, 3}, {y, -2, 2}, Mesh -> None,
PlotPoints -> 30, PlotTheme -> "ThickSurface"]
\end{mmaCell} \mmaCellGraphics[ig={height=1in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a3-i1.png}
That option does not allow you to specify a thickness; to do so instead use the hidden {\tt Extrusion} option to thicken the surface in the direction normal to the surface. (This option will show in {\color{red} red} in your notebook.) In the following example we also see the domain does not have to be a rectangle. (In your Mathematica code be sure to replace IN by $\in$ by typing \keys{esc} {\tt $\backslash$in} \keys{esc}.)
\begin{mmaCell}[morelocalconflict={Extrusion}]{Code} Plot3D[Sin[x + y^2], {x, y} IN Disk[{0, 0}, 2],
Mesh -> None, PlotPoints -> 30, Extrusion -> 0.25]
\end{mmaCell} \mmaCellGraphics[ig={height=1in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a3-i2.png}
If instead the surface is defined implicitly as the level surface of a function $f(x,y,z)=k$, use {\tt ContourPlot3D}:
\begin{mmaCell}[moredefined={ContourPlot3D},morelocalconflict={Extrusion}]{Code} ContourPlot3D[x^3 + y^2 - z^2 == 0,
{x, -2, 2}, {y, -2, 2}, {z, -2, 2},
Mesh -> None, PlotPoints -> 30, Extrusion -> 0.2]
\end{mmaCell} \mmaCellGraphics[ig={height=1.5in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a3-i3.png}
Otherwise, if the surface is defined parametrically as a function $f(u,v)$ of two parameters, use {\tt ParametricPlot3D} and specify the domain of each parameter. The following example is a torus; we specify the number of sample points for each parameter independently.
\begin{mmaCell}{Code} ParametricPlot3D[
{(3 + Cos[v]) Cos[u], (3 + Cos[v]) Sin[u], Sin[v]},
{u, 0, 2 Pi}, {v, 0, 2 Pi},
Mesh -> None, PlotPoints -> {100, 30}]
\end{mmaCell} \mmaCellGraphics[ig={height=1in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a3-i4.png}
That torus is the boundary of a 3D solid, so the file exported by Mathematica can be 3D printed. In general, a parametric surface may not be the boundary of a solid so the slicing software for 3D printing will tell you that your object is ``not manifold''. You will need to thicken those surfaces as we did before.
\begin{mmaCell}[morelocalconflict={Extrusion}]{Code} ParametricPlot3D[
{v Cos[u], v Sin[u], -Cos[v]},
{u, 0, 2 Pi}, {v, 0, 2 Pi},
Mesh -> None, PlotPoints -> {100, 30}, Extrusion -> .2]
\end{mmaCell} \mmaCellGraphics[ig={height=1in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a3-i5.png}
Last, you can create a two-dimensional spline surface using {\tt BSplineSurface}. Here is the example from the Wolfram Documentation Center that generates a random surface over the rectangular domain $[1,5]\times[1,5]$. \begin{mmaCell}[moredefined={BSplineSurface,RandomReal}]{Code} pts = Table[{i, j, RandomReal[{-1, 1}]}, {i, 5}, {j, 5}]; Graphics3D[BSplineSurface[pts]]
\end{mmaCell} \mmaCellGraphics[ig={height=1in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a3-i6.png} Again we need to convert the {\tt BSplineSurface} to a {\tt BSplineFunction} to create a 3D printable model. \begin{mmaCell}[moredefined={BSplineFunction},morelocalconflict={Extrusion}]{Code} ParametricPlot3D[BSplineFunction[pts][u, v],
{u, 0, 1}, {v, 0, 1}, Mesh -> None, Extrusion -> .2]
\end{mmaCell} \mmaCellGraphics[ig={height=1in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a3-i7.png}
\subsection{Getting to Yes: Meshes and Manual Discretization} \label{sec:discretize}
Mathematica uses mesh-based geometric regions to make three-dimensional objects 3D printable. This is similar to how complex numerical expressions are converted to a decimal \begin{mmaCell}[moredefined={Rasterize,RegularPolygon}]{Code} N[Sqrt[5 + Sqrt[3]]]
\end{mmaCell} \begin{mmaCell}{Output} 2.59462
\end{mmaCell} or how a two-dimensional scene is represented as a collection of pixels. \begin{mmaCell}[moredefined={Rasterize,RegularPolygon}]{Code} Rasterize[RegularPolygon[7], ImageSize -> 100]
\end{mmaCell} \mmaCellGraphics[ig={height=1in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a4-i1.png}
The process of approximating an ideal object by a discrete one is called discretization (or triangulation). This replaces the smooth object by a discrete object made up of cells: points, line segments, triangles, tetrahedra, and higher-dimensional simplices. There are two main ways to describe a discretized object: either by the cells that comprise it or by the cells of its boundary. Compare the following two discretizations of the circle:
\begin{mmaCell}[moredefined={DiscretizeRegion,BoundaryDiscretizeRegion}]{Code} {DiscretizeRegion[Disk[]],
BoundaryDiscretizeRegion[Disk[]]}
\end{mmaCell} \mmaCellGraphics[ig={height=1in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a4-i2.png}
In the first example, the circle is approximated as a collection of two-dimensional triangles. In the second example, the circle is approximated as the interior of a collection of one-dimensional line segments. In the same way, a three-dimensional form can be represented as a collection of three-dimensional tetrahedra or as the interior of a collection of two-dimensional triangles.
When you discretize a graphics object, use {\tt DiscretizeGraphics} or {\tt Boundary\textrm{-} DiscretizeGraphics}. When you discretize a shape that is defined as a region, use {\tt DiscretizeRegion} or {\tt BoundaryDiscretizeRegion}. The resulting objects are {\tt MeshRegion} objects or {\tt BoundaryMeshRegion} objects, respectively.
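As a minimal sketch of the naming convention, the first call below discretizes a graphics primitive and the second a region; both return {\tt BoundaryMeshRegion} objects:
\begin{mmaCell}[moredefined={BoundaryDiscretizeGraphics,BoundaryDiscretizeRegion,Ball}]{Code} {BoundaryDiscretizeGraphics[Cuboid[{0, 0, 0}, {1, 1, 1}]],
 BoundaryDiscretizeRegion[Ball[{0, 0, 0}, 1]]}
\end{mmaCell}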
Mathematica has a large number of built-in polyhedra, accessible using the command {\tt PolyhedronData}; their meshes can be accessed directly from the command by specifying {\tt "MeshRegion"} or {\tt "BoundaryMeshRegion"}.
\begin{mmaCell}[moredefined={PolyhedronData}]{Code} PolyhedronData["Dodecahedron", "BoundaryMeshRegion"]
\end{mmaCell} \mmaCellGraphics[ig={height=1in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a4-i3.png}
Since boundary representations are one dimension lower than cell representations, they are computationally simpler, so more of Mathematica's functionality applies. However, not all shapes have a nice boundary representation; in that case you will have to work with the shape's cell representation. Wolfram developers have been continually improving these commands; great strides have been made in the past few years.
When you use {\tt Export}, (I think) Mathematica tries its best to discretize each piece of your model, then combines everything together. This process sometimes goes awry, so it may make sense to discretize your objects first, use a {\tt Show} command to combine them, and export the result. Do note that it may be beneficial to discretize every single object individually instead of trying to discretize a collection of objects. Compare the following. \begin{mmaCell}[moredefined={RandomReal,DiscretizeGraphics,Sphere}]{Code} balls = Table[
Sphere[{i, j, 0}, RandomReal[]], {i, 10}, {j, 10}]; {DiscretizeGraphics[balls],
Show[Map[DiscretizeGraphics, balls]]}
\end{mmaCell} \mmaCellGraphics[ig={height=1.25in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a4-i4.png} The resolution of the second result is better because {\tt DiscretizeGraphics} is being applied to each sphere individually.
Furthermore, because {\tt Export} appears to try much harder than the various {\tt Discretize} commands, sometimes it is even beneficial to export and re-import each piece of the model and assemble those pieces together.
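Here is a minimal sketch of that round trip with two hypothetical pieces and arbitrary file names; the re-imported pieces are then assembled with {\tt Show} as above:
\begin{mmaCell}[moredefined={Sphere,Cylinder}]{Code} pieceA = Import[Export[NotebookDirectory[] <> "pieceA.stl",
   Graphics3D[Sphere[{0, 0, 0}, 1]]]];
pieceB = Import[Export[NotebookDirectory[] <> "pieceB.stl",
   Graphics3D[Cylinder[{{0, 0, 1}, {0, 0, 2}}, 0.3]]]];
Show[pieceA, pieceB]
\end{mmaCell}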
It is possible to specify the quality of a discretization of a 3D model by applying the {\tt MaxCellMeasure} option which specifies the maximum size for each highest-dimension cell. Compare:
\begin{mmaCell}[moredefined={BoundaryDiscretizeGraphics,Ball,MaxCellMeasure}]{Code} {BoundaryDiscretizeGraphics[Ball[], MaxCellMeasure->.01],
BoundaryDiscretizeGraphics[Ball[], MaxCellMeasure->.001],
BoundaryDiscretizeGraphics[Ball[], MaxCellMeasure->.0001]}
\end{mmaCell} \mmaCellGraphics[ig={height=1.25in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a4-i5.png}
As of Mathematica version 13, the various region objects can be imported into a {\tt Graphics3D} scene as primitives, which makes it possible to finalize each individual piece of the model and then color them independently for export as a WRL file, which is how the snowman is improved in Section~\ref{sec:snowman}.
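For instance, here is a small sketch of assembling two independently discretized (and hypothetical) pieces into one colored scene; the full workflow, including the colored export step, is the snowman code in Section~\ref{sec:snowman}.
\begin{mmaCell}[moredefined={BoundaryDiscretizeRegion,BoundaryDiscretizeGraphics,Ball}]{Code} ballMesh = BoundaryDiscretizeRegion[Ball[{0, 0, 0}, 1]];
cubeMesh = BoundaryDiscretizeGraphics[
  Cuboid[{1.2, -0.5, -0.5}, {2.2, 0.5, 0.5}]];
Graphics3D[{EdgeForm[None], Red, ballMesh, Blue, cubeMesh},
 Boxed -> False]
\end{mmaCell}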
In my experience, applying some combination of these techniques allows the vast majority of models to be exported by Mathematica. If you are still having issues, see Section~\ref{sec:RegionPlot} for another tool.
\subsection{Extracting Mesh Information}
There are two useful commands to situate your object in 3D space. {\tt RegionBounds} gives the smallest and largest $x$, $y$, and $z$ values over the whole object. (Do not confuse {\tt RegionBounds} with {\tt RegionBoundary}, which finds a region's geometric boundary.) And {\tt RegionCentroid} gives the centroid of the object. We observe that the following discretized ellipsoid is approximately $3.8$ units wide and is centered at $(0,0,0)$.
\begin{mmaCell}[moredefined={BoundaryDiscretizeGraphics,Ellipsoid,MaxCellMeasure,RegionBounds,RegionCentroid}]{Code} mesh = BoundaryDiscretizeGraphics[
Ellipsoid[{0, 0, 0}, {2, 1, 1}], MaxCellMeasure -> 0.2];
RegionBounds[mesh]
Map[#[[2]] - #[[1]] &, RegionBounds[mesh]]
Chop[RegionCentroid[mesh]]
\end{mmaCell}
\begin{mmaCell}{Output} \{\{-1.90211, 1.90211\}, \{-1., 1.\}, \{-1., 1.\}\} \{3.80423, 2., 2.\} \{0, 0, 0\}
\end{mmaCell}
As we build complexity into our models, it becomes useful to work directly with the internal structure of the mesh. Use {\tt MeshCoordinates} to extract the coordinates of the mesh and {\tt MeshPrimitives} to extract higher-dimensional pieces of the object, like its edges or faces. The second input is the dimension of the parts you want to extract.
\begin{mmaCell}[moredefined={MeshCoordinates,MeshPrimitives}]{Code} coords = MeshCoordinates[mesh]; edges = MeshPrimitives[mesh, 1]; faces = MeshPrimitives[mesh, 2];
\end{mmaCell}
These commands allow you to efficiently make a wireframe version of your object or algorithmically color the faces of your object.
\begin{mmaCell}[moredefined={Sphere,Tube}]{Code} thickness = 0.06; Graphics3D[{
Map[Sphere[#, thickness] &, coords],
Map[Tube[#, thickness] &, edges] }, Boxed -> False, SphericalRegion -> True]
\end{mmaCell} \mmaCellGraphics[ig={height=1in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a5-i1.png}
\begin{mmaCell}[moredefined={RegionCentroid}]{Code} colors = Map[Hue[(RegionCentroid[#][[3]]+1)/2] &, faces]; Graphics3D[MapThread[{#2, #1} &, {faces, colors}],
Boxed -> False, SphericalRegion -> True]
\end{mmaCell} \mmaCellGraphics[ig={height=1in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a5-i2.png}
Note that the wireframe object needs to include spheres at each vertex so that no gaps occur where two cylinders meet. Compare the 3D models generated with spheres and without: \begin{center} \includegraphics[height=1in]{hanusa-a5-i4.png}\qquad\qquad\qquad\includegraphics[height=1in]{hanusa-a5-i3.png} \end{center} The model without spheres only consists of the shell of each cylindrical edge. Because it is not solid, it would not be 3D printable.
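For reference, the sphere-less version can be produced from the same code by simply dropping the first {\tt Map}, roughly as follows:
\begin{mmaCell}[moredefined={Tube}]{Code} Graphics3D[Map[Tube[#, thickness] &, edges],
 Boxed -> False, SphericalRegion -> True]
\end{mmaCell}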
\subsection{Region Operations}
Once you have discretized your models (or pieces of your models) using the discretizing commands or by exporting and reimporting, you have {\tt MeshRegion} or {\tt BoundaryMeshRegion} objects and you can apply various region operations to them.
It is important to be able to change the size and orientation of your model. For instance, it may be easiest to work in one coordinate system to create your mathematical form and in a different coordinate system when printing. In addition, you will want to specify the dimensions of the piece in millimeters if you are uploading your model to a 3D printing service.
To change the size of your model use {\tt RegionResize}. If your second input is one number, Mathematica will scale the object so that its extent in the first coordinate equals that input, respecting the existing box ratios. If you instead specify a list of parameters, it will scale the object to fit into the given box, not respecting the existing box ratios.
If we want our model to be 100mm wide, we would write \begin{mmaCell}[moredefined={RegionResize,PolyhedronData}]{Code} mesh = PolyhedronData["Dodecahedron", "BoundaryMeshRegion"] RegionResize[mesh, 100]
\end{mmaCell} \mmaCellGraphics[ig={height=1in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a6-i1.png} or if we want to skew it to fit in a box that is 80mm $\times$ 50mm $\times$ 30mm, we would write \begin{mmaCell}[moredefined={RegionResize,PolyhedronData}]{Code} mesh = PolyhedronData["Dodecahedron", "BoundaryMeshRegion"] RegionResize[mesh, {80, 50, 30}]
\end{mmaCell} \mmaCellGraphics[ig={height=0.75in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a6-i2.png}
Use {\tt TransformedRegion} and the various {\tt Transform} commands to apply transformations to a region. Suppose we want to apply transformations to the basic cube: \begin{mmaCell}[moredefined={BoundaryDiscretizeGraphics}]{Code} cube = BoundaryDiscretizeGraphics[Cuboid[]];
\end{mmaCell} To rotate the object by a given angle $\theta$ around an axis of rotation (with direction $v$ and passing through point $p$), input these values into {\tt RotationTransform}. \begin{mmaCell}[moredefined={TransformedRegion,RotationTransform}]{Code} rotatedcube = TransformedRegion[cube,
RotationTransform[Pi/4, {0, 0, 1}, {1/2, 1/2, 0}]]; Graphics3D[{Green, cube, Red, rotatedcube},
Boxed -> False]
\end{mmaCell} \mmaCellGraphics[ig={height=1in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a6-i3.png} To translate the object along a vector $v$, input that vector into {\tt Translation\textrm{-} Transform}. \begin{mmaCell}[moredefined={TransformedRegion,TranslationTransform}]{Code} translatedcube = TransformedRegion[cube,
TranslationTransform[{0.5, 0.5, 0.5}]]; Graphics3D[{Green, cube, Red, translatedcube},
Boxed -> False]
\end{mmaCell} \mmaCellGraphics[ig={height=1.5in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a6-i4.png}
Combining a few of these techniques is useful to orient your object. For example, if you want to move the center of your object to the origin, apply a translation equal to the negative of its centroid: \begin{mmaCell}[moredefined={TransformedRegion,TranslationTransform,RegionCentroid}]{Code} TransformedRegion[mesh,
TranslationTransform[-RegionCentroid[mesh]]]
\end{mmaCell}
To intersect, merge, or subtract two regions, use {\tt RegionIntersection}, {\tt Region\textrm{-} Union}, and {\tt RegionDifference}. These commands work well in 2D. On the other hand, it has been hit or miss whether these commands work on 3D MeshRegion objects. Starting in version 12.1 and further improved in version 13 of Mathematica, these boolean operations work more often for 3D boundary representations. \begin{mmaCell}[moredefined={BoundaryDiscretizeGraphics,Ball,RegionResize,PolyhedronData,RegionUnion,RegionIntersection,RegionDifference}]{Code} ball = BoundaryDiscretizeGraphics[
Graphics3D[Ball[{0, 0, 0}, 1]]]; cube = BoundaryDiscretizeGraphics[
Graphics3D[Cuboid[{0.1, -0.5, 0.1}, {1, 0.5, 1}]]]; dodec = RegionResize[
PolyhedronData["Dodecahedron", "BoundaryMeshRegion"], 2]; {RegionUnion[ball, cube],
RegionUnion[dodec, cube],
RegionIntersection[ball, cube],
RegionDifference[ball, cube],
RegionDifference[dodec, cube]}
\end{mmaCell} \mmaCellGraphics[ig={height=0.9in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a6-i5.png}
\subsection{RegionPlot3D} \label{sec:RegionPlot}
Another command that can achieve high resolution models is {\tt RegionPlot3D}. It can be very computationally intensive but it is sometimes the only command that works. This command takes as input a boolean expression that defines your desired region and the domain that you want sampled. {\tt RegionPlot3D} gives all the points in the specified domain that satisfy your given requirements.
For example, if you want to find the intersection of two unit spheres centered at $(0,0,0)$ and $(1,0,0)$, you specify the inequality that defines each sphere and use {\tt \&\&} (the syntax for `and') to indicate that you want their intersection, where both inequalities are true. \begin{mmaCell}[moredefined={RegionPlot3D}]{Code} RegionPlot3D[
(x^2 + y^2 + z^2 <= 1) && ((x - 1)^2 + y^2 + z^2 <= 1),
{x, -1, 2}, {y, -1.5, 1.5}, {z, -1.5, 1.5}]
\end{mmaCell} \mmaCellGraphics[ig={height=1.5in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a7-i1.png}
or use {\tt ||} (the syntax for `or') to indicate that you want their union, where at least one of the two inequalities is true. \begin{mmaCell}[moredefined={RegionPlot3D}]{Code} RegionPlot3D[
(x^2 + y^2 + z^2 <= 1) || ((x - 1)^2 + y^2 + z^2 <= 1),
{x, -1, 2}, {y, -1.5, 1.5}, {z, -1.5, 1.5}]
\end{mmaCell} \mmaCellGraphics[ig={height=1.5in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a7-i2.png}
{\tt RegionPlot3D} is a nice way to create 3D models of surfaces defined by an equation $z=f(x,y)$ that have a flat base for display. We do that by specifying the region as the set of points that lie below the curve. \begin{mmaCell}[moredefined={RegionPlot3D}]{Code} block = RegionPlot3D[Sin[x] + Sin[y] > z,
{x, 0 - Pi/2, 6 Pi - Pi/2}, {y, 0 - Pi/2, 6 Pi - Pi/2},
{z, -3, 3}, BoxRatios -> Automatic]; Import[Export[NotebookDirectory[] <> "Bumpy.stl", block]]
\end{mmaCell} \mmaCellGraphics[ig={height=1in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a7-i3.png} I set the option {\tt BoxRatios} to {\tt Automatic} so that the axes are scaled to ensure that the unit distance in each direction is the same.
We see that the quality of the surface could be improved. We can specify the number of points that Mathematica should sample in each direction using the {\tt PlotPoints} option. \begin{mmaCell}[moredefined={RegionPlot3D}]{Code} blockHD = RegionPlot3D[Sin[x] + Sin[y] > z,
{x, 0 - Pi/2, 6 Pi - Pi/2}, {y, 0 - Pi/2, 6 Pi - Pi/2},
{z, -3, 3}, PlotPoints -> {100, 100, 100},
BoxRatios -> Automatic]; Import[Export[
NotebookDirectory[] <> "Bumpy.HD.stl", blockHD]]
\end{mmaCell} \mmaCellGraphics[ig={height=1in},pole2=vc,yoffset=.5ex]{Output}{hanusa-a7-i4.png} The more points you specify, the better the result. However, this becomes computationally intensive very quickly as you increase the number of points, and it rarely produces as precise a model as the earlier methods did.
\subsection{High Quality Snowman} \label{sec:snowman}
Below is code that creates a high quality, full color 3D model of the snowman from Section~\ref{sec:basic3D}. It took multiple iterations to make it work---I ran into errors when exporting to a {\tt .wrl} file until I converted every mesh to its polygonal representation, hence the use of {\tt MeshPrimitives}. \begin{mmaCell}[moredefined={BoundaryDiscretizeRegion,BoundaryDiscretizeGraphics,MaxCellMeasure,Ball,MeshPrimitives,Cylinder,Cone,RegionResize,PolyhedronData,RegionUnion,RegionIntersection,RegionDifference}]{Code} snowball1 = MeshPrimitives[BoundaryDiscretizeRegion[
Ball[{0, 0, 0}, 1], MaxCellMeasure -> .0005], 2]; snowball2 = MeshPrimitives[BoundaryDiscretizeRegion[
Ball[{0, 0, 1.3}, 0.8], MaxCellMeasure -> .0005], 2]; snowball3 = MeshPrimitives[BoundaryDiscretizeRegion[
Ball[{0, 0, 2.4}, 0.6], MaxCellMeasure -> .0005], 2]; tophat1 = MeshPrimitives[BoundaryDiscretizeRegion[
Cylinder[{{0, 0, 2.8}, {0, 0, 2.9}}, 0.7],
MaxCellMeasure -> 0.0005], 2]; tophat2 = MeshPrimitives[BoundaryDiscretizeRegion[
Cylinder[{{0, 0, 2.9}, {0, 0, 3.5}}, 0.5],
MaxCellMeasure -> 0.0005], 2]; carrot = MeshPrimitives[BoundaryDiscretizeRegion[
Cone[{{0, -0.55, 2.4}, {0, -0.9, 2.4}}, 0.1],
MaxCellMeasure -> .00005], 2]; button1 = MeshPrimitives[BoundaryDiscretizeGraphics[
Cuboid[{0, -0.75, 1.3} - 0.1 {1, 1, 1},
{0, -0.75, 1.3} + 0.1 {1, 1, 1}]], 2]; button2 = MeshPrimitives[BoundaryDiscretizeGraphics[
Cuboid[{0, -0.7, 1.6} - 0.1 {1, 1, 1},
{0, -0.7, 1.6} + 0.1 {1, 1, 1}]], 2]; button3 = MeshPrimitives[BoundaryDiscretizeGraphics[
Cuboid[{0, -0.7, 1.0} - 0.1 {1, 1, 1},
{0, -0.7, 1.0} + 0.1 {1, 1, 1}]], 2]; snowmanHD = Graphics3D[{EdgeForm[None],
White, snowball1, snowball2, snowball3,
Black, tophat1, tophat2, button1, button2, button3,
Orange, carrot},
Boxed -> False, SphericalRegion -> True] Export[NotebookDirectory[] <> "snowmanHD.wrl", snowmanHD]
\end{mmaCell} You can interact with the final model on Sketchfab at \url{https://skfb.ly/ovIVI}. \begin{center}
\includegraphics[height=3in]{hanusa-a8-i1.png} \end{center}
\end{document}
\begin{document}
\title[Topology of definable sets in OAGs of burden 2]{Topological properties of definable sets in ordered Abelian groups of burden 2}
\author{Alfred Dolich and John Goodrick}
\thanks{The first author's research was partially supported by PSC-CUNY Grant \#63392-00 51. The second author would like to thank the Universidad de los Andes for granting him paid leave (Semestre de Trabajo Académico Independiente) during which part of this research was carried out.}
\address{Dept. of Math and CS \\ Kingsborough Community College (CUNY) \\ 2001 Oriental Blvd.\\ Brooklyn, NY 11235} \address{Department of Mathematics \\ CUNY Graduate Center \\ 365 5th Ave. \\ New York, NY 10016} \email{[email protected]}
\address{Departamento de Matemáticas \\ Universidad de los Andes \\ Carrera 1 No. 18A-12 \\ Bogotá, COLOMBIA 111711} \email{[email protected]}
\maketitle
\begin{abstract}
We obtain some new results on the topology of unary definable sets in expansions of densely ordered Abelian groups of burden $2$. In the special case in which the structure has dp-rank $2$, we show that the existence of an infinite definable discrete set precludes the definability of a set which is dense and codense in an interval, or of a set which is topologically like the Cantor middle-third set (Theorem~\ref{sec2main}). If it has burden $2$ and both an infinite discrete set $D$ and a dense-codense set $X$ are definable, then translates of $X$ must witness the Independence Property (Theorem~\ref{IP_translations}). In the last section, an explicit example of an ordered Abelian group of burden $2$ is given in which both an infinite discrete set and a dense-codense set are definable. \end{abstract}
\section{Introduction}
In this note we will study the topological properties of sets definable in densely ordered Abelian groups satisfying an extra model-theoretic hypothesis (having ``burden 2'') which in some sense limits the combinatorial complexity of combinations of instances of formulas (the precise definition will be recalled below). Typical examples of such groups are the structures $\mc{R}_1 = \langle \R; <, +, \Q \rangle$, the additive group of real numbers endowed with a unary predicate for the set $\Q$ of rationals, and $\mc{R}_2 = \langle \R; <, +, \Z\rangle$, the same group but with a predicate for the integers. In fact, both of the structures $\mc{R}_1$ and $\mc{R}_2$ are of \emph{dp-rank 2}, which is equivalent to having burden 2 and being NIP.\footnote{Note that it can be tricky to prove precise upper bounds on the burden or the dp-rank of a structure. See \cite{DG} for a detailed calculation of the dp-rank of the structures mentioned here.}
Recall that in an expansion of a divisible ordered Abelian group (or ``OAG'') of dp-rank 1, no infinite discrete subset of the domain can be definable (see \cite{Goodrick_dpmin}), nor can any dense and codense subset be definable (by a result of Simon \cite{S}). However, the examples $\mc{R}_1$ and $\mc{R}_2$ above show that neither of these results holds for OAGs of dp-rank 2. One of the new results of this article is that in an expansion of a divisible OAG of dp-rank 2, there cannot be \emph{both} a definable infinite discrete set and a definable dense-codense set (see Theorem~\ref{sec2main} below).
The goal of this article is to understand the topological properties of unary definable sets in an expansion of a densely ordered OAG $\mathcal{R} = \langle R; < , +, \ldots \rangle$ with burden at most $2$. Suppose that $X \subseteq R$ is definable in such a structure. The case when $X$ is open may be considered the ``nicest'' situation since the topological structure around any point in $X$ is as simple as possible. An expansion of a densely ordered OAG in which every infinite definable $X \subseteq R$ has interior is called a \emph{viscerally ordered structure}. In our previous work \cite{viscerality}, we undertook an extensive analysis of definable sets in a viscerally ordered structure, giving a cell decomposition theorem and showing that topological dimension has many desirable properties, justifying the intuition that this is the ``tamest'' possible case.
Suppose now that $\mathcal{R}$ is \emph{not} visceral and that furthermore $\textup{Th}(\mathcal{R})$ has finite burden. Let $X \subseteq R$ be definable with empty interior. Since infinite definable discrete sets in $\mathcal{R}$ cannot have accumulation points (see \cite[Corollary 2.13]{DG}), it follows easily that $X$ can be partitioned as $X=X_1 \cup X_2 \cup X_3$ with each $X_i$ definable so that:
\begin{enumerate}
\item $X_1$ is either empty or dense and codense in an open definable set $U$ with $X_1 \subset U$;
\item $X_2$ is either empty or discrete (and possibly finite); and
\item $X_3$ is either empty or an infinite definable set which is nowhere dense and has no isolated points.
\end{enumerate}
In case (3), if $X_3$ is non-empty then the topological closure $\overline{X}_3$ is what we call a \emph{Cantor-like} definable set, namely it is a nonempty set which is closed, nowhere dense, and has no isolated points.
We conjecture that in an expansion of a divisible OAG of dp-rank $2$, if there is an infinite definable set satisfying one of the three conditions above (being discrete, being dense-codense in an interval, or being Cantor-like), then there cannot be any other infinite definable set satisfying either of the other two (giving a basic trichotomy). Though we cannot quite prove this, in the next section we will show that the existence of an infinite definable discrete set precludes the definability of either a dense-codense or an infinite Cantor-like set (Theorem~\ref{sec2main}). In the case when $\mathcal{R}$ has burden $2$ and is definably complete, there can never be a definable Cantor-like set (Theorem~\ref{no_cantor_dc} below). Finally, in any OAG with burden $2$, if there is both an infinite discrete set $D$ definable in such a structure and also a definable $X$ which is dense and codense in some interval, then the Independence Property can be witnessed by translations of $X$ (Theorem~\ref{IP_translations}).
After proving the above general results in Section 2, the final section (Section 3) is devoted to the study of a concrete example of a divisible OAG of burden 2 in which both an infinite discrete set and a dense codense set are definable.
Note that the present work focuses on the topological properties of definable sets. Given an expansion of an OAG of burden $2$ in which an infinite discrete set $D$ is definable, it turns out that $D$ must have a very simple ``arithmetical'' structure, similar to the subsets of $\Z$ definable in Presburger arithmetic. For much more on this topic, see our previous article \cite{DG} or our recent preprint \cite{discrete_burden_2}, which can be seen as a companion to the present work.
\subsection{Notation and basic definitions}
Mostly we will follow standard notational conventions from model theory (as in \cite{Guide_NIP}, for example). ``Formulas'' and ``models'' are as in first-order logic and overlined variables ($\overline{x}, \overline{a}, \ldots$) denote finite tuples. Unlike some authors, a plain variable such as $x$ (not $\overline{x}$) is always a single variable, and we rarely work in $T^{eq}$. Also note that ``definable'' for us always means ``definable over some set of parameters.''
The abbreviation OAG stands for Ordered Abelian Group, which is a structure $\langle G; +, < \rangle$ consisting of an Abelian group $\langle G; +\rangle$ endowed with a total ordering $<$ which is translation invariant ($x < y$ implies that $x+z < y + z$). Some OAGs are \emph{discretely ordered} and have a least positive element (such as $\langle \Z; +, < \rangle$), but in the present article all OAGs will be \emph{densely ordered}. Note that if an OAG is \emph{divisible} -- that is, for every $x \in G$ and every positive integer $n$, there is a $y \in G$ such that $ny = x$ -- then it is densely ordered, but there are examples of OAGs of finite dp-rank which are densely ordered and not divisible. See, for instance, \cite{Guide_NIP} or \cite{pure_OAGs} for more examples.
All topological properties of sets mentioned in this article (``open,'' ``dense,'' and so on) refer to the order topology generated by all open intervals.
At some points in Section 2 it will be useful to work in the Dedekind completion $\overline{R}$ of an OAG $\mathcal{R}$, for which the following notion from \cite{MMS} will be useful.
\begin{definition} \label{ded_sort} Suppose that $\mathcal{R} = \langle R; <, \ldots \rangle$ is a linearly ordered structure and $\{X_{\overline{a}} \, : \, \overline{a} \in Z\}$ is a definable family of subsets $X_{\overline{a}}$ of $R$, with $Z \subseteq R^n$ definable over $\emptyset$ and $X_{\overline{a}} = \{b \in R \, : \, \mathcal{R} \models \varphi(b; \overline{a}) \}$ for some formula $\varphi(x; \overline{y})$. Consider the relation $\sim$ on $n$-tuples from $Z$ such that $\overline{a}_1 \sim \overline{a}_2$ if and only if
$$ \forall y_1 \in X_{\overline{a}_1} \exists y_2 \in X_{\overline{a}_2} \left( y_1 < y_2 \right) \wedge \forall y_2 \in X_{\overline{a}_2} \exists y_1 \in X_{\overline{a}_1} \left( y_2 < y_1 \right). $$
The relation $\sim$ is a definable equivalence relation on $Z$ which we informally think of as expressing that $\sup(X_{\overline{a}_1}) = \sup(X_{\overline{a}_2})$ where the suprema are calculated in the Dedekind completion $\overline{R}$ of $\mathcal{R}$, and defined so that $\sup(\emptyset) = - \infty$ and $\sup(X_{\overline{a}}) = + \infty$ when $X_{\overline{a}}$ is unbounded. Then $Z / \sim$, which is a sort in $\mathcal{R}^{eq}$, is called a \emph{sort in $\overline{R}$.} We naturally identify such a sort with a subset of the Dedekind completion $\overline{R}$ of $\mathcal{R}$ with the induced ordering.
Thinking of sorts in $\overline{R}$ as sorts in $\mathcal{R}^{eq}$, we can also talk about definable subsets of sorts in $\overline{R}$, and of functions to and from such sorts. \end{definition}
\begin{definition} If $\mathcal{R} = \langle R; <, +, \ldots \rangle$ is an expansion of an OAG, then $\mathcal{R}$ is \emph{definably complete} if for every nonempty definable subset $X \subseteq R$ which has an upper bound, $\sup(X) \in R$. \end{definition}
The next observation (\cite[Proposition 2.2]{ivp}) will occasionally be useful.
\begin{fact} \label{DC_divisible} If $\mathcal{R}$ is an expansion of a densely ordered OAG which is definably complete, then $\mathcal{R}$ is divisible. \end{fact}
The less commonly used definitions we need are those of \emph{burden} and \emph{dp-rank}. These notions originally go back to Shelah \cite{strong_dep}, but we will use the versions as given by Adler \cite{adler_strong_dep}. We recall them briefly here. Below, ``$T$'' always denotes some complete theory.
\begin{definition} An \emph{ict-pattern of depth $\kappa$} is a sequence $\{ \varphi_i(\overline{x}; \overline{y}_i) \, : \, i < \kappa \}$ of formulas and a sequence $\{\overline{a}_{i,j} \, : \, i < \kappa, j < \omega \}$ of tuples from some model $\mathcal{M} \models T$ such that for every function $\eta \, : \, \kappa \rightarrow \omega$, the partial type
\begin{equation} \{\varphi_i(\overline{x}; \overline{a}_{i, j})^{\textup{if } j = \eta(i)} \, : \, i < \kappa, j < \omega \} \end{equation} is consistent, where the exponent ``$\textup{if } j = \eta(i)$'' means that the formula is negated if $j \neq \eta(i)$. If $p(\overline{x})$ is a partial type, an ict-pattern as above is \emph{in $p(\overline{x})$} if every partial type as in (1) is consistent with $p(\overline{x})$.
The partial type $p(\overline{x})$ has \emph{dp-rank less than $\kappa$} if there is \textbf{no} ict-pattern of depth $\kappa$ in $p(\overline{x})$. If the least $\kappa$ such that the dp-rank of $p(\overline{x})$ is less than $\kappa$ is a successor cardinal, say $\kappa = \lambda^+$, then we say that the dp-rank of $p(\overline{x})$ is $\lambda$.
The dp-rank of the theory $T$ is the dp-rank of the partial type $x=x$ (in a single free variable $x$), and $T$ is \emph{dp-minimal} if its dp-rank is $1$. \end{definition}
\begin{definition} An \emph{inp-pattern of depth $\kappa$} is a sequence $\{ \varphi_i(\overline{x}; \overline{y}_i) \, : \, i < \kappa \}$ of formulas, a sequence $\{k_i \, : \, i < \kappa\}$ of positive integers, and a sequence $\{\overline{a}_{i,j} \, : \, i < \kappa, j < \omega \}$ of tuples from some model $\mathcal{M} \models T$ such that:
$\bullet$ For each $i < \kappa$, the ``$i$-th row'' $$\{\varphi_i(\overline{x}; \overline{a}_{i, j}) \, : \, j < \omega \}$$ is $k_i$-inconsistent; and
$\bullet$ For each function $\eta \, : \, \kappa \rightarrow \omega$, the partial type
\begin{equation} \{ \varphi_i(\overline{x}; \overline{a}_{i, \eta(i)}) \, : \, i < \kappa\} \end{equation} is consistent.
If $p(\overline{x})$ is a partial type, an inp-pattern as above is \emph{in $p(\overline{x})$} if every partial type as in (2) is consistent with $p(\overline{x})$.
The partial type $p(\overline{x})$ has \emph{burden less than $\kappa$} if there is \textbf{no} inp-pattern of depth $\kappa$ in $p(\overline{x})$. If the least $\kappa$ such that the burden of $p(\overline{x})$ is less than $\kappa$ is a successor cardinal, say $\kappa = \lambda^+$, then we say that the burden of $p(\overline{x})$ is $\lambda$.
The burden of the theory $T$ is the burden of the partial type $x=x$ (in a single free variable $x$), and $T$ is \emph{inp-minimal} if its burden is $1$.
\end{definition}
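To illustrate these definitions, here is a sketch of one standard inp-pattern of depth $2$ in the structure $\mc{R}_1 = \langle \R; <, +, \Q \rangle$ from the introduction, witnessing that its burden is at least $2$: take $\varphi_0(x; y_0, y_1)$ to be the formula $y_0 < x < y_1$ with parameters $\overline{a}_{0,j}$ chosen so that the resulting intervals are pairwise disjoint, and $\varphi_1(x; y)$ to be the formula $x - y \in \Q$ with parameters $a_{1,j}$ lying in pairwise distinct cosets of $\Q$. Each row is $2$-inconsistent (disjoint intervals in the first row, disjoint cosets in the second), while for any $\eta \, : \, 2 \rightarrow \omega$ the partial type $\{\varphi_0(x; \overline{a}_{0,\eta(0)}), \varphi_1(x; a_{1,\eta(1)})\}$ is consistent, since every coset of $\Q$ is dense in $\R$. By the same disjointness, this array is also an ict-pattern of depth $2$, so the dp-rank of $\mc{R}_1$ is at least $2$ as well.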
\begin{fact} (Adler, \cite{adler_strong_dep}) The dp-rank of a theory is less than some cardinal $\kappa$ if and only if $T$ is NIP. In case $T$ is NIP, the dp-rank of $T$ is equal to the burden of $T$. In particular, $T$ is dp-minimal if, and only if, $T$ is both NIP and inp-minimal. \end{fact}
To mention some related work, fields of finite dp-rank have been recently classified by Johnson, who also showed that any valued field with finite dp-rank is Henselian \cite{Johnson_dp_finite}. Interesting examples of finite burden structures which are not finite dp-rank include pseudo real-closed fields \cite{samy_prcf}. For more background on these concepts and how they relate to NIP, see the introduction to our companion article \cite{discrete_burden_2} or the survey \cite{goodrick_survey}.
\section{Topological properties of definable sets in burden-2 OAGs}
In this section we will prove the general topological results for OAGs of burden 2 mentioned in the introduction.
Throughout this section, we will work under the following assumptions, unless otherwise stated:
\begin{itemize}
\item $\mathcal{R} = \langle R; <, +, \ldots\rangle$ is an expansion of a densely-ordered OAG with complete theory $T$;
\item $\mathcal{R}$ is sufficiently saturated (generally just $|T|^+$-saturated will be enough);
\item The burden of $\mathcal{R}$ is at most $2$.
\end{itemize}
Note that in this section, we do not generally assume that $\mathcal{R}$ is definably complete (unless we explicitly say so).
\begin{lem} \label{rank_1_interval} Suppose that $I \subseteq R$ is an interval such that $I$ has burden $1$. If $X \subseteq I$ is definable and nowhere dense, then $X$ is finite. \end{lem}
\begin{proof} This is essentially the same result as Lemma~3.3(1) of \cite{Goodrick_dpmin}, except that we allow $I$ to be any interval instead of the whole universe $R$. The same proof as in \cite{Goodrick_dpmin} goes through, working within $I$. \end{proof}
\begin{lem} \label{rank_2_intervals} If there is an infinite discrete set definable in $\mathcal{R}$ then there is $\epsilon>0$ so that $(0,\epsilon)$ has burden $1$. \end{lem}
\begin{proof} Suppose to the contrary that $D \subseteq R$ is infinite, discrete, and definable, and that for every $\epsilon > 0$ in $R$ there is an inp-pattern of depth $2$ consistent with $(0, \epsilon)$.
Now using $\omega$-saturation of $\mathcal{R}$ we can select an increasing sequence of elements $\{a_i \, : \, i \in \omega\} \subseteq D$ and an $\epsilon > 0$ such that for every $i$ we have $(a_i - 2 \epsilon, a_i + 2 \epsilon) \cap D = \{a_i\}$. We can now construct an inp-pattern of depth $3$ by attaching translated copies of the inp-pattern within $(0, \epsilon)$ onto each point $a_i$ and adding a third row consisting of pairwise disjoint intervals, leading to a contradiction.
More precisely, if $\varphi_0(x, \overline{b}_{0,j})$ and $\varphi_1(x, \overline{b}_{1,j})$ witness an inp-pattern of depth $2$ consistent with $(0, \epsilon)$, then for each $\ell \in \{0,1\}$ let $\varphi'_\ell(x, \overline{b}_{\ell,j})$ be the formula expressing ``there is a unique point $a \in D$ such that $a < x < a + \epsilon$, and for this unique point $a$, the formula $\varphi_\ell(x-a, \overline{b}_{\ell,j})$ holds.'' These will be the first two rows of our inp-pattern, and the third row will consist of the pairwise disjoint intervals $I_i = (a_i - \epsilon, a_i + \epsilon)$. The inconsistency of each row is easy to check, and if $c_{i,j} \in (0, \epsilon)$ satisfies the formula $\varphi_0(x, \overline{b}_{0,i}) \wedge \varphi_1(x, \overline{b}_{1,j})$, then for any $k \in \omega$ the point $a_k + c_{i,j}$ satisfies $\varphi'_0(x, \overline{b}_{0,i}) \wedge \varphi'_1(x, \overline{b}_{1,j})$ and lies within the interval $I_k$. \end{proof}
This lemma has an immediate and useful corollary:
\begin{cor} \label{no_Cantor} If there is $X \subseteq R$ definable, infinite, and discrete then there is no definable Cantor-like set in $\mathcal{R}$. \end{cor}
\begin{proof} By the previous Lemma, there is $\epsilon > 0$ such that $(0,\epsilon)$ has burden $1$. Suppose for contradiction that there is $Y$ definable and Cantor-like. Translating $Y$ as necessary, we may assume that $Y \cap (0,\epsilon) \neq \emptyset$; and intersecting $Y$ with a closed subinterval of $(0, \epsilon)$, we may further assume that there is a Cantor-like definable subset of $(0, \epsilon)$. This contradicts Lemma~\ref{rank_1_interval}. \end{proof}
Using the above Corollary, we can rule out the existence of any Cantor-like definable set in the case when $\mathcal{R}$ additionally satisfies definable completeness. First we prove a simple lemma. Recall that if $X$ is a subset of an ordered structure, a \emph{convex component} of $X$ is a maximal subset of $X$ which is convex.
\begin{lem} If $X$ is a Cantor-like subset of $R$, then $R \setminus X$ has infinitely many convex components. \end{lem}
\begin{proof} Recall that Cantor-like sets are nonempty by definition, so we may pick some $a \in X$. Given that $X$ has no isolated points, it is either the case that (i) for every positive $\epsilon \in R$, the interval $(a-\epsilon, a)$ contains a point of $X$, or (ii) for every positive $\epsilon \in R$, the interval $(a, a + \epsilon)$ contains a point of $X$. Without loss of generality we assume that (ii) occurs, and in case of (i) a similar proof will work.
Pick some $b_0 > a$. Since $X$ is nowhere dense, there is some element $c_0$ of $R \setminus X$ contained in the interval $(a, b_0)$. Given the assumption (ii) above, the convex component $C_0$ of $c_0$ in $R \setminus X$ cannot contain points arbitrarily close to $a$, and hence there is a point $b_1 \in R$ such that $a < b_1$ and $b_1$ is less than every element of $C_0$. Now repeat the argument above to find $c_1 \in R \setminus X$ contained in $(a, b_1)$, and continuing by induction we may find an infinite sequence of elements $c_0 > c_1 > c_2 > \ldots$ of $R \setminus X$ all in distinct convex components.
\end{proof}
\begin{thm} \label{no_cantor_dc} If $\mathcal{R}$ is a densely ordered, definably complete OAG with burden at most $2$, then there is no definable Cantor-like set in $\mathcal{R}$. \end{thm}
\begin{proof} Assume otherwise, and let $X \subseteq R$ be a definable Cantor-like set. By the previous lemma, $R \setminus X$ consists of infinitely many convex components, each of which is open (as $X$ is closed). By definable completeness, each convex component of $R \setminus X$ is an interval; we call these the \emph{complementary intervals}. All but at most two complementary intervals are bounded, and if $I = (a,b)$ is a bounded complementary interval, then we can use the fact that $\mathcal{R}$ is divisible (Fact~\ref{DC_divisible} above) to define its midpoint $I_m = \frac{a+b}{2}$. The collection of all such midpoints $I_m$ is itself definable and comprises an infinite discrete set, yielding a contradiction to Corollary~\ref{no_Cantor}. \end{proof}
Now we will focus on the case when $\mathcal{R}$ has dp-rank 2 (which, recall, is equivalent to NIP plus having burden $2$). In this case, we will show that if $R$ is divisible and defines an infinite discrete set, then it cannot also define a set which is dense and codense in some infinite interval.
To this end, we will use the \emph{Shelah expansion} $\mathcal{R}^{Sh}$ of the structure $\mathcal{R}$. This concept, and the facts below, are due to Shelah \cite{Dependent_Shelah}, but we will follow the notation and presentation of Simon \cite{Guide_NIP}.
\begin{definition} \label{Sh_expansion}
Suppose that $\mathcal{R} \prec \mathcal{U}$ and $\mathcal{U}$ is $|R|^+$-saturated. The \emph{Shelah expansion} $\mathcal{R}^{Sh}$ of $\mathcal{R}$ is the expansion of $\mathcal{R}$ with the following new predicates: for every partitioned formula $\varphi(\overline{x}; \overline{y})$ and every finite tuple $\overline{b} \in U^{|\overline{y}|}$, define a predicate $S_{\varphi(\overline{x}; \overline{b})} (\overline{x})$ on $R^{|\overline{x}|}$ such that $$\mathcal{R}^{Sh} \models S_{\varphi(\overline{x}; \overline{b})} (\overline{a}) \Leftrightarrow \mathcal{U} \models \varphi(\overline{a}; \overline{b}).$$
\end{definition}
The subsets of $R^n$ defined by the new basic predicates $ S_{\varphi(\overline{x}; \overline{b})}$ as above are called \emph{externally definable sets}. An important example for our purposes is that if $C \subseteq R$ is convex, then using $|R|^+$-saturation we may find $a, b \in U$ such that $(a,b) \cap R = C$, and thus $C$ is externally definable.
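To make this concrete with a standard (purely illustrative) example outside the setting of this paper: in the ordered divisible Abelian group $\langle \Q; +, < \rangle$, if $b$ is an element of an $|\Q|^+$-saturated elementary extension realizing the cut of $\sqrt{2}$, then the predicate $S_{x < b}$ defines the set $\{q \in \Q \, : \, q < \sqrt{2}\}$ in the Shelah expansion. This set is externally definable, but it is not definable in $\langle \Q; +, < \rangle$ itself, since by o-minimality every definable convex subset of that structure is an interval with endpoints in $\Q \cup \{\pm\infty\}$.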
The next fact summarizes the important basic properties of Shelah expansions.
\begin{fact} \label{Sh_expansion_properties} (\cite{Dependent_Shelah}, and see also \cite{Guide_NIP}) Suppose that the complete theory of $\mathcal{R}$ is NIP. \begin{enumerate} \item The subsets of $R^n$ which are definable in the Shelah expansion $\mathcal{R}^{Sh}$ are independent of the choice of the saturated extension $\mathcal{U}$ in Definition~\ref{Sh_expansion}, and hence we may talk about ``the'' Shelah expansion. \item The structure $\mathcal{R}^{Sh}$ admits elimination of quantifiers. \item The structure $\mathcal{R}^{Sh}$ is NIP. \item If $X \subseteq R^n$ is type-definable in $\mathcal{R}$, then the dp-rank of $X$ as calculated in $\mathcal{R}$ is equal to the dp-rank of $X$ as calculated in $\mathcal{R}^{Sh}$. \end{enumerate} \end{fact}
\begin{proof} While complete proofs of (1), (2), and (3) can be found in \cite{Guide_NIP}, we take the opportunity to explain how (4) follows from (2) and (3). Note that a fact similar to (4) has been claimed by Onshuus and Usvyatsov (see \cite{onsh_usv}) but only in the special case when the theory is dp-minimal.
Let $d_1$ be the dp-rank of $X$ as calculated in the original structure $\mathcal{R}$, and let $d_2$ be the dp-rank of $X$ as calculated in the Shelah expansion $\mathcal{R}^{Sh}$ (each of which exists since their theories are NIP). On the one hand, if $\kappa$ is any cardinal and $d_1 \geq \kappa$, then there is an ict-pattern of depth $\kappa$ in $X$ with parameters from some elementary extension of $\mathcal{R}$, and the same array of formulas is an ict-pattern of depth $\kappa$ in the expanded language of $\mathcal{R}^{Sh}$. Therefore $d_1 \leq d_2$.
On the other hand, suppose that there is an ict-pattern
\begin{equation} \{\varphi_i(\overline{x}; \overline{a}_{i,j}) \, : \, i < \kappa, \, j < \omega \} \end{equation} of depth $\kappa$ consistent with $X$ in some elementary extension $\mathcal{R}'$ of $\mathcal{R}^{Sh}$. By quantifier elimination, we may assume that
$$\varphi_i(\overline{x}; \overline{y}) = S_{\psi_i(\overline{x}; \overline{y}; \overline{b}_i)}(\overline{x}; \overline{y})$$
where $\overline{b}_i$ is a tuple of parameters from the $|R|^+$-saturated model $\mathcal{U} \succ \mathcal{R}$ used to define $\mathcal{R}^{Sh}$.
Working in $\mc{R}^{Sh}$, for any $m,n \in \mathbb{N}$, any $m$-element subset $\{l_1, \ldots, l_m\}$ of $\kappa$, and any formula $\theta(\overline{x})$ in $X$, there are parameters $\ob{c}_{i,j} \in R$ with $1 \leq i \leq m$ and $1 \leq j \leq n$ so that for any $\eta: \{1, \dots, m\} \to \{1, \dots, n\}$ there is $\ob{d}_{\eta} \in \theta(R)$ so that $$\mc{R}^{Sh} \models S_{\psi_{l_i}(\overline{x}; \overline{y}; \overline{b}_{l_i})}(\overline{d}_{\eta}; \overline{c}_{i,j}) \text{ if and only if } \eta(i)=j.$$ But then \[\mc{U} \models \psi_{l_i}(\ob{d}_{\eta}, \ob{c}_{i,j}, \ob{b}_{l_i}) \text{ if and only if } \eta(i)=j.\] Thus by compactness, in $\mc{U}$ the formulas $\psi_i(\ob{x}, \ob{y}, \ob{b}_i)$ form an ict-pattern with $\kappa$ rows consistent with $X$ and hence $d_1 \geq d_2$.
\end{proof}
The next fact is a slight generalization of a theorem of Simon \cite{S} which we will need in what follows. Simon proved it in the case when the entire structure is a dp-minimal divisible OAG, but the same proof can be relativized to convex definable subgroups $G$ to yield:
\begin{fact} \label{Simon_interior} If $G \subseteq R$ is a type-definable convex divisible subgroup of $R$, $\textup{dp-rk}(G) = 1$, and $X \subseteq G$ is infinite and definable, then $X$ has nonempty interior. \end{fact}
\begin{proof} Suppose that $G$ is as in the statement (type-definable, convex, divisible, and with $\textup{dp-rk}$ $1$). By the comment just before Fact~\ref{Sh_expansion_properties}, the set $G$ is externally definable. By Fact~\ref{Sh_expansion_properties}~(4), working in $\mathcal{R}^{Sh}$, the dp-rank of $G$ is still $1$. Now consider the structure $\mathcal{G}$ whose universe is the set $G$ and with the induced definable structure: that is, the basic predicates in the language for $\mathcal{G}$ represent sets of the form $G^n \cap Z$ where $Z \subseteq R^n$ is $\emptyset$-definable in the language of $\mathcal{R}$. The fact that $\textup{dp-rk}(G) = 1$ as calculated in $\mathcal{R}^{Sh}$ implies that the theory of the structure $\mathcal{G}$ is dp-minimal. Also the set $X \subseteq G$ is definable in $\mathcal{G}$. Now we can apply Theorem~3.6 of \cite{S} to conclude that $X$ has nonempty interior, as we wanted. \end{proof}
Now we state our main result on divisible OAGs of dp-rank $2$.
\begin{thm}\label{sec2main} If $\mathcal{R}$ is an expansion of a divisible ordered Abelian group of dp-rank at most $2$ and there is an infinite definable discrete set in $\mathcal{R}$ then there is no $X$ definable which is dense and codense in an interval $I$ of $\mathcal{R}$ nor is there a definable Cantor-like set. \end{thm}
\begin{proof} Say $X \subseteq R$ is $\mathcal{R}$-definable. By Corollary~\ref{no_Cantor}, we know that $X$ is not Cantor-like. It only remains to consider the case when $X$ is dense in some interval $I$.
First pick $\epsilon \in R$ as in the conclusion of Lemma~\ref{rank_2_intervals} so that $(-\epsilon, \epsilon)$ has dp-rank $1$. Now let $G$ be the subset of $R$ defined as
$$G = \bigcap_{n \in \N} \left(-\frac{\epsilon}{n+1}, \frac{\epsilon}{n+1} \right)$$ and note that $G$ is a convex subgroup of $R$. By $\omega$-saturation, $G$ is infinite. Also, $G$ is definable in $\mathcal{R}^{Sh}$, so by Fact~\ref{Sh_expansion_properties}, it has dp-rank $1$ as calculated in the Shelah expansion.
Suppose that $X$ is dense and codense in $I$. Translating and truncating $I$ as necessary, we may assume that $I \subseteq G$. But then $X \cap I$ is an infinite definable subset of $G$ which has empty interior, contradicting Fact~\ref{Simon_interior}.
\end{proof}
We have considerably more precise results if we also assume that $\mathcal{R}$ is definably complete.
\begin{cor}\label{discrete_interior_dichotomy} Suppose that $\mathcal{R}$ is an expansion of a definably complete divisible OAG of dp-rank at most $2$ in which an infinite discrete set is definable. Then for any definable $X \subseteq R$, either $X$ is discrete or $X$ has nonempty interior. \end{cor}
\begin{proof} Suppose that $X \subseteq R$ is definable and has empty interior. By Theorem~\ref{sec2main}, $X$ is nowhere dense. By Corollary~2.13 of \cite{DG}, it follows that $X$ is discrete. \end{proof}
\begin{cor}\label{opencore} Suppose that $\mathcal{R}$ is a definably complete expansion of a divisible ordered Abelian group with dp-rank at most $2$. If there is $X \subseteq R$ definable which is dense and codense in some interval then any model of $T=\textup{Th}{(\mc{R})}$ has o-minimal open core. \end{cor}
\begin{proof} We claim that the theory of $\mathcal{R}$ has uniform finiteness: otherwise, there would exist a formula $\varphi(x; \overline{y})$ such that for every $n \in \N$, there are parameters $\overline{b}_n$ such that the set defined by $\varphi(x; \overline{b}_n)$ is finite and of size at least $n$. Note that there is a first-order formula $\theta(\overline{y})$ which expresses the property ``the set defined by $\varphi(x; \overline{y})$ is topologically discrete,'' and since any finite set is discrete, $\theta(\overline{b}_n)$ holds for each $n$. Thus every finite subset of
$$\{\exists^{\geq m} x \, \, \varphi(x; \overline{y}) \, : \, m \in \N \} \cup \{\theta(\overline{y}) \}$$ is satisfiable by some tuple $\overline{b}_n$, and so by compactness and $\omega$-saturation there is a tuple $\overline{b}$ such that $\varphi(x; \overline{b})$ defines an infinite discrete set, contradicting Theorem~\ref{sec2main}.
Therefore by~\cite[Theorem A]{DMS}, $\mathcal{R}$ has o-minimal open core. \end{proof}
We note that Theorem \ref{sec2main} no longer holds once the dp-rank of $\mathcal{R}$ exceeds $2$: for example, in \cite{DG} we have shown that the structure $\langle \R; +, <, \Q, \Z\rangle$ has dp-rank $3$.
For the remainder of this section, we return to the general situation when $\mc{R}$ is only assumed to have burden $2$ (rather than dp-rank $2$). Once again adapting arguments of Simon from \cite{S} allows us to obtain information on dense-codense definable sets in the case when there is also an infinite definable discrete set.
We begin with a definition.
\begin{definition} \label{bounded_away_zero} Let $X \subseteq R$ be definable and $f:X \to \ob{R}$ be a definable function from $X$ to some sort in the Dedekind completion $\ob{R}$ of $\mathcal{R}$ (see Definition~\ref{ded_sort} above) with $f(x)>0$ for all $x \in X$. We say that \emph{$f$ is bounded away from $0$} if whenever $I$ is an interval so that $I \cap X$ is infinite there is $\epsilon>0$ and a subinterval $J \subseteq I$ with $J \cap X$ infinite so that $f(a)>\epsilon$ for all $a \in J$.
We say ``functions on $X$ are bounded away from $0$'' to mean that all definable $f: X \to \ob{R}$ with $f$ positive on $X$ are bounded away from $0$. \end{definition}
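As a simple illustration of the definition (not needed in what follows), consider an expansion of $\langle \R; +, < \rangle$ in which the integer part function is definable, and let $f(x) = x - \lfloor x \rfloor$ restricted to $X = \R \setminus \Z$. Then $f$ is positive on $X$ and is bounded away from $0$ in the above sense: any interval $I$ with $I \cap X$ infinite contains a subinterval $J$ which stays a fixed positive distance away from $\Z$, and on such a $J$ we have $f > \epsilon$ for an appropriate $\epsilon > 0$. Note that the definition only requires one such subinterval $J$; it does not require $f$ to be bounded below on all of $I \cap X$.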
\begin{lem} \label{bdd_away_from_zero_bd_1} If $X \subseteq R$ has burden $1$, then definable functions on $X$ are bounded away from $0$.
\end{lem}
\begin{proof} We will adapt the proof of \cite[Lemma 3.19]{Goodrick_dpmin}. Suppose that definable functions on $X$ are not bounded away from $0$, as witnessed by a definable function $f \, : \, X \rightarrow \overline{R}$ and an interval $I$ such that:
(*) $I \cap X$ is infinite, and for any positive $\epsilon$ and every subinterval $J$ with $X \cap J$ infinite, there is some $a \in X \cap J$ such that $f(a) \leq \epsilon$.
By saturation, we may pick a sequence $\langle J_i \, : \, i \in \omega \rangle$ of pairwise disjoint subintervals of $I$ such that for each $i$ the set $J_i \cap X$ is infinite. The property (*) transfers to subintervals of $I$ so (*) is true, \emph{mutatis mutandis}, of each $J_i$ as well.
Pick $\epsilon_0 > 0$ in $R$ arbitrarily. By (*), for each $i$ we can pick an element $a_{i,0} \in J_i \cap X$ such that
\begin{equation} f(a_{i,0}) < \epsilon_0. \end{equation}
Now we pick positive elements $\epsilon_j$ and $a_{i,j} \in J_i$ for each $j \in \omega$ by induction, as follows: suppose we have already selected elements
$$\epsilon_0 > \epsilon_1 > \ldots > \epsilon_j$$ and elements $a_{i,j} \in J_i$ such that $f(a_{i,j}) < \epsilon_j$. By saturation, we can pick $\epsilon_{j+1} \in R$ such that for every $i \in \omega$,
\begin{equation} 0 < \epsilon_{j+1} < f(a_{i,j}). \end{equation}
Finally, applying the property (*) again, for each $i \in \omega$ we can pick an element $a_{i,j+1} \in J_i \cap X$ such that $f(a_{i, j+1}) < \epsilon_{j+1}$.
Now we have elements $\langle a_{i,j} \, : \, (i,j) \in \omega \times \omega \rangle$ such that for every $(i,j) \in \omega \times \omega$,
$$\epsilon_{j+1} < f(a_{i,j}) < \epsilon_j.$$
From this, we can construct an inp-pattern of depth $2$, as follows: in the first row, we use formulas $\varphi_0(x; \overline{b}_i)$ expressing the fact that $x \in J_i \cap X$, and $\{\varphi_0(x; \overline{b}_i) \, : \, i \in \omega \}$ is $2$-inconsistent since the $J_i$ are pairwise disjoint. In the second row, we use formulas $\varphi_1(x; \epsilon_j, \epsilon_{j+1})$ expressing the property that
$$\epsilon_{j+1} < f(x) < \epsilon_j$$ and again $\{\varphi_1(x; \epsilon_j, \epsilon_{j+1}) \, : \, j \in \omega \}$ is $2$-inconsistent. Furthermore, for any $(i,j) \in \omega \times \omega$, the formula $$\varphi_0(x; \overline{b}_i) \wedge \varphi_1(x; \epsilon_j, \epsilon_{j+1})$$ is consistent since it is satisfied by $a_{i,j}$. Thus we have an inp-pattern of depth $2$ in $X$, contradicting the assumption that $X$ has burden $1$.
\end{proof}
The next lemma generalizes a key fact from \cite{Goodrick_dpmin} about definable functions in the dp-minimal case.
\begin{lem} \label{bdd_away_from_zero_discrete} If there is an infinite discrete set definable in $\mathcal{R}$, then functions on $R$ are bounded away from $0$.
\end{lem}
\begin{proof} Suppose, to the contrary, that $f : R \rightarrow \overline{R}$ is definable, $f(x) > 0$ for every $x \in R$, and $f$ is not bounded away from $0$. Then there is a nonempty interval $I$ such that for any $\epsilon > 0$ and any subinterval $J$ of $I$, there is $a \in J$ such that $f(a) \leq \epsilon$. Therefore, $f$ is not bounded away from $0$ on any subinterval of $I$. By Lemma~\ref{bdd_away_from_zero_bd_1}, any subinterval of $I$ has burden $2$. By Lemma~\ref{rank_2_intervals}, there is some interval $(0, \epsilon)$ of burden $1$, and shrinking $\epsilon$ as necessary, we may assume that $\epsilon$ is less than the diameter of $I$. Since burden is translation-invariant, we conclude that there is a subinterval of $I$ of burden $1$, a contradiction.
\end{proof}
We record an immediate consequence of Lemma \ref{bdd_away_from_zero_discrete}.
\begin{cor}\label{away_dense} Suppose there is an infinite discrete set definable in $\mathcal{R}$. Suppose that $I$ is an interval and $X \subseteq I$ is definable, dense, and codense in $I$. Then functions on $X$ are bounded away from $0$. \end{cor}
\begin{proof} If $g \, : \, X \rightarrow \overline{R}$ is definable and has positive values, pick any $a > 0$ and extend $g$ to a definable function $f \, : \, R \rightarrow \overline{R}$ by the rule that $f(x) = a$ when $x \notin X$. Now apply the previous Lemma to conclude that $f$, and hence $g$, is bounded away from $0$. \end{proof}
\begin{definition} For a definable set $X$ we write $a \sim_{X, \delta} b$ or simply $a \sim_\delta b$ for the equivalence relation on $R$ defined by $$ \forall \, \epsilon \in (-\delta, \delta) \left[ a + \epsilon \in X \Leftrightarrow b + \epsilon \in X \right],$$ and $a \sim_X b$ (or simply $a \sim b$) means that for some $\delta > 0$, we have $a \sim_\delta b$. \end{definition}
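For a concrete (purely illustrative) example, take $X = \Z$ in the structure $\langle \R; +, <, \Z \rangle$. If $a, b \in \Z$ then $a \sim_{X, 1/2} b$, since for $\epsilon \in (-1/2, 1/2)$ we have $a + \epsilon \in \Z$ exactly when $\epsilon = 0$, and likewise for $b$. If $a, b \notin \Z$ then $a \sim_{X, \delta} b$ for any positive $\delta$ smaller than the distances from $a$ and from $b$ to the nearest integer. Finally, if $a \in \Z$ and $b \notin \Z$ then $\epsilon = 0$ witnesses the failure of $a \sim_{X,\delta} b$ for every $\delta > 0$. Hence $\R / \sim_X$ has exactly two classes, namely $\Z$ and $\R \setminus \Z$, and $X / \sim_X$ has just one.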
First we note the following easy but useful fact:
\begin{lem} \label{translation_fact}
If $X$ is definable, $a, b \in R$, $a \sim_{X,\delta} b$, and $|\epsilon| < \delta$, then $a + \epsilon \sim_{X,\delta - |\epsilon|} b + \epsilon$. In particular, $a + \epsilon \sim_X b + \epsilon$. \end{lem}
\begin{lem} \label{single_class_reduction}
Let $X$ be definable. Fix any $a \in X$ and let $\widetilde{X} = \{b \in X \, : \, b \sim_X a\}$. Then $|\widetilde{X} / \sim_{\widetilde{X}} | = 1$. \end{lem}
\begin{proof} Let $b \in \widetilde{X}$, and we will show that $b \sim_{\widetilde{X}} a$. Choose $\delta > 0$ such that $b \sim_{X, \delta} a$ and suppose that $\epsilon \in (-\delta, \delta)$. By Lemma~\ref{translation_fact}, we have $ b + \epsilon \sim_X a + \epsilon$, so in particular $b + \epsilon \sim_X a$ if and only if $a + \epsilon \sim_X a$. In other words, $b + \epsilon \in \widetilde{X}$ if and only if $a + \epsilon \in \widetilde{X}$, so $b \sim_{\widetilde{X},\delta} a$. \end{proof}
Now we look for type-definable subgroups related to definable subsets of $R$. The proof of the next lemma is similar to that of Theorem~3.6 from \cite{S}.
\begin{lem} \label{single_class_group} Suppose that: \begin{enumerate} \item $X \subseteq R$ is definable; \item $I$ is an open interval such that $X \cap I$ is infinite; \item functions on $X \cap I$ are bounded away from $0$; \item $X \cap I$ is not discrete; and
\item $|(X \cap I) / \sim| = 1$. \end{enumerate}
Then for any $a \in X \cap I$, there is a nonzero type-definable convex subgroup $C$ of $(R, +)$ such that $(X - a) \cap C$ is also a subgroup of $(R, +)$.
\end{lem}
\begin{proof} Fix $a \in X \cap I$ and define $f: X \rightarrow \overline{R}$ to be the function $$f(x) = \sup \{ \delta \in R \, : \, x \sim_\delta a \}.$$ Note that $f(x)>0$ for every $x \in X \cap I$, since $|(X \cap I) / \sim| = 1$. Since functions on $X \cap I$ are bounded away from zero, we may pick an $\epsilon > 0$ and an open subinterval $J \subseteq I$ such that $X \cap J$ is infinite and $f(x) > \epsilon$ for every $x \in J \cap X$.
\begin{claim} There is a positive element $\epsilon_0 < \epsilon$ such that $f(x) > \epsilon_0$ for every $x \in (a - \epsilon_0, a+ \epsilon_0)$.
\end{claim}
\begin{proof}
First, we may assume that $f(a) > \epsilon$ (by replacing $\epsilon$ by a positive element less than $f(a)$ if necessary). Pick some $b \in X \cap J$ and some $\epsilon' > 0$ such that $a \sim_{\epsilon'} b$, $(b - \epsilon', b + \epsilon') \subseteq J$, and $\epsilon' \leq \epsilon$. Now pick a positive element $\epsilon''$ such that $2 \epsilon'' < \epsilon'$ and let $z$ be an arbitrary element of $(-\epsilon'', \epsilon'')$. Then we have
\begin{equation}
a + z \sim_{\epsilon''} b + z
\end{equation}
since $a \sim_{\epsilon'} b$ and using Lemma~\ref{translation_fact}. On the other hand, since $b + z \in J$, we also have that $f(b+z) > \epsilon > \epsilon''$ (by our initial choice of the interval $J$), and so
\begin{equation}
b + z \sim_{\epsilon'} a.
\end{equation}
By (6), (7), and transitivity, we conclude that $a+z \sim_{\epsilon''} a$. Therefore, given that $f(a) > \epsilon > \epsilon''$, it follows that for any $a + z \in (a - \epsilon'', a + \epsilon'')$, we have $f(a+z) \geq \epsilon''$. Then if $0 < \epsilon_0 < \epsilon''$ and $a + z \in (a - \epsilon_0, a + \epsilon_0)$, we have $f(a + z) > \epsilon_0$, as desired.
\end{proof}
Pick $\epsilon_0$ as in the Claim above and small enough so that $(a - \epsilon_0, a + \epsilon_0) \subseteq I$. Replace $I$ by the subinterval $(a - \epsilon_0, a + \epsilon_0)$ and note that all the hypotheses of the Lemma still hold (since (4) and (5) imply that $X \cap I$ has no isolated points, hence $X \cap (a - \epsilon_0, a + \epsilon_0)$ is infinite, and the rest are trivial).
Define
$$C = \bigcap_{n \in \N}\{x \in R : -\epsilon_0 < nx < \epsilon_0\}$$
and note that $C$ is closed under addition by the triangle inequality. Thus $C$ is a convex type-definable subgroup of $(R, +)$. Let $H = (X - a) \cap C$ and suppose that $g, h \in H$. Then $g + h \in C$. Furthermore, since $f(a+g) > \epsilon_0$ (by the conclusion of the Claim above), we have $a + g \sim_{\epsilon_0} a$. Hence by Lemma~\ref{translation_fact} and the fact that $|h| < \epsilon_0$, we obtain $$a + g + h \sim a + h .$$ But $a+h \in X$ since $h \in X - a$, so $g + h \in X - a$. This shows that $H$ is closed under addition. For closure under negation, if $g \in (X-a) \cap C$, then it is immediate that $-g \in C$, while $a + g \sim_{\epsilon_0} a$ and $|g| < \epsilon_0$ imply (using Lemma~\ref{translation_fact} again) that $$ a + g -g \sim a - g$$ $$\Rightarrow a \sim a-g,$$ and therefore $a-g \in X$, hence $-g \in X - a$.
\end{proof}
Finally we come to our generalization of Theorem~\ref{sec2main} in the case when $\mathcal{R}$ has burden $2$ instead of dp-rank $2$. The example in the subsequent section demonstrates the necessity of the extra hypothesis that $X / \sim$ is finite.
\begin{thm} \label{dense_finite_twiddle} If $\mathcal{R}$ is a divisible OAG of burden $\leq 2$ in which an infinite discrete set is definable, then there cannot be a definable set $X \subseteq R$ which is dense and codense in some nonempty interval and such that $X / \sim $ is finite. \end{thm}
\begin{proof}
Suppose towards a contradiction there is such a set $X$. Since $X / \sim $ is finite, there is some $a \in X$ and some nonempty interval $I$ in which $[a]_\sim$ is dense and codense. Let $Z = [a]_\sim \cap I$, and by Lemma~\ref{single_class_reduction}, we have that $| Z / \sim_Z | = 1$.
Since an infinite discrete set is definable in $\mathcal{R}$, by Corollary~\ref{away_dense}, functions on $Z$ are bounded away from zero. Fix any $a \in Z \cap I$, and we may apply Lemma~\ref{single_class_group} to obtain a nonzero convex subgroup $C$ of $\langle R; + \rangle$ such that $H := C \cap (Z -a)$ is a subgroup. By our assumptions on $Z$, $H$ is dense and codense in $C$. Pick any $g \in C \setminus H$.
\begin{claim} The set $\{\frac{g}{n} \, : \, n \in \N \setminus \{0\} \}$ contains representatives of infinitely many cosets of $H$. \end{claim}
\begin{proof} On the one hand, if the image of $g$ in $R / H$ has infinite order, then for any distinct $n, m \in \N \setminus \{0\}$, we have $mg - ng \notin H$, and hence $\frac{g}{n} - \frac{g}{m} = \frac{mg - ng}{nm} \notin H$, so we are done. Otherwise, let $k$ be the least positive natural number such that $k g \in H$, and note that if $\textup{GCD}(k,m) =1$, then $mg \notin H$. Thus whenever $1 \leq i < j$, $$\frac{g}{k^j} - \frac{g}{k^i} = \frac{(1 - k^{j-i} ) g}{k^j},$$ but $\textup{GCD}(1 - k^{j-i} , k) = 1$, so this difference cannot be in $H$. \end{proof}
By the Claim, the convex set $C$ intersects infinitely many cosets of the group $H$. Now we can easily build a depth-2 inp-pattern within any interval $J \subseteq C$, as follows: for the first row, let
\begin{equation} \varphi_0(x; a_{0,i}, b_{0,i}) := a_{0, i} < x < b_{0,i} \end{equation} (for $i < \omega$) be any family of formulas which define pairwise disjoint open subintervals of $J$; and for the second row, let
\begin{equation} \varphi_1(x; c_j) := x - c_j \in X \end{equation} with the parameters $\{c_j \, : \, j \in \omega\}$ chosen from $C$ which represent distinct cosets of $H$ (which is possible by the Claim). Since $H$ is a dense and codense subgroup of $C$, every coset $c_j + H$ is also dense and codense in $C$, and hence each pair $\varphi_0(x; a_i, b_i) \wedge \varphi_1(x; c_j)$ is consistent. Thus we have an inp-pattern of depth $2$ in any subinterval $J$ of $C$, contradicting Lemma~\ref{rank_2_intervals} and finishing the proof. \end{proof}
Thus we have established that for divisible $\mathcal{R}$ we cannot simultaneously have an infinite definable discrete set and a definable dense and codense set $X$ with $X/\sim$ finite. Next we consider the case in which $X \subseteq R$ is definable and $X / \sim$ is infinite. First we need a preliminary lemma:
\begin{lem} \label{dividing_acc_point} If there is an infinite definable discrete set in $\mathcal{R}$, then there is no definable $X \subseteq R$ such that $X$ divides over some elementary submodel $\mathcal{R}_0$ of $\mathcal{R}$ and $0$ is an accumulation point of $X$.
\end{lem}
\begin{proof} Otherwise, say there is such a set $X = X_{\overline{a}}$ defined over parameters $\overline{a}$. Pick any interval $I$ around $0$, and we will construct an inp-pattern of depth $2$ in $I$ as follows: first, since $X_{\overline{a}}$ divides, pick a set of $\mathcal{R}_0$-conjugates $\{X_{\overline{a}_i} \, : \, i \in \omega\}$ of $X_{\overline{a}}$ which are $k$-inconsistent for some $k$, and this will be the first row of the inp-pattern. For the second row, we recursively construct a sequence of points
$$b_0 > c_0 > b_1 > c_1 > \ldots > 0$$ as follows: first, let $b_0$ be any positive element of $I$. Since $0$ is an accumulation point of $X$, it is also an accumulation point of each of its conjugates $X_{\overline{a}_i}$, so by saturation we can pick $c_0 > 0$ so that the interval $J_0 := (c_0, b_0)$ intersects each of the sets $X_{\overline{a}_i}$. In general, once we have constructed $b_i$ and $c_i$ for all $i \leq n$, we pick an arbitrary $b_{n+1} \in (0, c_n)$, and then as before the fact that $0$ is an accumulation point of each conjugate of $X_{\overline{a}}$ allows us to pick $c_{n+1} \in (0, b_{n+1})$ so that $J_{n+1} := (c_{n+1}, b_{n+1})$ intersects every $X_{\overline{a}_i}$.
Now the second row in our inp-pattern will consist of the formulas $\varphi_1(x; b_j, c_j)$ asserting that $b_j < x < c_j$, which define pairwise-disjoint intervals which each intersect every set $X_{\overline{a}_i}$, as we wanted.
Since this inp-pattern can be constructed within any interval $I$ around $0$, by Lemma~\ref{rank_2_intervals} we have a contradiction. \end{proof}
The next proposition is similar to the well-known fact that in an NIP theory, global types which do not divide over a small submodel are invariant over said submodel (see Chapter 5 of \cite{Guide_NIP}, or Proposition 2.1 of \cite{HruPil}). However, we require a version of this which assumes only that the formula we are working with is NIP.
\begin{prop} \label{dividing_invariant}
Suppose $\varphi(\overline{x}; \overline{y})$ is an NIP formula in a theory $T$, $\mathcal{N} \prec \mathcal{M}$ are models of $T$ such that $\mathcal{M}$ is $|N|^+$-saturated, and $p(\overline{x})$ is a complete $\varphi(\overline{x}; \overline{y})$-$M$-type\footnote{That is, a maximal consistent collection of Boolean combinations of instances $\varphi(\overline{x}; \overline{b})$ of $\varphi(\overline{x}; \overline{y})$ with $\overline{b}$ from $M$.} which does not divide over $N$. Then $p(\overline{x})$ is $N$-invariant.
\begin{proof} The same proof as in \cite{HruPil} goes through. Namely, suppose that $\overline{a}_0, \overline{a}_1 \in M$ and $\textup{tp}(\overline{a}_0 / N) = \textup{tp}(\overline{a}_1 / N)$, and assume towards a contradiction that $\varphi(\overline{x}; \overline{a}_0) \wedge \neg \varphi(\overline{x}; \overline{a}_1) \in p(\overline{x})$. As $N$ is a model, there is some $\overline{b}$ such that both $\overline{a}_0, \overline{b}$ and $\overline{a}_1, \overline{b}$ can be extended to infinite $N$-indiscernible sequences (see Facts~1.11 and 1.12~(ii) of \cite{CLPZ}), and by saturation we may assume $\overline{b} \in M$. If $\varphi(\overline{x}; \overline{b}) \in p$, then for the $N$-indiscernible sequence $\{\overline{a}'_i : i \in \omega\}$ extending $\overline{a}_1, \overline{b}$, we have $\neg \varphi(\overline{x}; \overline{a}'_0) \wedge \varphi(\overline{x}; \overline{a}'_1) \in p(\overline{x})$; and if to the contrary $\neg \varphi(\overline{x}; \overline{b}) \in p$, then let $\{\overline{a}'_i : i \in \omega \}$ be the $N$-indiscernible sequence extending $\overline{a}_0, \overline{b}$, and we have $ \varphi(\overline{x}; \overline{a}'_0) \wedge \neg \varphi(\overline{x}; \overline{a}'_1) \in p(\overline{x})$. In either case,
\begin{equation} \neg \left[\varphi(\overline{x}; \overline{a}'_0) \leftrightarrow \varphi(\overline{x}; \overline{a}'_1) \right] \in p(\overline{x}). \end{equation}
Since $p(\overline{x})$ does not divide over $N$, the partial type $$\{\neg \left[ \varphi(\overline{x}; \overline{a}'_{2i}) \leftrightarrow \varphi(\overline{x}; \overline{a}'_{2i+1}) \right] \, : \, i < \omega \}$$ must be consistent. As any Boolean combination of NIP formulas is NIP, $\neg \left[ \varphi(\overline{x}; \overline{y}_1) \leftrightarrow \varphi(\overline{x}; \overline{y}_2)\right]$ is NIP, but we have just shown that this formula has infinite alternation rank, which is a contradiction. \end{proof}
\begin{prop} \label{no_inf_twiddle} Suppose that there is an infinite discrete set definable in $\mathcal{R}$, $X \subseteq R$ is definable, and the formula $\varphi(x; y)$ expressing that $x \in X - y$ is NIP. Then $X / \sim$ is finite. \end{prop}
\begin{proof}
Suppose that $X$ is definable over the $\omega$-saturated model $\mathcal{R}_0$. Let $\mathcal{R}_1$ be an $|R_0|^+$-saturated elementary extension of $\mathcal{R}_0$. If $X / \sim$ is infinite, then it has unboundedly many equivalence classes (since $\sim$ is a definable equivalence relation) and thus we can find $a, b \in X(R_1)$ such that $\textup{tp}(a/ \mathcal{R}_0) = \textup{tp}(b / \mathcal{R}_0)$ and $a$ is not in the same $\sim$-class as $b$. This means that for every $\epsilon > 0$, there is some $g \in (-\epsilon, \epsilon)$ such that $$\neg \left[ a + g \in X \leftrightarrow b + g \in X \right],$$ thus $$g \in (X - a) \Delta (X - b),$$ and so $0$ is an accumulation point of the set $Z := (X - a) \Delta (X - b)$. By Lemma~\ref{dividing_acc_point}, $Z$ does not divide over $R_0$. By $\omega$-saturation, $Z$ does not fork over $R_0$ (see, for instance, Proposition~5.14 of \cite{Guide_NIP}). Therefore $Z$ has an extension to a complete $\varphi(x;y)$-type $p(x)$ over $R_1$ which does not fork over $R_0$. But the fact that $a \equiv_{R_0} b$ implies that $Z$ is not $R_0$-invariant, and hence $p(x)$ cannot be $R_0$-invariant either, so by Proposition~\ref{dividing_invariant}, we conclude that $\varphi(x; y)$ cannot be NIP.
\end{proof}
Putting this all together, we obtain:
\begin{thm} \label{IP_translations} Suppose that $\mathcal{R}$ is a divisible OAG of burden $2$ in which an infinite discrete set is definable and also there is a definable $X \subseteq R$ which is dense and codense in some nonempty interval. Then the formula $\varphi(x; y)$ expressing that $x \in X - y$ has the independence property.
\end{thm}
\begin{proof} By Theorem~\ref{dense_finite_twiddle}, $X / \sim$ is infinite. Hence by Proposition~\ref{no_inf_twiddle}, we conclude that $\varphi(x;y)$ has the independence property. \end{proof}
\begin{quest} Are there divisible OAGs of burden $2$ (or dp-rank $2$) in which both a Cantor-like set $C$ and a set $X$ which is dense and codense on an infinite interval are definable? \end{quest}
\section{A theory of burden $2$ with both an infinite definable discrete set and a dense-codense set}
In this section, we construct an example of a complete theory $T$ with the following properties: \begin{itemize} \item $T$ expands the theory of divisible ordered Abelian groups; \item $T$ is definably complete; \item $T$ has burden 2; and \item if $\mc{M} \models T$ then there are
both an infinite definable discrete subset of $M$ and a definable subset of $M$ which is dense and codense in an interval.
\end{itemize}
This establishes that the hypotheses of Theorem \ref{IP_translations} can in fact be satisfied. In order to accomplish this we need to develop some background facts that slightly generalize some of the work in \cite{cp}.
Recall that in \cite{cp} the authors show the following: given a theory $T$ in a language $\mc{L}$ which eliminates $\exists^{\infty}$ and a distinguished unary predicate $S$ from $\mc{L}$, if we expand $\mc{L}$ to $\mc{L}(P)$ by adding a new unary predicate $P$, then the $\mc{L}(P)$-theory $$T_{S}= T \cup \{\forall x \left[ P(x) \rightarrow S(x) \right]\}$$ has a model companion $T_G$ with multiple desirable properties.
Here we point out that the results from \cite[Section 2]{cp} all hold if we slightly weaken the assumption that $T$ eliminates $\exists^{\infty}$ to the assumption that $T$ eliminates $\exists^{\infty}$ relative to $S$.
\begin{definition} For a theory $T$ and a distinguished unary predicate $S$ we say that {\em $T$ eliminates $\exists^{\infty}$ relative to $S$} if for any formula $\varphi(x, \ob{y})$ so that $T \models \forall x \left[\varphi(x, \ob{y}) \rightarrow S(x)\right]$ there is $n \in \mathbb{N}$ so that if $\mc{M} \models T$ and $\ob{a} \in M^{|\ob{y}|}$ and
$|\varphi(M, \ob{a}) |> n$ then $\varphi(M, \ob{a})$ is infinite.
\end{definition}
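For instance, anticipating the specific theory studied below: in the theory of $\langle \R; +, <, \mathbb{Z}\rangle$ with $S$ interpreted as the interval $(0,1)$, the formula $x \in \mathbb{Z} \wedge 0 < x < y$ (which is expressible in the language introduced below) has finite instances of arbitrarily large size, so this theory does not eliminate $\exists^{\infty}$ outright; on the other hand, every definable subset of $(0,1)$ is a finite union of points and intervals (Fact~\ref{zfact}~(4) below), and hence $\exists^{\infty}$ is eliminated relative to $S$.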
We now provide the slight modifications to the result from \cite[Section 2]{cp} that we will need to construct and analyze our theory. All the proofs are {\it mutatis mutandis} from those in the original paper of Chatzidakis and Pillay. We include the specific analogous result from \cite{cp} with each statement. From now on we assume that $T$ eliminates $\exists^{\infty}$ relative to $S$. We also assume that $T$ eliminates quantifiers.
\begin{definition}\cite[Definitions 2.1]{cp} Let $\mc{M}$ be a saturated model of $T$, $C$ a small subset of $M$, $\ob{a}=(a_1, \dots, a_n)$ a tuple of elements of $M$, and $\varphi(\ob{x})$ a formula defined with parameters in $C$. \begin{enumerate}
\item We define the algebraic dimension of $\ob{a}$ over $C$, $\text{a-dim}(\ob{a}/C)$, to be the maximal length of a sequence $j(i)$ of positive integers $\leq n$ such that: \[a_{j(1)}\notin \textup{acl}(C), a_{j(i)} \notin \textup{acl}(C,a_{j(1)}, \dots, a_{j(i-1)}).\]
\item We set
\[\text{a-dim}(\varphi(\ob{x}))=\sup\{\text{a-dim}(\ob{a}/C) | M \models \varphi(\ob{a})\}.\] \end{enumerate} \end{definition}
\begin{fact}\cite[Lemma 2.2]{cp} Let $\mc{M} \models T$, $\varphi(x_1, \dots, x_n, \ob{y})$ an $\mc{L}$-formula so that
\[T \models \forall x_1, \dots, x_n(\varphi(\ob{x}, \ob{y}) \rightarrow \bigwedge_{1 \leq i \leq n}S(x_i))\] and $d$ an integer. Then the set $\{\ob{b}\; |\; \text{a-dim}(\varphi(\ob{x},\ob{b}))=d\}$ is definable.
\end{fact}
\begin{fact}\cite[Lemma 2.3]{cp} Let $\mc{M}$ be a saturated model of $T$ and $\varphi(x_1, \dots, x_n, \ob{y})$ an $\mc{L}$-formula so that
\[T \models \forall x_1, \dots, x_n(\varphi(\ob{x}, \ob{y}) \rightarrow \bigwedge_{1 \leq i \leq n}S (x_i)).\] Then the set
$\Sigma(\varphi)$ of tuples $\ob{b}$ so that there is $\ob{a} \in M$ satisfying $\varphi(\ob{x}, \ob{b})$ and $\ob{a} \cap \textup{acl}(\ob{b}) =\emptyset$ is definable.
\end{fact}
\begin{fact}\cite[Theorem 2.4]{cp}\label{cpax} The theory $T_S$ has a model companion $T_G$, whose axiomatization is obtained by expressing in a first order way the following properties of a model $(M,P)$:
\begin{enumerate}
\item $M \models T$.
\item For every $\mc{L}$-formula $\varphi(x_1, \dots, x_n, \ob{z})$ so that
\[T \models \forall x_1, \dots, x_n(\varphi(\ob{x}, \ob{z}) \rightarrow \bigwedge_{1 \leq i \leq n}S(x_i)),\]
for every subset $I$ of $\{1, \dots, n\}$,
\[\forall \ob{z} \Big[ \big(\exists\ob{x}\,\varphi(\ob{x}, \ob{z}) \wedge (\ob{x} \cap \textup{acl}_T(\ob{z})= \emptyset )\wedge \bigwedge_{1 \leq i<j \leq n}x_i \not= x_j\big)\]
\[\rightarrow \big(\exists\ob{x}\,\varphi(\ob{x},\ob{z}) \wedge \bigwedge_{i \in I}(x_i \in P) \wedge \bigwedge_{i \notin I}(x_i \notin P)\big)\Big]\]
\end{enumerate}
where $\textup{acl}_T$ is the algebraic closure operator for $T$.
\end{fact}
\begin{fact}\cite[Proposition 2.5]{cp} Let $(\mc{M},P)$ and $(\mc{N},Q)$ be models of $T_G$, and let $A$ be a common subset of $M$ and $N$. Then
\[(\mc{M},P)\equiv_A(\mc{N},Q) \Leftrightarrow (\textup{acl}_T(A),P \cap \textup{acl}_T(A)) \simeq_A (\textup{acl}_T(A), Q \cap \textup{acl}_T(A)).\]
\end{fact}
\begin{fact}\cite[Corollaries 2.6]{cp}\label{completions} \begin{enumerate} \item The completions of $T_G$ are obtained by describing $P \cap \textup{acl}_T(\emptyset)$.
\item If $\ob{a}, \ob{b}$ are tuples from $\mc{M}\models T_G$ and $A \subseteq M$, then $\textup{tp}(\ob{a}/A)=\textup{tp}(\ob{b}/A)$ if and only if there is an $A$-isomorphism of $\mc{L}(P)$ structures from $\textup{acl}_T(A, \ob{a})$ to $\textup{acl}_T(A, \ob{b})$ which carries $\ob{a}$ to $\ob{b}$.
\item Let $a \in \mc{M} \models T_G$, $A \subseteq M$. Then $a$ is algebraic over $A$ if and only if $a \in \textup{acl}_T(A)$. Thus algebraic closures in the sense of $T$ and $T_G$ coincide.
\item Modulo $T_G$, every formula $\varphi(\ob{x})$ is equivalent to a disjunction of formulas of the form $\exists \ob{y} \psi(\ob{x}, \ob{y})$, where $\psi$ is quantifier-free, and for every $(\ob{a}, \ob{b})$ satisfying $\psi$, $\ob{b} \in \textup{acl}_T(\ob{a})$.
\end{enumerate}
\end{fact}
Given this general background information we now proceed to analyze the specific theory $T$ with which we plan to work.
Let $T$ be the theory of the structure $\langle \R; +, <, \mathbb{Z}\rangle$ formulated in the language $\mc{L}=\{+, <, 0, 1, \floor{\;}, S, \lambda\}_{\lambda \in \Q}$, where $S$ is interpreted as the interval $(0,1)$, the $\lambda$ are unary function symbols for multiplication by $\lambda \in \Q$, and
$\floor{x}=\max\{y \in \mathbb{Z} : y\leq x\}$. Note that we do not include a separate predicate for $\mathbb{Z}$, as the set of integers is defined by the quantifier-free formula $\floor{x}=x$. Also notice that, as models of $T$ are linearly ordered, $\textup{acl}=\textup{dcl}$ in models of $T$. By results from the appendix of \cite{ivp} the $\mc{L}$-theory $T$ eliminates quantifiers and is universally axiomatizable. We record some additional facts about $T$ which we will use repeatedly without further mention.
\begin{fact}\label{zfact}
\begin{enumerate}
\item If $\mc{M} \models T$ and $f:M^n \to M$ is definable then $f$ is given piecewise by terms; more precisely, if $f$ is definable with parameters $\ob{c}$, then there are $\ob{c}$-definable sets $X_1, \dots, X_k$ partitioning $M^n$ and terms $t_1(\ob{x}, \ob{y}), \dots, t_k(\ob{x}, \ob{y})$ so that $f(\ob{x})=t_i(\ob{x}, \ob{c})$ on $X_i$.
\item $T$ has dp-rank 2.
\item If $\mc{M} \models T$ and $X \subseteq M$ is definable then $X$ is either discrete or has interior.
\item If $\mc{M} \models T$ and $X \subseteq (0,1)$ is definable then $X$ is a finite union of points and intervals, so $T$ eliminates $\exists^{\infty}$ relative to $S$ and the induced structure on $(0,1)$ is o-minimal.
\item If $\mc{M} \models T$, $X \subseteq M$ is definable and discrete and $f: M \to M$ is definable then $f[X]$ is discrete.
\item If $\mc{M} \models T$, $U \subseteq M$ is an open interval and $f: U \to M$ is definable then there is an open subinterval $V \subseteq U$ so that $f$ is equal to a linear function of the form
$x \mapsto \lambda x +a$ on $V$.
\end{enumerate} \end{fact}
\begin{proof} For (1), it is well known that this follows from the fact that $T$ is universally axiomatizable and model complete. For completeness we outline a proof. Fix $\mc{M}$ and $f$. Assume that $f$ is definable with parameters $\ob{c}$. Let $\ob{a} \in M^n$. Consider the closure of $\ob{a}\ob{c}$ under all terms, $\langle \ob{a}\ob{c}\rangle$. As $T$ is universally axiomatizable, $\langle \ob{a}\ob{c}\rangle \models T$. As $T$ is model complete, $\langle \ob{a}\ob{c}\rangle \preceq \mc{M}$. In particular $f(\ob{a}) \in \langle \ob{a}\ob{c}\rangle$, so that $f(\ob{a})=t(\ob{a}, \ob{c})$ for some term $t$. The result then follows by compactness.
See the note after the proof of Theorem 3.1 in \cite{DG} for (2). As $T$ is clearly definably complete (3) follows from Corollary \ref{discrete_interior_dichotomy}. (4) is a special case of \cite[Lemma 3.3(1)]{DG}. (5) follows from \cite[Corollary 2.17]{DG}. (6) follows from \cite[Lemma 3.2(2)]{DG} and (1). \end{proof}
As $T$ eliminates $\exists^{\infty}$ relative to $S$ we can form the theory $T_G$. For convenience we fix a completion $T^*_G$ of $T_G$ by specifying that if $\mc{M} \models T_G$ then $P(M) \cap \textup{dcl}{(\emptyset)}=\emptyset$ (see Fact \ref{completions}(1)).
\begin{lem} $T_G^*$ has quantifier elimination and is definably complete. \end{lem}
\begin{proof} That $T^*_G$ has quantifier elimination follows from Fact \ref{completions}~(4) together with the facts that $\textup{acl}_T=\textup{dcl}_T$, definable functions are given piecewise by terms (by Fact \ref{zfact}~(1)), and a use of compactness.
To establish that $T_G^*$ is definably complete we work in the structure $\mc{R}=\langle \R; +, <, 0, 1, \floor{\;}, S, \lambda\rangle_{\lambda \in \Q}$. It suffices to show that there is $G \subseteq \R \setminus \textup{dcl}(\emptyset)$ so that \[\langle \R; +, <, 0, 1, \floor{\;}, S, G, \lambda\rangle_{\lambda \in \Q}\] is a model of $T_G^*$.
We construct $G \subseteq \R$ together with a disjoint set $\ob{G} \subseteq \R$ of points promised to lie outside of $G$, simultaneously by induction. Let $\bm{c}=\left|\R\right|$.
By an instance of axiom (2) in Fact \ref{cpax} we mean a triple $C=(\varphi(\ob{x}, \ob{y}), \ob{c}, I)$ so that:
\begin{enumerate}
\item $\varphi(x_1, \dots, x_n, \ob{y})$ is an $\mc{L}$-formula;
\item $\ob{c} \in \R^{|\ob{y}|}$ and in some extension $\mc{M}$ of $\mc{R}$ there is $\ob{a} \in M^{|\ob{x}|}$ satisfying $\varphi(\ob{x}, \ob{c})$ with $\ob{a} \cap \textup{acl}{(\ob{c})}=\emptyset$ and all coordinates of $\ob{a}$ distinct; and
\item $I \subseteq \{1, \dots, n\}$.
\end{enumerate}
We say for a set $G' \subseteq \R$ a tuple $\ob{a} \in \R^{|\ob{x}|}$ satisfies $C$ relative to $G'$ if $\mc{R} \models \varphi(\ob{a}, \ob{c})$ and $a_i \in G'$ if and only if $i \in I$.
List all instances of axiom (2) as $C_{\alpha}=(\varphi_{\alpha}(\ob{x},\ob{y}), \ob{c}_{\alpha}, I_{\alpha})$ for $\alpha \in \bm{c}$.
We construct $G$ as $\bigcup_{\alpha \in \bm{c}}G_{\alpha}$ and $\ob{G}=\bigcup_{\alpha \in \bm{c}}\ob{G}_\alpha$ so that: \begin{enumerate}
\item $|G_{\alpha}|<\bm{c}$ and $|\ob{G}_{\alpha}|<\bm{c}$ for all $\alpha<\bm{c}$; \item $G_{\alpha} \subseteq G_{\beta}$ and $\ob{G}_{\alpha} \subseteq \ob{G}_{\beta}$ for $\alpha<\beta$; \item $G_0=\emptyset$ and $\ob{G}_0=\textup{dcl}(\emptyset)$; \item $G_{\alpha} \cap \ob{G}_{\alpha}=\emptyset$ for all $\alpha<\bm{c}$; and \item for all $\beta < \alpha$ there is a tuple of elements of $G_{\alpha} \cup \ob{G}_{\alpha}$ satisfying the instance $C_{\beta}$ relative to $G_{\alpha}$. \end{enumerate}
If we can construct such a $G$ then it is immediate that \[\langle \R; +, <, 0, 1, \floor{\;}, S, G, \lambda\rangle_{\lambda \in \Q} \models T_G^*.\]
Constructing $G_0$ and $\ob{G}_0$ is immediate. Also, if $\alpha$ is a limit ordinal and we have constructed $G_{\beta}$ and $\ob{G}_{\beta}$ for all $\beta<\alpha$, then we simply let $G_{\alpha}=\bigcup_{\beta<\alpha}G_{\beta}$ and $\ob{G}_{\alpha}=\bigcup_{\beta<\alpha}\ob{G}_{\beta}$.
Thus suppose we have a successor ordinal $\alpha+1$ and we have constructed $G_{\beta}$ and $\ob{G}_{\beta}$ for all $\beta \leq\alpha$. We need to extend the construction to satisfy $C_{\alpha}=(\varphi_{\alpha}(\ob{x}, \ob{y}), \ob{c}_{\alpha}, I_{\alpha})$. Let $X_{\alpha} \subseteq (0,1)^n$ be the subset of $\R^n$ defined by $\varphi_{\alpha}(\ob{x},\ob{c}_{\alpha})$. By Fact \ref{zfact}~(4) the structure induced by $\mc{R}$ on $(0,1)$ is o-minimal. Hence without loss of generality we may assume that $X_{\alpha}$ is an o-minimal cell. After potentially permuting the variables of the defining formula for $X_{\alpha}$ we may without loss of generality assume that \[X_{\alpha}=\{(z_1, \dots, z_l, y_1, \dots, y_m) : \ob{z} \in U \text{ and } y_i=g_i(\ob{z}) \text{ for all } 1 \leq i \leq m\}\] where $U \subseteq \R^l$ is a definable open o-minimal cell and the $g_i$ are definable continuous functions on $U$ which are never equal on $U$. After refining $U$ if necessary we may also assume that if $(a_1, \dots, a_{l-1}) \in \pi(U)$ (where $\pi$ is projection on the first $l-1$ coordinates) then $g_i: U_{(a_1, \dots, a_{l-1})} \to \R$ is monotone for all $1 \leq i \leq m$ (where $U_{(a_1, \dots, a_{l-1})}$ is the fiber in $U$ over $(a_1, \dots, a_{l-1})$). Furthermore we may also assume that for any $1 \leq i \leq m$, if for some $(a_1, \dots, a_{l-1}) \in \pi(U)$ the function $g_i:U_{(a_1, \dots, a_{l-1})} \to \R$ is increasing then the same is true for all $(b_1, \dots, b_{l-1}) \in \pi(U)$, and the same holds for ``decreasing'' or ``constant'' in place of ``increasing''.
To build $G_{\alpha+1}$ and $\ob{G}_{\alpha+1}$ it suffices to find $(a_1, \dots, a_l, b_1, \dots, b_m) \in X_{\alpha}$ so that $a_i \notin G_{\beta} \cup \ob{G}_{\beta}$ and $b_j \notin G_{\beta}\cup \ob{G}_{\beta}$ for all $1 \leq i \leq l$, $1 \leq j \leq m$, and $\beta \leq \alpha$. We show this by induction on $l$, the dimension of $U$. If $l=0$ then $U$ is a point, but then there cannot be $\ob{a}$ in an extension of $\mc{R}$ with $\ob{a} \in X_{\alpha}$ and $\ob{a} \cap \textup{acl}(\ob{c}) =\emptyset$; hence there can be no such instance of axiom (2). Thus we may assume that $l>0$ and that we have our result for all values less than $l$. Suppose that for some function $g_j$ with $1 \leq j \leq m$ it is the case that $g_j: U_{(a_1, \dots, a_{l-1})} \to \R$ is constant (for simplicity suppose that this holds exactly for the functions $g_1, \dots, g_r$ with $0 \leq r \leq m$). Thus for $1 \leq i \leq r$ the function $g_i$ may be thought of as a function from $\pi(U)$ to $\R$. By induction we may find $a_1, \dots, a_{l-1} \in \pi(U)$ so that $a_i \notin G_{\beta} \cup \ob{G}_{\beta}$ for any $1 \leq i \leq l-1$ and $\beta \leq \alpha$, and also so that $g_i(a_1, \dots, a_{l-1}) \notin G_{\beta} \cup \ob{G}_{\beta}$ for all $1 \leq i \leq r$ and all $\beta \leq \alpha$. If there are no constant functions $g_i$ then we can also easily pick $a_1, \dots, a_{l-1} \in \pi(U)$ so that no $a_i \in G_{\beta} \cup \ob{G}_{\beta}$ for any $\beta\leq\alpha$. Each function $g_i$ with $r<i \leq m$ is monotone increasing or decreasing on $U_{(a_1, \dots, a_{l-1})}$; in particular there are fewer than $\bm{c}$ elements $a_l$ of $U_{(a_1, \dots, a_{l-1})}$ so that $g_i(a_1, \dots, a_l) \in G_{\beta} \cup\ob{G}_{\beta}$ for some $\beta\leq\alpha$. Also there are fewer than $\bm{c}$ elements $a_l$ of $U_{(a_1, \dots, a_{l-1})}$ so that $a_l \in G_{\beta} \cup\ob{G}_{\beta}$ for some $\beta\leq\alpha$. Thus simply due to cardinality considerations there must be $a_l \in U_{(a_1, \dots, a_{l-1})}$ so that $a_l \notin G_{\beta} \cup \ob{G}_{\beta}$ for any $\beta\leq \alpha$ and $g_i(a_1, \dots, a_l) \notin G_{\beta} \cup \ob{G}_{\beta}$ for all $1 \leq i \leq m$ and $\beta\leq \alpha$. Let $\ob{c}$ be the $n$-tuple $(a_1, \dots, a_l, g_1(\ob{a}), \dots, g_m(\ob{a}))$. Set $G_{\alpha+1}=G_{\alpha} \cup \{c_i : i \in I_{\alpha}\}$ and $\ob{G}_{\alpha+1}=\ob{G}_{\alpha} \cup \{c_i : i \in \{1, \dots, n\} \setminus I_{\alpha}\}$.
\end{proof}
Axiom scheme (2) for $T_G$ from Fact \ref{cpax} implies that if $\mc{M} \models T_G^*$ then $P(M)$ must be dense and codense in $(0,1)$. Thus to establish that $T_G^*$ is our desired example we must show that $T_G^*$ is of burden $2$. Ideally we would simply like to reference \cite[Theorem 7.3]{chernikov}, but as $T$ does not satisfy exchange for algebraic closure it is not clear that this result is applicable, so we provide an \emph{ad hoc} proof. We need some preliminary lemmas and observations. The following is an immediate consequence of the axiomatization for $T_G$ given in Fact \ref{cpax}.
\begin{fact}\label{genfact} Let $\mc{M} \models T_G$ and let $U \subseteq M$ be an open interval. Furthermore suppose that $f_i: U \to (0,1)$ for $1 \leq i \leq n$ and $g_j: U \to (0,1)$ for $1 \leq j \leq m$ are definable functions which are continuous, non-constant, and monotone. If there is $V \subseteq U$ an open interval so that $f_i(x) \not= g_j(x)$ for all $x \in V$ and all $i,j$ then there is $x \in V$ so that $f_i(x) \in P(M)$ for all $i$ and $g_j(x) \notin P(M)$ for all $j$. \end{fact}
\begin{lem}\label{loc_const} Let $\mc{M} \models T_G^*$ and $f: M \to (0,1)$ be definable. Then there are only finitely many $a \in (0,1)$ so that $f^{-1}(a)$ has nonempty interior. \end{lem}
\begin{proof} Let \[X=\{b \in M : f \text{ is constant in a neighborhood of } b\}.\] Either $X$ is empty, in which case we are done, or it is a nonempty open set and hence a union of open intervals by definable completeness. Also by definable completeness, if $I$ is any one of these intervals then $f$ is constant on $I$. Let $X_0$ be the set of midpoints of the bounded intervals among these (there are at most two unbounded ones, contributing at most two further values of $f$). $X_0$ is definable and discrete. Thus $f[X_0]$ is discrete, and as $f[X_0] \subseteq (0,1)$ it must be finite. This establishes the lemma.
\end{proof}
\begin{lem}\label{good_form} Let $\mc{M} \models T_G^*$ and let $X \subseteq M$ be definable. Then $X$ is a finite union of sets definable by formulas of the form: \[\psi(x) \wedge \bigwedge_{i \in I}P(t_i(x)) \wedge \bigwedge_{j \in J}\neg P(s_j(x))\] where $\psi(x)$ is an $\mc{L}$-formula possibly with parameters, $I$ and $J$ are finite sets, and the $t_i$'s and $s_j$'s are terms possibly with parameters so that: \begin{enumerate} \item $\psi(M)$ is either discrete or open; \item if $\mc{M} \models \psi(a)$ then $t_{i_1}(a) \not= t_{i_2}(a)$ for $i_1 \not= i_2$, $s_{j_1}(a) \not= s_{j_2}(a)$ for $j_1 \not= j_2$, and $t_i(a) \not= s_j(a)$ for all $i,j$; \item if $\mc{M} \models \psi(a)$ then $t_i(a) \in (0,1)$ and $s_j(a) \in (0,1)$ for all $i,j$; and \item if $U$ is an open interval with $U \subseteq \psi(M)$ then no $t_i$ and no $s_j$ is constant on $U$. \end{enumerate} \end{lem}
\begin{proof} By quantifier elimination for $T_G$ we reduce to the case where $X$ is defined by a formula of the form:
\[\psi_0(x) \wedge \bigwedge_{i \in I_0}P(t_i^0(x)) \wedge \bigwedge_{j \in J_0}\neg P(s^0_j(x))\] where $\psi_0$ is an $\mc{L}$-formula, $I_0$ and $J_0$ are finite sets, and the $t^0_i$'s and $s^0_j$'s are terms. The proof is a straightforward induction on $|I_0|+|J_0|$ and proceeds by repeatedly partitioning the set defined by $\psi_0(x)$ into smaller $\mc{L}$-definable subsets, where (1) follows as any definable set in $\mc{M}$ either is discrete or has interior, (2) is immediate, (3) is immediate as $P(M) \subseteq (0,1)$, and (4) follows by Lemma \ref{loc_const} and the fact that $T$ eliminates $\exists^{\infty}$ relative to $(0,1)$.
\end{proof}
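As a trivial illustration of this normal form (not needed for the arguments below), the formula $P(x) \wedge 0 < x < 1$ is already of the required shape: take $\psi(x) := 0 < x < 1$, so that $\psi(M)$ is open, $I = \{1\}$ with $t_1(x) = x$, and $J = \emptyset$. The single term $t_1$ takes values in $(0,1)$ on $\psi(M)$ and is nonconstant on every open subinterval of $\psi(M)$, so conditions (1)--(4) of Lemma~\ref{good_form} hold.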
\begin{lem}\label{disc_def} If $\mc{M} \models T_G^*$ and $X \subseteq M$ is definable and discrete then $X$ is definable in $\mc{M} \restriction \mc{L}$. \end{lem}
\begin{proof} Without loss of generality we may assume that $X$ is defined by a formula $\varphi(x)$ of the form given in Lemma \ref{good_form}, say \[\varphi(x) = \psi(x) \wedge \bigwedge_{i \in I}P(t_i(x)) \wedge \bigwedge_{j \in J}\neg P(s_j(x)).\]
If $\psi(M)$ has interior we can find an open interval $U \subseteq \psi(M)$ so that each $t_i$ and $s_j$ is equal to a linear term of the form $\lambda x + a$ on $U$. By assumption none of the $t_i$ and $s_j$ are equal on $U$ and none of them are constant. Thus by Fact \ref{genfact} it must be the case that $\varphi(M)$ is dense in $U$, contradicting the discreteness of $X$; thus this case is impossible.
If $\psi(M)$ is discrete the image of $\psi(M)$ under any of the $t_i$ or $s_j$ must be discrete. As these images are subsets of $(0,1)$ they must be finite. It follows that $X$ is definable in $\mc{M} \restriction \mc{L}$.
\end{proof}
Although we only need the following lemma in the specific case of models of $T$ we provide a statement and proof in much greater generality.
\begin{lem}\label{disc_rank} Suppose that $\mc{N}$ is an expansion of a densely ordered group of finite burden $n$. If $X \subseteq N$ is definable, infinite, and discrete, then the burden of $X$ is less than or equal to $n-1$. \end{lem}
\begin{proof} Without loss of generality we assume that $\mc{N}$ is sufficiently saturated. Suppose the result fails. Thus $X$ has burden $n$. Let the formulas $\varphi_i(x, \ob{y})$ for $1 \leq i \leq n$ and parameters $\ob{a}_{i,j}$ for $1 \leq i \leq n$ and $j \in \omega$ witness that $X$ has burden $n$. Notice that we only need countably many elements from $X$ to witness that the burden is $n$, thus by compactness we can find $\delta>0$ so that \[X'=\{x \in X : (x-\delta, x+\delta) \cap X =\{x\}\}\] also has burden $n$ witnessed by the same formulas and parameters.
We can find $0<\varepsilon_j^0<\varepsilon_j^1<\delta$ for $j \in \omega$ so that $\varepsilon^1_j<\varepsilon^0_{j+1}$ for all $j \in \omega$. Let $\varphi_{n+1}(x,y_1y_2)$ be the formula \[ \exists z \in X'(z<x \wedge \forall w(w \in X' \rightarrow w \leq z \vee w>x) \wedge y_1<x-z<y_2)\] and let $\varphi'_i(x,\ob{y})$ for $1 \leq i \leq n$ be the formula \[\exists z (z \in X' \wedge z<x \wedge \forall w(w \in X' \rightarrow w \leq z \vee w>x) \wedge \varphi_i(z, \ob{y})).\] It now follows that the formulas $\varphi'_1(x, \ob{y}), \dots, \varphi'_n(x, \ob{y})$ and $\varphi_{n+1}(x,y_1y_2)$ with respective sequences of parameters $\ob{a}_{1,j}, \dots, \ob{a}_{n,j}$ and $\varepsilon^0_j,\varepsilon^1_j$ witness that the theory of $\mc{N}$ has burden at least $n+1$, a contradiction. \end{proof}
\begin{prop} $T_G^*$ has burden $2$. \end{prop}
\begin{proof} Fix $\mc{M}$ a sufficiently saturated model of $T_G^*$ and suppose the result fails. Let $\varphi_k(x, \ob{y})$ for $k \in \{1, 2, 3\}$ together with mutually indiscernible parameters $\ob{a}_{k,l}$ with $k \in \{1, 2, 3\}$ and $l \in \R$ be an inp-pattern with three rows.
If any of the $\varphi_k(x, \ob{a}_{k,0})$, say $k=1$, defines a discrete set $X$ then by Lemma \ref{disc_def} $X$ is definable in $\mc{M} \restriction \mc{L}$. As $\mc{M} \restriction \mc{L}$ has dp-rank $2$, $X$ must have dp-rank $1$ in $\mc{M} \restriction \mc{L}$ by Lemma \ref{disc_rank}. Now consider $\varphi_2(M, \ob{a}_{2,0}) \cap X$. This set is discrete and hence also definable in $\mc{M} \restriction \mc{L}$, say by the $\mc{L}$-formula $\theta_2(x, \ob{b}_{2,0})$. As $\{\ob{a}_{2,i} : i \in \R\}$ is indiscernible over $\ob{a}_{1,0}$ we find $\{\ob{b}_{2,i} : i \in \R\}$ so that $\theta_2(x, \ob{b}_{2,i})$ defines $X \cap \varphi_2(x, \ob{a}_{2,i})$ for $i \in \R$. Similarly we find an $\mc{L}$-formula $\theta_3(x, \ob{y})$ and parameters $\{\ob{b}_{3,i} : i \in \R\}$ so that $\theta_3(x, \ob{b}_{3,i})$ defines $X \cap \varphi_3(x, \ob{a}_{3,i})$ for all $i \in \R$. Hence the pair of formulas $\theta_2(x, \ob{y})$ and $\theta_3(x, \ob{y})$ with respective sequences of parameters $\{\ob{b}_{2,i} : i \in \R\}$ and $\{\ob{b}_{3,i} : i \in \R\}$ witnesses that $X$ has burden $2$. But then $X$ has dp-rank $2$, a contradiction.
Consider $\varphi_1(x, \ob{y})$. By \cite[Lemma 7.1]{chernikov} we may assume that this formula is of the form given by Lemma \ref{good_form}, say \[\varphi_1(x, \ob{y}) = \psi_1(x, \ob{y}) \wedge \bigwedge_{i \in I}P(t_i(x, \ob{y})) \wedge \bigwedge_{j \in J}\neg P(s_j(x, \ob{y}))\] with $\psi_1(M, \ob{a}_{1,0})$ open.
Suppose that $\{\psi_1(x, \ob{a}_{1,l}) : l \in \R\}$ is consistent.
Then by compactness we may find an open interval $V=(c,d)$ so that $V$ is a subset of $\psi_1(M, \ob{a}_{1,l})$ for all $l \in \R$. By shrinking $V$ further we may also assume by Fact \ref{zfact}(6) that all the terms $t_i(x, \ob{a}_{1,l})$ and $s_j(x, \ob{a}_{1,l})$ are given by linear functions of the form $\lambda x +a$ on $V$.
As $\mc{M} \restriction \mc{L}$ is of dp-rank 2, by \cite
[Theorem 4.16]{Guide_NIP} we may find an open interval $W \subset \R$
so that $\{\ob{a}_{1,j} : j \in W\}$ is indiscernible over $cd$ as a sequence in $\mc{M} \restriction \mc{L}$. For notational convenience assume that $W=\R$.
Fix $\kappa \in \omega$ such that $\{\varphi_1(x, \ob{a}_{1,l}) : l \in \R\}$ is $\kappa$-inconsistent. By the assumptions on the formula $\varphi_1(x, \ob{y})$ from Lemma \ref{good_form} all the terms are continuous, monotone, and non-constant on $V$ and we have guaranteed that they are also all linear on $V$. But then by Fact \ref{genfact} we can only have $\kappa$-inconsistency of $\{\varphi_1(x, \ob{a}_{1,l}) : l \in \R\}$ if for some $i \in I$, some $j \in J$, and some $l_1, l_2 \in \R$,
we have that $t_i(x, \ob{a}_{1, l_1})=s_j(x, \ob{a}_{1, l_2})$ densely often in some subinterval of $V$. As these functions are linear over $\Q$, it follows that $t_i(x, \ob{a}_{1, l_1})=s_j(x, \ob{a}_{1, l_2})$ on all of $V$. Then by indiscernibility of $\ob{a}_{1,l}$ over $cd$ (as an $\mc{M} \restriction \mc{L}$-sequence) it follows that $t_i(x, \ob{a}_{1,0})=s_j(x, \ob{a}_{1,0})$ on $V$. But this violates the properties of $\varphi_1$ guaranteed by Lemma \ref{good_form}.
Thus it must be the case that $\{\psi_1(x, \ob{a}_{1,l}) : l \in \R\}$ is inconsistent. But the same holds for
$\{\psi_k(x, \ob{a}_{k,l}) : l \in \R\}$ for $k \in \{2,3\}$. Thus $\psi_k(x, \ob{y})$ for $k \in \{1,2,3\}$ and $\ob{a}_{k,l}$ for $k \in \{1,2,3\}$ and $l \in \R$ is an inp-pattern with three rows in $\mc{M}\restriction \mc{L}$ which is impossible as $\mc{M}\restriction \mc{L}$ has dp-rank $2$.
\end{proof}
This example demonstrates that the results in Section 2 of this paper are in some sense sharp. In particular notice that as indicated in Theorem \ref{IP_translations} the formula $\tau(x,y):=x \in P-y$ has the independence property by the axioms for $T_G$.
\end{document} |
\begin{document}
\title{Microscopic models of quantum jump super-operators}
\author{A. V. Dodonov} \email{[email protected]}
\author{S. S. Mizrahi} \email{[email protected]}
\affiliation{Departamento de F\'{\i}sica, CCET, Universidade Federal de S\~{a}o Carlos, Via Washington Luiz km 235, 13565-905, S\~ao Carlos, SP, Brazil}
\author{V. V. Dodonov} \email{[email protected]} \affiliation{Instituto de F\'{\i}sica, Universidade de Bras\'{\i}lia,\\ PO Box 04455, 70910-900, Bras\'{\i}lia, DF, Brazil}
\date{\today}
\begin{abstract} We discuss the quantum jump operation in an open system, and show that jump super-operators related to a system under measurement can be derived from the interaction of that system with a quantum measurement apparatus. We give two examples for the interaction of a monochromatic electromagnetic field in a cavity (the system) with 2-level atoms and with a harmonic oscillator (representing two different kinds of detectors). We show that the derived quantum jump super-operators have the `nonlinear' form $J\rho = \gamma\, \mbox{diag} \left[ F(\hat{n})a{\rho }a^{\dagger}F(\hat{n})\right]$, where the concrete form of the function $F(\hat{n})$ depends on assumptions made about the interaction between the system and the detector. Under certain conditions the asymptotic power-law dependence $F(\hat{n})=(\hat{n}+1)^{-\beta}$ is obtained. A continuous transition to the standard Srinivas--Davies form of the quantum jump super-operator (corresponding to $\beta=0$) is shown.
\end{abstract}
\pacs{42.50.Lc, 03.65.Ta, 03.65.Yz}
\maketitle
\section{Introduction}\label{sec1}
In the theory of continuous photodetection and continuous measurements the (one-count) quantum jump super-operator (QJS) is an essential part of the formalism \cite {carmichael,plenio,ueda1,ueda3,Gard92,WiseMilb,Ban93,Garr94,agarwal, ueda2,brun,marsh}, since it accounts for the loss of one photon from the electromagnetic field (EM) and corresponding photoelectron detection and counting within the measurement apparatus (MA). One of the main equations in this theory is the evolution equation of the field's density operator $\rho _{t}$, or master equation, which reads in the simplest variant as \begin{equation} \frac{d\rho _{t}}{dt}=\frac{1}{i\hbar }\left[ H_{0},\rho _{t}\right] -\frac{ \gamma }{2}\left( O^{\dagger }O\rho _{t}+\rho _{t}O^{\dagger }O-2O\rho _{t}O^{\dagger }\right) , \label{eqmestra} \end{equation} where $H_{0}$ is the EM field Hamiltonian, $\gamma $ is the field-MA coupling constant and $O$ is some lowering operator, representing the loss of a single photon from the field to the environment, that may be detected and counted by a duly constructed experimental setup. Defining the effective non-hermitian Hamiltonian as \cite{Mol75,Gisin92,Dum92,Molmer93} \begin{equation} H_{eff}=H_{0}-i\frac{\gamma }{2}O^{\dagger }O, \label{Heff} \end{equation} Eq. (\ref{eqmestra}) can be written as (we set here $\hbar =1$) \begin{equation} \frac{d\rho _{t}}{dt}= -i \left( H_{eff}\rho _{t}-\rho _{t}H_{eff}^{\dagger }\right) +\gamma O\rho _{t}O^{\dagger }, \label{eqmestra2} \end{equation} whose formal solution is (see, for example, \cite{carmichael,Zol87}) \begin{eqnarray} \rho _{t}&=&\sum_{k=0}^{\infty }\int_{0}^{t}dt_{k}\int_{0}^{t_k}dt_{k-1}\cdot \cdot \cdot \int_{0}^{t_2}dt_{1}e^{L\left( t-t_{k}\right) }{J}\nonumber \\ &&\times e^{L\left( t_{k}-t_{k-1}\right) }{J}\cdot \cdot \cdot {J}e^{Lt_{1}}\rho _{0}, \end{eqnarray} where \[ L\rho _{0}=-i\left[ H_{eff}\rho _{0}-\rho _{0}H_{eff}^{\dagger }\right] , \] $\rho _{0}$ being the density operator for the field state at $t=0$. The no-count super-operator $\exp \left[ L\left( t_{k}-t_{k-1}\right) \right] $ evolves the initial state $\rho _{0}$ from time $t_{k-1}$ to the latter time $t_{k}$ without taking out any photon from the field, it represents the field monitoring by a MA. The QJS ${J\bullet }=\gamma O\bullet O^{\dagger }$ is an operation which takes out instantaneously one photon from the field. Actually, Tr$\left[ {J}\rho _{0}\right] $ is the rate of photodetection~\cite {SD}.
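As a quick consistency check, the equivalence of Eqs. (\ref{eqmestra}) and (\ref{eqmestra2}) can be verified numerically in a truncated Fock basis. The following Python sketch uses the illustrative choice $O=a$ for a single damped mode and arbitrary parameter values; it is not tied to any particular detector model.
\begin{verbatim}
import numpy as np

# Minimal sketch (illustrative parameters): check that the Lindblad form (eqmestra)
# and its rewriting via H_eff (eqmestra2) give the same d(rho)/dt for O = a.
N, gamma, omega = 12, 0.3, 1.0
a = np.diag(np.sqrt(np.arange(1, N)), 1)          # annihilation operator, truncated
H0 = omega * a.conj().T @ a
Heff = H0 - 0.5j * gamma * a.conj().T @ a          # effective non-hermitian Hamiltonian

rng = np.random.default_rng(0)
X = rng.normal(size=(N, N)) + 1j * rng.normal(size=(N, N))
rho = X @ X.conj().T
rho /= np.trace(rho)                               # random density matrix

lhs = (-1j * (H0 @ rho - rho @ H0)
       - 0.5 * gamma * (a.conj().T @ a @ rho + rho @ a.conj().T @ a
                        - 2 * a @ rho @ a.conj().T))
rhs = -1j * (Heff @ rho - rho @ Heff.conj().T) + gamma * a @ rho @ a.conj().T
print(np.max(np.abs(lhs - rhs)))                   # ~1e-16: the two forms coincide
\end{verbatim}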
The explicit form of the QJS is not predetermined. In the phenomenological photon counting theory developed by Srinivas and Davies \cite{SD} the QJS was introduced {\em ad hoc} as \begin{equation} J_{SD}\bullet=\gamma _{SD}a\bullet a^{\dagger }. \label{JSD} \end{equation} Later, Ben-Aryeh and Brif~\cite{Aryeh} and Oliveira {\em et al.}~\cite{Oliveira} considered a QJS of the form \begin{equation} J_E\bullet=\gamma_E E_-\bullet E_+, \label{JE} \end{equation} where \begin{equation} E_-=(a^{\dagger}a+1)^{-1/2}a \quad {\rm and} \quad E_+=E_-^\dagger \label{E-E+} \end{equation} are the exponential phase operators of Susskind and Glogower \cite{susg,CarNiet}. These ``non-linear'' operators make it possible to remove some inconsistencies of the SD theory noticed by its authors.
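For readers who wish to experiment with these operators, the following Python sketch builds $E_-$ and the jump super-operator (\ref{JE}) in a truncated Fock basis; the cutoff and the state acted upon are arbitrary illustrative choices.
\begin{verbatim}
import numpy as np

# Minimal sketch: Susskind-Glogower operators and the jump (JE) in a truncated Fock basis.
N = 20                                              # illustrative cutoff
a = np.diag(np.sqrt(np.arange(1, N)), 1)            # annihilation operator
E_minus = np.diag(1.0 / np.sqrt(np.arange(1, N + 1))) @ a   # (n+1)^{-1/2} a
E_plus = E_minus.conj().T

def J_E(rho, gamma_E=1.0):
    # phenomenological jump super-operator of Eq. (JE)
    return gamma_E * E_minus @ rho @ E_plus

rho = np.zeros((N, N)); rho[3, 3] = 1.0             # Fock state |3><3|
print(np.diag(J_E(rho)).real[:5])                   # -> |2><2|, i.e. [0, 0, 1, 0, 0]
\end{verbatim}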
However, the QJS (\ref{JE}) was introduced in \cite{Aryeh,Oliveira} also {\em ad hoc}. Therefore it is desirable to have not only a phenomenological theory, but also some {\em microscopic models}, which could justify the phenomenological schemes. The simplest example of such a model was considered for the first time in \cite{Imoto}, where the QJS of Srinivas and Davies was derived under the assumption of highly efficient detection. The two fundamental assumptions of that model were: (a) infinitesimally small interaction time between the field and the MA, and (b) the presence of only a few photons in the field mode. Only under these conditions can one use a simple perturbative approach and arrive at the mathematical expression for the QJS, which is independent of the details of the interaction between the MA and the EM field.
If condition (a) or (b) is not fulfilled, the QJS should depend on many factors, such as, for example, the kind of interaction between the field and the MA, the interaction strength and the time $T$ of the interaction. Moreover, it should be emphasized that the instant $t_{j}$ at which the quantum jump occurs cannot be determined exactly --- it can happen randomly at any moment within $T$. Making different assumptions concerning the moment of the `quantum jump', one can obtain different formal expressions for the QJS. In \cite{AVS} we have proposed a simple heuristic model for obtaining the `non-linear' QJS of the form \begin{equation} {J\bullet }=\gamma F(a^{\dagger}a) a\bullet a^{\dagger} F(a^{\dagger}a). \label{JF} \end{equation}
In this connection, the aim of the present paper is to provide a more rigorous derivation of QJSs, using a more sophisticated model that takes into account dissipation effects due to the `macroscopic part' of the MA. Our approach is based on the hypothesis that the transition probability must be averaged over the interaction time $T$, during which a photon can be absorbed by the detector at any time in the interval $(0,T)$. Considering two different models of the MA (a 2-level atom and a harmonic oscillator interacting with a single-mode EM field), we shall demonstrate that different kinds of interaction result in quite different QJSs.
The plan of the paper is as follows. In Sec. II we derive the QJS using the modified Jaynes--Cummings model (with account of damping due to the spontaneous decay of the excited state) and calculating the time average of the transition operator. In Sec. III we apply the same scheme to the model of two coupled oscillators, showing explicitly how the variation of the relative strength of coupling constants results in the change of the function $F(a^{\dagger}a)$ in Eq. (\ref{JF}).
Sec. IV contains a summary and conclusions.
\section{Model of two-level atom detector}
\label{2level}
Let us consider first the model, which is a straightforward generalization of the one studied in \cite{Imoto}. The role of the `system' is played by a single mode of the electromagnetic field, while the `detector' of the MA (sub-system constituting the MA that actually interacts with the EM field) consists of a single two-level atom. The Hamiltonian for the total system is chosen in the standard form of the Jaynes--Cummings model \cite{JCM} \begin{equation} H_{0}=\frac{1}{2}\omega _{0}{\sigma }_{0}+\omega {\hat{n}}+g{a}{\sigma } _{+}+g^{*}{a}^{\dagger }{\sigma }_{-}\,, \label{ham01} \end{equation} where the Pauli pseudo-spin operators ${\sigma }_{0}$ and ${\sigma
}_{\pm }$ correspond to the atom ($\sigma _{+}=|e\rangle \langle g|$, $\sigma _{-}=|g\rangle \langle e|$ and $\sigma _{0}=|e\rangle
\langle e|-|g\rangle \langle g|$) and one considers that there were chosen two levels of the atom (the ground state $|g\rangle $
with frequency $\omega _{g}$ and the excited state $|e\rangle $ with frequency $\omega _{e}=\omega _{g}+\omega _{0}$); {$a $,
$a^{\dagger }$ and ${\hat{n}}=a^{\dagger }a$} are the lowering, raising and number operators, respectively, of the EM field. Since the coupling between the field and the atom is weak, we assume that $\omega \gg |g|$. So far, the detector can only absorb photons and emit them back into the EM field, since it is not yet coupled to a macroscopic device that irreversibly absorbs them.
Therefore, we have to take into consideration that the detector is coupled to the `macroscopic part' (MP) of the MA (e.g., the phototube and associated electronics). Hence the detector suffers dissipative effects responsible for the spontaneous decay of the excited level of the detector (in this case, the atom). It is precisely this physical process that represents a photodetection: the excited level of the detector decays, emitting a photoelectron into the MP of the MA, which is amplified by appropriate electronics and is seen as a macroscopic electrical current inside the MP of the MA. We can take these dissipative effects into account by describing the whole photodetection process, including the spontaneous decay, by the master equation \begin{equation} \frac{d{\rho }_{t}}{dt}+i\left( {H}_{eff}{\rho }_{t}-{\rho }_{t}{H} _{eff}^{\dagger }\right) =2\lambda {\sigma }_{-}{\rho }_{t}{\sigma }_{+}, \label{mestra1} \end{equation} which is a special case of Eq. (\ref{eqmestra2}), where $O={\sigma }_{-}$, $O^{\dagger }={\sigma }_{+}$, ${H}_{eff}=H_{0}-i\lambda {\sigma }_{+}{\sigma }_{-}$, and $2\lambda $ is the coupling of the excited level of the atom (detector) to the MP of the MA (here we make a reasonable assumption that $
\lambda $ has the same order of magnitude as $|g|$). The `sink' term \begin{equation} {R}\bullet = 2\lambda {\sigma }_{-}\bullet {\sigma }_{+} \end{equation}
represents the $|e\rangle \rightarrow |g\rangle $ transition within the detector (the atomic decay process in this case). If $\lambda=0$, then the detector interacts with the EM field, but photoelectrons are not emitted (thus no counts happen), because the absorbed photons are emitted back to the field and then reabsorbed at a later time, periodically, analogously to the Rabi oscillations.
In the following, we shall use the quantum trajectories approach \cite{carmichael}. The effective Hamiltonian (\ref{Heff}) becomes \begin{eqnarray} {H}_{eff} &=& {H}-i{\lambda }{\sigma }_{+}{\sigma }_{-}=\frac{1}{2} \left( \omega _{0}-i{\lambda }\right) {\sigma }_{0} \nonumber \\ && +\omega \hat{n}+g{a}{\sigma }_{+}+g^{*}{a}^{\dagger }{\sigma }_{-} -i\lambda/2 \label{efet} \end{eqnarray} (where we have used $\sigma _{+}\sigma _{-}=(1+\sigma _{0})/2$) and the evolution of the system between two spontaneous decays is given by the no-count super-operator \begin{equation} {\cal D}_{t}{\rho }_{0}={U}(t){\rho }_{0}{U}^{\dagger }(t), \qquad {U}(t)=\exp \left( -i{H}_{eff}t\right) . \end{equation} After a standard algebraic manipulation \cite{JCM,Cress96} we obtain the following explicit form of the {\em non-unitary} evolution operator $U(t)$: \begin{eqnarray} {U}(t) &=&e^{-\lambda t/2}\exp \left[ -i\omega \left( {\sigma }_{0}/2+{\hat{n}}\right) t\right] \nonumber \\ &&\times \left\{
\frac{1}{2}\left[ C_{\hat{n}+1}(t) -i\frac{\delta }{\left|
g\right| } S_{\hat{n}+1}(t) \right] \left( 1+\sigma_{0}\right) \right. \nonumber \\
&&\left. -i\frac{g}{\left| g\right| } S_{\hat{n}+1}(t) {a}{\sigma
}_{+}-i\frac{g^{*}}{\left|g\right| } {a}^{\dagger } S_{\hat{n}+1}(t) {\sigma }_{-} \right. \nonumber \\
&&\left. +\frac{1}{2}\left[ C_{\hat{n}}(t) +i\frac{\delta }{\left|
g\right| } S_{\hat{n}}(t) \right] \left( 1-\sigma _{0}\right) \right\} , \label{aray} \end{eqnarray} where \begin{equation}C_{\hat{n}}(t) \equiv \cos \left(
\left|g\right| {B_{\hat{n}}}t\right) , \quad S_{\hat{n}}(t) \equiv
\sin \left( \left|g\right| {B_{\hat{n}}}t\right) /{B}_{\hat{n}}, \label{def-CS} \end{equation}
\begin{equation}
{B}_{\hat{n}}=\sqrt{{\hat{n}} + \left({\delta }/{|g|}\right) ^{2}},\qquad \delta =\frac{1}{2}\left( \omega _{0}-\omega -i\lambda \right) \label{def-Bd} \end{equation} (note that parameter $\delta $ is complex and $\hat{n}$ is an operator).
Assuming that the field state is ${\rho }_{0}={\rho }
_{F}\otimes |g\rangle \langle g|$ at time $t=0$
or, analogously, the last photoemission occurred at $t=0$, the probability that the {\em next} photoelectron emission will occur within the time interval $[t,t+\Delta t)$ is given by \cite{carmichael,SD,Cohen} \begin{equation} P(t)={\rm Tr}_{F-D}\left[ R{\cal {D}}_{t}{\rho }_{0}\right] \Delta t, \label{p} \end{equation} (the subscripts $F$ and $D$ are a reminder that the trace operation is on {\em field} and {\em detector} spaces, respectively) where $\Delta t$ is the time resolution of the MA. Tracing out first over the detector variables, the probability density for the next photoemission to occur at time $t$ will be~\cite{Cohen}
\begin{equation} p(t)=\lim_{\Delta t\rightarrow 0}\frac{P(t)}{\Delta t}={\rm Tr} _{F}\left[ \Xi (t){\rho }_{F}\right] , \label{salto1} \end{equation} where the time-dependent {\em transition super-operator} \begin{equation} {\Xi }(t)\bullet =2\lambda {\Gamma }(t)\bullet {\Gamma }^{\dagger }(t), \label{xi} \end{equation} acting on the EM field, stands for the photoelectron emission into the MP of the MA (i.e., the actual photodetection). Once again, the probability for detecting a photoelectron in $[t,t+\Delta t)$ is $P(t)={\rm Tr}_{F}\left[ \Xi (t)\rho _{F}\right] \Delta t$ (now on we omit the subscript and write $ \rho _{F}\equiv \rho $ for the field operator). In Eq. (\ref{xi}) $\Gamma (t) $ is the time-dependent {\em transition operator} \begin{equation}
{\Gamma }(t)=\langle e|{U}(t)|g\rangle , \label{gamma} \end{equation} that takes out a single photon from the field state. Substituting Eq. (\ref{aray}) into Eq. (\ref{gamma}) we can write ${\Gamma }(t)$ as \begin{equation}
{\Gamma }(t)=-i\frac{g}{|g|}\exp\left(-\lambda t/2-i\omega {\hat{n}}t\right) S_{\hat{n}+1}(t){a}, \end{equation} so the time-dependent transition super-operator (\ref{xi}) becomes \begin{equation} {\Xi }(t){\rho }=2\lambda e^{-\lambda t}e^{-i\omega {\hat{n}}t} S_{\hat{n}+1}(t){a}{\rho }{a}^{\dagger} S_{\hat{n}+1}^{\dagger }(t) e^{i\omega {\hat{n}}t}. \label{expr} \end{equation} In the resonant case, $\omega _{0}=\omega $, we have \begin{equation} B_{\hat{n}}=\sqrt{\hat{n}- \chi^{2}}, \qquad \chi \equiv \lambda
/(2|g|). \label{Bn-chi} \end{equation}
If the interaction time $\Delta t$ is small, and the number of photons in the field is not very high, in the sense that the condition \begin{equation}
|g|\Delta t\sqrt{n+1}\ll 1 \label{important} \end{equation} is fulfilled for all eigenvalues of $\hat{n}$, for which the probabilities $
p_{n}=\langle n|\rho |n\rangle $ are important, then one can replace the operator $\sin \left( B_{\hat{n}+1}|g|\Delta t\right)
$ in Eq. (\ref{def-CS}) simply by $B_{\hat{n}+1}|g|\Delta t$ and arrive at the QJS \begin{equation} {J}{\rho }=e^{-i\omega {\hat{n}}\Delta t}\left[2 \lambda \left(
|g|\Delta t\right) ^{2}{a}{\rho }{a}^{\dagger }\right] e^{i\omega {\hat{n}}\Delta t}, \label{almost-SD} \end{equation} which has {\em almost\/} the Srinivas--Davies form (\ref{JSD}), with the coupling constant \begin{equation}
\gamma _{SD}=2\lambda \left( |g|\Delta t\right) ^{2}. \label{gamasd} \end{equation} Taking $2\lambda = (\Delta t)^{-1}$ we obtain the same coupling constant $\gamma _{SD}$ as in \cite{Imoto}, but this is not the only possible choice. Note that the super-operator (\ref{almost-SD}) contains the factors $ \exp (\pm i\omega {\hat{n}}\Delta t)$, which can be essentially different from the unit operator even under condition (\ref
{important}), for two reasons: (1) the condition $|g|\Delta t\ll 1$ does not imply $\omega \Delta t\ll 1$, because $\omega \gg |g|$; (2) the condition (
\ref{important}) contains the square root of $n$, whereas the eigenvalues of $\exp (\pm i\omega {\hat{n}}\Delta t)$ depend on the number $n$ itself, which is much greater than $\sqrt{n}$ if $n\gg 1$. Consequently, even the simplest microscopic model gives rise to a QJS, which is, strictly speaking, different from the SD jump super-operator, coinciding with the former only for the diagonal elements $\left| n\right\rangle \left\langle n\right| $ of the density matrix in the Fock basis.
If condition (\ref{important}) is not satisfied, we propose that the QJS can be defined by {\em averaging\/} the transition super-operator (\ref{expr}) over the interaction time $T$, because the exact instant within $(0,T)$ at which the photodetection occurs in each run is unknown, so a reasonable hypothesis is that these events happen randomly with uniform probability distribution: \begin{equation} {J}_{T}{\rho }=\frac{1}{T}\int_{0}^{T}dt\ {\Xi }(t){\rho }. \label{J-Xi} \end{equation}
Writing the field density operator as \begin{equation}
{\rho }=\sum_{m,n=0}^{\infty }\rho _{mn}|m\rangle \langle n|, \label{dens2} \end{equation} we have \begin{equation} {J}_{T}{\rho }= \sum_{m,n=1}^{\infty }\rho _{mn} \sqrt{mn} f_{mn}
|m-1\rangle \langle n-1|, \label{rho-f} \end{equation} where \begin{equation} f_{mn} = \frac{2\lambda}{T} \int_{0}^{T}e^{i\omega t(n-m) -\lambda t } S_{m}(t)S_{n}(t) \, dt. \label{fmn} \end{equation}
It is natural to suppose that the product $\lambda T$ is large enough that photodetection happens with high probability. Mathematically, this means we assume that $\exp(-\lambda T) \ll 1$. If $\lambda \ll \omega$ (also a natural assumption), then the off-diagonal coefficients $f_{mn}$ with $m\neq n$ are very small due to the fast oscillations of the integrand in Eq. (\ref{fmn}), so they can be neglected (a rough estimate gives for these terms the order of magnitude ${\cal O}(\lambda/\omega)$, compared with the diagonal coefficients $f_{nn}$). Consequently, the microscopic model leads to the nonlinear {\em diagonal\/} QJS of the form
\begin{equation}
J\rho = \gamma\, \mbox{diag} \left[ F(\hat{n})a{\rho }a^{\dagger}F(\hat{n})\right], \label{J-diag} \end{equation} where $\mbox{diag}(\hat{A})$ means the diagonal part of the operator $\hat{A}$ in the Fock basis. The function $F({n})$ can be restored from the coefficients $f_{nn}$ (apart from a constant factor, which can be included in the coefficient $\gamma$) as \begin{equation} F({n}) =\sqrt{f_{n+1,n+1}}. \label{F-f} \end{equation}
Under the condition $\exp(-\lambda T) \ll 1$, the upper limit of integration in Eq. (\ref{J-Xi}) can be extended formally to infinity, with exponentially small error. Then, taking into account the definition of the function $S_n(t)$ (\ref{def-CS}), we arrive at integrals of the form \[ \int_0^{\infty} dt\, e^{-\lambda t}\times \left\{ \begin{array}{ll} \sin^2(\mu t)/\mu^2 & {\rm for} \;\; \chi<1 \\ t^2 & {\rm for} \;\; \chi=1 \\
\sinh^2(\mu t)/\mu^2 & {\rm for} \;\; \chi>1 \end{array} \right. , \] which can be calculated exactly (see, e.g., Eqs. 3.893.2 and 3.541.1 from \cite{Grad}). The final result does not depend on $\lambda$ or $\chi$ (and it is the same for either $\chi<1$ or $\chi>1$): \begin{equation} f_{nn} = (nT)^{-1}. \label{fnT} \end{equation}
Thus we obtain the QJS \begin{equation} {J}_{T}\rho ={\gamma }_{T} \sum_{n=1}^{\infty }\rho _{nn}
|n-1\rangle \langle n-1| ={\gamma }_{T}\mbox{diag}\left( {E}_{-}{\rho }{E}_{+}\right), \label{JT} \end{equation} where ${\gamma }_{T}=T^{-1}$, and the operators $E_-$ and $E_+$ are defined by Eq. (\ref{E-E+}).
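The result (\ref{fnT}) is easy to confirm numerically: the sketch below evaluates the integral (\ref{fmn}) for $m=n$ in the resonant case, with illustrative parameters satisfying $\exp(-\lambda T)\ll 1$, and checks that $n\,T\,f_{nn}\approx 1$ independently of $\lambda$ and $\chi$.
\begin{verbatim}
import numpy as np

# Minimal sketch (illustrative parameters): verify f_nn = 1/(nT) for the 2-level detector.
g, lam, T = 1.0, 0.8, 60.0                 # |g|, lambda, interaction time (lambda*T >> 1)
chi = lam / (2 * g)

def S(n, t):
    # S_n(t) = sin(|g| B_n t)/B_n, B_n = sqrt(n - chi^2); complex sqrt covers n < chi^2
    B = np.sqrt(complex(n - chi**2))
    return (np.sin(g * B * t) / B).real

def trapezoid(y, x):
    return float(np.sum((y[1:] + y[:-1]) * np.diff(x)) / 2)

t = np.linspace(0.0, T, 400001)
for n in range(1, 6):
    f_nn = (2 * lam / T) * trapezoid(np.exp(-lam * t) * S(n, t)**2, t)
    print(n, n * T * f_nn)                 # close to 1, up to O(exp(-lambda*T)) corrections
\end{verbatim}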
Notice that, in principle, ${\gamma }_{T}$ {\em is different} from $\gamma _{SD}$. Moreover, the super-operator (\ref{JT}) derived from the microscopic model turns out to be {\em different\/} from the phenomenological QJS (\ref{JE}) studied in \cite{Oliveira,AVS}. The difference is that ${J}_{T}$ has no off-diagonal matrix elements, while ${J}_{E}$ does. We see that the microscopic model concerned (which can be justified in the case of a large number of photons in the field mode) predicts that each photocount not only diminishes the number of photons in the mode exactly by one, but also destroys off-diagonal elements, which means total decoherence of the field due to the interaction with the MA.
Note, however, that the formula (\ref{fnT}) holds under the assumption that the upper limit of integration in Eq. (\ref{fmn}) can be extended to infinity. But this cannot be done if the parameter $\chi$ is very large. Indeed, for $\chi>1$ and $\lambda T \gg 1$, the integrand in (\ref{fmn}) at $t=T$ is proportional to $\exp\left[-\lambda T\left(1- \sqrt{1-n/\chi^2}\right)\right]$, so it is not small when $n/\chi^2\ll 1$. Calculating the integral with finite limits under the conditions $n/\chi^2\ll 1$ and $\lambda T \gg 1$, we obtain the approximate formula \begin{equation} f_{nn}=(Tn)^{-1}\left\{1-\exp\left[-\lambda Tn/\left(2\chi^2\right) \right]\right\}, \label{interp} \end{equation} which shows that $f_{nn}$ does not depend on $n$ if $\lambda Tn/\left(2\chi^2\right) \ll 1$. Thus we see how the QJS (\ref{JT}) can be continuously transformed to the SD jump super-operator (\ref{JSD}) when the number $n$ changes from large to relatively small values. It should be emphasized, nonetheless, that the off-diagonal coefficients $f_{mn}$ remain small even in this limit. Their magnitude approaches that of the diagonal coefficients only in the case of $\lambda \sim \omega$, which does not seem to be very physical.
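A rough numerical illustration of this crossover is the following sketch, which compares the direct integration of Eq. (\ref{fmn}) with the interpolation formula (\ref{interp}) for an illustrative large value of $\chi$; the two agree up to corrections of order $n/\chi^2$ and $1/(\lambda T)$.
\begin{verbatim}
import numpy as np

# Minimal sketch (illustrative parameters): direct f_nn from Eq. (fmn) versus the
# interpolation formula (interp) in the regime of large chi (weak field-detector coupling).
g, chi, lamT = 1.0, 10.0, 20.0
lam = 2 * chi * g
T = lamT / lam

def trapezoid(y, x):
    return float(np.sum((y[1:] + y[:-1]) * np.diff(x)) / 2)

t = np.linspace(0.0, T, 400001)
for n in (1, 2, 5, 20):
    B = np.sqrt(complex(n - chi**2))
    S = (np.sin(g * B * t) / B).real       # reduces to sinh(...)/|B| for n < chi^2
    f_direct = (2 * lam / T) * trapezoid(np.exp(-lam * t) * S**2, t)
    f_interp = (1.0 / (T * n)) * (1 - np.exp(-lamT * n / (2 * chi**2)))
    print(n, T * f_direct, T * f_interp)   # rough agreement for n << chi^2
\end{verbatim}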
\section{Model of harmonic oscillator detector}
Now let us consider another model, where the role of the detector is played by a harmonic oscillator interacting with one EM field mode. This is a simplified version of the model proposed by Mollow~\cite{mollow} (for its applications in other areas see, e.g., \cite{DK96} and references therein). In the rotating wave approximation (whose validity was studied, e.g., in Ref. \cite{Estes68}) the Hamiltonian is \begin{equation} {H}=\omega _{a}{a}^{\dagger }{a}+\omega _{b}{b}^{\dagger }{b}+g{a}{b} ^{\dagger }+g^{*}{a}^{\dagger }{b}, \label{ham1} \end{equation} where the mode ${b}$ assumes the role of the detector and the mode ${a}$ corresponds to the EM field ($\omega _{b}$ and $\omega _{a}$ are the corresponding frequencies and $g$ is the detector-field coupling constant). In the following we shall repeat the same procedure as in Section \ref{2level}. The dissipation effects due to the macroscopic part of the MA, associated with the mode $b$, can be taken into account by means of the master equation \begin{equation} \frac{d{\rho }}{dt}+i\left[ {H}_{eff}{\rho } - \rho {H}_{eff}^{\dagger}\right] =2\lambda {b}{\rho }{b} ^{\dagger }, \label{mestra2} \end{equation} with the effective Hamiltonian \begin{eqnarray} {H}_{eff}&=&{H}-i{\lambda }{b}^{\dagger }{b}=\left( \omega _{b} -i \lambda \right) {b}^{\dagger }{b}\nonumber \\ &&+\omega _{a}{a}^{\dagger }{a}+g{b}{a} ^{\dagger }+g^{*}{b}^{\dagger }{a}. \label{quadra} \end{eqnarray}
The evolution operator $U(t)=\exp (-iH_{eff}t)$ for the {\em quadratic\/} Hamiltonian (\ref{quadra}) can be calculated by means of several different approaches \cite{Dbook}. Here we use the algebraic approach \cite {Ban93,Wei,alg,inter}, since Hamiltonian (\ref{quadra}) is a linear combination of the generators of algebra $su(1,1)$ \[ {K}_{+}\equiv {b}^{\dagger }{a},\quad {K}_{-}\equiv -{b}{a}^{\dagger },\quad {K}_{0}\equiv ({b}^{\dagger }{b}-{a}^{\dagger }{a})/2, \] \[ \lbrack {K}_{0},\ {K}_{\pm }]=\pm {K}_{\pm },\quad [{K}_{-},\ {K}_{+}]=2{K} _{0}. \] The evolution operator can be factorized as \begin{equation} {U}(t)=e^{-i\Omega t{N}}e^{A(t){K}_{+}}e^{B(t){K}_{0}}e^{C(t){K}_{-}}, \label{evol1} \end{equation} where \[ {N}\equiv \left( {b}^{\dagger }{b}+{a}^{\dagger }{a}\right) /2,\quad \Omega \equiv \omega _{b}+\omega _{a}-i\lambda . \] The time-dependent coefficients are \begin{equation} A(t)= -\frac{ig^{*}\sin (\eta t)}{\eta \Upsilon \left( t\right) }, \quad C(t)= \frac{ig\sin (\eta t)}{\eta \Upsilon \left( t\right) }, \label{AC} \end{equation} \begin{equation} B(t)=-2\ln \Upsilon \left( t\right), \label{B} \end{equation} with \begin{equation} \Upsilon \left( t\right) =\cos (\eta t) + i\left[\omega_{ba}/(2\eta) \right] \sin(\eta t), \label{def-Ups} \end{equation}
\begin{equation} \omega _{ba}\equiv \omega _{b}-\omega _{a}-i\lambda , \quad \eta
\equiv \left( |g|^{2}+\omega _{ba}^{2}/4\right) ^{1/2}. \label{eqs2} \end{equation}
Assuming that the detector is in resonance with the EM field's mode one gets $\omega _{ba}=-i\lambda$ and \begin{equation} \Upsilon \left( t\right) =\cos (\eta_0 t) + \left[\lambda/(2\eta_0) \right] \sin(\eta_0 t), \label{eqs2a} \end{equation} \begin{equation}
\eta_0 = \left( |g|^{2} -\lambda^{2}/4\right) ^{1/2}. \label{eta0} \end{equation} If, initially, the detector oscillator is in the ground state
$|0_{b}\rangle $, the time-dependent transition operator, corresponding to the absorption of one photon from the EM field, defined in (\ref{gamma}), is \begin{eqnarray}
{\Gamma }(t)&=&\langle 1_{b}|U(t)|0_{b}\rangle \nonumber \\ &=&A(t)\exp \left[ -\frac12\left( i\Omega t+B(t)\right) ({a}^{\dagger }{a}+1)\right] {a} \label{G2} \end{eqnarray} and the transition super-operator becomes \begin{eqnarray}
\Xi (t)\rho &=& 2\lambda |A(t)|^{2}\exp \left[ -\frac12\left( i\Omega t+B(t)\right) (a^{\dagger }a+1)\right] \nonumber \\ && \times a\rho a^{\dagger } \exp \left[ \frac12\left( i\Omega^{*}t -B^{*}(t)\right) (a^{\dagger }a+1)\right] . \end{eqnarray}
For ``small'' $t=\Delta t$ and few photons in the cavity, the QJS (\ref{almost-SD}) is recovered. Considering, instead, the time-averaged QJS, one has Eqs. (\ref{J-Xi})-(\ref{rho-f}). For
$\chi =\lambda/(2|g|)<1$ (when the parameter $\eta_0$ is real) one can represent the coefficients $f_{mn}$ as (we consider the resonance case with $\omega_a=\omega_b =\omega$)
\begin{eqnarray} f_{mn} &=& \frac{4\chi }{T(1-\chi^2)^{3/2}}
\int_{0}^{Z} dz\, [\cos( z) +\xi \sin(z)]^{m+n-2}
\nonumber \\ && \times
\sin^2( z)
\exp\left[i\overline\omega z(n-m)-\xi z(m+n) \right], \label{fmn-osc} \end{eqnarray} where \begin{equation} \xi=\frac{\chi}{\sqrt{1-\chi^2}}, \quad \overline\omega=
\frac{\omega}{|g|\sqrt{1-\chi^2}}, \quad Z = \frac{\lambda T}{2\xi}. \end{equation} Since the parameter $\overline\omega$ is big, the off-diagonal coefficients $f_{mn}$ with $n \neq m$ are very small due to the strongly oscillating factor $\exp[i\overline\omega t(n-m)]$. Consequently, they can be neglected in the first approximation, and we arrive again at the diagonal QJS of the form (\ref{J-diag}).
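The diagonal coefficients defined by Eq. (\ref{fmn-osc}) are straightforward to evaluate numerically; the sketch below uses the illustrative value $\lambda T = 10$ adopted in the figures, prints $Tf_{nn}$ for several $n$ and $\chi$, and in particular reproduces $Tf_{11}\approx 1$.
\begin{verbatim}
import numpy as np

# Minimal sketch (illustrative parameters): diagonal coefficients T*f_nn from Eq. (fmn-osc).
lamT = 10.0

def trapezoid(y, x):
    return float(np.sum((y[1:] + y[:-1]) * np.diff(x)) / 2)

for chi in (0.1, 0.3, 0.5, 0.8):
    xi = chi / np.sqrt(1.0 - chi**2)
    Z = lamT / (2.0 * xi)
    z = np.linspace(0.0, Z, 200001)
    for n in (1, 2, 5, 10, 20):
        integrand = ((np.cos(z) + xi * np.sin(z))**(2*n - 2)
                     * np.sin(z)**2 * np.exp(-2 * n * xi * z))
        T_fnn = (4 * chi / (1 - chi**2)**1.5) * trapezoid(integrand, z)
        print(chi, n, T_fnn)   # T*f_11 stays close to 1; larger n decay as a power law
\end{verbatim}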
We notice that the exact analytical expression for the integral in Eq. (\ref{fmn-osc}) is so complicated (even for $m=n$) that it is of little practical use. For example, in the limit $\chi \to 1$ Eq. (\ref{fmn-osc}) can be reduced to the form \begin{equation} f_{nn} =\frac{4}{T} \int_{0}^{\lambda T/2} dy\, y^2(1+y)^{2n-2} \exp(-2ny). \label{int-1} \end{equation} Replacing the upper limit by infinity, we recognize the integral representation of the Tricomi confluent hypergeometric function $\Psi(a;c;z)$ \cite{Bate}. Thus we have (neglecting small corrections of the order of $\exp(-\lambda T)$) \begin{equation} f_{nn}=\frac{8}{T}\Psi(3;2n+2;2n). \label{1-Psi} \end{equation} Although the $\Psi$-function on the right-hand side of Eq. (\ref{1-Psi}) can be rewritten in terms of the associated Laguerre polynomials \cite{Bate} as \begin{equation} \Psi(3;2n+2;2n)= \frac{(2n)!}{2(2n)^{1+2n}} L_{2n-2}^{(-1-2n)}(2n), \label{Psi-Lag} \end{equation} neither Eq. (\ref{1-Psi}) nor Eq. (\ref{Psi-Lag}) helps us to understand the behavior of the coefficient $f_{nn}$ as a function of $n$. Therefore it is worth trying to find simple approximate formulas for the integral in (\ref{fmn-osc}).
If $\chi\ll 1$, then also $\xi \ll 1$, so we can neglect the term $\xi\sin(z)$ in the integrand of Eq. (\ref{fmn-osc}) and the function $\sin^2( z)[\cos( z)]^{2n-2}$ can be replaced by its average value taken over the period $2\pi$ of fast (in the scale determined by the characteristic time $\xi^{-1}$) oscillations. After simple algebra we obtain (replacing the upper limit of integration $Z$ by infinity) \begin{equation}f_{nn} = \frac{4(2n-2)!}{T (2^n n!)^2}, \qquad \chi \ll 1. \end{equation} Using Stirling's formula $n!\approx \sqrt{2\pi n}(n/e)^n$, we can write for $n\gg 1$ \begin{equation} f_{nn} \approx \left(T\sqrt{\pi n^5}\right)^{-1}. \label{Stir} \end{equation} This function corresponds to the QJS (\ref{J-diag}) with \begin{equation} F(\hat{n})=F_5(\hat{n}) \equiv (\hat{n}+1)^{-5/4}, \quad \gamma =\gamma_5 \equiv (T\sqrt{\pi})^{-1}. \label{V} \end{equation} Thus, differently from the case of two-level detector, in the simplest version of the oscillator detector model the lowering operator contains the factor $(\hat{n}+1)^{-5/4}$, instead of $(\hat{n}+1)^{-1/2}$ as in the ``E-model'' (\ref{JE})
or simply $\hat{1}$ as in the SD model (\ref{JSD}).
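The quality of the Stirling approximation (\ref{Stir}) is easy to check against the exact small-$\chi$ expression above; the short sketch below prints both and their ratio, which approaches unity as $n$ grows.
\begin{verbatim}
from math import factorial, pi, sqrt

# Minimal sketch: exact small-chi result T*f_nn = 4(2n-2)!/(2^n n!)^2 versus
# the Stirling approximation 1/sqrt(pi n^5).
for n in (1, 2, 5, 10, 20, 50):
    exact = 4 * factorial(2 * n - 2) / (2**n * factorial(n))**2
    stirling = 1.0 / sqrt(pi * n**5)
    print(n, exact, stirling, exact / stirling)
\end{verbatim}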
The case $\chi\ll 1$ is not very realistic from the practical point of view, since it corresponds to the detector with very low efficiency. However, we can calculate the integral (\ref{fmn-osc}) with arbitrary $\xi$ approximately, assuming that $n \gg 1$ and
using the {\em method of steepest descent\/}. Rewriting the integrand as $\exp[G(z)]$, one can easily verify that the points of maxima of the function \[ G(z)= 2\ln[\sin(z)] +2(n-1)\ln[\cos(z) +\xi\sin(z)] -2\xi n z \] are given by the formula $ z_k = \pm z_0 + k\pi$, where \begin{equation} z_0 =\tan^{-1}(\mu), \quad \mu = \left(\xi^2 n +n -1\right)^{-1/2}, \end{equation}
$k=0,1,2,\ldots$ for the plus sign and $k=1,2,\ldots$ for the minus sign. One can verify that
\begin{equation} \exp\left[G(z_k)\right]= \frac{\mu^2(1+\xi\mu)^{2n-2}}{(1+\mu^2)^n} \exp\left(-2z_0 \xi n - 2\xi\pi n k\right). \label{Gk} \end{equation} The second derivatives of the function $G(z)$ at the points of maxima do not depend on $k$: \begin{equation} G''(z_k)= - \frac{4n (\xi^2 +1)}{1+\xi \mu}. \label{Gpr} \end{equation}
Using Eqs. (\ref{Gk}) and (\ref{Gpr}) and performing the summation over $k$ we find (taking $Z=\infty$) \begin{equation} f_{nn} = \frac{\chi\sqrt{8\pi} (1+\xi \mu)^{2n-3/2} \exp\left(-2z_0 \xi n\right)} {T\sqrt{n} (n +\chi^2 -1) (1+\mu^2)^{n}} \coth\left(\xi n\pi\right).
\label{fnn-chism0} \end{equation} Although the application of the steepest descent method can be justified for $n\gg 1$, formula (\ref{fnn-chism0}) seems to be a good approximation for $n\sim 1$, too. For example, for $n=1$ (when $\mu=\xi^{-1}$) it yields \begin{equation} Tf_{11} \approx 4\chi\sqrt{\pi}\coth(\pi\xi) \exp\left[-2\xi\tan^{-1}\left(\xi^{-1}\right)\right], \label{appr} \end{equation} and the numerical values of (\ref{appr}) in the whole interval $0<\chi<1$ are not very far from the exact value $Tf_{11}=1$, which holds independently of $\chi$, as long as the upper limit of integration in (\ref{fmn-osc}) can be extended to infinity.
For $n\gg 1$ (when $\mu\ll 1$)
Eq. (\ref{fnn-chism0}) can be simplified as
\begin{equation} f_{nn}(\chi) \approx \frac{\chi\sqrt{8\pi}}{eT} n^{-3/2}\coth\left(\frac{\chi n\pi}{\sqrt{1-\chi^2}}\right), \quad \chi \le 1. \label{fnn-chism} \end{equation} For $\chi\ll 1$ the function (\ref{fnn-chism}) assumes the form (\ref{Stir}), with slightly different coefficient $\gamma'=(eT)^{-1}\sqrt{8/\pi}
\approx 1.04 \gamma_5$.
For $\chi>1$ (when parameter $\eta_0$ is imaginary) we have, instead of (\ref{fmn-osc}), the integral (considering diagonal coefficients only) \begin{eqnarray} f_{nn} &=& \frac{4\chi }{T(\chi^2-1)^{3/2}}
\int_{0}^{Y} dz\, [\cosh( z) +\zeta \sinh(z)]^{2n-2}
\nonumber \\ && \times
\sinh^2( z)
\exp\left(-2n\zeta z \right), \label{fmn-osc2} \end{eqnarray} where \begin{equation} \zeta={\chi}/{\sqrt{\chi^2 -1}}, \quad Y= {\lambda T}/(2\zeta). \end{equation}
Applying again the steepest descent method, we have now the only point of maximum \begin{equation} z_{max}=\tanh^{-1}(\nu), \quad \nu = \left[\left(\zeta^2 -1\right)n +1\right]^{-1/2}. \label{zmax} \end{equation} Taking into account the value of the second derivative of the logarithm of integrand at this point, \begin{equation} G''(z_{max})= -\frac{4n (\zeta^2 -1)} {1 +\zeta\nu}, \quad \zeta\nu = \frac{\chi}{\sqrt{n + \chi^2 -1}}, \label{Gprchibig} \end{equation} we obtain \begin{equation} f_{nn}=\frac{\chi\sqrt{8\pi} (1+\zeta \nu)^{2n-3/2} (1-\nu)^{n(\zeta-1)} } {T\sqrt{n} (n +\chi^2 -1) (1+\nu)^{n(\zeta+1)}}. \label{fchi>} \end{equation} One can check that the limit of formula (\ref{fchi>}) at $\chi \to 1$ coincides with the analogous limit of formula (\ref{fnn-chism0}), so the transition through the point $\chi=1$ is continuous.
The asymptotic form of (\ref{fchi>}) for $n\gg \chi^2$ is the same as (\ref{fnn-chism}), except for the last factor: \begin{equation} f_{nn}(\chi) \approx \frac{\chi\sqrt{8\pi}} {eT} n^{-3/2}, \quad \chi \ge 1. \label{fnn-chi>1} \end{equation} Applying the steepest descent method to the integral (\ref{int-1}) (for $n\gg 1$), we obtain the same result (\ref{fnn-chi>1}) with $\chi=1$. Thus for $\chi \sim 1$ (neither too small nor too large) we obtain the QJS in the form (\ref{J-diag}) with \begin{equation} F(\hat{n}) = F_3(\hat{n}) \equiv (\hat{n}+1)^{-3/4}, \quad \gamma = \gamma_3 \equiv \frac{\chi\sqrt{8\pi}}{eT}. \label{F34} \end{equation}
For very large values of the parameter $\chi$ (exceeding $\sqrt{n}$) the steepest descent method cannot be used, because the second derivative of the logarithm of the integrand, given by Eq. (\ref{Gprchibig}), becomes small, and because the coordinate $z_{max}$, determined by Eq. (\ref{zmax}), tends to infinity, while the upper limit $Y$ of integration in (\ref{fmn-osc2}) tends to the fixed value $\lambda T/2$. For $\chi \gg 1$, Eq. (\ref{F34}) holds for the values of $n$ satisfying approximately the inequality $n > n_*\sim 4\chi^2 \exp(-\lambda T)$. If $n<n_*$, then it can be shown that Eq. (\ref{fmn-osc2}) leads to the same approximate formula (\ref{interp}) as in the two-level detector model, so the SD super-operator (however, without off-diagonal elements) is restored for relatively small values of $n$.
\begin{figure}
\caption{Dependence of diagonal coefficients $f_{nn}$ on the number $n$, obtained by numerical integration of (\ref{fmn-osc}) and (\ref{fmn-osc2}) with the fixed value $\lambda T=10$, for small and moderate values of the parameter $\chi$ (from below): $\chi=0.1,0.3,0.5,0.8,1.1$.}
\label{fig1}
\end{figure}
In Figures \ref{fig1} and \ref{fig2} we show the dependence of the diagonal coefficients $f_{nn}$ on the number $n$ for different values of the parameter $\chi$, obtained by numerical integration of (\ref{fmn-osc}) and (\ref{fmn-osc2}) for the fixed value of the parameter $\lambda T = 10$; in Figure \ref{fig3} we compare them with the approximate analytical formulas (\ref{fnn-chism0}) and (\ref{fchi>}). We see that the agreement is rather satisfactory for large values of $n$, although there are some differences for $n\sim 1$.
We also see in Figure \ref{fig2} that increasing the parameter $\chi$ results in the appearance of an SD plateau for small values of $n$, which goes over into a slope corresponding to the power-law dependence for large values of $n$. The height of the plateaus diminishes as $\chi^{-2}$, in accordance with Eq. (\ref{interp}), because large values of $\chi$ correspond (for fixed values of $\lambda$ and $T$) to a small coupling coefficient $|g|^2$ between the field and the MA and, consequently, a low probability of photocount.
\begin{figure}
\caption{Dependence of the diagonal coefficients $f_{nn}$ on the number $n$, obtained by numerical integration of (\ref{fmn-osc2}) with the fixed value $\lambda T=10$, for large values of the parameter $\chi$ (from above): $\chi=5,10,20,40,70$. Notice the appearance of plateaus corresponding to the SD model for small values of $n$; for large $n$ they transform into curves whose slope is given by the power-law dependence.}
\label{fig2}
\end{figure}
\begin{figure}
\caption{Comparison of numerical integration of (\ref{fmn-osc}) and (\ref{fmn-osc2}) with the approximate analytical formulas (\ref{fnn-chism0}) and (\ref{fchi>}) for $\chi=0.5,1.1,2,3,4$ (from below). The relative error is defined as $Er=(f_{nn}^{num}-f_{nn}^{anal})/f_{nn}^{num}$.}
\label{fig3}
\end{figure}
\section{Conclusions}
Here we presented two microscopic models for deriving QJSs. In the first one we supposed that the detector behaves like a 2-level atom, and in the second like a harmonic oscillator. The main difference between our models and previous ones is that we take into account the dissipative effects that arise when one couples the actual detector to the phototube. This scheme includes the spontaneous decay of the detector with the resulting photoelectron emission inside the phototube, which is amplified and viewed as a macroscopic electric current. Using the quantum trajectories approach we derived a general time-dependent transition super-operator, responsible for taking out a single photon from the field. Since it depends explicitly on the interaction time, we proposed two distinct schemes for obtaining time-independent QJSs from it. In the first case we assumed that the interaction time is small and that there are few photons in the cavity; in this situation we recovered the QJS proposed by Srinivas and Davies in both detector models. As a second scheme, we calculated the time-averaged QJS over the time interval during which a photon is certainly absorbed; as a result, we obtained different non-linear QJSs for the 2-level atom model and the harmonic oscillator model. In particular, we have shown that for quantum states with the predominant contribution of Fock components with large values of $n$, the QJS has the {\em nonlinear\/} form (\ref{J-diag}) with the power-law asymptotic function $F(\hat{n})=(\hat{n}+1)^{-\beta}$. However, the concrete value of the exponent $\beta$ is model-dependent.
For the 2-level atom model we obtained $\beta=1/2$, whereas in the harmonic oscillator model the values $\beta=5/4$ and $\beta=3/4$ were found, depending on the ratio between the spontaneous decay frequency of the excited state and the effective frequency of the coupling between the detector and the field mode. Also, we have demonstrated how the simple Srinivas--Davies QJS arises in the case of states with a small number of photons. Another important result we obtained is that the QJSs, when applied to the non-diagonal elements of the density matrix, vanish on average in both models due to the strong oscillations of the free-field terms.
\begin{acknowledgments} Work supported by FAPESP (SP, Brazil) contracts \# 00/15084-5, 04/13705-3. SSM and VVD acknowledge partial financial support from CNPq (DF, Brazil). \end{acknowledgments}
\end{document} |
\begin{document}
\title{Continuous decomposition of quantum measurements via Hamiltonian feedback} \author{Jan Florjanczyk} \email{[email protected]} \author{Todd A. Brun} \email{[email protected]} \affiliation{Center for Quantum Information Science and Technology, \\ Communication Sciences Institute, Department of Electrical Engineering, \\ University of Southern California Los Angeles, CA 90089, USA.} \date{\today}
\pacs{03.65.Aa, 03.65.Ta}
\keywords{quantum continuous measurement, quantum feedback control, Jordan algebras} \begin{abstract} We characterize the set of generalized quantum measurements that can be decomposed into a continuous measurement process using a stream of probe qubits and a tunable interaction Hamiltonian. Each probe in the stream interacts weakly with the target quantum system, then is measured projectively in a standard basis. This measurement result is used in a closed feedback loop to tune the interaction Hamiltonian for the next probe. The resulting evolution is a stochastic process with the structure of a one-dimensional random walk. To maintain this structure, and require that at long times the measurement outcomes be independent of the path, the allowed interaction Hamiltonians must lie in a restricted set, such that the Hamiltonian terms on the target system form a finite dimensional Jordan algebra. This algebraic structure of the interaction Hamiltonians yields a large class of generalized measurements that can be continuously performed by our scheme, and we fully describe this set. \end{abstract} \maketitle
Many quantum systems either exhibit slow measurement read-out times or can only be probed weakly. Under such conditions, it is natural to monitor the systems continuously while simultaneously exerting some closed-loop feedback. Experiments can already be performed with such low latency that feedback can be performed continuously in real time~\cite{rydberg, rydberg-feedback}. While generalized continuous measurements have been studied~\cite{weakuniversal, generalizedstochastic}, in most systems the diffusive weak measurements~\cite{simplemodel} that constitute the continuous process must be applied via coupling to a probe system. Previously, we've studied a system with closed-loop feedback applied to a stream of probe qubits interacting with the system by a fixed Hamiltonian~\cite{constham}. Here, we investigate the possibilities that arise from closed-loop feedback when the interaction Hamiltonian is itself subject to control.
A key feature of~\cite{constham} was the derivation of a reversibility equation which was used to restrict the class of measurements that admitted a continuous decomposition. This equation is necessary again in this work to ensure that the final measurement at long times is independent of the details of the path. Although we'll restrict our analysis to qubit probes and two-outcome measurements, we note that general two-outcome measurements are sufficient building blocks for $n$-outcome measurements~\cite{nonlocality, weakuniversal}.
\begin{figure}\label{fig:circuit}
\end{figure}
Consider a quantum system $S$ undergoing a stochastic evolution driven by two-outcome diffusive weak measurements. The outcome of any particular step during the evolution is one of two \emph{weak measurement step operators} $M_{\pm}(x)$. These step operators are functions of a pointer variable $x$ which updates with each outcome. The exact feedback scheme is illustrated in Figure~\ref{fig:circuit}, and the process terminates when $x$ reaches a fixed constant $\pm X$. The reversibility condition can be written \begin{equation}
\label{eqn:Revers}
M_{\mp}(x \pm \d) M_{\pm} (x) \propto I . \end{equation} From the above equation, two consecutive outcomes that step ``forward'' from $x$ to $x+\d$ then ``backward'' from $x+\d$ to $x$ have no net effect on $\ket{\psi}$. However, rather than track the evolution of $\ket{\psi}$ directly, we can express the total action of our procedure as the \emph{total walk operator} \begin{equation}
\label{eqn:totalwalkoperator}
M(x) = \lim_{\d \rightarrow 0} \prod_{j=1}^{ \lfloor x / \d \rfloor} M_+(j\d) . \end{equation} For negative values of $x$ we replace $M_+$ with $M_-$ above. We identify the \emph{endpoint} operators $M_1 \propto M(X)$ and $M_2 \propto M(-X)$ with the final measurement being decomposed by this process. We will consider a simple model of probe-state interaction that will generate $M_{\pm}(x)$. In~\cite{constham} we found that weak measurements with qubit probes had to form a \emph{probe-basis} on the qubit Hilbert space. In particular, we required that the probe state, the orthogonal quantum states of the detector, and the probe eigenstates of the interaction Hamiltonian, have mutually orthonormal representations on the Bloch sphere. For this reason, we choose the interaction Hamiltonian to be $H_{PS} = Y_P \ox \hat{\e}(x)$, the probe state to be $\ket{0}$, and the detector states to be $\bra{\pm}$. The operator $\hat{\e}$ acts on the system $S$ and is defined to be an $x$-dependent linear combination of $d$ constant Hamiltonian terms, \begin{equation}
\hat{\e}(x) = \sum_{i=0}^d p_i(x) H_i . \label{eqn:epsilon} \end{equation} The weak measurement step operators of Figure~\ref{fig:circuit} then become \begin{align}
M_{\pm}(x) & = \bra{\pm} e^{i \d H_{PS}(x)} \ket{0} \\
& \approx \frac{1}{\sqrt{2}} I \mp \frac{\d}{\sqrt{2}} \hat{\e}(x) - \frac{\d^2}{2\sqrt{2}} \hat{\e}^2(x). \nonumber \end{align} The reversibility condition of Eq.~\ref{eqn:Revers} can now be rewritten in terms of $\hat{\e}(x)$. Note that this condition need only be satisfied up to $O(\d^2)$ since the random walk induced on the pointer variable $x$ will take $O(N^2)$ steps to converge when $N = \lfloor X/\d \rfloor$. Collecting terms by orders of $\d$ yields \begin{equation*}
M_{\mp}(x \pm \d) M_{\pm}(x) = \frac{I}{2} + \frac{\d^2}{2} \lp \partial_x \hat{\e}(x) - 2 \hat{\e}^2(x)\rp + O(\d^3). \end{equation*} Let $\a(x)$ be the proportionality constant in Eq.~\eqref{eqn:Revers}. We find that the reversibility equation reduces to \begin{equation}
\label{eqn:revers}
\partial_x \hat{\e}(x) = 2 \hat{\e}^2(x) + \a(x) I. \end{equation} In the derivations that follow, we will ignore the $\a(x) I$ term, as it will not change the class of measurements that satisfy the reversibility equation. (In practice, the term can be reintroduced to help find bounded solutions.)
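The $O(\d^2)$ expansion used above is easy to verify numerically. The Python sketch below picks an arbitrary smooth Hermitian-valued $\hat{\e}(x)$ (purely for illustration; it need not satisfy the reversibility equation) and compares the exact product $M_{\mp}(x+\d)M_{\pm}(x)$ with $I/2 + (\d^2/2)\lp \partial_x\hat{\e} - 2\hat{\e}^2 \rp$.
\begin{verbatim}
import numpy as np
from scipy.linalg import expm

# Minimal sketch (illustrative eps-hat): check the O(delta^2) expansion of
# M_-(x+delta) M_+(x) for qubit probe |0>, detector basis <+/-|, H_PS = Y_P (tensor) eps(x).
X = np.array([[0, 1], [1, 0]], dtype=complex)
Y = np.array([[0, -1j], [1j, 0]], dtype=complex)
Z = np.array([[1, 0], [0, -1]], dtype=complex)

def eps(x):                                   # arbitrary smooth Hermitian-valued path
    return np.cos(x) * Z + 0.4 * np.sin(x) * X

def M(sign, x, d):
    U = expm(1j * d * np.kron(Y, eps(x)))     # probe is the first tensor factor
    U4 = U.reshape(2, 2, 2, 2)                # (probe_out, sys_out, probe_in, sys_in)
    bra = np.array([1.0, sign]) / np.sqrt(2)  # <+| or <-| on the probe
    ket0 = np.array([1.0, 0.0])
    return np.einsum('a,abcd,c->bd', bra, U4, ket0)

d, x = 1e-3, 0.7
lhs = M(-1, x + d, d) @ M(+1, x, d)
deps = (eps(x + 1e-6) - eps(x - 1e-6)) / 2e-6
rhs = np.eye(2) / 2 + 0.5 * d**2 * (deps - 2 * eps(x) @ eps(x))
print(np.max(np.abs(lhs - rhs)))              # small, of order delta^3
\end{verbatim}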
Consider the set of controls that appear in Eq.~\eqref{eqn:epsilon}. Without loss of generality, we can always assume that $H_0 = I$ since the action of $I$ is equivalent to an overall phase on the probe system. The reversibility equation in Eq.~\eqref{eqn:revers} can then be rewritten as \begin{equation}
\sum_{k=0}^d \partial_x p_k(x) H_k = \sum_{i, j=0}^d p_i(x) p_j(x) \frac{1}{2} \lb H_i, H_j \rb , \label{eqn:symmetrizing} \end{equation} where $\lb \cdot, \cdot \rb$ is the anti-commutator. It will be useful to introduce the tensor $\G_{ij}^k$ for expressing the action of the anti-commutator on the matrices $H_i$. In particular, \begin{equation}
\frac{1}{2} \lb H_i, H_j \rb = \sum_{k=0}^{n^2-1} \G_{ij}^k H_k . \end{equation} We choose the matrices $H_i$ for $i>d$ such that, together with $H_0, \dots, H_d$, they form a basis for $\mcal{H}_n \lp \mbC \rp$, the space of all $n$-dimensional complex Hermitian matrices. We will use $\G^{(k)}$ to denote the matrix resulting from fixing the index $k$. The reversibility equation Eq.~\eqref{eqn:revers} can then be read as \begin{equation} \label{eqn:di_xp}
\lb \begin{array}{rlcl}
\partial_x p_k &= \vec{p}^T \G^{(k)} \vec{p} & \hspace{0.15in} & 0 \leq k \leq d \\
0 &= \vec{p}^T \G^{(k)} \vec{p} & \hspace{0.15in} & d < k .
\end{array} \right. \end{equation}
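As a concrete illustration of the tensor $\G_{ij}^k$, the sketch below computes it for the single-qubit Hermitian basis $\{I, X, Y, Z\}$ (an illustrative choice; any Hermitian basis orthogonal under the Hilbert-Schmidt inner product works the same way).
\begin{verbatim}
import numpy as np

# Minimal sketch: anti-commutator structure tensor Gamma_{ij}^k for the Pauli basis.
I2 = np.eye(2, dtype=complex)
X = np.array([[0, 1], [1, 0]], dtype=complex)
Y = np.array([[0, -1j], [1j, 0]], dtype=complex)
Z = np.array([[1, 0], [0, -1]], dtype=complex)
H = [I2, X, Y, Z]

def expand(A):
    # coefficients of A in the basis H (orthogonality: Tr(H_k H_k) = 2)
    return np.array([np.trace(Hk.conj().T @ A).real / 2 for Hk in H])

Gamma = np.zeros((4, 4, 4))
for i, Hi in enumerate(H):
    for j, Hj in enumerate(H):
        Gamma[i, j] = expand(0.5 * (Hi @ Hj + Hj @ Hi))

print(Gamma[1, 1])   # (1/2){X, X} = I, so only the identity component is nonzero
\end{verbatim}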
We now present our main result which characterizes solutions to the above equations. Let us denote $\mbF = \spa{H_i}$ so that $\hat{\e} \in \mbF$. We prove the following lemma about solutions to Eq.~\eqref{eqn:di_xp}.
\begin{lem} \label{lem:closure} Any solution $\hat{\e}(x)$ to Eq.~\eqref{eqn:di_xp} must lie entirely in $\mbV$, a subspace of $\mbF$ that is closed under anti-commutation. \end{lem} \begin{proof} We note that if $\mbF$ is already closed under anti-commutation, then the reversibility equation reduces to an initial value problem in terms of the control coefficients $\vec{p}(x)$ at $x=0$. However if $\mbF$ is not closed under anti-commutation, then we must characterize the set of vectors $\vec{p}$ such that Eq.~\eqref{eqn:di_xp} is satisfied. To do so, consider choosing any $k>d$ and solving the associated equation $\vec{p}^T \G^{(k)} \vec{p} = 0$. Note that the matrix $\G^{(k)}$ is symmetric and defines a quadratic space over $\mbR^d$. Every quadratic space admits a Witt decomposition~\cite{omeara} which in our case is \begin{equation}
\lp \G^{(k)}, \mbR^d \rp \cong \bigoplus_{i=0}^{N} W_i \oplus V_0 \oplus V' . \end{equation} In the above, $W_i$ are hyperbolic planes, $V_0$ is the nullspace of $\G^{(k)}$, and $V'$ is an anisotropic subspace of $\mbR^d$. Solutions to $\vec{x}^T W_i \vec{x} = 0$ are $\spa{[1,1]} \cup \spa{[1,-1]}$. Additionally, there are no vectors which satisfy $\vec{x}^T V' \vec{x} = 0$ for the anisotropic subspace $V'$. Let $T^{(k)}$ be the isomorphism of $\lp \G^{(k)}, \mbR^d \rp$ to $\lp I_d, \mbR^d \rp$ and $\vec{p} = T^{(k)} \vec{q}$. Then possible solutions to $\vec{p}^T \G^{(k)} \vec{p} = 0$ must lie in \begin{equation}
V = T^{(k)} \lp \bigoplus_{i=0}^{N} \spa{\ls 1, x_i \rs} \oplus V_0 \rp \end{equation} for a fixed choice of $x_i = \pm 1$. To fully solve Eq.~\eqref{eqn:di_xp} we must now apply the above procedure recursively. At each step we restrict $\vec{p}$ to lie in the subspace $V$ defined by a choice of $x_i$. We then define a new matrix basis for the controls restricted to $V$ and generate a new set of $\G^{(k)}$ matrices. We then choose a new $k$ and decompose $V$ using $\G^{(k)}$. Since the order in which the $k$ are chosen will affect the form of $V$, it is also important to enumerate all sequences of choices of $k$ and $x_i$. This procedure terminates when the vector space of Hermitian matrices $\mbV$ formed from $V$ is closed under anti-commutation. Furthermore, since the Witt decomposition is unique (up to isometries of $V'$), we can guarantee that this procedure lists all closed subspaces contained in $\mbF$. It remains only to show that if $\vec{p}(0) \in V$ for a particular sequence of choices of $k$ and $x_i$, then $\vec{p}(x)$ will remain in the same subspace for all other values of $x$. This follows directly, however, from the fact that if $\hat{\e}(x) \in \mbV$ then $\hat{\e}^2(x) \in \mbV$ and so $\di_x \hat{\e} \in \mbV$. \end{proof}
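The closure condition singled out by Lemma~\ref{lem:closure} can be tested numerically for any candidate control set by checking whether every anti-commutator of basis elements lies back in their span; the matrices in the sketch below are illustrative choices only.
\begin{verbatim}
import numpy as np

# Minimal sketch: is the real span of a set of Hermitian matrices closed under
# anti-commutation?  (Illustrative test cases on a single qubit.)
def hvec(A):
    return np.concatenate([A.real.ravel(), A.imag.ravel()])

def closed_under_anticommutation(mats, tol=1e-10):
    V = np.array([hvec(A) for A in mats]).T            # columns span the subspace
    for A in mats:
        for B in mats:
            w = hvec(0.5 * (A @ B + B @ A))
            coef, *_ = np.linalg.lstsq(V, w, rcond=None)
            if np.linalg.norm(V @ coef - w) > tol:     # residual of projection onto span
                return False
    return True

I2 = np.eye(2, dtype=complex)
X = np.array([[0, 1], [1, 0]], dtype=complex)
Z = np.array([[1, 0], [0, -1]], dtype=complex)
print(closed_under_anticommutation([I2, Z]))   # True: {Z, Z} = 2I stays in the span
print(closed_under_anticommutation([X, Z]))    # False: {X, X} = 2I lies outside span{X, Z}
\end{verbatim}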
Lemma~\ref{lem:closure} establishes that in order to solve the reversibility equation, one must use a set of controls whose span is closed under anti-commutation. The proof of the lemma also includes an implicit algorithm for finding closed subspaces given a set of Hermitian matrices. The next lemma gives the structure of the subspaces enumerated by lemma~\ref{lem:closure}.
\begin{lem} \label{lem:wedderburn} The $\hat{\e}(x)$ operator has the form \begin{equation}
\label{eqn:eps_decomp}
\hat{\e}(x) = \bigoplus_{l=1}^{S(\mbV)} U_l(x) D_l(x) U_l^{\dag}(x) , \end{equation} where $S(\mbV)$ is the number of simple components of the algebra $\mbV$ (with anti-commutation as its product), and $D_l(x)$ and $U_l(x)$ correspond to the $l^{\mathrm{th}}$ simple component and are given by Table~\ref{tab:repr}. \end{lem} \begin{proof} We begin by identifying $\mbV$ as a finite-dimensional Jordan algebra. Every such algebra admits a Wedderburn-type decomposition~\cite{albert-wedderburn, penico-wedderburn}, \begin{equation}
\label{eqn:wedderburn}
\mbV \cong \bigoplus_{l=1}^{S(\mbV)} \mbB_l, \end{equation} where $S(\mbV)$ is the number of simple components $\mbB_l$ of $\mbV$. A classification of all finite-dimensional simple Jordan algebras was given by Jordan, von Neumann, and Wigner~\cite{jordanmain}. The three types of Jordan algebras that can be found in our decomposition are the self-adjoint real, complex, and quaternionic matrices. The isomorphism in Eq.~\eqref{eqn:wedderburn} leaves a lot of freedom in terms of how to represent each of these simple components by Hamiltonian terms. We summarize the possible representations in Table~\ref{tab:repr}. (Note that the exceptional Albert algebra is absent, since octonions do not have a matrix representation over $\mbR$ or $\mbC$~\cite{albert}). Since $\mbV$ can be written as a direct sum, we can also write \[ \hat{\e} = \bigoplus_{l=1}^{S(\mbV)} \hat{\e}_l(x). \] Each operator in the direct sum can, in turn, be diagonalized to yield the form in the statement of the lemma. \begin{table}
\renewcommand\arraystretch{1.5}
\renewcommand\tabcolsep{6pt}
\begin{tabular}{|c|c|c|c|c|}
\hline Block $\mbB_l$ & $D_l(x)$ & $U_l(x)$ \\
\hline
\hline $\mcal{H}_{n}(\mbR)$ & $\diag{\mbR^n}$ & $SO(n)$ \\
\hline $\mcal{H}_{n}(\mbC)$ & $\diag{\mbR^n}$ & $SU(n)$ \\
\hline $\mcal{H}_{n}(\mbC) \cong \mcal{H}_{2n}(\mbR)$ & $\diag{\mbR^n}\ox I_2$ & $SO(n) \ox SO(2)$ \\
\hline $\mcal{H}_{n}(\mbH) \cong \mcal{H}_{2n}(\mbC)$ & $\diag{\mbR^n}\ox I_2$ & $SU(n) \ox SU(2)$ \\
\hline $\mcal{H}_{n}(\mbH) \cong \mcal{H}_{4n}(\mbR)$ & $\diag{\mbR^n}\ox I_4$ & $SO(n) \ox SO(4)$ \\
\hline
\end{tabular}
\caption{We list all rank-$n$ representations of Jordan algebras that can be embedded into a span of Hermitian matrices. The third representation corresponds to the $2$-dimensional embedding of $\mbC$ into $\mbR$. The fourth and fifth representations correspond to $2$- and $4$-dimensional embeddings of $\mbH$ into $\mbC$ and $\mbR$. The notation $\diag{\mbR^n}$ refers to the space of $n$-dimensional diagonal matrices.}
\label{tab:repr} \end{table} \end{proof}
It remains to describe the form of the endpoints of the continuous process $M_1$, $M_2$. We use the reversibility and propagation equations to solve for them directly in the following lemma.
\begin{lem} \label{lem:diagonal} The $\hat{\e}(x)$ operator and the total walk operator $M(x)$ are simultaneously diagonalizable. \end{lem} \begin{proof} We begin by noting that Eq.~\ref{eqn:revers} can be solved for individual blocks $\hat{\e}_l(x)$ which yield, \begin{equation}
\di_x \lp U_l(x) D_l(x) U_l^{\dag}(x) \rp = 2 \lp U_l(x) D_l(x) U_l^{\dag}(x) \rp^2 . \end{equation} Since $U_l(x)$ is a unitary matrix we can write it as the exponent of a Hermitian matrix $G_l(x)$ and we note that $U_l^{\dag}(x) \partial_x U_l(x)= i \partial_x G_l(x)$. This reduces the above equation to \begin{equation}
\label{eqn:diag}
i \ls \partial_x G_l, D_l \rs + \partial_x D_l = 2 D_l^2 . \end{equation} The entries of the commutator term are \begin{equation}
\lp \ls \partial_x G_l, D_l \rs \rp_{ij} = \di_x g^{(l)}_{ij} \lp d^{(l)}_i - d_j^{(l)} \rp , \end{equation} from which we can infer that the off-diagonal part of Eq.~\eqref{eqn:diag} forces $\di_x g^{(l)}_{ij} = 0$ whenever $d^{(l)}_i \neq d^{(l)}_j$ (the terms $\partial_x D_l$ and $2D_l^2$ are diagonal). We can show that $d^{(l)}_i(x)$ has the solution $\tanh(x-c^{(l)}_i)$, and thus for any $i,j$ such that $c^{(l)}_i \neq c^{(l)}_j$, $g_{ij}^{(l)}$ is constant. All together, Eq.~\eqref{eqn:diag} has the solution \begin{align}
\label{eqn:d_l}
\lb \begin{array}{lcl}
d^{(l)}_i(x) = \tanh(x-c^{(l)}_i) & \hspace{0.1in} & \forall \; i , \\
g^{(l)}_{ij}(x) = g^{(l)}_{ij}(0) & & \forall\; i,j \; : \; c^{(l)}_i \neq c^{(l)}_j , \\
g^{(l)}_{ij}(x) \;\text{arbitrary} & & \forall\; i,j \; : \; c^{(l)}_i = c^{(l)}_j .
\end{array}\right. \end{align} The solution above for $d_i^{(l)}(x)$ is found by reintroducing the $\a(x) I$ term to our equations (which we've ignored thus far). We note that in the cases where $c^{(l)}_i = c^{(l)}_j$, $g_{ij}^{(l)}$ need not be constant. This, however, does not affect the form of $\hat{\e}$, or of $M_1$, $M_2$.
We now turn our attention to the total walk operator given in Eq.~\eqref{eqn:totalwalkoperator} which obeys the following differential equation (up to a normalization factor): \begin{equation}
\label{eqn:propagation}
\di_x M(x) = -\hat{\e}(x) M(x) . \end{equation} We can write $M(x)$ in the diagonal basis of $\hat{\e}(x)$ by introducing the operator \begin{equation}
\label{eqn:N}
N(x) = \lp \bigoplus_{l=1}^{S(\mbV)} U^{\dag}_l(x) \rp M(x) \lp \bigoplus_{l=1}^{S(\mbV)} U_l(x) \rp .
\label{eqn:Npropagation}
\di_x N(x) = - \bigoplus_{l=1}^{S(\mbV)} D_l(x) N(x) - i \bigoplus_{l=1}^{S(\mbV)} \ls \di_x G_l(x), N_l(x) \rs . \end{equation} Note that since $M(0) = I$ we have $N(0)=I$, and any solution $N(x)$ must be diagonal. Thus the total walk operator and the $\hat{\e}(x)$ operator are diagonal in the same basis. \end{proof}
Lemmas~\ref{lem:closure},~\ref{lem:wedderburn}, and~\ref{lem:diagonal} combined give the full characterization of $M_1$ and $M_2$ operators achievable by our scheme:
\begin{thm}[Main result] \label{thm:main} A continuous measurement using qubit probes and closed-loop feedback on the interaction Hamiltonian (as in Fig.~\ref{fig:circuit}) can realize any measurement $\lb M_1, M_2 \rb$ of the form \begin{equation}
M_1 = \bigoplus_{l=1}^{S(\mbV)} U^{\dag}_l \lp \bigoplus_{i=1}^{\mathrm{rank}\lp \mbB_l \rp} \lambda^{(l)}_i \Pi^{(l)}_i \rp U_l, \end{equation} where $M_2 = (I - M_1^{\dag}M_1)^{1/2}$ is diagonal in the same basis. The parameters $\lambda^{(l)}_i$ are real and contained in $(0,1)$ and $\Pi^{(l)}_i$ is a projector onto $1$, $2$, or $4$ basis states. \end{thm} \begin{proof} Recall that the number of distinct diagonal entries possible in $D_l(x)$ is $\mathrm{rank} \lp \mbB_l \rp$. However, each distinct entry can appear $1$, $2$, or $4$ times depending on the particular representation from Table~\ref{tab:repr}. Using lemma~\ref{lem:diagonal} we can plug our solution for $D_l(x)$ into Eq.~\eqref{eqn:Npropagation} to find that the diagonal entries of $N(x)$ are $\exp ( \int_0^x \tanh (y - c^{(l)}_i) dy )$. The total walk operator $M(x)$ must then be \begin{equation}
M(x) = U^{\dag}_l(x) \lp \bigoplus_{i=1}^{\mathrm{rank} \lp \mbB_l \rp} e^{\int_0^x \tanh \lp y - c^{(l)}_i \rp dy} \Pi^{(l)}_i \rp U_l(x) . \end{equation} The endpoint operators $M_1$ and $M_2$ are proportional to $M(X)$ and $M(-X)$. Their diagonal entries are $\lambda^{(l)}_i$, which after renormalization approach $0$ when $c^{(l)}_i \rightarrow \infty$ and $1$ when $c^{(l)}_i \rightarrow -\infty$. \end{proof}
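As a sanity check of this block form, the sketch below assembles a pair $\{M_1, M_2\}$ with illustrative eigenvalues and a random basis (a single block, for simplicity) and verifies the completeness relation $M_1^{\dag}M_1 + M_2^{\dag}M_2 = I$.
\begin{verbatim}
import numpy as np

# Minimal sketch (illustrative eigenvalues and basis): a measurement pair of the form
# given in the theorem, checked for completeness.
rng = np.random.default_rng(1)
A = rng.normal(size=(4, 4)) + 1j * rng.normal(size=(4, 4))
U, _ = np.linalg.qr(A)                                  # random unitary basis for one block
lams = np.array([0.2, 0.2, 0.7, 0.9])                   # singular values in (0, 1);
                                                        # the repeated value mimics a rank-2 projector
M1 = U.conj().T @ np.diag(lams) @ U
M2 = U.conj().T @ np.diag(np.sqrt(1 - lams**2)) @ U     # (I - M1^dag M1)^{1/2}, same basis
print(np.max(np.abs(M1.conj().T @ M1 + M2.conj().T @ M2 - np.eye(4))))   # ~0
\end{verbatim}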
Note that in theorem~\ref{thm:main} the eigenvalues of $M_1$ and $M_2$ are restricted to lie in the open interval $(0,1)$, not the closed interval $[0,1]$. This is a consequence of the reversibility condition at the points $x=X-\d$ and $x=X+\d$. At these points, setting any eigenvalue of the total walk operator to $0$ would effectively be a projection, which is an irreversible operation for the random walk. However, we can approach arbitrarily close to any such projective measurement.
To allow for direct comparisons with the scheme of~\cite{constham}, we provide the following corollary.
\begin{cor}[Spectrum of the measurement] Given the ability to perform any unitary transformations directly before and after the continuous process of theorem~\ref{thm:main}, one can continuously decompose any measurement with $\sum_{l=1}^{S(\mbV)} \mathrm{rank} \lp \mbB_l \rp$ distinct singular values. \end{cor} \begin{proof} The endpoint measurement operators $M_1$, $M_2$ in theorem~\ref{thm:main} can have up to $\sum_{l=1}^{S(\mbV)} \mathrm{rank} \lp \mbB_l \rp$ distinct eigenvalues. We can decompose any pair of general endpoint operators $M_1$, $M_2$ using their polar decompositions $M_i = W_i (M_i^{\dag}M_i)^{1/2}$. Then, we can use a procedure like that of Figure~\ref{fig:circuit} to measure the positive Hermitian operators $(M_i^{\dag}M_i)^{1/2}$ and subsequently apply $W_i$ depending on the measurement result. \end{proof}
In this work we have characterized the full class of continuous measurements achievable using a stream of probe qubits and a tunable interaction Hamiltonian. Given a set of linearly controlled Hamiltonian terms, we provide a method to exhaustively list all continuous decompositions achievable with the control set. The class we find has a simple block-diagonal form, but results from a non-trivial application of the reversibility condition. Notably, measurements in this class have a quantifiably broader spectrum than in the case of a fixed interaction Hamiltonian.
Our work makes critical use of finite-dimensional Jordan algebras. This is surprising since these algebras have had little application elsewhere in quantum mechanics. Our model for continuous measurements does not include internal dynamics $H_S$ for the system or the probe, nor does it account for environment noise. In the presence of $H_S$, successive realizations of the continuous decomposition would yield inconsistent results unless $H_S$ commutes with the measurement operators.
The model presented here is still not the most general description of all continuous measurements realizable with a stream of probes. A completely general description would have to consider higher-dimensional probes, multiple outcomes to the weak measurement steps (as well as the endpoint measurements), and a more general reversibility condition. This is the subject of ongoing work. If Jordan algebras reappear in that scenario, then they will have found renewed application in quantum mechanics.
\begin{acknowledgments} JF and TAB thank Daniel Lidar and Ognyan Oreshkov for useful discussions. This research was supported in part by the ARO MURI under Grant No. W911NF-11-1-0268. \end{acknowledgments}
\end{document} |
\begin{document}
\title{Bayesian inference using synthetic likelihood: asymptotics and adjustments} \date{\empty}
\author[1,6]{David T. Frazier} \author[2,3]{David J. Nott\thanks{Corresponding author: [email protected]}} \author[4,6]{Christopher Drovandi} \author[5,6]{Robert Kohn} \affil[1]{Department of Econometrics and Business Statistics, Monash University, Clayton VIC 3800, Australia} \affil[2]{Department of Statistics and Applied Probability, National University of Singapore, Singapore 117546} \affil[3]{Operations Research and Analytics Cluster, National University of Singapore, Singapore 119077} \affil[4]{School of Mathematical Sciences, Queensland University of Technology, Brisbane 4000 Australia} \affil[5]{Australian School of Business, School of Economics, University of New South Wales, Sydney NSW 2052, Australia} \affil[6]{Australian Centre of Excellence for Mathematical and Statistical Frontiers (ACEMS)}
\maketitle
\begin{abstract} Implementing Bayesian inference is often computationally challenging in applications involving complex models, and sometimes calculating the likelihood itself is difficult. Synthetic likelihood is one approach for carrying out inference when the likelihood is intractable, but it is straightforward to simulate from the model. The method constructs an approximate likelihood by taking a vector summary statistic as being multivariate normal, with the unknown mean and covariance matrix estimated by simulation for any given parameter value. Previous empirical research demonstrates that the Bayesian implementation of synthetic likelihood can be more computationally efficient than approximate Bayesian computation, a popular likelihood-free method, in the presence of a high-dimensional summary statistic. Our article makes three contributions. The first shows that if the summary statistic satisfies a central limit theorem, then the synthetic likelihood posterior is asymptotically normal and yields credible sets with the correct level of frequentist coverage. This result is similar to that obtained by approximate Bayesian computation. The second contribution compares the computational efficiency of Bayesian synthetic likelihood and approximate Bayesian computation using the acceptance probability for rejection and importance sampling algorithms with a ``good'' proposal distribution. We show that Bayesian synthetic likelihood is computationally more efficient than approximate Bayesian computation, and behaves similarly to regression-adjusted approximate Bayesian computation. Based on the asymptotic results, the third contribution proposes using adjusted inference methods when a possibly misspecified form is assumed for the covariance matrix of the synthetic likelihood, such as diagonal or a factor model, to speed up the computation. The methodology is illustrated with some simulated and real examples.
\noindent \textbf{Keywords.} Approximate Bayesian computation; likelihood-free inference; model misspecification. \end{abstract}
\section{Introduction}\label{sec:Intro}
Synthetic likelihood is a popular method used in likelihood-free inference when the likelihood is intractable, but it is possible to simulate from the model for any given parameter value. The method takes a vector summary statistic that is assumed to be informative about the parameter and assumes it is multivariate normal, estimating the unknown mean and covariance matrix by simulation to produce an approximate likelihood function. \citet{price+dln16} provide empirical and preliminary theoretical evidence that Bayesian synthetic likelihood (BSL) can perform favourably compared to approximate Bayesian computation \citep[ABC,][]{sisson2018}, a more mature likelihood-free method that has been subjected to extensive theoretical examination. The performance gains of BSL are particularly noticeable in the presence of a regular, high-dimensional summary statistic. Given the promising empirical performance of BSL, it is important to study its theoretical properties.
This article makes three contributions. First, it investigates the asymptotic properties of synthetic likelihood when the summary statistic satisfies a central limit theorem. The conditions required for the results are similar to those in \citet{frazier+mrr18} in the asymptotic analysis of ABC algorithms, but with an additional assumption controlling the uniform behaviour of summary statistic covariance matrices. Under appropriate conditions, the posterior density is asymptotically normal and it quantifies uncertainty accurately, similarly to ABC approaches \citep{li+f18b,li+f18a,frazier+mrr18}.
The second contribution is to show that a rejection sampling BSL algorithm has a non-negligible acceptance probability for a ``good'' proposal density. A similar ABC algorithm has an acceptance probability that goes to zero asymptotically, and synthetic likelihood performs similarly to regression-adjusted ABC \citep{li+f18b,li+f18a}.
The third contribution considers situations where a parsimonious but misspecified form is assumed for the covariance matrix of the summary statistic, such as a diagonal matrix or a factor model, to speed up the computation. For example, \citet{priddle+sfd19} show that for a diagonal covariance matrix, the number of simulations need only grow linearly with the summary statistic dimension to control the variance of the synthetic likelihood estimator, as opposed to quadratically for the full covariance matrix. This is especially important for models where simulation of summary statistics is expensive. We use our asymptotic results to motivate sandwich-type variance adjustments to account for the misspecification and implement these in some examples. The adjustments just discussed are also potentially useful when the model for the original data is misspecified and we wish to carry out inference for the pseudo-true parameter value with the data generating density closest to the truth; Section 2.1 elaborates on these ideas.
For the adjustment methods to be valid, it is important that the summary statistic satisfies a central limit theorem, so that we can make use of the asymptotic normality of the posterior density. This means that these adjustments are not useful for correcting for the effects of violating the normality assumption for the summary statistic. \cite{muller13} considers some related methods, although not in the context of synthetic likelihood or likelihood-free inference. \citet{frazier+rr17} studies the consequences of misspecification for ABC approaches to likelihood-free inference.
\citet{wood10} introduced the synthetic likelihood and used it for approximate (non-Bayesian) inference. \citet{price+dln16} discussed Bayesian implementations focusing on efficient computational methods. They also show that the synthetic likelihood scales more easily to high-dimensional problems and that it is easier to tune than competing approaches such as ABC.
There is much recent development of innovative methodology for accelerating computations for synthetic likelihood and related methods \citep{meeds+w14,wilkinson14,gutmann+c15,everitt17,ong+ntsd16,ong+ntsd18,An2016,priddle+sfd19}. However, there is also interest in weakening the normality assumption on which the synthetic likelihood is based. This led several authors to use other surrogate likelihoods for more flexible summaries. For example, \citet{Fasiolo2016} consider extended saddlepoint approximations, \citet{Dutta2016} consider a logistic regression approach for likelihood estimation, and \cite{an+nd18} consider semiparametric density estimation with flexible marginals and a Gaussian copula dependence structure. \citet{mengersen+pr13} and \citet{chaudhuri+gnp18} consider empirical likelihood approaches. An encompassing framework for many of these suggestions is the parametric Bayesian indirect likelihood of \citet{drovandi+pl15}.
As mentioned above, the adjustments for misspecification developed here do not contribute to this literature on robustifying synthetic likelihood inferences to non-normality of the summary statistics, as they can only be justified when a central limit theorem holds for the summary statistic. Bayesian analyses involving pseudo-likelihoods have been considered in the framework of Laplace-type estimators discussed in \cite{chernozhukov+h03}, but their work does not deal with settings where the likelihood itself must be estimated using Monte Carlo. \cite{forneron+n18} developed some theory connecting ABC approaches with simulated minimum distance methods widely used in econometrics, and their discussion is also relevant to simulation versions of Laplace-type estimators.
\section{Bayesian synthetic likelihood}\label{asymptotic} Let $y=(y_1,\dots,y_n)^{\intercal}$ denote the observed data and define $P^{(n)}_0$ as the true distribution generating $y$. The model $P^{(n)}_0$ is approximated using a parametric family of models $\{P^{(n)}_\theta:\theta\in\Theta\subset\mathbb{R}^{d_\theta}\}$, and $\Pi$ denotes the prior distribution over $\Theta$, with density $\pi(\theta)$.
We are interested in situations where, due to the complicated nature of the model, the likelihood of $P^{(n)}_\theta$ is intractable. In such cases, approximate methods such as BSL can be used to conduct inference on the unknown $\theta$.
Like the ABC method, BSL is most commonly implemented by replacing the observed data $y$ by a low-dimensional vector of summary statistics. Throughout, we let the function $S_n:\mathbb{R}^n\rightarrow\mathbb{R}^d$, $d\ge d_\theta$, denote the chosen vector (function) of summary statistics. For a given model $P^{(n)}_\theta$, let $z=(z_1,\dots,z_n)^{\intercal}$ denote data generated
under the model $P^{(n)}_\theta$, and let $b_{}(\theta):=\mathbb{E}\{S_n(z)|\theta\}$ and
$\Sigma_n(\theta):=\text{var}\{S_n(z)|\theta\}$ denote the mean and variance of the summaries calculated under $P^{(n)}_\theta$; the map $\theta\mapsto b(\theta)$ may technically depend on $n$. However, if the data are independent and identically distributed or weakly dependent, and if $S_n$ can be written as an average, $b(\theta)$ will not meaningfully depend on $n$. As the vast majority of summaries used in BSL satisfy this scenario, neglecting the potential dependence on $n$ is reasonable.
The synthetic likelihood method approximates the intractable likelihood of $S_n(z)$ by a normal likelihood. If $b_{}(\theta)$ and $\Sigma_n(\theta)$ are known, then the synthetic likelihood is \begin{align*}
g_n(S_n|\theta) & := N\left\{S_n;b(\theta),\Sigma_n(\theta)\right\}; \end{align*} here, and below, $N(\mu,\Sigma)$ denotes a normal distribution with mean $\mu$ and covariance matrix $\Sigma$, and $N(x;\mu,\Sigma)$ is its density function evaluated at $x$.
The idealized BSL posterior using known $b(\theta)$ and $\Sigma_n(\theta)$ is
$$\pi(\theta|S_n)=\frac{g_n(S_n|\theta)\pi(\theta)}{\int_\Theta g_n(S_n|\theta)\pi(\theta)\text{d} \theta};$$
Markov chain Monte Carlo (MCMC) is used to obtain draws from the target posterior $\pi(\theta|S_n)$, which we assume exists for all $n$. However, outside of toy examples, posterior inference based on $\pi(\theta|S_n)$ is infeasible, since the mean and variance of $S_n(z)$, and hence $b(\theta)$ and $\Sigma_n(\theta)$, can rarely be calculated analytically.
Therefore, BSL is generally implemented by replacing $b(\theta)$ and $\Sigma_n(\theta)$ with estimates $\widehat{b}_n(\theta)$ and $\widehat{\Sigma}_n(\theta)$. To obtain these estimates, we generate $m$ independent summary statistics $\{S(z^i)\}_{i=1}^{m}$, where $z^i\sim P^{(n)}_\theta$, and take $\widehat{b}_n(\theta)$ as the sample mean of the $S_n(z^i)$ and $\widehat{\Sigma}_n(\theta)$ as their sample covariance matrix. The notation does not show the dependence of $\widehat{b}_n(\theta)$ and $\widehat{\Sigma}_n(\theta)$ on $m$, since $m$ is later taken as a function of $n$. In practical applications of BSL, the use of variance estimates other than $\widehat{\Sigma}_n(\theta)$ is common (e.g.\ \citealp{An2016}, \citealp{ong+ntsd18} and \citealp{priddle+sfd19}). To encapsulate these and other situations, we take ${\Delta}_n(\theta)$ to be a general covariance matrix estimator.
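As an aside, the estimation step just described is easy to express in code. The following is a minimal sketch (it is not taken from any existing BSL implementation); \texttt{simulate\_summaries(theta, m, rng)} is a hypothetical user-supplied simulator that returns an $m\times d$ array of summaries drawn from the model at $\theta$.
\begin{verbatim}
import numpy as np
from scipy.stats import multivariate_normal

def synthetic_loglik(theta, s_obs, simulate_summaries, m=100, rng=None):
    """Estimate the synthetic log-likelihood at theta (sketch).

    s_obs: observed summary statistic S_n(y), shape (d,)
    simulate_summaries: returns an (m, d) array of summaries simulated at theta
    """
    rng = np.random.default_rng() if rng is None else rng
    sims = simulate_summaries(theta, m, rng)       # simulated summaries S_n(z^i)
    b_hat = sims.mean(axis=0)                      # sample mean, \hat{b}_n(theta)
    sigma_hat = np.cov(sims, rowvar=False)         # sample covariance, \hat{\Sigma}_n(theta)
    return multivariate_normal.logpdf(s_obs, mean=b_hat, cov=sigma_hat)
\end{verbatim}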
When $b(\theta)$ and $\Sigma_n(\theta)$ are replaced with estimates, BSL attempts to sample the following posterior target \begin{align}\label{eq:perturbedpost}
\widehat{\pi}(\theta|S_n)& \propto \pi(\theta) \widehat{g}_n(S_n|\theta),
\end{align}where, for $q_n(\cdot|\theta)$ the density of the simulated summary statistics under $P^{(n)}_\theta$, \begin{align}
\widehat{g}_n(S_n|\theta) & := \int N\{S_n;\widehat{b}_n(\theta),{\Delta}_n(\theta)\} \prod_{i=1}^m q_n\{S(z^i)|\theta\}\,\text{d} S(z^1)\,\dots\, \text{d} S(z^m) \label{noisySL}. \end{align}
Noting that an unbiased estimator of $\widehat{g}_n(S_n|\theta)$ can be obtained by taking a single draw of the $m$ simulated summaries $\{S_n(z^i)\}_{i=1}^m$, $S_n(z^i)\sim q_n(\cdot|\theta)$, and following arguments in Andrieu and Roberts (2009), a pseudo-marginal algorithm employing such an estimator samples from the posterior density $\widehat{\pi}(\theta|S_n)$ in \eqref{eq:perturbedpost} under reasonable integrability assumptions. Note, however, that the estimation of $b(\theta)$ and $\Sigma_n(\theta)$ means that the BSL posterior target, $\widehat{\pi}(\theta|S_n)$, and the idealized BSL posterior,
$\pi(\theta|S_n)$, differ.
Under idealized, but useful assumptions, \cite{pitt+sgk12}, \cite{doucet+pdk15} and \cite{sherlock2015} choose the number of samples $m$ in pseudo-marginal MCMC to optimize the time normalized variance of the posterior mean estimators. They show that a good choice of $m$ occurs (for a given $\theta$) when the variance $\sigma^2(\theta)$ of the log of the likelihood estimator lies between 1 and 3, with a value of 1 suitable for a very good proposal, i.e., close to the posterior, and around 3 for an inefficient proposal, e.g. a random walk. \cite{deligiannidis2018correlated} propose a correlated pseudo-marginal sampler that tolerates a much greater value of $\sigma^2(\theta)$, and hence a much smaller value of $m$, when the random numbers used to construct the estimates of the likelihood at both the current and proposed values of $\theta$ are correlated; see also \cite{tran2016block} for an alternative construction of a correlated block pseudo-marginal sampler.
Here, the perturbed BSL target is \eqref{noisySL} and the log of its estimate is, \begin{equation}\label{eq:gest}
-\frac{1}{2}\log\left\{\left|\Delta_n(\theta)\right|\right\}-\frac{1}{2}\left\{S_n-\widehat{b}_n(\theta)\right\}^{\intercal}{\Delta}^{-1}_n(\theta)\left\{S_n-\widehat{b}_n(\theta)\right\}, \end{equation} omitting additive terms not depending on $\theta$. It is straightforward to incorporate either the correlated or block pseudo-marginal approaches into the estimation and show that \eqref{eq:gest} is bounded in a neighbourhood of $\theta_0$ if the eigenvalues of $\Delta_n(\theta)$ are bounded away from zero, suggesting that the variance of the log of the estimate of the synthetic likelihood \eqref{eq:gest}
will not be large in practice. We do not
derive theory for how to select $m$ optimally because that requires taking account of the bias and variance of the synthetic likelihood, which are unavailable in general due to the intractability of the likelihood. However, our empirical
work limits $\sigma^2(\theta)$ to lie between 1 and 3, which produces good results. \cite{price+dln16} find in their examples that the approximate posterior in \eqref{eq:perturbedpost} depends only weakly on the choice of $m$, and hence they often choose a small value of $m$ for faster computation.
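To make the pseudo-marginal implementation concrete, the following is a minimal sketch of a random-walk Metropolis--Hastings sampler targeting \eqref{eq:perturbedpost}; it reuses the \texttt{synthetic\_loglik} sketch given earlier, \texttt{log\_prior} is a hypothetical user-supplied log prior density, and in practice the proposal scale and $m$ would be tuned (e.g.\ from pilot runs) so that $\sigma^2(\theta)$ lies in the range discussed above.
\begin{verbatim}
import numpy as np

def bsl_mcmc(theta0, s_obs, simulate_summaries, log_prior,
             n_iter=50000, m=100, step=0.1, rng=None):
    """Pseudo-marginal random-walk Metropolis-Hastings for BSL (sketch)."""
    rng = np.random.default_rng() if rng is None else rng
    theta = np.atleast_1d(np.asarray(theta0, dtype=float))
    loglik = synthetic_loglik(theta, s_obs, simulate_summaries, m, rng)
    chain = np.empty((n_iter, theta.size))
    for t in range(n_iter):
        prop = theta + step * rng.standard_normal(theta.size)
        loglik_prop = synthetic_loglik(prop, s_obs, simulate_summaries, m, rng)
        log_alpha = (loglik_prop + log_prior(prop)) - (loglik + log_prior(theta))
        if np.log(rng.uniform()) < log_alpha:
            theta, loglik = prop, loglik_prop  # accept; otherwise the current estimate is retained
        chain[t] = theta
    return chain
\end{verbatim}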
The BSL posterior in \eqref{eq:perturbedpost} is constructed from three separate approximations: (1) the representation of the observed data $y$ by the summaries $S_n(y)$; (2) the approximation of the unknown distribution for the summaries by a Gaussian with unknown mean $b(\theta)$ and covariance $\Sigma_n(\theta)$; (3) the approximation of the unknown mean and covariance by the estimates $\widehat{b}_n(\theta)$ and ${\Delta}_n(\theta)$.
Given the various approximations involved in BSL, it is critical to understand precisely how these approximations impact the resulting inferences on the unknown parameters $\theta$. {In practice, understanding how $m$ and ${\Delta}_n(\theta)$ affect the resulting inferences is particularly important. The larger $m$, the more time consuming is the computation of the BSL posterior. Replacing $\Sigma_n(\theta)$, the covariance of the summaries, by $\Delta_n(\theta)$ means that the posterior may not reliably quantify uncertainty (if $\Delta_n(\theta)$ is not carefully chosen).}
Theoretical analysis of the BSL posterior is complicated by the intractability of $P^{(n)}_\theta$, which makes it difficult, in general problems, to explore the finite-sample behavior of the BSL likelihood estimate in \eqref{eq:gest} and ultimately of $\widehat{\pi}(\theta|S_n)$. We therefore use asymptotic methods to study the impact of the various approximations within BSL on the resulting inference for $\theta$.
\section{Asymptotic Behavior of BSL} This section contains several results that disentangle the impact of the previously mentioned approximations used in BSL. These demonstrate that, under regularity conditions, BSL delivers inferences that are just as reliable as other approximate Bayesian methods, such as ABC. Moreover, unlike the commonly applied accept/reject ABC, the acceptance probability obtained by running BSL does not converge to zero as the sample size increases, and is not affected by the number of summaries {(assuming they are of fixed dimension, i.e., $d=\text{dim}(S_n)$ does not change as $n$ increases)}.
A Bernstein--von Mises result is first proved and then used to deduce asymptotic normality of the BSL posterior mean. Using these results, we demonstrate that valid uncertainty quantification in BSL requires: (1) $m\rightarrow\infty$ as $n\rightarrow\infty$; and (2) that the covariance matrix used in BSL, ${\Delta}_n(\theta)$, be a consistent estimator of the asymptotic variance of the observed summaries $S_n(y)$.
Some notation is now defined to make the results below easier to state and follow. For $x\in\mathbb{R}^{d}$, $\| x\| $ denotes the Euclidean norm of $x$. For any matrix $M\in\mathbb{R}^{d\times d}$, we define $|M|$ as the determinant of $M$, and, with some abuse of notation,
let $\|M\|$ denote any convenient matrix norm of $M$; the choice of $\|\cdot\|$ is immaterial since we will always be working with matrices of fixed $d\times d$ dimension, so that all matrix norms are equivalent. Let $\text{Int}(\Theta)$ denote the interior of the set $\Theta$.
Throughout, let $C$ denote a generic positive constant that can change with each use. For real-valued sequences $\{a_{n}\}_{n\geq 1}$ and $\{b_{n}\}_{n\geq 1}$: $a_{n}\lesssim b_{n}$ denotes $a_{n}\leq Cb_{n}$ for some finite $C>0$ and all $n$ large, $a_{n}\asymp b_{n}$ implies $a_{n}\lesssim b_{n}$ and $b_n \lesssim a_{n}$. For $x_{n}$ a random variable, $x_{n}=o_{p}(a_{n})$ if
$\lim_{n\rightarrow \infty }\text{pr} (|x_{n}/a_{n}|\geq C)=0$ for any $C>0, $
and $x_{n}=O_{p}(a_{n})$ if for any $C>0$ there exists a finite $M>0$ and a finite $n'$ such that, for all $n>n'$, $\text{pr}(|x_{n}/a_{n}|\geq M)\leq C$. All limits are taken as $n\rightarrow\infty$, so that, when no confusion will result, we use $\lim_{n}$ to denote $\lim_{n\rightarrow\infty}$. The notation $\Rightarrow$ denotes weak convergence. The Appendix contains all the proofs.
\subsection{Asymptotic Behavior of the BSL Posterior}
{This section establishes the asymptotic behavior of the BSL posterior $\widehat{\pi}(\theta|S_n)$ in equation \eqref{eq:perturbedpost}.} We do not assume that ${\Delta}_n(\theta)$ is a consistent estimator of $\Sigma_n(\theta)$ to allow the synthetic likelihood covariance to be ``misspecified''. The following regularity conditions are assumed on $S_n$, $b(\theta)$ and ${\Delta}_n(\theta)$.
\begin{assumption}\label{ass:one} There exists a sequence of positive real numbers $v_n$ diverging to $\infty$ and a vector $b_0\in\mathbb{R}^d$, $d\ge d_\theta$, such that $V_0:=\lim_{n}\text{var}\left\{v_n(S_n-b_0)\right\}$ exists and $$v_n\left(S_ { n }-b_0\right)\Rightarrow N\left(0, V_0\right),\text{ under } P^{(n)}_0.$$ \end{assumption}
\begin{assumption}\label{ass:four}
(i) The map $\theta\mapsto{b}(\theta)$
is continuous, and there exists a unique $\theta_0\in\text{Int}(\Theta)$, such that $b(\theta_0)=b_0$; (ii) for some $\delta>0$, and all $\|\theta-\theta_0\|\le\delta$, the Jacobian $\nabla{b}(\theta)$ exists and is continuous, and $\nabla{b}(\theta_0)$ has full column rank $d_\theta$. \end{assumption}
\begin{assumption}\label{ass:two}The following conditions are satisfied for some $\delta>0$: (i) for $n$ large enough, the matrix $v_n^2{\Delta}_n(\theta)$ is positive-definite for all $\|\theta-\theta_0\|\le\delta$; (ii) there exists some matrix $\Delta(\theta)$, positive semi-definite uniformly over $\Theta$, and such that $\sup_{\theta\in\Theta}\|v^2_n{\Delta}^{}_n(\theta)-\Delta^{}(\theta)\|=o_{p}(1)$, and, for all $\|\theta-\theta_0\|\le\delta$, $\Delta(\theta)$ is continuous and positive-definite; (iii) for any $\epsilon>0$, $\sup_{\|\theta-\theta_0\|\ge\epsilon}-\{b(\theta)-b_0\}^\intercal \Delta(\theta)^{-1}\{b(\theta)-b_0\}<0$. \end{assumption}
\begin{assumption}\label{ass:three}
For $\theta_0$ defined in Assumption \ref{ass:four}, $\pi(\theta_0)>0$, and $\pi(\cdot)$ is continuous on $\Theta$. For some $p>0$, and all $n$ large enough, $\int_{ \Theta}|v_n^2\Delta_n(\theta)|^{-1/2}\|\theta\|^{p}\pi(\theta)\text{d}\theta <\infty$. \end{assumption}
\begin{assumption}\label{ass:propO}
There exists a function $k:\Theta\rightarrow\mathbb{R}_{+}$ such that: (i) for all $\alpha\in\mathbb{R}^{d}$, $\mathbb{E}\left(\exp \left[\alpha^{\intercal}v_n\left\{S_n(z)-b(\theta)\right\}\right]\right) \leq \exp \left\{\|{\alpha}\|^{2} k(\theta) / 2\right\}$; (ii) there exists a constant $\kappa$ such that $k(\theta)\lesssim \|\theta\|^{\kappa}$; (iii) for all $n$ large enough, $\sup_{\theta\in\Theta}\{\|v_n^2\Delta^{-1}_n(\theta)\|k(\theta)\}<\infty$. \end{assumption}
These assumptions are similar to those used to prove Bernstein--von Mises results in ABC \citep{frazier+mrr18,li+f18b}. In particular, Assumption \ref{ass:one} requires that the observed summaries satisfy a central limit theorem. Assumption \ref{ass:four} ensures that, over $\Theta$, the summaries $S_n(z)$ have a well-behaved limit $b(\theta)$ that is continuous over $\Theta$, can identify $\theta_0$, and whose derivative has full column rank at $\theta_0$. Assumption \ref{ass:four} does not require that $P^{(n)}_0$ corresponds to $P^{(n)}_{\theta_0}$, so that the model can be misspecified, but instead requires the weaker condition that there exists a unique value $\theta_0\in\Theta$ under which $b(\theta_0)=b_0$, referred to subsequently as the ``true'' parameter value.
{Variants of Assumption~\ref{ass:three} are commonly encountered in the literature on Bayesian asymptotics. In addition to the continuity of $\pi(\theta)$, Assumption~\ref{ass:three} requires the existence of a certain prior moment. This condition is slightly stronger than the prior moment condition needed in the standard case. The need to strengthen this assumption comes from the fact that the matrix $\Delta_n(\theta)$ may be singular far away from $\theta_0$. As such, in order to ensure the BSL posterior is well-behaved, we require that the prior has thin enough tails in the region where $\Delta_n(\theta)$ is singular, so that the potential singularity of $\Delta_n(\theta)$ does not impact posterior concentration. When $\Delta(\theta)$ in Assumption \ref{ass:two} is positive-definite, uniformly over $\Theta$, this latter condition can be replaced by the standard assumption that $\int_{ \Theta}\|\theta\|^p\pi(\theta)\text{d}\theta<\infty$ for some $p>0$. }
Assumption \ref{ass:propO} requires that the simulated summaries have a sub-Gaussian tail. Intuitively, this condition requires that the simulated summaries have an exponential moment, and is similar to certain conditions employed by \cite{frazier+mrr18} for ABC. Without further conditions on the number of model simulations $m$, this assumption seems necessary to ensure that
the BSL posterior exists, since $\widehat{g}_n(S_n|\theta)$ is defined as an expectation with respect to the distribution of the simulated summaries.
The key difference between the current
assumptions and those used in the theoretical analysis of ABC is that in BSL
the behavior of the quadratic form $\|\Delta_n^{-1/2}(\theta)\{b(\theta)-S_n\}\|^2$ determines the behavior of the synthetic likelihood, and needs to be controlled.
Assumption \ref{ass:two}(i) requires that, for $n$ large enough,
the matrix in this quadratic form is positive-definite for any $\theta$ sufficiently close to $\theta_0$, while Assumption \ref{ass:two}(ii) requires that $\Delta_n(\theta)$ converges uniformly to $\Delta(\theta)$, which is continuous and positive-definite for all $\theta$ sufficiently close to $\theta_0$. Assumption \ref{ass:two}(ii) does not require $\Delta(\theta)$ to be positive-definite uniformly over $\Theta$, and thus it is unnecessary for
it to be invertible far from $\theta_0$. This implies that the quadratic form $\|\Delta^{-1/2}(\theta)\{b(\theta)-b_0\}\|^2$ may not be continuous (or finite) uniformly over $\Theta$. In such situations, it is necessary to
maintain the additional identification assumption given in Assumption \ref{ass:two}(iii). However, if $\Delta(\theta)$ is continuous over $\Theta$ this identification assumption is automatically satisfied.
Assumptions \ref{ass:one}-\ref{ass:propO} are sufficient to deduce a Bernstein--von Mises result for the BSL posterior. To state this result, define the local parameter $$t:=W_0^{}v_n(\theta-\theta_0)-Z_n,$$where
$$
Z_n:=\nabla_{} b\left(\theta_{0}\right)^{\intercal}\Delta(\theta_0)^{-1}v_n\left\{b\left(\theta_{0}\right)-S_ { n }\right\},\;W_0:=\left\{\nabla_{} b\left(\theta_{0}\right)^{\intercal}\Delta(\theta_0)^{-1}\nabla_{} b\left(\theta_{0}\right)\right\},
$$ and denote the BSL posterior for $t$ as
$$ \widehat{\pi}(t|S_n):=|W_0^{-1}|\,\widehat{\pi}\left(\theta_0+W_0^{-1}{t}/v_n+W_0^{-1}Z_n/v_n \mid S_n\right)/v_n.
$$The support of $t$ is denoted by $\mathcal{T}_n:=\{W_0v_n(\theta-\theta_0)-Z_n:\theta\in\Theta\}$, which can be seen as a scaled and shifted translation of $\Theta$.
The following result states that the total variation distance between $\widehat{\pi}(t|S_n)$, and $N\{t;0,W^{}_0\}$ converges to zero in probability. It also
demonstrates that the covariance of the Gaussian density to which $\widehat{\pi}(t|S_n)$ converges depends on the variance estimator ${\Delta}_n(\theta)$ used in BSL.
\begin{theorem}\label{prop:bvm} If Assumptions \ref{ass:one}-\ref{ass:propO} are satisfied, and if $m=m(n)\rightarrow\infty$ as $n\rightarrow\infty$, then $$
\int_{\mathcal{T}_n}\left|\widehat{\pi}(t|S_n)-N\{t;0,W_0^{}\}\right|\text{d} t=o_{p}(1). $$ For any $0<\gamma\le2$, if Assumption \ref{ass:three} is satisfied with $p\ge\gamma+\kappa$, then $$
\int_{\mathcal{T}_n} \|\theta\|^\gamma\left|\widehat{\pi}(t|S_n)-N\{t;0,W_0^{}\}\right|\text{d} t=o_{p}(1). $$ \end{theorem}
The second result in Theorem \ref{prop:bvm} demonstrates that, under moment assumptions on the prior, the mean difference between the BSL posterior $\widehat{\pi}(t|S_n)$ and $N\{t;0,W_0\}$ converges to zero in probability. Using this result, we demonstrate that the BSL posterior mean
$\bar{\theta}_n :=\int_{\Theta} \theta \widehat{\pi}(\theta|S_n)\text{d}\theta$ is asymptotically Gaussian with a covariance matrix that depends on the version of ${\Delta}_n(\theta)$ used in the synthetic likelihood. \begin{corollary}\label{cor:one} If the Assumptions in Theorem \ref{prop:bvm} are satisfied, then for $m\rightarrow\infty$ as $n\rightarrow\infty$, $$ v_{n}(\bar{\theta}_n-\theta_{0}) \Rightarrow N\left[0, W_0^{-1} \left\{ \nabla b\left(\theta_{0}\right)^{\intercal}\Delta(\theta_0)^{-1}V_0\Delta(\theta_0)^{-1}\nabla b\left(\theta_{0}\right)\right\}W_0^{-1} \right], \text{ under }P^{(n)}_0. $$ \end{corollary}
\begin{remark}
{\normalfont The above results only require weak conditions on the number of simulated datasets, $m$, and are satisfied for any $m=C\lfloor{n^\gamma}\rfloor$, with $C>0$, $\gamma>0$, and $\lfloor x\rfloor$ denoting the integer floor of $x$. Therefore, Theorem \ref{prop:bvm} and Corollary \ref{cor:one} demonstrate that the choice of $m$ does not strongly impact the resulting inference on $\theta$ and its choice
should be driven by computational considerations. We note that this requirement is in contrast to ABC, where the choice of tuning parameters, i.e., the tolerance, significantly impacts both the theoretical behavior of ABC and the practical (computing) behavior of ABC algorithms. However, this lack of dependence on tuning parameters comes at the cost of
requiring that a version of Assumptions~\ref{ass:two} and \ref{ass:propO} are
satisfied. ABC requires no condition similar to Assumption \ref{ass:two}, while Assumption \ref{ass:propO} is stronger than the tail conditions on the summaries required for the ABC posterior to be asymptotically Gaussian. } \end{remark}
\begin{remark}\label{ref:corr}{\normalfont Theorem \ref{prop:bvm} and Corollary \ref{cor:one} demonstrate the trade-off between using a parsimonious choice for ${\Delta}_n(\theta)$, leading
to faster computation, and a posterior that correctly quantifies uncertainty. BSL credible sets provide valid uncertainty quantification, in the sense that they have the correct level of asymptotic coverage, when \begin{flalign*}
\int_{\mathcal{T}_n} t t^{\intercal}\widehat{\pi}(t|S_n)\text{d} t =& \nabla b\left(\theta_{0}\right)^{\intercal}\Delta(\theta_0)^{-1}V_0\Delta(\theta_0)^{-1}\nabla b\left(\theta_{0}\right)+o_p(1). \end{flalign*}However, the second part of Theorem \ref{prop:bvm} implies that $$
\int_{\mathcal{T}_n} t t^{\intercal}\widehat{\pi}(t|S_n)\text{d} t = W_0+o_p(1)= \nabla b\left(\theta_{0}\right)^{\intercal}\Delta(\theta_0)^{-1}\nabla b\left(\theta_{0}\right)+o_p(1), $$so that a sufficient condition for the BSL posterior to correctly quantify uncertainty is that \begin{equation}\label{eq:correct} {\Delta}(\theta_0)=V_0. \end{equation} Satisfying equation \eqref{eq:correct} generally necessitates using the more computationally intensive variance estimator $\widehat{\Sigma}_n(\theta)$ and requires that the variance model be ``correctly specified''; here, correctly specified means $\theta_0$ satisfies $b(\theta_0)=b_0$ and $\theta_0$ also satisfies equation \eqref{eq:correct}, where we note that the latter condition {\textit{is not implied by Assumptions \ref{ass:one}-\ref{ass:propO}}}. While a sufficient condition for \eqref{eq:correct} is that $P_\theta^{(n)}=P^{(n)}_0$ for some $\theta_0\in\Theta$, this condition is not necessary in general. Given the computational costs associated with using $\widehat{\Sigma}_n(\theta)$ when the summaries are high-dimensional, Section \ref{adjustments} proposes an adjustment approach to BSL that allows the use of the simpler, possibly misspecified, variance estimator ${\Delta}_n(\theta)$, but which also yields a posterior that has valid uncertainty quantification. } \end{remark}
\begin{remark}{\normalfont In contrast to ABC point estimators, Corollary \ref{cor:one} demonstrates that BSL point estimators are generally asymptotically inefficient. It is known that $\left\{\nabla b\left(\theta_{0}\right)^{\intercal}V_0^{-1}\nabla b\left(\theta_{0}\right)\right\}^{-1}$ is the smallest achievable asymptotic variance for any $v_n$-consistent and asymptotically normal estimator of $\theta_0$ based on the parametric class of models $\{P_\theta^{(n)}:\theta\in\Theta\}$ and conditional on the summary statistics $S_n(y)$; see, e.g., \citealp{li+f18a}. We also have that
$$ W_0^{-1} \left\{\nabla b\left(\theta_{0}\right)^{\intercal}\Delta(\theta_0)^{-1}V_0\Delta(\theta_0)^{-1}\nabla b\left(\theta_{0}\right)\right\}W_0^{-1}\ge \left\{\nabla b\left(\theta_{0}\right)^{\intercal}V_0^{-1}\nabla b\left(\theta_{0}\right)\right\}^{-1}; $$ where for square matrices $A,B$, $A\ge B$ means that $A-B$ is positive semi-definite. Given this, the BSL posterior mean $\overline \theta_n$ is
asymptotically efficient only when equation \eqref{eq:correct} is satisfied.
In this case, BSL simultaneously delivers efficient point estimators and asymptotically correct uncertainty quantification. } \end{remark}
\begin{remark}\normalfont{
The BSL posterior can be interpreted as a type of quasi-posterior; see, e.g., \citealp{chernozhukov+h03} and \citealp{bissiri2016general}. {However, since the posterior $\widehat{\pi}(\theta|S_n)$ depends on the ``integrated likelihood'' $\widehat{g}_n(S_n|\theta)$, defined in \eqref{noisySL} and calculated using simulated data,} existing large sample results are not applicable to BSL. } \end{remark}
\subsection{Computational efficiency}
\cite{li+f18b,li+f18a} discuss the computational efficiency of vanilla and regression-adjusted ABC algorithms using a rejection sampling method based on a ``good'' proposal density $q_n(\theta)$. They show that regression-adjusted ABC yields asymptotically correct uncertainty quantification, i.e., credible sets with the correct level of frequentist coverage, and an asymptotically non-zero acceptance rate, while vanilla ABC can only accomplish one or the other.
This section shows that BSL can deliver correct uncertainty quantification and an asymptotically non-zero acceptance rate, if the number of simulated data sets used in the synthetic likelihood tends to infinity with the sample size. We follow \cite{li+f18b} and consider implementing synthetic likelihood using a rejection sampling algorithm based on the proposal $q_n(\theta)$ analogous to the one they consider for ABC. By Assumption~\ref{ass:two}(i), for $n$ large enough there exists a uniform upper bound of the form $Cv_n^{d_\theta}$, for some $0<C<\infty$, on $N\left\{S_n;b(\theta),{\Delta}_n(\theta)\right\}$ locally in a neighbourhood of $\theta_0$; an asymptotically valid rejection sampler then proceeds as follows.
\begin{algorithm}[H] Rejection sampling BSL algorithm \begin{wideenumerate}
\item Draw $\theta'\sim q_n(\theta)$
\item Accept $\theta'$ with probability $(C v_n^{d_\theta})^{-1}\widehat{g}_n(S_n|\theta')=(C v_n^{d_\theta})^{-1}N\left\{S_n;\widehat b_n(\theta'),\Delta_n(\theta')\right\}.$ \end{wideenumerate} \end{algorithm}
An accepted value from this sampling scheme is a draw from the density proportional to $q_n(\theta)\widehat{g}_n(S_n|\theta)$. Similarly to the analogous ABC scheme considered in
\cite{li+f18b}, samples from this rejection sampler can be reweighted with importance weights proportional to $\pi(\theta')/q_n(\theta')$ to recover draws from $\widehat{\pi}(\theta|S_n)\propto \pi(\theta)\widehat{g}_n(S_n|\theta)$.
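A hedged sketch of this rejection scheme, including the importance reweighting just described, is given below. Here \texttt{sample\_proposal}, \texttt{proposal\_logpdf}, \texttt{log\_prior} and \texttt{simulate\_summaries} are hypothetical user-supplied functions, and \texttt{c\_bound} plays the role of the constant $Cv_n^{d_\theta}$.
\begin{verbatim}
import numpy as np
from scipy.stats import multivariate_normal

def bsl_rejection(s_obs, sample_proposal, proposal_logpdf, log_prior,
                  simulate_summaries, c_bound, n_draws=1000, m=100, rng=None):
    """Rejection-sampling BSL with importance reweighting to the BSL posterior (sketch)."""
    rng = np.random.default_rng() if rng is None else rng
    draws, log_weights = [], []
    while len(draws) < n_draws:
        theta = sample_proposal(rng)                   # theta' ~ q_n(theta)
        sims = simulate_summaries(theta, m, rng)
        b_hat = sims.mean(axis=0)
        delta = np.cov(sims, rowvar=False)             # or a parsimonious Delta_n(theta)
        g_hat = multivariate_normal.pdf(s_obs, mean=b_hat, cov=delta)
        if rng.uniform() < g_hat / c_bound:            # accept with prob. g_hat / (C v_n^{d_theta})
            draws.append(theta)
            # weights proportional to pi(theta)/q_n(theta) recover the BSL posterior
            log_weights.append(log_prior(theta) - proposal_logpdf(theta))
    return np.asarray(draws), np.asarray(log_weights)
\end{verbatim}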
We choose the proposal density $q_n(\theta)$ to be from the location-scale family $$\mu_n+\sigma_nX,$$ where $X$ is a $d_\theta$-dimensional random variable such that $X\sim q(\cdot)$, $\mathbb{E}_q[X]=0$ and $\mathbb{E}_q[\|X\|^2]<\infty$. The sequences $\mu_n$ and $\sigma_n$ depend on $n$ and satisfy Assumptions \ref{ass:propO} and \ref{ass:prop}. \begin{assumption}\label{ass:prop}
(i) There exists a positive constant $C$ such that $0<\sup_{x}q(x)\leq C<\infty;$ (ii) the sequence $\sigma_n>0$, for all $n\ge1$, satisfies $\sigma_n=o(1)$, and $v_n\sigma_n\rightarrow c_\sigma$, for some positive constant $c_\sigma$; (iii) the sequence $\mu_n$ satisfies $\sigma^{-1}_n\left(\mu_n-\theta_0\right)=O_p(1)$; (iv) for $h_n(\theta)=q_n(\theta)/\pi(\theta)$, $\limsup_{n\rightarrow\infty}\int h^2_n(\theta)\pi(\theta|S_n)\text{d}\theta<\infty$. \end{assumption}
\begin{remark} {\normalfont Assumption \ref{ass:prop} formalizes the conditions required of the proposal density; these conditions are similar to those required in \cite{li+f18b}. Assumption \ref{ass:prop} is satisfied if the proposal density $q_n(\theta)$ is built from $v_n$-consistent estimators of $\theta_0$, such as those based on pilot runs. } \end{remark}
The acceptance probability associated with Algorithm 1 is $$
\widetilde\alpha_n^{}:=({Cv_n^{d_\theta}})^{-1}\int_{\Theta}q_n(\theta)\widehat{g}_n(S_n|\theta)\text{d} \theta. $$ We measure the computational efficiency of the rejection sampling BSL algorithm via the behavior of $\widetilde\alpha_n^{}$. If $\widetilde\alpha_n^{}$ is asymptotically non-zero, then
by Corollary~\ref{cor:one}, and under the restriction in \eqref{eq:correct}, implementing a rejection-based BSL approach can
yield a posterior that has credible sets with the correct level of frequentist coverage and computational properties that are similar to those of regression-adjusted ABC.
Theorem~\ref{thm:acc} describes the asymptotic behavior of $\widetilde{\alpha}_n$ using the proposal density given in Assumption \ref{ass:prop}. The result uses the following definition: for a random variable $x_n$, we write $x_n=\Xi_p(v_n)$ if there exist constants $0<c\leq C<\infty$ such that $\lim_n\text{pr}\left(c<|x_n/v_n|<C\right)=1$. \begin{theorem}\label{thm:acc}
If Assumptions \ref{ass:one}-\ref{ass:prop} are satisfied and if $\int k(\theta)^2\pi(\theta|S_n)\text{d}\theta<\infty$, then for $m\rightarrow\infty$ as $n\rightarrow\infty$
$$
\widetilde\alpha_n=\Xi_p(1)+O_p(1/m).
$$ \end{theorem}
While Theorem \ref{thm:acc} holds for all choices of $\Delta_n(\theta)$ satisfying Assumption \ref{ass:two}, taking $\Delta_n(\theta)=\Sigma^{}_n(\theta)$ implies that the resulting BSL posterior yields credible sets with the appropriate level of frequentist coverage and that the rejection-based algorithm has a non-negligible acceptance rate asymptotically. Therefore, the result in Theorem \ref{thm:acc} is a BSL version of Theorem 2 in \cite{li+f18b}, demonstrating a similar result, under particular choices of the tolerance sequence, for regression-adjusted ABC.
The example in Section 3 of \citet{price+dln16} compares rejection ABC and a rejection version of synthetic likelihood, where the model is normal and $\Sigma_n(\theta)$ is constant and does not need to be estimated. They find that with the prior as the proposal, ABC is more efficient when $d=1$, equally efficient when $d=2$, but less efficient than synthetic likelihood when $d>2$. The essence of the example is that the sampling variability in estimating $b_{}(\theta)$ can be equated with the effect of a Gaussian kernel in their toy normal model for a certain relationship between $\epsilon$ and $m$. The discussion above suggests that in general models, and with a good proposal, in large samples the synthetic likelihood is preferable to the vanilla ABC algorithm no matter the dimension of the summary statistic. However, this greater computational efficiency is only achieved through the strong tail assumption on the summaries.
\section{Adjustments for misspecification}\label{adjustments}
By~Remark \ref{ref:corr}, if BSL uses a misspecified estimator of the variance of the summaries, in the sense that equation \eqref{eq:correct} does not hold, then the BSL posterior gives invalid uncertainty quantification. This section outlines one approach for adjusting inferences to account for this form of misspecification when Assumption \ref{ass:four} is satisfied, although there are other ways to do so. Suppose $\theta^{q}$, $q=1,\dots, Q$, is an approximate sample from $\widehat{\pi}(\theta|S_n)$, obtained by MCMC for example. Let $\overline{\theta}_n$ denote the synthetic likelihood posterior mean, let $\widetilde{\Gamma}$ denote the synthetic likelihood posterior covariance, and write $\widehat{\theta}$ and $\widehat{\Gamma}$ for their sample estimates based on $\theta^{q}$, $q=1,\dots, Q$. Consider the adjusted sample \begin{align}
\theta^{A,q} & = \widehat{\theta}+\widehat{\Gamma}\widetilde{\Omega}^{1/2}\widehat{\Gamma}^{-1/2}(\theta^{q}-\widehat{\theta}), \;\;\;\label{adjustedsamp} \end{align}
$q=1,\dots, Q$, where $\widetilde{\Omega}$ is an estimate of $\text{var}\left\{\nabla_\theta \log g_n(S_n|\widehat{\theta})\right\}$; the estimation of $\widetilde{\Omega}$ is discussed below. We propose using (\ref{adjustedsamp}) as an approximate sample from the posterior, which is similar to the original sample when the model is correctly specified, but gives asymptotically valid frequentist inference about the pseudo-true parameter value when the model is misspecified.
The motivation for (\ref{adjustedsamp}) is that if $\theta^{q}$ is approximately drawn from the normal distribution $N(\widehat{\theta},\widehat{\Gamma})$, then $\theta^{A,q}$ is approximately drawn from $N(\widehat{\theta},\widehat{\Gamma}\widetilde{\Omega}\widehat{\Gamma})$. The results of Corollary 1 imply that if $\widetilde{\Omega}\approx \text{var}\left\{\nabla_\theta \log g_n(S_n|\theta_0)\right\}$ and $\widehat{\Gamma}$ is approximately the inverse negative Hessian of $\log g(S_n|\theta)$ at $\theta_0$, then the covariance matrix of the adjusted samples is approximately that of the sampling distribution of the BSL posterior mean, giving approximate frequentist validity to posterior credible intervals based on the adjusted posterior samples. We now suggest two ways to obtain $\widetilde{\Omega}$. The first is suitable if
the model assumed for $y$ is true, but the
covariance matrix $\lim_n v_n^2 {\Delta}_n(\theta)\neq V_0$, which we refer to as misspecification of the working covariance matrix. The second way is suitable when the models for both $y$ and the working covariance matrix may be misspecified, but Assumption \ref{ass:four} holds.
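Computationally, the adjustment in \eqref{adjustedsamp} is a single linear transformation of the posterior draws. A minimal sketch, assuming $\widetilde{\Omega}$ has already been estimated and using symmetric matrix square roots, is:
\begin{verbatim}
import numpy as np
from scipy.linalg import sqrtm, inv

def adjust_samples(theta_samples, omega_tilde):
    """Apply theta_A = theta_hat + Gamma Omega^{1/2} Gamma^{-1/2} (theta - theta_hat) (sketch).

    theta_samples: (Q, d_theta) array of approximate BSL posterior draws
    omega_tilde:   estimate of var{grad log g_n(S_n | theta_hat)}
    """
    theta_hat = theta_samples.mean(axis=0)            # estimated posterior mean
    gamma_hat = np.cov(theta_samples, rowvar=False)   # estimated posterior covariance
    # real parts guard against tiny imaginary round-off from sqrtm
    transform = gamma_hat @ np.real(sqrtm(omega_tilde)) @ inv(np.real(sqrtm(gamma_hat)))
    return theta_hat + (theta_samples - theta_hat) @ transform.T
\end{verbatim}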
\subsection{Estimating $\normalfont{\text{var}}\left\{\nabla_\theta \log g_n(S_n|\theta_0)\right\}$ when the model for $y$ is correct}
\begin{algorithm}\label{alg:one} Algorithm 1: Estimating $\widetilde{\Omega}$ when the model for $y$ is correct \begin{enumerate} \item[] \item For $j=1,\dots, J$, draw $S^{(j)}\sim G_n^{\widehat{\theta}}$, where $\widehat{\theta}$ is the estimated synthetic likelihood posterior mean.
\item Approximate $g^{(j)}=\nabla_\theta \log g_n(S^{(j)}|\widehat{\theta})$.
Section 6.2 discusses the approximation to this gradient as used in the examples. \item Return \begin{align*}
\widetilde{\Omega}=\frac{1}{J-1} \sum_{j=1}^J (g^{(j)}-\bar{g})(g^{(j)}-\bar{g})^{\intercal}, \end{align*} where $\bar{g}=J^{-1}\sum_{j=1}^J g^{(j)}$. \end{enumerate} \end{algorithm}
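One simple way to realize this algorithm is sketched below. For illustration, the gradient in Step 2 is approximated by central finite differences of a (possibly shrinkage) synthetic log-likelihood routine rather than the Gaussian-process emulation used in the examples; \texttt{simulate\_summary} and \texttt{synthetic\_loglik\_at} are hypothetical user-supplied functions.
\begin{verbatim}
import numpy as np

def estimate_omega(theta_hat, simulate_summary, synthetic_loglik_at,
                   J=200, eps=1e-4, rng=None):
    """Estimate Omega = var{grad_theta log g_n(S | theta_hat)} by simulation (sketch).

    simulate_summary(theta, rng):   one summary S^(j) drawn from the model at theta
    synthetic_loglik_at(theta, s):  an estimate of log g_n(s | theta)
    """
    rng = np.random.default_rng() if rng is None else rng
    d_theta = theta_hat.size
    grads = np.empty((J, d_theta))
    for j in range(J):
        s_j = simulate_summary(theta_hat, rng)                     # Step 1
        for k in range(d_theta):                                   # Step 2 (finite differences)
            e_k = np.zeros(d_theta)
            e_k[k] = eps
            grads[j, k] = (synthetic_loglik_at(theta_hat + e_k, s_j)
                           - synthetic_loglik_at(theta_hat - e_k, s_j)) / (2 * eps)
    return np.cov(grads, rowvar=False)                             # Step 3
\end{verbatim}
Because the synthetic log-likelihood is itself estimated by simulation, naive finite differences can be noisy; this is one motivation for the smoother Gaussian-process approach used in the examples.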
\subsection{Estimating $\normalfont{\text{var}}\left\{\nabla_\theta \log g_n(S_n|\theta_0)\right\}$ when both the model for $y$ and the covariance matrix may be incorrect}
It may still be possible to estimate $\text{var}\left\{\nabla_\theta \log g_n(S|\theta_0)\right\}$,
even if the model for $y$ is incorrect. In particular, if $y_1,\dots, y_n$ are independent, then we can use the bootstrap to approximate the distribution of $S_n$ at $\theta_0$ and hence estimate
$\text{var}\left\{\nabla_\theta \log g_n(S|\theta_0)\right\}$. The approximation can be done as in Algorithm 1, but with Step 1 replaced by \begin{verse} 1. For $j=1,\dots, J$, sample $y$ with replacement to get a bootstrap sample $y^{(j)}$ with corresponding summary $S^{(j)}$. \end{verse} If the data are dependent, it may still be possible to use the bootstrap \citep{kriess+p11};
however, the implementation details are model dependent.
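For independent data, the bootstrap replacement of Step 1 can be sketched as follows, where \texttt{summary\_fn} is a hypothetical function computing $S_n$ from a dataset.
\begin{verbatim}
import numpy as np

def bootstrap_summaries(y, summary_fn, J=200, rng=None):
    """Draw J bootstrap summaries S^(j) by resampling the observed data with replacement."""
    rng = np.random.default_rng() if rng is None else rng
    y = np.asarray(y)
    n = len(y)
    return np.array([summary_fn(y[rng.integers(0, n, size=n)]) for _ in range(J)])
\end{verbatim}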
\subsection{What the adjustments can and cannot do}
The adjustments suggested above are intended to achieve asymptotically valid frequentist inference when the consistency in \eqref{eq:correct} is not satisfied, i.e., when $\lim_n {\Delta}_n(\theta)\neq V_0$, or when the model for $y$ is misspecified, but $S_n$ still satisfies a central limit theorem. The adjustment will not recover the posterior distribution that is obtained when the model is correctly specified. Asymptotically valid frequentist estimation based on the synthetic likelihood posterior mean for the misspecified synthetic likelihood is frequentist inference based on a point estimator of $\theta$ that is generally less efficient than in the correctly specified case. Matching posterior uncertainty after adjustment to the sampling variability of such an estimator does not recover the posterior uncertainty from the correctly specified situation.
\section{Examples}
\subsection{Toy example}
Suppose that $y_1,\dots, y_n$ are independent observations from a negative binomial distribution $\text{NB}(5,0.5)$ so they have mean $5$ and variance $10$. We model the $y_i$ as independent and coming from a $\text{Poisson}(\theta)$ distribution and act as if the likelihood is intractable, basing inference on the sample mean $\bar{y}$ as the summary statistic $S$. The pseudo-true parameter value $\theta_0$ is $5$, since this is the parameter value for which the summary statistic mean matches the corresponding mean for the true data generating process.
Under the Poisson model, the synthetic likelihood has $b(\theta)=\theta$ and ${\Delta}_n(\theta)=\theta/n$. We consider a simulated dataset with $n=20$, and deliberately misspecify the variance model in the synthetic likelihood under the Poisson model as ${\Delta}_n(\theta)=\theta/(2n)$. As noted previously, the deliberate misspecification of $\text{var}(S_n|\theta)$ may be of interest in problems with a high-dimensional $S_n$ as a way of reducing the number of simulated summaries needed to estimate $\text{var}(S_n|\theta)$ with reasonable precision; for example, we might assume $\text{var}(S_n|\theta)$ is diagonal or based on a factor model.
Figure~\ref{fig1} shows the estimated posterior densities obtained using a number of different
approaches, when the prior for $\theta$ is $\text{Gamma}(2,0.5)$. The narrowest green density is obtained from the synthetic likelihood with a misspecified variance. This density is obtained using 50,000 iterations of a Metropolis-Hastings MCMC algorithm with a normal random walk proposal. The red density is the exact posterior assuming the Poisson likelihood is correct, which is $\text{Gamma}(2+n\bar{y},0.5+n)$. The purple density is a kernel density estimate based on the adjusted synthetic likelihood samples; it uses the method of Section 5.1 for the adjustment, in which the $y$ model is assumed correct but the working covariance matrix is misspecified. The figure shows that the adjustment gives a result very close to the exact posterior under an assumed Poisson model. Finally, the light blue density is a kernel density estimate based on samples from the adjusted synthetic likelihood using the method of Section 5.2, i.e.\ the bootstrap, without assuming that the Poisson model is correct. This posterior is more dispersed than the one obtained under the Poisson assumption, since the negative binomial generating density is overdispersed relative to the Poisson, and hence the observed $\bar{y}$ is less informative about the pseudo-true parameter value than implied by the Poisson model.
\begin{figure}
\caption{ Exact, synthetic and adjusted synthetic posterior densities for the toy example. }
\label{fig1}
\end{figure}
\subsection{Examples with a high-dimensional summary statistic}
This section explores the efficacy of the adjustment method when using a misspecified covariance in the presence of a high-dimensional summary statistic $S$. All the examples below use the \citet{warton08} shrinkage estimator
to reduce the number of simulations required to obtain a stable covariance matrix estimate in the synthetic likelihood. Based on $m$ independent model simulations the covariance matrix estimate is \begin{align} \widehat{\Sigma}_\gamma & =\widehat{D}^{1/2}\left\{\gamma\widehat{C}+(1-\gamma)I\right\}\widehat{D}^{1/2}, \label{wartonest} \end{align} where $\widehat{C}$ is the sample correlation matrix, $\widehat{D}$ is the diagonal matrix of component sample variances, and $\gamma \in [0,1]$ is a shrinkage parameter. The matrix $\widehat{\Sigma}_\gamma$ is non-singular if $\gamma<1$,
even if $m$ is less than the dimension of the summary statistic. This estimator shrinks the sample correlation matrix towards the identity. When $\gamma = 1$ (resp.\ $\gamma = 0$) there is no shrinkage (resp.\ a diagonal covariance matrix is produced). We choose $\gamma$ to require only
1/10 of the simulations required by the standard synthetic likelihood for Bayesian inference. We are interested in the shrinkage effect on the synthetic likelihood approximation and whether our methods can offer a useful correction. Heavy shrinkage is used to stabilize covariance estimation in the synthetic likelihood; the shrinkage estimator can therefore be thought of as specifying ${\Delta}_n(\theta)$.
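A direct transcription of \eqref{wartonest} is straightforward; the following sketch takes an $m\times d$ array of simulated summaries and a shrinkage parameter $\gamma$.
\begin{verbatim}
import numpy as np

def warton_shrinkage_cov(sims, gamma):
    """Shrinkage covariance estimate of the form D^{1/2}{gamma C + (1-gamma) I}D^{1/2} (sketch).

    sims:  (m, d) array of simulated summary statistics
    gamma: shrinkage parameter in [0, 1]; gamma = 1 gives the sample covariance,
           gamma = 0 gives a diagonal covariance matrix.
    """
    sample_cov = np.cov(sims, rowvar=False)
    d_half = np.diag(np.sqrt(np.diag(sample_cov)))    # D^{1/2}
    corr = np.corrcoef(sims, rowvar=False)            # sample correlation matrix C
    shrunk_corr = gamma * corr + (1.0 - gamma) * np.eye(sims.shape[1])
    return d_half @ shrunk_corr @ d_half
\end{verbatim}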
To perform the adjustment, it is necessary to approximate the derivative of the synthetic log-likelihood, with shrinkage applied, at a point estimate of the parameter; we take this point as the estimated posterior mean $\widehat{\theta}$ of the BSL approximation. A computationally efficient approach for estimating these derivatives uses Gaussian process emulation of the approximate log-likelihood surface based on a pre-computed training sample. The training sample is constructed around $\widehat{\theta}$, because this is the only value of $\theta$ for which the approximate derivative is required. We sample $B$ values using Latin
hypercube sampling from the hypercube defined by $[\widehat{\theta}_k - \delta_k, \widehat{\theta}_k + \delta_k]$, where $\widehat{\theta}_k$ denotes the $k$th component of $\widehat{\theta}$,
and take $\delta_k$ as the approximate posterior standard deviation of $\theta_k$; {see \citealp{mckay+bc1979} for details on Latin hypercube sampling}. Denote the collection of training data as $\mathcal{T} = \{\theta^b,\mu^b,\Sigma_\gamma^b\}_{b=1}^B$, where $\theta^b$ is the $b$th training sample and $\mu^b$ and $\Sigma_\gamma^b$ are the corresponding estimated mean and covariance of the synthetic likelihood from the $m$ model simulations, respectively. This training sample is stored and recycled for each simulated dataset generated from $\widehat{\theta}$ that needs to be processed in the adjustment method, which is now
described in more detail.
For a simulated statistic $S^{(j)}$ generated from the model at $\widehat{\theta}$, the shrinkage synthetic log-likelihood is rapidly computed at each $\theta^b$ in the training data $\mathcal{T}$ using the pre-stored information, denoted as $l^b = l(\theta^b;S^{(j)})$. A Gaussian process regression model
based on the collection $\{\theta^b,l^b\}_{b=1}^B$ is then fitted
with $l^b$ as the response and $\theta^b$ as the predictor. We use a zero-mean Gaussian process with squared exponential covariance function having
different length scales for different components of $\theta$ and then approximate the gradient of $\log g_n(S^{(j)}|\widehat{\theta})$ by computing the derivative of the smooth predicted mean function of the Gaussian process at $\widehat{\theta}$. We can show that this is equivalent to considering the bivariate Gaussian process of the original process and its derivative, and performing prediction for the derivative value. The derivative is estimated using a finite difference approximation because it is simpler than computing the estimate explicitly. The matrix $\widetilde{\Omega}$ is constructed using $B=200$ training samples and $J=200$ datasets. Both examples below use 20,000 iterations of MCMC for standard and shrinkage BSL with a multivariate normal random walk proposal. In each case, the covariance of the random walk was set based on an approximate posterior covariance obtained by pilot MCMC runs.
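One possible realization of this emulation step, assuming scikit-learn is available (the exact zero-mean Gaussian-process configuration used in our computations may differ in detail), is sketched below; the gradient at $\widehat{\theta}$ is taken by finite differences of the predicted mean, as described above.
\begin{verbatim}
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

def gp_gradient(theta_train, loglik_train, theta_hat, eps=1e-4):
    """Approximate the gradient of the emulated log-likelihood at theta_hat (sketch).

    theta_train:  (B, d_theta) training points theta^b (e.g. from a Latin hypercube)
    loglik_train: (B,) shrinkage synthetic log-likelihood values l^b
    """
    d_theta = theta_hat.size
    kernel = RBF(length_scale=np.ones(d_theta))   # separate length scale per component of theta
    gp = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
    gp.fit(theta_train, loglik_train)
    grad = np.empty(d_theta)
    for k in range(d_theta):
        e_k = np.zeros(d_theta)
        e_k[k] = eps
        up = gp.predict((theta_hat + e_k)[None, :])[0]
        down = gp.predict((theta_hat - e_k)[None, :])[0]
        grad[k] = (up - down) / (2 * eps)
    return grad
\end{verbatim}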
\subsection*{Moving average example}
We consider the second order moving average model (MA(2)): \begin{equation*} y_t = z_t + \theta_1 z_{t-1} + \theta_2 z_{t-2}, \end{equation*} for $t = 1,\dots,n$, where $z_t \sim N(0,1)$, $t=-1,\dots,n$, and $n$ is the number of observations in the time series. {To ensure invertibility of the MA(2) model, the space $\Theta$ is constrained as
$-1<\theta_2<1,\theta_1+\theta_2>-1,\theta_1-\theta_2<1$ and we specify a uniform prior over this region.} The density of the observations from an MA(2) model is multivariate normal, with $\mathrm{var}(y_t)=1+\theta_1^2+\theta_2^2$, $\mathrm{cov}(y_t,y_{t-1})=\theta_1+\theta_1\theta_2$, $\mathrm{cov}(y_t,y_{t-2})=\theta_2$, with all other covariances equal to $0$. The coverage assessment is based on 100 simulated datasets from the model with true parameters $\theta_1 = 0.6$ and $\theta_2 = 0.2$. Here, we consider a reasonably large sample size of $n=10^4$.
This example uses the first 20 autocovariances as the summary statistic. The autocovariances are a reasonable choice here as they are informative about the parameters and satisfy a central limit theorem \citep{Hannan1976}.
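For reference, simulating the MA(2) model and computing autocovariance summaries is straightforward; the sketch below uses lags $1,\dots,20$, although the exact lag convention (for instance, whether the lag-0 term is included) is a modelling choice.
\begin{verbatim}
import numpy as np

def simulate_ma2(theta, n, rng):
    """Simulate y_t = z_t + theta_1 z_{t-1} + theta_2 z_{t-2} with z_t ~ N(0, 1)."""
    z = rng.standard_normal(n + 2)
    return z[2:] + theta[0] * z[1:-1] + theta[1] * z[:-2]

def autocov_summaries(y, n_lags=20):
    """Sample autocovariances at lags 1, ..., n_lags (sketch)."""
    y = y - y.mean()
    n = len(y)
    return np.array([np.dot(y[:n - k], y[k:]) / n for k in range(1, n_lags + 1)])
\end{verbatim}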
To compare with BSL, we use ABC with a Gaussian weighting kernel having covariance $\epsilon V$, where $V$ is a positive-definite matrix. To favor the ABC method, $V$ is set as the covariance matrix of the summary statistic obtained via many simulations at the true parameter value. This ABC likelihood corresponds to using the Mahalanobis distance function with a Gaussian weighting kernel. We also consider BSL with a diagonal covariance, and the corresponding adjustment described in Section 4.
To sample from the approximate posterior distributions for each method and dataset, importance sampling with a Gaussian proposal is used with a mean given by the approximate posterior mean and a covariance that is
twice the approximate posterior covariance. We treat this as the `good' proposal distribution for posterior inference. The initial approximations of the (approximate) posteriors are obtained from pilot runs.
For BSL, we use $10,000$ importance samples and consider $m=100$, 200, 500 and $2000$ for estimating the synthetic likelihood. Table~\ref{tab:ma_coverages_compare} reports the mean and minimum effective sample size (ESS) of the importance sampling approximations \citep{kong92} over the 100 datasets. It shows that for standard BSL with $m=100$ the minimum ESS is small, suggesting this is close to the smallest value of $m$ that can be considered
to ensure the results are not dominated by Monte Carlo error. For a given $m$, the ESS values are larger when using a diagonal covariance matrix, demonstrating the computational benefit over estimating a full covariance matrix in standard BSL. For the BSL adjustment approach, the initial sample before adjustment consists of a re-sample of size 1000 from the relevant diagonal BSL importance sampling approximation to avoid having to work with a weighted sample.
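The effective sample size used here is the standard importance sampling measure of \cite{kong92}, $\mathrm{ESS}=(\sum_i w_i)^2/\sum_i w_i^2$ for unnormalized weights $w_i$; a short sketch:
\begin{verbatim}
# Minimal sketch: effective sample size of importance weights.
import numpy as np

def ess(w):
    w = np.asarray(w, dtype=float)
    return w.sum() ** 2 / np.sum(w ** 2)
\end{verbatim}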
We use 10 million importance samples for ABC, which is twice as many model simulations as BSL with $m=500$. For each dataset, $\epsilon$ is selected so that the ESS is around 1,000, which makes $\epsilon$ as small as possible while ensuring that the results are robust to Monte Carlo error. To reduce storage, a resample of size 1,000 is taken from the ABC importance sampling approximation to produce the final ABC approximation. We also apply the local regression adjustment of \cite{beaumont+zb02} to the ABC samples for each dataset.
Table~\ref{tab:ma_coverages_compare} presents the
estimated coverage rates for $\theta_1$ and $\theta_2$ marginally, and the joint coverage for $(\theta_1,\theta_2)$, at nominal levels of $95\%$, $90\%$ and $80\%$, based on kernel density estimates. The densities are estimated from 1000 samples, resampling the importance sampling approximations when required to avoid dealing with a weighted sample.
It is evident that standard BSL produces reasonable coverage rates, with some undercoverage at the 80\% nominal rate; $m$ seems to have negligible effect on the estimated coverage.
BSL with a diagonal covariance produces gross overcoverage for $\theta_1$. Interestingly, despite the overcoverage for $\theta_1$, there is undercoverage at the 95\% and 90\% nominal rates for the joint confidence regions for $\theta_1$ and $\theta_2$, due to the incorrect estimated dependence structure based on the misspecified covariance. In contrast, the adjusted BSL results produce accurate coverage rates for the marginals and the joint.
The ABC method produces substantial overcoverage. ABC with regression adjustment produces more accurate coverage rates, although some overcoverage remains in general.
\begin{table}
\centering
\begin{tabular}{|ccccccc|}
\hline
method & $m$ & mean ESS & min ESS & $95\%$ & $90\%$ & $80\%$ \\
\hline
BSL & 100 & 1400 & 21 & 96/97/93 & 91/88/86 & 72/74/73 \\
BSL & 200 & 3000 & 240 & 95/97/91 & 91/89/88 & 73/78/74 \\
BSL & 500 & 5000 & 2000 & 95/96/94 & 91/88/88 & 73/74/76 \\
BSL & 2000 & 6700 & 4900 & 95/97/91 & 89/88/86 & 71/74/75 \\
BSL diag & 100 & 4200 & 620 & 99/95/89 & 97/88/86 & 95/78/81 \\
BSL diag & 200 & 5400 & 1500 & 99/95/90 & 98/88/85 & 94/78/81 \\
BSL diag & 500 & 6500 & 3400 & 99/95/89 & 98/88/87 & 94/78/80 \\
BSL diag & 2000 & 7200 & 6000 & 99/95/90 & 97/87/87 & 94/78/76 \\
BSL adj & 100 & - & - & 95/95/94 & 91/92/92 & 80/80/80 \\
BSL adj & 200 & - & - & 96/95/96 & 91/90/91 & 79/81/77 \\
BSL adj & 500 & - & - & 94/95/93 & 91/88/86 & 80/80/80 \\
BSL adj & 2000 & - & - & 95/95/93 & 91/88/85 & 80/78/79 \\
ABC & - & - & - & 98/100/97 & 96/99/96 & 89/93/94 \\
ABC reg & - & - & - & 97/97/94 & 93/96/90 & 82/84/87 \\
\hline
\end{tabular}
\caption{Estimated coverage for credible intervals having nominal 95/90/80\% credibility for standard BSL, BSL with a diagonal covariance (BSL diag), BSL diag with an adjustment (BSL adj), ABC and regression adjustment ABC (ABC reg) for $\theta_1/\theta_2/(\theta_1,\theta_2)$. }
\label{tab:ma_coverages_compare} \end{table}
\subsection*{Toad example}
This example is an individual-based model of a species called Fowler's Toads (\textit{Anaxyrus fowleri}) developed by \citet{Marchand2017}, which was previously analysed by \citet{an+nd18}. The example is briefly described here; see \citet{Marchand2017} and \citet{an+nd18} for further details.
The model assumes that a toad hides in its refuge site in the daytime and moves to a randomly chosen foraging place at night. GPS location data are collected on $n_t$ toads for $n_d$ days, so the matrix of observations $Y$ is $n_d \times n_t$ dimensional. This example uses both simulated and real data. The simulated data use $n_t=66$ and $n_d=63$, and the data are summarized by $4$ sets of statistics comprising the relative moving distances for time lags of $1,2,4$ and $8$ days. For instance, $y_1$ consists of the displacement information at lag $1$ day, $y_1 = \{|Y_{i,j}-Y_{i+1,j}| ; 1 \leq i \leq n_{d}-1, 1 \leq j \leq n_t \}$.
Simulating from the model involves two processes. For each toad, we first generate an overnight displacement, $\Delta y$, then mimic the returning behaviour with a simplified model. The overnight displacement is assumed to belong to the L\'evy alpha-stable distribution family, with stability parameter $\alpha$ and scale parameter $\delta$. With probability $1-p_0$, the toad takes refuge at the location it moved to. With probability $p_0$, the toad returns to the refuge site of day $i$, where $i$ is selected uniformly at random from $\{1,2,\dots,M\}$ and $M$ is the number of days the simulation has run for. For the simulated data, $\theta=(\alpha,\delta,p_0)=(1.7,35,0.6)$, which is a parameter value that fits the real data well, and we assume a uniform prior over $(1,2) \times (0,100) \times (0,0.9)$ for $\theta$.
As in \citet{Marchand2017}, the dataset of displacements is split into two components. If the absolute value of the displacement is less than 10 metres, it is assumed the toad has returned to its starting location. For the summary statistic, we consider the number of toads that returned. For the non-returns (absolute displacement greater than 10 metres), we calculate the log difference between adjacent $p$-quantiles with $p=0,0.1,\ldots,1$ and also the median. These statistics are computed separately for the four time lags, resulting in a $48$-dimensional statistic. For standard BSL, $m=500$ simulations are used per MCMC iteration. However, with a shrinkage parameter of $\gamma=0.1$, only $m=50$ simulations per MCMC iteration were necessary. For the simulated data, the MCMC acceptance rates are 16\% and 21\% for standard and shrinkage BSL, respectively. For the real data, the acceptance rates are both roughly 24\%.
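To make the data generating process concrete, the following is a rough sketch of the simulation described above (our own simplified illustration; SciPy's \texttt{levy\_stable} is used for the overnight displacement, and all names are hypothetical).
\begin{verbatim}
# Rough sketch of the toad model: each night a toad draws a Levy
# alpha-stable overnight displacement from its current refuge; with
# probability p0 it returns to the refuge of a uniformly chosen earlier
# day, otherwise it takes refuge at the new location.
import numpy as np
from scipy.stats import levy_stable

def simulate_toads(alpha, delta, p0, n_days, n_toads, rng):
    Y = np.zeros((n_days, n_toads))
    for j in range(n_toads):
        refuges = [0.0]
        for i in range(1, n_days):
            step = levy_stable.rvs(alpha, beta=0.0, scale=delta,
                                   random_state=rng)
            new_pos = refuges[-1] + step
            if rng.random() < p0:
                new_pos = refuges[rng.integers(len(refuges))]
            refuges.append(new_pos)
            Y[i, j] = new_pos
    return Y

rng = np.random.default_rng(1)
Y = simulate_toads(1.7, 35.0, 0.6, n_days=63, n_toads=66, rng=rng)
\end{verbatim}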
Figure \ref{fig:toad_results} summarizes the results for the simulated data and shows that the shrinkage BSL posterior underestimates the variance and has the wrong dependence structure compared to the standard BSL posterior. The adjusted posterior produces uncertainty quantification that is closer to the standard BSL procedure, although its larger variances indicate that there is a loss in efficiency in using frequentist inference based on the shrinkage BSL point estimate. The results for the real data in Figure \ref{fig:toad_results_realdata} are qualitatively similar. There is less difference in the posterior means between the standard and shrinkage BSL methods for the real data compared to the simulated data, and generally less variance inflation in the adjusted results for the real data compared to the simulated data.
\begin{figure}
\caption{
Adjustment results for the toad example based on the simulated data. The panels in the top row are bivariate contour plots of the standard and shrinkage BSL posteriors. The panels in the bottom row are bivariate contour plots of the standard and adjusted BSL posteriors.
}
\label{fig:toad_results}
\end{figure}
\begin{figure}
\caption{
Adjustment results for the toad example based on the real data. The top row panels are bivariate contour plots of the standard and shrinkage BSL posteriors. The bottom row panels are bivariate contour plots of the standard and adjusted BSL posteriors.
}
\label{fig:toad_results_realdata}
\end{figure}
\section{Discussion}
Our article examines the asymptotic behaviour of Bayesian inference using the synthetic likelihood when the summary statistic satisfies a central limit theorem. The synthetic likelihood asymptotically quantifies uncertainty similarly to ABC methods under appropriate algorithmic settings and assumptions leading to correct uncertainty quantification. We also examine the effect of estimating the mean and covariance matrix in synthetic likelihood algorithms, as well as the computational efficiency of similar versions of rejection and importance sampling algorithms for BSL and ABC. BSL is more efficient than vanilla ABC,
and behaves similarly to regression-adjusted ABC.
Adjustments are also discussed for a misspecified covariance in the synthetic likelihood. These adjustments may also be useful when the model for $y$ is misspecified, and inference on the pseudo-true parameter
is of interest. {Our adjustment methods do not help correct inference in the case where the summary statistics are not normal.} Some approaches consider more complex parametric models than the normal for addressing this issue, and the asymptotic framework developed here could be adapted to other parametric model approximations for the summaries. These extensions are left to future work.
Although our adjustments could be useful when the model for $y$ is misspecified, it is helpful to distinguish different types of misspecification. Model incompatibility is said to occur when it is impossible to recover the observed summary statistic for any $\theta$, but we do not investigate
the behaviour of synthetic likelihood in detail in this case. \citet{Frazier2019} and \citet{frazier+rr17} demonstrate that standard BSL and ABC can both perform poorly under incompatibility. \citet{Frazier2019} propose some extensions to BSL allowing greater robustness and computational efficiency in this setting. More research is needed to compare BSL and ABC when model incompatibility occurs.
\section*{Acknowledgments} David Frazier was supported by the Australian Research Council's Discovery Early Career Researcher Award funding scheme (DE200101070). David Nott was supported by a Singapore Ministry of Education Academic Research Fund Tier 1 grant and is affiliated with the Operations Research and Analytics Research cluster at the National University of Singapore. Christopher Drovandi was supported by an Australian Research Council Discovery Project (DP200102101). Robert Kohn was partially supported by the Center of Excellence grant CE140100049 and Robert Kohn, Christopher Drovandi and David Frazier are affiliated with the Australian Centre of Excellence for Mathematical and Statistical Frontiers. We thank Ziwen An for preparing computer code for the toad example.
\appendix \section{Proofs and Lemmas} \subsection{Proofs of the main results} \begin{proof}[Proof of Theorem \ref{prop:bvm}] We only prove the second result in Theorem \ref{prop:bvm}; the first result then follows by taking $\gamma=0$. We upper bound the integral in question as \begin{flalign}
\int_{\mathcal{T}_n} \|t\|^{\gamma}|\widehat{\pi}(t|S_n)-N\{t;0,W_0\}|\text{d} t\le& \int_{\mathcal{T}_n} \|t\|^{\gamma}|{\pi}(t|S_n)-N\{t;0,W_0\}|\text{d} t\nonumber\\&+\int_{\mathcal{T}_n} \|t\|^\gamma |\widehat{\pi}(t|S_n)-{\pi}(t|S_n)|\text{d} t, \label{eq:decomp} \end{flalign}and the stated result follows if both terms in \eqref{eq:decomp} are $o_p(1)$. The first term on the RHS of \eqref{eq:decomp} is $o_p(1)$ by Lemma \ref{prop:bvm1}; we now show that the second term is $o_p(1)$.
Define $M^{}_n(\theta):=[v_n^2{\Delta}_n(\theta)]^{-1}$, $Q_n(\theta):=-v_n^2\{b(\theta)-S_n\}^{\intercal}M_n(\theta)\{b(\theta)-S_n\}/2$, and $$\widehat Q_{n}(\theta):=-v_n^2\left\{\widehat b_{n}(\theta)-S_n\right\}^{\intercal}M_n(\theta)\left\{\widehat b_{n}(\theta)-S_n\right\}/2.$$ We first demonstrate that, uniformly over $\Theta$, \begin{flalign}\label{eq:app1} \mathbb{E}\left[\exp\left\{\widehat Q_{n}(\theta)\right\}\mid\theta,S_n\right]= \exp\left\{Q_n(\theta)\right\}\left\{1+O\left(\frac{k(\theta)}{m}\right)\right\}. \end{flalign} Using properties of quadratic forms, and Assumption \ref{ass:propO}, \begin{flalign*}
\mathbb{E}\left[\left\|M^{1/2}_n(\theta)v_n\left\{\widehat{b}_n(\theta)-S_n\right\}\right\|^2\mid\theta,S_n\right]=&\text{Tr}\left\{M_n(\theta)\cdot\text{Cov}\left[v_n\left\{\widehat{b}_n(\theta)-S_n\right\}\right]\right\}+\mu_n(\theta)^\intercal M_n(\theta) \mu_n(\theta)\\&\leq \text{Tr}[M_n(\theta)] k(\theta)/m+\mu_n(\theta)^\intercal M_n(\theta)\mu_n(\theta), \end{flalign*}
where $$\mu_n(\theta)=v_n\mathbb{E}[S_n(z^i)-S_n|S_n,\theta]=v_n\{b(\theta)-S_n\}.$$ Apply Lemma \ref{lem:propO} with $A=M^{1/2}_n(\theta)$, $x=v_n\{\widehat{b}_n(\theta)-S_n\}$, and
$M=M_n(\theta)$, which is valid for $\eta$ satisfying $$0\leq \eta<{1}\big{/}{\left[2\frac{k(\theta)}{m}\|M_n(\theta)\|\right]}.$$ However, by Assumption~\ref{ass:propO}(ii), for any $\theta\in\Theta$, $\|M_n(\theta)\|k(\theta)/m=o(1)$ as $n\rightarrow\infty$. Therefore, for $n$ large enough and uniformly over $\Theta$ , we take $\eta = 1$, without loss of generality. Applying Lemma~\ref{lem:propO}, with $\eta=1$, yields \begin{flalign}\label{eq:logeq}
\log\left\{\mathbb{E}[\exp(\|Ax\|^2)]\right\}&\leq \text{Tr}[M_n(\theta)]k(\theta)/m+\frac{\|M^{1/2}_n
(\theta)\mu_n(\theta)\|^2}{[1+o(1)]}+O\left(\frac{\text{Tr}[M_n(\theta)^2]k^2(\theta)/m^2} {1+o(1)}\right). \end{flalign} One half of the numerator of the second term in the above equation is equivalent to $$\mu_n(\theta)^\intercal M^{}_n(\theta)\mu_n(\theta)/2=v_n^2\{b(\theta)-S_n\}^\intercal M_n(\theta)\{b(\theta)-S_n\}/2= -Q_n(\theta).$$ Therefore, from equation \eqref{eq:logeq}, \begin{flalign*} \mathbb{E}\left[\exp\{\widehat{Q}_n(\theta)\}\mid\theta,S_n\right]&=
\exp(-\|M_n^{1/2}(\theta)\mu_n(\theta)\|^2/2)\exp\left[O\left\{\text{Tr}\left[M_n(\theta) \right]k(\theta)/m\right\}\right]\\&=\exp\left\{Q_n(\theta)\right\}\exp\left[O\left\{\text{Tr} \left[M_n(\theta)\right]k(\theta)/m\right\}\right]\\&\leq\exp\left\{Q_n(\theta)\right\} \left\{1+O(k(\theta)/m)\right\}. \end{flalign*}
From equation \eqref{eq:app1} and the definitions of $\widehat{g}_n(\theta|S_n)$ and $g_n(\theta|S_n)$, \begin{flalign}\label{eq:app3}
|\widehat{g}_n(S_n|\theta)-g_n(S_n|\theta)|\leq g_n(S_n|\theta)\left[O\left\{{k(\theta)}/{m}\right\}\right], \end{flalign}so that \begin{flalign*}
\left|\int_{\Theta} \widehat{g}_n(S_n|\theta)\pi(\theta)\text{d} \theta-\int_{\Theta} {g}_n(S_n|\theta)\pi(\theta)\text{d} \theta\right|&\leq \int_\Theta|\widehat{g}_n(S_n|\theta)-{g}_n(S_n|\theta)|\pi(\theta)\text{d} \theta\\&\lesssim \frac{1}{m}\int_{\Theta} k(\theta)g_n(S_n|\theta)\pi(\theta)\text{d} \theta\\&= \frac{1}{m}\int_{\Theta} g_n(S_n|\theta)\pi(\theta)\text{d} \theta\int_{\Theta} k(\theta)\pi(\theta|S_n)\text{d} \theta, \end{flalign*}where the second line follows from equation \eqref{eq:app3}, and the equality from reorganizing terms.
The proof of Lemma \ref{prop:bvm1} demonstrates that $\int_\Theta g_n(S_n|\theta)\pi(\theta)\text{d} \theta<\infty$ for all $n$ large enough; hence, \begin{flalign}
\left|\int_{\Theta} \|\theta\|^{\gamma}\widehat{g}_n(S_n|\theta)\pi(\theta)\text{d} \theta-\int_{\Theta} \|\theta\|^{\gamma}{g}_n(S_n|\theta)\pi(\theta)\text{d} \theta\right|&\lesssim \frac{1}{m}\int_{\Theta}\|\theta\|^{\gamma}k(\theta)\pi(\theta|S_n)\text{d} \theta\nonumber\\&\lesssim \frac{1}{m}\int_{\Theta}\|\theta\|^{\xi}\pi(\theta|S_n)\text{d} \theta, \label{eq:neweq} \end{flalign}
where $\xi=\gamma+\kappa$, with $\kappa$ as in Assumption~\ref{ass:propO}(ii). Consider the term $\int_\Theta\|\theta\|^\xi\pi(\theta|S_n)\text{d}\theta$. Recall that $t:=v_nW_0^{}(\theta-\theta_0)-Z_n$, and we obtain \begin{flalign*}
\|\theta\|^{\xi}&= \|W_0^{-1}t/v_n+\theta_0+W_0^{-1}Z_n/v_n\|^{\xi}\lesssim{v_n^{-\xi}}
\|t\|^{\xi}+\|\{b(\theta_0)-S_n\}+\theta_0\|^{\xi}. \end{flalign*} Applying the change of variables $\theta\mapsto t$ and the above inequality yields \begin{flalign}
\int_{}\|\theta\|^{\xi}\pi(\theta|S_n)\text{d} \theta&\lesssim {v_n^{-\xi}}\int\|t\|^{\xi}\pi(t|S_n) \text{d} t+\|\{b(\theta_0)-S_n\}+\theta_0\|^{\xi}\label{eq:app4}. \end{flalign} Now, \begin{flalign*}
\int\|t\|^{\xi}\pi(t|S_n) \text{d} t&\leq \int\|t\|^{\xi}|\pi(t|S_n)-N\{t;0,W_0\}|\text{d} t+\int\|t\|^{\xi}N\{t;0,W_0\}\text{d} t \end{flalign*} The first term in the above equation is $o_p(1)$ by Lemma \ref{prop:bvm1} under Assumption \ref{ass:three} with $p\ge\xi$, and the second term is finite due to Gaussianity; hence, \begin{flalign}
\int\|t\|^{\xi}\pi(t|S_n) \text{d} t&=o_p(1)+C\label{eq:app5}. \end{flalign}
Using equation \eqref{eq:app5} in equation \eqref{eq:app4}, and the fact that, by Assumption~\ref{ass:one}, $\|S_n- b(\theta_0)\|=o_p(1)$, \begin{flalign}
\int_{\Theta}\|\theta\|^{\xi}\pi(\theta|S_n)\text{d} \theta&\leq C/v_n^{\xi}+o_p(1/v_n^{\xi})+{\|\theta_0+o_p(1)\|^{\xi}}{}.\label{eq:new2} \end{flalign}Applying equation \eqref{eq:new2} into the RHS of equation \eqref{eq:neweq} then yields, \begin{flalign}
\left|\int_{\Theta}\|\theta\|^{\gamma} \widehat{g}_n(S_n|\theta)\pi(\theta)\text{d} \theta-\int_{\Theta}\|\theta\|^{\gamma} {g}_n(S_n|\theta)\pi(\theta)\text{d} \theta\right|&\lesssim \frac{1}{m}\int_{\Theta}\|\theta\|^{\xi}\pi(\theta|S_n)\text{d} \theta= O_p(1/m).\label{eq:result1} \end{flalign}
It then follows from equation \eqref{eq:result1} that \begin{flalign}\label{eq:result2}
\frac{\left|\int \widehat{g}_n(S_n|\theta)\pi(\theta)\text{d} \theta-\int {g}_n(S_n|\theta)\pi(\theta)\text{d} \theta\right|}{\int g_n(S_n|\theta)\pi(\theta)\text{d}\theta}= O_p(1/m), \end{flalign} and so \begin{flalign*}
\frac{\int_{\Theta} \widehat{g}_n(S_n|\theta)\pi(\theta)\text{d} \theta}{\int_{\Theta} g_n(S_n|\theta)\pi(\theta)\text{d}\theta}=1+O_p(1/m);\text{ }\frac{\int_{\Theta} g_n(S_n|\theta)\pi(\theta)\text{d}\theta}{\int_{\Theta} \widehat{g}_n(S_n|\theta)\pi(\theta)\text{d} \theta}=1+O_p(1/m). \end{flalign*}
Write $\{\widehat{\pi}_{}(\theta|S_n)-\pi_{}(\theta|S_n)\}$ as \begin{flalign*}
\{\widehat{\pi}_{}(\theta|S_n)-\pi_{}(\theta|S_n)\}=& \frac{\widehat{g}_n(S_n|\theta)\pi(\theta) }{\int_{\Theta} \widehat{g}_n(S_n|\theta)\pi(\theta)\text{d} \theta}-\frac{{g}_n(S_n|\theta)\pi(\theta)}{\int_{\Theta} {g}_n(S_n|\theta)\pi(\theta)\text{d} \theta} \\
=&\left\{ \widehat{g}_n(S_n|\theta)-{g}_n(S_n|\theta)\right\}\frac{\pi(\theta)}{\int_{\Theta} {g}_n(S_n|\theta)\pi(\theta)\text{d} \theta} \frac{\int_{\Theta} {g}_n(S_n|\theta)\pi(\theta)\text{d} \theta}{\int_{\Theta} \widehat{g}_n(S_n|\theta)\pi(\theta)\text{d} \theta} \\
&-g_{n}(S_n|\theta) \pi(\theta)\left(\frac{1}{\int_{\Theta} g_{n}(S_n|\theta) \pi(\theta)\text{d}\theta}-\frac{1}{\int_{\Theta} \widehat{g}_{n}(S_n|\theta) \pi(\theta)\text{d}\theta}\right), \end{flalign*} and apply the triangle inequality to obtain \begin{flalign*}
\left|\widehat{\pi}_{}(\theta|S_n)-\pi_{}(\theta|S_n)\right| &\leq\left|\widehat{g}_n(S_n|\theta)-{g}_n(S_n|\theta)\right| \frac{\pi(\theta)}{\int \widehat{g}_n(S_n|\theta)\pi(\theta)\text{d}\theta}\\&+\frac{\left|\int_{\Theta} \widehat{g}_n(S_n|\theta)\pi(\theta)\text{d}\theta-\int_{\Theta} {g}_n(S_n|\theta)\pi(\theta)\text{d}\theta\right|}{\int_{\Theta} \widehat{g}_n(S_n|\theta)\pi(\theta)\text{d}\theta} \pi_{}(\theta|S_n).
\end{flalign*} Multiplying by $\|\theta\|^\gamma$, integrating both sides and applying
equations~\eqref{eq:result1} and \eqref{eq:result2}, $$ \begin{aligned}
\int_{\Theta}\|\theta\|^\gamma\left|\widehat{\pi}_{}(\theta|S_n)-\pi_{}(\theta|S_n)\right| \text{d} \theta & \leq \frac{1}{\int_{\Theta} \widehat{g}_n(S_n|\theta)\pi(\theta)\text{d}\theta} \int\|\theta\|^\xi\left|\widehat{g}_n(S_n|\theta)-{g}_n(S_n|\theta)\right|\pi(\theta) \text{d}\theta\\&+\frac{\left|\int_{\Theta}\widehat{g}_n(S_n|\theta)\pi(\theta)\text{d}\theta-\int_{\Theta}{g}_n(S_n|\theta)\pi(\theta)\text{d}\theta\right|}{\int_{\Theta}\widehat{g}_n(S_n|\theta)\pi(\theta)\text{d}\theta}\int_{\Theta}\|\theta\|^\xi\pi(\theta|S_n)\text{d}\theta \\
& \leq O_p\left (1/m\right )+\frac{\int_{\Theta}{g}_n(S_n|\theta)\pi(\theta)\text{d}\theta}{\int_{\Theta}\widehat{g}_n(S_n|\theta)\pi(\theta)\text{d}\theta} O_p\left (1/m\right )\int_{\Theta}\|\theta\|^\xi\pi(\theta|S_n)\text{d}\theta \\ &=O_p\left(1/m\right). \end{aligned} $$
By equation \eqref{eq:new2}, $\int_{\Theta} \|\theta\|^\xi \pi(\theta|S_n)<\infty$, and
the first term in the second inequality is $O_p(1/m)$;
the second term is also $O_p(1/m)$ because
${\int_{\Theta}{g}_n(S_n|\theta)\pi(\theta)\text{d}\theta}/
{\int_{\Theta}\widehat{g}_n(S_n|\theta)\pi(\theta)\text{d}\theta}=1+O_p(1/m)$. The stated result then follows.
\end{proof}
\begin{proof}[Proof of Corollary \ref{cor:one}] The proof follows from Theorem~\ref{prop:bvm}. First, decompose $\bar{\theta}_n$ as \begin{flalign*}
\bar\theta_n&=\int \theta \widehat{\pi}(\theta|S_n)\text{d}\theta=\int \theta\left\{ \widehat{\pi}(\theta|S_n)-\pi(\theta|S_n)\right\}\text{d}\theta+\int\theta\pi(\theta|S_n)\text{d}\theta ; \end{flalign*}
by the result of Theorem~\ref{prop:bvm1}, $$
\int \theta\left\{ \widehat{\pi}(\theta|S_n)-\pi(\theta|S_n)\right\}\le \int \|\theta\|| \widehat{\pi}(\theta|S_n)-\pi(\theta|S_n)|\text{d}\theta=O_p(1/[v_nm]) $$ so that \begin{flalign*}
\overline\theta_n&=O_p(1/[v_nm])+\int\theta\pi(\theta|S_n)\text{d}\theta . \end{flalign*} Changing variables $\theta\mapsto t$ yields
\begin{flalign*}
\int_{\Theta} \theta\pi(\theta|S_n)\text{d} \theta=\int_{\mathcal{T}_n} \left(\theta_0+W_0^{-1}Z_n/v_n+W_0^{-1}t/{v_n}\right)\pi(t|S_n)\text{d} t;
\end{flalign*} hence
\begin{flalign*}
W_0v_n(\bar{\theta}_n-\theta_0)-Z_n&=\int t \pi(t|S_n)\text{d} t+O_p(1/m)\\&= \int t \left[\pi(t|S_n)-N\{t;0,W_0\}\right]\text{d} t+\int t N\{t;0,W_0\}\text{d} t +O_p(1/m). \label{eq:post_mean1}
\end{flalign*} The second term on the right is zero. Therefore,
\begin{flalign*}
\left|W_0v_n(\bar{\theta}_n-\theta_0)-Z_n\right|&=\left| \int t \left[\pi(t|S_n)-N\{t;0,W_0\}\right]\text{d} t\right| +O_p(1/m)\\&\leq \int \|t\|\left|\pi(t|S_n)-N\{t;0,W_0\}\right|\text{d} t+O_p(1/m)\\&=o_p(1)+O_p(1/m),
\end{flalign*} where the last line follows from Lemma~\ref{prop:bvm1}. Recall the definition $Z_n=\nabla b(\theta_0)^{\intercal}\Delta^{-1}(\theta_0)\{b(\theta_0)-S_n\}$; under Assumption~\ref{ass:one},
\begin{equation*}
Z_n\Rightarrow N\left\{0, \nabla b(\theta_0)^{\intercal}\Delta^{-1}(\theta_0)V_0\Delta^{-1}(\theta_0)\nabla b(\theta_0)\right\},
\end{equation*} and the result follows.
\end{proof}
\begin{proof}[Proof of Theorem \ref{thm:acc}]
We first show that the result is satisfied if $\widehat{g}_n(S_n|\theta)$ in $\widetilde\alpha_n$ is replaced with the idealized counterpart ${g}_n(S_n|\theta)$,
yielding the acceptance rate $$\alpha_n\asymp\frac{1}{v_n^{d_\theta}}\int q_n(\theta)g_n(S_n|\theta)\text{d}\theta.$$From the posterior concentration of $\pi(\theta|S_n)$ in Lemma \ref{prop:bvm1} and the restrictions on the proposal in Assumption \ref{ass:prop}, the acceptance probability $\alpha_n$ can be rewritten as $$
\alpha_n\asymp \int_{}\mathbb{I}\left[\|\theta-\theta_0\|\leq\delta_n\right]q_n(\theta)g_n(S_n|\theta)/v_n^{d_\theta}\text{d} \theta+o_p(1), $$ for some $\delta_n=o(1)$ with $v_n\delta_n\rightarrow\infty$.
Following arguments mirroring those in the proof of Lemma \ref{prop:bvm1}, for any $\delta_n=o(1)$, on the set $\{\theta\in\Theta:\|t(\theta)\|\leq \delta_n v_n\}$, and disregarding $o(1)$ terms, \begin{flalign*}
g_n\left(S_ { n }|\theta\right)/v_n^{d_\theta}\asymp \exp\left\{-t^{\intercal}(\theta)W_0^{-1}t(\theta)/2\right\}, \end{flalign*}
where $t(\theta):=W_0v_n\{\theta-\theta_0\}-Z_n$ (see the proof of Lemma \ref{prop:bvm1} for details). By construction, $t(\theta)$ is a one-to-one transformation of $\theta$ for fixed $\theta_0$ and $Z_n$. From the definition of the proposal, we can restrict $\theta$ to the set $$\{\theta\in\Theta:\|v_n(\theta-\theta_0)\|\leq\delta_nv_n\}\cap\{\theta\in\Theta:\theta=\mu_n+\sigma_n X\},$$with $\mathbb{E}[X]=0,\;\mathbb{E}[\|X\|^2]<\infty$. On this set, up to negligible terms, \begin{equation}\label{eq:bsl_like}
g_n\left(S_ { n }|\theta\right)/v_n^{d_\theta}\asymp \exp\left[-\left\{W_0v_n(\theta-\theta_0)-Z_n\right\}^{\intercal}W^{-1}_{0}\left\{W_0v_n(\theta-\theta_0)-Z_n\right\}/2\right]. \end{equation} Define $r_n:=v_n\sigma_n$, $c_n^\mu:=\sigma^{-1}_n(\mu_n-S_n)$, and apply equation \eqref{eq:bsl_like} along with the change of variables $\theta\mapsto t=W_0v_n(\theta-\theta_0)$ to obtain \begin{flalign*}
\alpha_n&\asymp\int_{}\mathbb{I}\left[\|v_n(\theta-\theta_0)\|\leq v_n\delta_n\right]q_n(\theta)g_n(S_n|\theta)/v_n^{d_\theta}\text{d}\theta\\&\asymp\int_{\|t\|\leq v_n\delta_n}r_n^{-1}{q\left(t/r_n-c_n^{\mu}\right) }{}\exp\left[-\left\{t-Z_n\right\}^{\intercal}M(\theta_0)\left\{t-Z_n\right\}/2\right]\text{d} t,
\end{flalign*}where the second equality makes use of the location-scale nature of the proposal. For $\delta_n v_n\rightarrow\infty$, $T_\delta:=\{t:\|t\|\leq\delta_n v_n\}\rightarrow\mathbb{R}^{d_\theta}$. Define $x(t):=t/r_{n}-c_{n}^{\mu}$ and the set $x(A):=\{x:x=x(t)\text{ for some }t\in A\}$. Then, by construction, $x(T_\delta)$ also converges to $\mathbb{R}^{d_\theta}$. Applying the change of variable $t\mapsto x$ yields \begin{flalign} \alpha_n&\asymp\int_{x(T_\delta)}r_n^{-1}q\left(x\right) \exp\left\{-r_n^2\left(x+c_{n}^{\mu}-Z_n/r_n\right)^{\intercal}M_{0}\left(x+c_{n}^{\mu}-Z_n/r_n\right)/2\right\}\text{d} x.\label{eq:acc1} \end{flalign}
Applying part (i) of Assumption \ref{ass:prop} then yields $$\underline{\alpha}_n\leq \alpha_n\leq \widehat{\alpha}_n,$$ where \begin{flalign*}
\widehat{\alpha}_n&=\frac{C}{r_n^{}}\frac{|M(\theta_0)|^{1/2}}{(2\pi)^{d_\theta/2}}\int_{x(T_\delta)} \exp\left\{-r^2_n\left(x+c_{n}^{\mu}-Z_n/r_n\right)^{\intercal}M_{0}\left(x+c_{n}^{\mu}-Z_n/r_n\right)/2\right\}\text{d} x,\\
\underline{\alpha}_n&=\frac{\exp\left(-Z_n^\intercal{M}_0Z_n/2\right)}{{r_n^{}}}\frac{|M(\theta_0)|^{1/2}}{(2\pi)^{d_\theta/2}}\int_{x(T_\delta)}q(x) \exp\left\{-r_n^2\left(x+c_{n}^{\mu}\right)^{\intercal}M_{0}\left(x+c_{n}^{\mu}\right)/2\right\}\text{d} x. \end{flalign*} By part (ii) of Assumption \ref{ass:prop}, $r_n\rightarrow c_\sigma>0$; by Assumption 1, $Z_n/r_n=O_p(1).$ Therefore, for $Z$ denoting a random variable whose distribution is the same as the limiting distribution of $Z_n$, by the dominated convergence theorem and part (iii) of Assumption \ref{ass:prop} \begin{flalign*}
\widehat{\alpha}_n&\rightarrow\frac{C}{r_0}\frac{|M(\theta_0)|^{1/2}}{(2\pi)^{d_\theta/2}}\int_{\mathbb{R}^d} \exp\left\{-r_0\left(x+c^{\mu}\right)^{\intercal}M_{0}\left(x+c_{}^{\mu}\right)/2\right\}\text{d} x,\\
\underline{\alpha}_n&\rightarrow\frac{\exp\left(-Z^\intercal M(\theta_0)Z/2\right)}{r_0}\frac{|M(\theta_0)|^{1/2}}{(2\pi)^{d_\theta/2}}\int_{\mathbb{R}^{d_\theta}}q(x) \exp\left\{-r_0^2\left(x+c^{\mu}\right)^{\intercal}M_{0}\left(x+c_{}^{\mu}\right)/2\right\}\text{d} x, \end{flalign*} in distribution as $n\rightarrow\infty$, where $c^\mu$ denotes a random variable whose distribution is the same as the limiting distribution of $\sigma^{-1}_n(\mu_n-S_n)$ and $r_0=\lim_n r_n$. By part (ii) of Assumption~\ref{ass:prop}, $c^\mu$ is finite except on sets of measure zero, ensuring that $\widehat{\alpha}_n=\Xi_p(1)$ and $\underline{\alpha}_n=\Xi_p(1).$ We have $\alpha_n=\Xi_p(1)$ because the above limits are $\Xi_p(1)$.
To deduce the stated result, we first bound $|\widetilde{\alpha}_n-\alpha_n|$ as \begin{flalign*}
\left|\int q_n(\theta)\widehat{g}_n(S_n|\theta)\text{d}\theta-\int q_n(\theta){g}_n(S_n|\theta)\text{d}\theta\right|&\leq\int q_n(\theta)\left\{k(\theta)/m\right\}g_n(S_n|\theta)\text{d}\theta\\&={m}^{-1}\int\pi(\theta)g_n(S_n|\theta)\text{d}\theta\int\frac{q_n(\theta)}{\pi(\theta)}k(\theta)\pi(\theta|S_n)\text{d}\theta, \end{flalign*}where the first inequality follows from equation \eqref{eq:app3} in the proof of Theorem \ref{prop:bvm}, and the equality follows from reorganizing terms. Define $h_n(\theta):={q_n(\theta)}/{\pi(\theta)}$ and obtain \begin{flalign*}
\int \frac{q_n(\theta)}{\pi(\theta)}k(\theta)\pi(\theta|S_n)\text{d}\theta=\int h_n(\theta)k(\theta)\pi(\theta|S_n)\text{d}\theta&\leq \left[\int h_n^2(\theta)\pi(\theta|S_n)\text{d}\theta\right]^{1/2}\left[\int k^2(\theta)\pi(\theta|S_n)\text{d}\theta\right]^{1/2}\\&\leq O_p(1)\left[\int \|\theta\|^{2\kappa}\pi(\theta|S_n)\text{d}\theta\right]^{1/2}\\&\leq O_p(1),
\end{flalign*}where the first inequality follows from the Cauchy--Schwarz inequality, the second from Assumption \ref{ass:prop} part (iv) and Assumption \ref{ass:propO} part (ii), while $\int \|\theta\|^{2\kappa}\pi(\theta|S_n)\text{d}\theta<\infty$ by hypothesis. Consequently, \begin{flalign*}
|\widetilde\alpha_n-\alpha_n|=\left|\int q_n(\theta)\widehat{g}_n(S_n|\theta)\text{d}\theta-\int q_n(\theta){g}_n(S_n|\theta)\text{d}\theta\right|=O_p(1/m) \end{flalign*}and the stated result follows from the behavior of $\alpha_n$ obtained in the first part of the result. \end{proof}
\subsection{Lemmas\label{SS: lemmas}} This section contains several lemmas used to prove the main results. The first lemma draws on elements from \cite{lehmann2006theory} and \cite{chernozhukov+h03} to demonstrate that the exact BSL posterior is asymptotically normal. We note that the simulated nature of the BSL likelihood implies that the above results are not directly applicable in our context.
\begin{lemma}\label{prop:bvm1}
Recall $t:=W_0v_n(\theta-\theta_0)-Z_n;$ if Assumptions \ref{ass:one}-\ref{ass:two} are satisfied, and if Assumption \ref{ass:three} is satisfied with $p\ge\gamma\ge0$, then
$$
\int\|t\|^\gamma\left|\pi(t|S_n)-N\{t;0,W_0\}\right|\text{d} t=o_{p}(1).
$$ \end{lemma}
\begin{proof}[Proof of Lemma \ref{prop:bvm1}]Recall the following definitions used in the proof of Theorem~\ref{prop:bvm}: $M_n(\theta):=\left[v_n^2{\Delta}^{}_n(\theta)\right]^{-1}$, $M(\theta):=\Delta(\theta)^{-1}$, and $Q_n(\theta):=-v_n^2\{b(\theta)-S_n\}^\intercal M_n(\theta)\{b(\theta)-S_n\}/2.$ For an appropriately defined remainder term $R_n(\theta)$, consider the identity \begin{flalign} Q_n(\theta)-Q_n(\theta_{0})&=v_n^2\left\{b(\theta_0)-S_n\right\}^{\intercal}M_n(\theta_0)\nabla b(\theta_0)^{\intercal}(\theta-\theta_0)\nonumber\\&-\frac{v_n^2}{2}(\theta-\theta_0)\nabla b(\theta_0)^{\intercal}M_n(\theta_0)\nabla b(\theta_0)(\theta-\theta_0)+R_n(\theta)\nonumber \\&=-\frac{1}{2}t^{\intercal}W_0^{-1}t+\frac{1}{2}Z_n^{\intercal}W_0^{-1}Z_n+R_n(\theta).\label{eq:new3} \end{flalign} To simplify notation, let $T_n:=\theta_0+W_0^{-1}Z_n/v_n$, $t_w:=W_0^{-1}t$, and define \begin{flalign*} \omega(t)&:= Q_n\left(T_n+t_w/v_n\right)-Q_n(\theta_0)-\frac{1}{2}Z_n^{\intercal}W_0^{-1}Z_n. \end{flalign*} Applying \eqref{eq:new3}, we see that $$ \omega(t)=-\frac{1}{2}t^{\intercal}W_0^{-1}t+R_n(T_n+t_w/v_n).$$ Then, for $\mathcal{T}_n:=\{W_0v_n(\theta-\theta_0)-Z_n:\theta\in\Theta\}$, \begin{flalign*}
\pi(t|S_n)&=\frac{\left|M_n\left(T_n+t_w/v_n\right)\right|^{1/2}\exp\left\{Q_n\left(T_n+t_w/v_n\right)\right\}\pi\left(T_n+t_w/v_n\right)}{\int_{\mathcal{T}_n} \left|M_n\left(T_n+t_w/v_n\right)\right|^{1/2}\exp\left\{Q_n\left(T_n+t_w/v_n\right)\right\}\pi\left(T_n+t_w/v_n\right)\text{d} t}\\&=\frac{\left|M_n\left(T_n+t_w/v_n\right)\right|^{1/2}\exp\left\{Q_n\left(T_n+t_w/v_n\right)-Q_n(\theta_0)-\frac{1}{2}Z_n^{\intercal}W_0^{-1}Z_n\right\}\pi\left(T_n+t_w/v_n\right)}{\int_{\mathcal{T}_n} \left|M_n\left(T_n+t_w/v_n\right)\right|^{1/2}\exp\left\{Q_n\left(T_n+t_w/v_n\right)-Q_n(\theta_0)-\frac{1}{2}Z_n^{\intercal}W_0^{-1}Z_n\right\}\pi\left(T_n+t_w/v_n\right)\text{d} t}\\&={\left|M_n\left(T_n+t_w/v_n\right)\right|^{1/2}\exp\left\{\omega(t)\right\}\pi\left(T_n+t_w/v_n\right)}/{C_n}, \end{flalign*}where $$
C_n=\int_{\mathcal{T}_n} |M_n\left(T_n+t_w/v_n\right)|^{1/2}\exp\left\{ \omega(t) \right\}\pi\left(T_n+t_w/v_n\right)\text{d} t. $$Throughout the rest of the proof, unless otherwise specified, integrals are calculated over ${\mathcal{T}_n}$.
The stated result follows if \begin{flalign*}
\int \|t\|^{\gamma}\left|\pi(t|S_n)-N\{t;0,W_0\}\right|\text{d} t&=C_n^{-1}J_n=o_p(1), \end{flalign*} where \begin{flalign*}
J_{n}&=\int\|t\|^{\gamma}\bigg{|}\left|M_n\left(T_n+\frac{t_w}{v_n}\right)\right|^{1/2}\exp\left\{\omega(t)\right\} \pi_{}\left(T_n+\frac{t_w}{v_n}\right)-\left|M(\theta_0)\right|^{1/2 } \exp \left\{-\frac{1}{2} t^{\intercal}W^{-1}_0 t\right\}C_{n}^{}\bigg{|} \text{d} t. \end{flalign*} However, $$J_{n}\leq J_{1n}+J_{2n},$$ where \begin{flalign*}
J_{1n}&:= \int\|t\|^{\gamma}\left|\left|M_n\left(T_n+\frac{t_w}{v_n}\right)\right|^{\frac{1}{2}}\exp \left\{{\omega_{}(t)}{}\right\} \pi_{}\left(T_n+\frac{t_w}{v_n}\right)-|M(\theta_0)|^{\frac{1}{2}}\exp \left\{-\frac{1}{2} t^{\intercal} W_0^{-1}t\right\} \pi_{}\left(\theta_0\right)\right| \text{d} t\\
J_{2n}&:=\left|C_{n}-\pi(\theta_0)\right|\int \|t\|^{\gamma}|M(\theta_0)|^{1/2}\exp \left\{-\frac{1}{2} t^{\intercal} W_0^{-1} t\right\} \text{d} t . \end{flalign*} Therefore, if $J_{1n}=o_{p}(1)$ the result follows since, taking $\gamma=0$, $J_{1n}=o_{p}(1)$ implies that \begin{flalign*}
\left|C_n-\pi(\theta_0) \right|&=\bigg{|}\int \left|M_n\left(T_n+\frac{t_w}{v_n}\right)\right|^{1/2}\exp\left\{\omega(t)\right\} \pi_{}\left(T_n+\frac{t_w}{v_n}\right)
\text{d} t\\&-\pi(\theta_0)\int |M(\theta_0)|^{1/2}\exp \left\{-\frac{1}{2} t^{\intercal} W_0^{-1} t\right\} \text{d} t\bigg{|}\\&=o_{p}(1), \end{flalign*}which implies that $J_{2n}=o_{p}(1)$.
To demonstrate that $J_{1n}=o_p(1)$, we split $\mathcal{T}_n$ into three regions. For some $0<h<\infty$ and $\delta>0$, with $\delta=o(1)$: region 1: $ \|t\|\leq h$; region 2: $ h<\|t\|\leq \delta v_n$; region 3: $ \|t\|\geq \delta v_n$.
\noindent\textbf{\textbf{Region 1}:} Over this region the result follows if $$
\|t\|^\gamma \left|\left|M_n\left(T_n+\frac{t_w}{v_n}\right)\right|^{1/2}\exp\left\{\omega(t)\right\} \pi_{}\left(T_n+\frac{t_w}{v_n}\right)-\pi(\theta_0) |M(\theta_0)|^{1/2}\exp \left\{-\frac{1}{2} t^{\intercal} W_0^{-1} t\right\}\right|=o_p(1). $$ Note that, \begin{flalign*}
\quad \sup_{\|t\|\leq h} \left\|M_n\left(T_n+\frac{t_w}{v_n}\right)-M(\theta_0)\right\|=o_{p}(1),\text{ and }\sup_{\|t\|\leq h}&\left|\pi\left(T_n+\frac{t_w}{v_n}\right)-\pi(\theta_0)\right|=o_{p}(1), \end{flalign*} where the first equation follows from Assumptions \ref{ass:two} and \ref{ass:three}, and because $$T_n=\theta_0+W_0^{-1}Z_n/v_n=\theta_0+o_{p}(1),$$ since $Z_n=O_p(1)$ by Assumption \ref{ass:one}. Likewise, by Assumption \ref{ass:one}, $$
\sup_{\|t\|\le h}\left\|T_n+t_w/v_n-\theta_0\right\|=O_p(1/v_n) $$ so that by the first part of Lemma \ref{lem:remain}, $$
\sup_{\|t\|\le h}|R_n(T_n+t_w/v_n)|=o_p(1). $$ Hence, $J_{1n}=o_{p}(1)$ from these equivalences and the dominated convergence theorem.
\noindent\textbf{\textbf{Region 2}:}
For $\delta=o(1)$ and small enough, by Assumption 3, $\sup_{h\le\|t\|\le \delta v_n}\|M_n\left(T_n+t_w/v_n\right)-M(\theta_0)\|^{}=o_{p}(1)$. For $h$ large enough and $\delta=o(1)$, we have the bound ${J}_{1n}\leq C_{1n}+C_{2n}+C_{3n}$, where \begin{flalign*}
C_{1n}:=&C\int_{h\leq \|t\| \leq \delta v_{n}}\|t\|^{\gamma}\exp(-t^{\intercal}W_0^{-1}t/2)\sup _{\|t\| \leq h}\left|\exp\left\{|R_n(T_n+t_w/v_n)|\right\}\left\{ \pi_{}\left(T_n+{t_w}/{v_n}\right)-\pi_{}\left(\theta_0\right)\right\}\right|\text{d} t \\
C_{2n}:=&C\int_{h\leq \|t\| \leq \delta v_{n}}\|t\|^{\gamma} \exp(-t^{\intercal}W_0^{-1}t/2)\exp\left\{|R_n(T_n+t_w/v_n)|\right\} \pi_{}\left(T_n+{t_w}/{v_n}\right) \text{d} t \\C_{3n}:=&C\pi_{}\left(\theta_0\right) \int_{h\leq \|t\| \leq \delta v_{n}}\|t\|^{\gamma}\exp(-t^{\intercal}W_0^{-1}t/2)\text{d} t . \end{flalign*}
The first term $C_{1n}=o_{p}(1)$ for any fixed $h$, so that $C_{1n}=o_{p}(1)$ for $h\rightarrow\infty$, by the dominated convergence theorem. For $C_{3n}$, we have that for any $0\le\gamma\le2$ there exists some $h'$ large enough such that for all $h>h'$, and $\|t\|\ge h$ $$\|t\|^{\gamma}\exp\left(-t^{\intercal}M_{0}t\right)=O(1/h).$$ Hence, $C_{3n}$ can be made arbitrarily small by taking $h$ large and $\delta$ small enough.
The result follows if $C_{2n}=o_p(1)$. We show that, for some $C>0$, and all $h\le\|t\|\le\delta v_n$, with probability converging to one (wpc1), \begin{equation}\label{eq:bound1}
\exp(-t^{\intercal}W_0^{-1}t/2)\exp\left\{|R_n(T_n+t_w/v_n)|\right\}\pi(T_n+t_w/v_n)\le C\exp\left\{-t^{\intercal}W_0^{-1}t/4\right\}. \end{equation} If equation \eqref{eq:bound1} is satisfied, then $C_{2n}$ is bounded above by \begin{flalign*}
C_{2n}\le &C\int_{h\le\|t\|\le\delta v_n}\|t\|^{\gamma}\exp\left\{-t^{\intercal}W_0^{-1}t/4\right\}\text{d} t,
\end{flalign*}which, again can be made arbitrarily small for some $h$ large and $\delta$ small. To demonstrate equation \eqref{eq:bound1}, first note that by continuity of $\pi(\theta)$, Assumption \ref{ass:three}, $\pi(T_n+t_w/v_n)$ is bounded over $\{t:h\le \|t\|\le\delta v_n\}$ so that it may be dropped from the analysis. Now, since $\|T_n-\theta_0\|=o_p(1)$, for any $\delta>0$, $\|T_n+t_w/v_n-\theta_0\|<2\delta$ for all $\|t_w\|\le\delta v_n$ and $n$ large enough. Therefore, by Lemma \ref{lem:remain}, there exists some $\delta>0$ and $h$ large enough so that (wpc1) $$
\sup_{h\le\|t\|\le\delta v_n}|R_n(T_n+t_w/v_n)|\le \frac{1}{4}\|t-Z_n\|^2\lambda_{\text{min}}\left\{W_0\right\}.
$$Since $Z_n=O_p(1)$, we have $Z_n^{\intercal}W_0^{-1}Z_n\le C\|Z_n\|^2=O_p(1)$, so that, for some $C>0$, wpc1, $$
\exp\{\omega(t)\}\leq \exp\{-t^{\intercal}W_{0}^{-1}t+|R_n(T_n+t_w/v_n)|\}\leq C\exp\left(-t^{\intercal}W_0^{-1}t/4\right), $$and the result follows.
\noindent\textbf{\textbf{Region 3}:} For $\delta v_n$ large, $$
\int_{\|t\|\ge \delta v_n}\|t\|^{\gamma}N\{t;0,W_0\}\text{d} t$$ can be made arbitrarily small and is therefore dropped from the analysis. Consider \begin{align*}
\tilde{J}_{1n}& :=\int_{\|t\|\ge \delta v_n}\|t\|^{\gamma} \left| M_{n}\left(t / v_{n}+S_{n}\right)\right|^{1/2} \exp\{\omega(t) \} \pi\left(t / v_{n}+S_{n}\right)\text{d} t , \\
& = v_{n}^{d_\theta+\gamma}\int_{\|\theta-T_n\|\ge \delta }\|\theta-T_n\|^{\gamma}|M_n(\theta)|^{1/2}\exp\left\{Q_n(\theta)\right\} \pi\left(\theta\right)\text{d} \theta, \end{align*} by using the change of variables $\theta=T_n+t_w/v_n$. Now, \begin{flalign*}
\tilde{J}_{1n}& =\exp\left\{Q_n(\theta_0)\right\}v_{n}^{d_\theta+\gamma}\int_{\|\theta-T_n\|\ge \delta }\|\theta-T_n\|^{\gamma}|M_n(\theta)|^{1/2}\exp\left\{Q_n(\theta)-Q_n(\theta_0)\right\} \pi\left(\theta\right)\text{d} \theta, \end{flalign*} and note that $\exp\{Q_n(\theta_0)\}=O_{p}(1)$ because $Q_n(\theta_0)=O_{p}(1)$ by Assumptions \ref{ass:one} and \ref{ass:two}.
Define $Q(\theta):=-\{b(\theta)-b(\theta_0)\}^{\intercal}M(\theta)\{b(\theta)-b(\theta_0)\}/2$ and note that $Q(\theta_0)=0$ by virtue of Assumption \ref{ass:four}(i) and positive-definiteness of $\Delta(\theta_0)$ (Assumption \ref{ass:two}(ii)). For any $\delta>0$, \begin{flalign*}
\sup_{\|\theta-\theta_0\|\ge \delta}\frac{1}{v^2_n}\left\{Q_n(\theta)-Q_n(\theta_0)\right\}\leq& \sup_{\|\theta-\theta_0\|\ge \delta}2|v_n^{-2}Q_n(\theta)-Q(\theta)|+\sup_{\|\theta-\theta_0\|\ge \delta}\left\{Q(\theta)-Q(\theta_0)\right\}. \end{flalign*} From Assumptions~\ref{ass:one} and \ref{ass:two}, the first term converges to zero in probability. From Assumption~\ref{ass:two}(iii), for any $\delta>0$ there exists an $\epsilon>0$ such that $$
\sup_{\|\theta-\theta_0\|\ge \delta}\left\{Q(\theta)-Q(\theta_0)\right\}\le -\epsilon. $$ Hence, \begin{equation} \label{eq:expconv}
\lim_{n\rightarrow\infty}P^{(n)}_0\left[\sup_{\|\theta-\theta_0\|\geq \delta}\exp\left\{Q_n(\theta)-Q_n(\theta_0)\right\}\leq \exp(-\epsilon v_n^2)\right]=1. \end{equation} Use $T_n=\theta_0+O_p(1/v_n)$, the definition $M_n(\theta)=v_n^2\Delta_n(\theta)$, and equation \eqref{eq:expconv} to obtain \begin{align*}
\tilde{J}_{1n} & = \{1+o_p(1)\}\exp\left\{Q_n(\theta_0)\right\}v_{n}^{d_\theta+\gamma}\int_{\|\theta-\theta_{0}\|\ge \delta }|v^2_n\Delta_n(\theta)|^{-1/2}\|\theta-\theta_0\|^{\gamma}\pi\left(\theta\right)\exp\{Q_n(\theta)-Q_n(\theta_0)\}\text{d} \theta\\&\leq O_p(1) \exp\left(-\epsilon v_n^2\right)v_{n}^{d_\theta+\gamma}\int_{\|\theta-\theta_0\|\ge \delta }|v^2_n\Delta_n(\theta)|^{-1/2}\|\theta-\theta_0\|^{\gamma}\pi\left(\theta\right)\text{d} \theta\\&\leq O_p\left\{\exp\left(-\epsilon v_n^2\right)v_{n}^{d_\theta+\gamma}\right\}
\\&=o_p(1); \end{align*}where the third inequality follows from the moment hypothesis in Assumption~\ref{ass:three}. \end{proof}
The following result is a consequence of Proposition 1 in \cite{chernozhukov+h03}. \begin{lemma}\label{lem:remain} Under Assumptions \ref{ass:one}-3, and for $R_n(\theta)$ as defined in the proof of Lemma \ref{prop:bvm1}, for each $\epsilon>0$ there exists a sufficiently small $\delta>0$ and $h>0$ large enough, such that $$
\limsup_{n\rightarrow\infty}\text{Pr}\left[\sup_{h/v_n\le \|\theta-\theta_0\|\le\delta}\frac{|R_n(\theta)|}{1+n\|\theta-\theta_0\|^2}>\epsilon\right]<\epsilon $$ and $$
\limsup_{n\rightarrow\infty}\text{Pr}\left[\sup_{ \|\theta-\theta_0\|\le h/v_n}{|R_n(\theta)|}>\epsilon\right]=0. $$ \end{lemma} \begin{proof} The result is a specific case of Proposition~1 in \cite{chernozhukov+h03}. Therefore, it is only necessary to verify that their sufficient conditions are satisfied in our context.
Assumptions (i)-(iii) in their result follow
directly from Assumptions \ref{ass:four} and \ref{ass:two}, and the normality of $v_n\{b(\theta_0)-S_n\}$ in
Assumption~\ref{ass:one}. Therefore, all that remains is to verify their Assumption (iv).
Defining ${g}_n(\theta)=b(\theta)-S_n$, their Assumption~(iv) is stated as follows: for any $\epsilon>0$, there is a $\delta>0$ such that $$
\limsup_{n\rightarrow\infty}\text{Pr}\left\{\sup_{\|\theta-\theta'\|\le\delta}\frac{v_n\left\| \{g_n(\theta)-g_n(\theta')\}-\{\mathbb{E}\left[g_n(\theta)\right]-\mathbb{E}
\left[g_n(\theta')\right]\}\right\|}{1+v_n\|\theta-\theta'\|}>\epsilon\right\}<\epsilon . $$ In our context, this condition is always satisfied: for $g_n(\theta)=b(\theta)-S_n$, and all $n$, \begin{flalign*}
{\|\{g_n(\theta)-g_n(\theta')\}-\{\mathbb{E}\left[g_n(\theta)\right]-\mathbb{E}\left[g_n(\theta')\right]\}}\|&={\|\{b(\theta)-b(\theta')\}-\{[b(\theta)-b_0]-[b(\theta')-b_0]\}}\|\\&=0. \end{flalign*} \end{proof}
The following result is used in the proof of Theorem \ref{prop:bvm1} and is an intermediate result of Theorem 1 in \cite{hsu2012tail}.
\begin{lemma}[Theorem 1, \cite{hsu2012tail}]\label{lem:propO} Suppose $x=(x_1,\dots,x_{d})^\intercal$ is a random vector such that for some $\mu\in\mathbb{R}^d$ and some $\sigma\ge 0$, $$
\mathbb{E}\left[\exp \left\{\alpha^{\intercal}(x-\mu)\right\}\right] \leq \exp \left(\|{\alpha}\|^{2} \sigma^{2} / 2\right),
$$for all $\alpha\in\mathbb{R}^d$. For $M\in\mathbb{R}^{d\times d}$ a positive-definite and symmetric matrix such that $M:=A^{\intercal} A$, for $0\leq \eta<1/(2\sigma^2\|M\|)$, $$ \begin{aligned}
\mathbb{E}\left[\exp \left(\eta\|A x\|^{2}\right)\right]
& \leq \exp \left\{\sigma^{2} \operatorname{tr}(M) \eta+\frac{\sigma^{4} \operatorname{tr}\left(M^{2}\right) \eta^{2}+\|A \mu\|^{2} \eta}{1-2 \sigma^{2}\|M\|\eta}\right\} \end{aligned}. $$
\end{lemma}
\end{document} |
\begin{document}
\title{Recurrent set on some Bedford-McMullen carpets} \begin{abstract}
In this paper, we study the Hausdorff dimension of the quantitative recurrent set of the canonical endomorphism on the Bedford-McMullen carpets whose Hausdorff dimension and box dimension are equal. \end{abstract} \textbf{keywords:} Bedford-McMullen carpet, recurrent set, Hausdorff dimension
\section{Introduction} \subsection{Background} The concept of recurrence plays an important role in dynamical systems and ergodic theory. Let $(X,\mathcal{B},\mu,T)$ be a measure-preserving system equipped with a compatible metric $d$, i.e. $(X,d)$ is a metric space, $\mathcal{B}$ is the Borel $\sigma$-algebra of $X$, and $\mu$ is a $T$-invariant probability measure. If $(X, d)$ is a separable metric space, the Poincar\'{e} Recurrence Theorem implies that $\mu$-almost every $x\in X$ is recurrent in the sense that \begin{equation*}
\liminf_{n\rightarrow\infty}d(T^nx, x)=0. \end{equation*} By its nature, this result provides no information about the rate at which an orbit returns to the initial point, or about how quickly a neighbourhood of the initial point may shrink under iteration. Interest in such quantitative characterizations has provoked a rich subsequent literature on the so-called quantitative recurrent sets: given a \emph{rate function} $\psi:\mathbb{N}\times X \rightarrow (0,\infty)$, the \emph{quantitative recurrent set with respect to $\psi$} is defined as \begin{equation} \label{eq:quantitative_recurrent_set} R(T,\psi)=\big\{x\in X:d(T^nx, x)<\psi(n,x) \text{ for infinitely many } n \in \mathbb{N} \big\}. \end{equation} Boshernitzan \cite{Boshernitzan1993} gave an outstanding result for general systems concerning the size in measure of $R(T,\psi)$, and Barreira and Saussol \cite{Barreira2001} later established a finer result.
In recent years, many authors have turned their attention to the problem of recurrent sets on fractals. On the one hand, some researchers showed that, in certain dynamical systems, the $\mu$-measure of the set $R(T,\psi)$ is null or full according to the convergence or divergence of a certain series (see Chang-Wu-Wu \cite{Chang2018}, Baker-Farmer \cite{Baker2019}, Hussain-Li-Simmons-Wang \cite{Hussain2021}, Kirsebom-Kunde-Persson \cite{Kirsebom2022}, Persson \cite{Persson2022} and Kleinbock-Zheng \cite{Kleinbock2022}). On the other hand, many researchers studied the Hausdorff dimension of the set $R(T,\psi)$ in various dynamical systems (see Tan-Wang \cite{Tan2011} and Seuret-Wang \cite{Seuret2013}). Note that when we require $\{T^n x\}_{n\ge 1}$ to return to neighborhoods of a chosen point $x_0\in X$ rather than the initial point $x$, the problem becomes the so-called shrinking target problem, which was first investigated by Hill and Velani \cite{Hill1995}. Since its initial introduction, many more authors have contributed to the study of the shrinking target problem. To name but a few, see \cite{Allen2021ONTH, Barany2018ShrinkingTO, Bugeaud2003, Fan2013AMM, Hill2002, koivusalo2018, li2022, Li2014, liao_seuret_2013, persson_rams_2017, Shen2013ShrinkingTP, Tseng2007OnCR} and the references therein.
It is to be noted that the aforementioned works mainly involve systems on $\mathbb{R}^1$ or conformal dynamical systems, and hardly anything is known as far as higher-dimensional non-conformal dynamics are concerned. The only known result was presented by B\'{a}r\'{a}ny and Troscheit \cite{Barany2021}, who investigated the dimensions of shrinking target sets and of a quantitative version of recurrence for self-affine dynamical systems. In this work, we consider the recurrent set for affine iterated function systems. Among them, the Bedford-McMullen carpets form a typical family of self-affine sets, introduced in \cite{bedfordcrinkly} and \cite{McMullen1984}.
In this paper we present a Hausdorff dimension formula for the recurrent set, valid for the subfamily of Bedford-McMullen carpets whose Hausdorff dimension equals the box dimension. The paper is organized as follows. In Section \ref{S1}, we present the necessary notation, preliminaries and the statement of the main theorem. Section \ref{S2} and Section \ref{S3} are devoted to the proof of Theorem \ref{M1}. Finally, in Section \ref{S4}, we provide some examples to which the main theorem applies.
\section{Notions, preliminaries and the main result}\label{S1} In this section we introduce the necessary notation and preliminaries and state the main result of the paper. \subsection{Notations and preliminaries} Let $2 \le m_1 \le m_2$ be two integers and $\Sigma_{m_i}=\{0,1,\cdots,m_i-1\}$ for $i=1, 2$. Define for every $\mathtt{a}=(\mathtt{a}^{(1)},\mathtt{a}^{(2)}) \in \Sigma_{m_1} \times \Sigma_{m_2}$ a map $\phi_{\mathtt{a}}:[0,1]^2 \to [0,1]^2$ as \[ \phi_{\mathtt{a}}(x^{(1)},x^{(2)}):=\left(\frac{x^{(1)}+\mathtt{a}^{(1)}}{m_1},\frac{x^{(2)}+\mathtt{a}^{(2)}}{m_2}\right). \] Given any nonempty subset $A \subseteq \Sigma_{m_1} \times \Sigma_{m_2}$, the \emph{Bedford-McMullen carpet $K$ associated with $A$} is the unique attractor of the iterated function system $\{\phi_{\mathtt{a}}: \mathtt{a} \in A\}$. Alternatively, if we consider the coding map $\pi: (\Sigma_{m_1} \times \Sigma_{m_2})^{\mathbb{N}} \to [0,1]^2$ defined as \[ \pi(\mathtt{x})=\left(\sum_{n=1}^{\infty}\frac{\mathtt{x}^{(1)}_n}{m_1^n},\sum_{n=1}^{\infty}\frac{\mathtt{x}^{(2)}_n}{m_2^n}\right), \] where $\mathtt{x}=(\mathtt{x}^{(1)}_1,\mathtt{x}^{(2)}_1)(\mathtt{x}^{(1)}_2,\mathtt{x}^{(2)}_2)(\mathtt{x}^{(1)}_3,\mathtt{x}^{(2)}_3) \cdots$, then the Bedford-McMullen carpet $K$ can be expressed as $K=\pi(A^{\mathbb{N}})$. It is noteworthy that the latter definition naturally endows the carpet $K$ with a map $T:K \to K$ defined as \[ T(x)=(T_{m_1}(x^{(1)}), T_{m_2}(x^{(2)})):=\left(m_1 x^{(1)}\ (\bmod\ 1),m_2 x^{(2)}\ (\bmod\ 1)\right). \]
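As a quick illustration of the coding map and of the map $T$ (a small numerical sketch under the notation above; the alphabet below is an arbitrary choice):
\begin{verbatim}
# Small sketch: truncated coding map for a Bedford-McMullen carpet and
# the map T(x) = (m1*x1 mod 1, m2*x2 mod 1), using a finite word over A.
import numpy as np

m1, m2 = 2, 3
A = [(0, 0), (1, 1), (1, 2)]        # an arbitrary choice of alphabet

def pi_truncated(word):
    """Map a finite word over A to a point approximating pi(x)."""
    x1 = sum(a1 / m1 ** (n + 1) for n, (a1, a2) in enumerate(word))
    x2 = sum(a2 / m2 ** (n + 1) for n, (a1, a2) in enumerate(word))
    return x1, x2

def T(x):
    return ((m1 * x[0]) % 1.0, (m2 * x[1]) % 1.0)

rng = np.random.default_rng(0)
word = [A[i] for i in rng.integers(len(A), size=20)]
x = pi_truncated(word)
print(x, T(x))   # T acts as the shift on the coding
\end{verbatim}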
For convenience, the following notation for the symbolic space is also introduced. Let $\Sigma_{m_i}^n=\{u:u=(u_1,\dots,u_n), u_j\in \Sigma_{m_i}, \ j=1,\dots,n \}.$ Firstly, for any $l\in \mathbb{N} \cup \{\infty\}$ and $u \in \Sigma_{m_i}^n$, write $(u)^l$ for the word $(u,\cdots,u)$ ($l$ times repeated concatenation of the word). More generally, for any positive number $l$, denote by $(u)^{l}$ the word $(u)^{ \lfloor l \rfloor }u'$, where $u'$ is the prefix of $u$ with length $\lfloor(l-\lfloor l \rfloor)|u|\rfloor$ and $\lfloor l \rfloor=\max\{k\in \mathbb{N}: k\leq l\}$. Secondly, we identify the spaces $(\Sigma_{m_1} \times \Sigma_{m_2})^{\mathbb{N}}$ and $(\Sigma_{m_1}^{\mathbb{N}} \times \Sigma_{m_2}^{\mathbb{N}})$ by setting \[ (\mathtt{x}^{(1)}_1,\mathtt{x}^{(2)}_1)(\mathtt{x}^{(1)}_2,\mathtt{x}^{(2)}_2)(\mathtt{x}^{(1)}_3,\mathtt{x}^{(2)}_3) \cdots \sim (\mathtt{x}^{(1)}_1 \mathtt{x}^{(1)}_2 \mathtt{x}^{(1)}_3 \cdots ,\mathtt{x}^{(2)}_1 \mathtt{x}^{(2)}_2 \mathtt{x}^{(2)}_3 \cdots), \] which is a one-to-one correspondence. With this identification, we introduce the following notation for cylinder sets with different lengths in the two coordinates: \[ [(\mathtt{w}^{(1)},\mathtt{w}^{(2)})]=\big\{(\mathtt{x}^{(1)},\mathtt{x}^{(2)}): \mathtt{x}^{(i)}_{1:n_i} = \mathtt{w}^{(i)}, i=1,2\big\}, \quad \big(\mathtt{w}^{(1)},\mathtt{w}^{(2)}\big) \in \Sigma_{m_1}^{n_1} \times \Sigma_{m_2}^{n_2}, \] each of which is associated with a half-open half-closed rectangle \[ I([(\mathtt{w}^{(1)},\mathtt{w}^{(2)})])=\left[\sum_{i=1}^{n_1} \frac{\mathtt{w}^{(1)}_i}{m_{1}^i},\sum_{i=1}^{n_1} \frac{\mathtt{w}^{(1)}_i}{m_{1}^i}+m_{1}^{-n_1}\right) \times \left[\sum_{i=1}^{n_2} \frac{\mathtt{w}^{(2)}_i}{m_{2}^i},\sum_{i=1}^{n_2} \frac{\mathtt{w}^{(2)}_i}{m_{2}^i}+m_{2}^{-n_2}\right). \] In particular, we call $I([(\mathtt{w}^{(1)},\mathtt{w}^{(2)})])$ an \emph{$n_1$-th level approximate square} when $n_2 = \lceil \log_{m_2} m_1 \cdot n_1 \rceil$. These notations will appear frequently in our discussions, and they are summarized in Table \ref{tab:notations} for the reader's convenience.
\begin{table}[t]
\centering
\begin{tabular}{l l}
\hline
IFS & iterated function system \\
$m_1$ & the number of columns in $K$ \\
$m_2$ & the number of rows in $K$ \\
$M$ & the number of columns containing at least one chosen rectangle \\
$N_i$ & the number of rectangles chosen from the $i$-th non-empty column \\
$\Sigma_{m_i}$ & the digit sets $\{0,1,\ldots,m_i-1\}$ \\
$A $ & the alphabet $A \subseteq \Sigma_{m_1} \times \Sigma_{m_2}$\\
$K $ & the Bedford-McMullen carpet determined by $A$\\
$T_{m_i}$ & canonical $\times m_i$-map: $T_{m_i}(x)= m_i x \pmod{1}$\\
$A^n$ & the set $A^n=\big\{(\mathtt{x}^{(1)}_1\mathtt{x}^{(1)}_2\dots \mathtt{x}^{(1)}_n, \ \mathtt{x}^{(2)}_1\mathtt{x}^{(2)}_2\dots \mathtt{x}^{(2)}_n): (\mathtt{x}^{(1)}_i,\mathtt{x}^{(2)}_i)\in A, 1\leq i\leq n \big\}$ \\
$ \mathtt{x}^{(i)}_{1:n}$ & the word $\mathtt{x}^{(i)}_{1}\mathtt{x}^{(i)}_{2}\dots\mathtt{x}^{(i)}_{n}$ \\
$[(\mathtt{w}^{(1)},\mathtt{w}^{(2)})]$ & cylinder set: $[(\mathtt{w}^{(1)},\mathtt{w}^{(2)})]=\big\{(\mathtt{x}^{(1)},\mathtt{x}^{(2)}): \mathtt{x}^{(i)}_{1:n_i} = \mathtt{w}^{(i)}, i=1,2\big\}$, $\big(\mathtt{w}^{(1)},\mathtt{w}^{(2)}\big) \in \Sigma_{m_1}^{n_1} \times \Sigma_{m_2}^{n_2}$ \\
$\psi: \mathbb{N} \to \mathbb{R}_+$ & rate function \\
$W(K,T,\psi)$ & the recurrent set \\
$\pi$ & the coding map $(\Sigma_{m_1} \times \Sigma_{m_2})^{\mathbb{N}} \to [0,1]^2$ \\
$I(\mathtt{w})$ & the half-open half-closed rectangle coded as $\mathtt{w}$ \\
$J(\mathtt{w})$ & the set of returning points in $I(\mathtt{w})\cap K$ associated with $\mathtt{w}$ \\
$B(x,r)$ & the ball of radius $r$ around $x$ \\
$\#S$ & the cardinality of the finite set $S$\\
\end{tabular}
\caption{Table of notations}
\label{tab:notations} \end{table}
\subsection{The main theorem} It is the purpose of this paper to study the quantitative recurrent set problem on the system $(K, T)$ of Bedford-McMullen carpet $K$. Let $\psi: \mathbb{N} \to (0,\infty)$ be a \emph{rate function} and define the \emph{recurrent set $W(K,T,\psi)$ with respect to $\psi$} as: \begin{align}\label{e2}
W(K,T,\psi):=\left\{x \in K: \begin{cases} |x^{(1)}-T^n_{m_1}(x^{(1)})| < \psi(n) \\
|x^{(2)}-T^n_{m_2}(x^{(2)})| < \psi(n)
\end{cases} \text{for infinitely many } n \right\}. \end{align} Denote by $\dim_{\mathrm{H}}$, $\dim_{\mathrm{B}}$ the Hausdorff and box dimensions, respectively. The following notation is used throughout the discussion. \begin{equation}\label{E0}
\ell_{i}(n) = -\log_{m_i} \psi(n) \ \text{ for } i=1,2. \end{equation} \begin{equation}\label{E1}
\frac{\ell_{i}(n)}{n} =\tau_i(n) \ \text{ and } \
\liminf_{n \to \infty}\frac{\ell_{i}(n)}{n}= \liminf_{n \to \infty} \frac{-\log_{m_i} \psi(n)}{n} =\tau_i, \ \text{ for } i=1,2. \end{equation} Denote \begin{align}\label{E8}
\hat{\ell}_{i}(n)=\lceil \ell_{i}(n) \rceil \end{align} for $i=1,2$ and $n\in \mathbb{N}$, where $\lceil\ell_{i}(n)\rceil=\min\{k\in \mathbb{N}: k\geq \ell_{i}(n)\}$.
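For orientation, consider a simple geometric rate function (an illustrative special case of the quantities in \eqref{E0} and \eqref{E1}, not needed for the results): if $\psi(n)=m_1^{-cn}$ for some $c>0$, then
\begin{align*}
\ell_{1}(n)=cn, \qquad \ell_{2}(n)=cn\log_{m_2}m_1, \qquad \tau_1=c, \qquad \tau_2=c\log_{m_2}m_1\le c,
\end{align*}
since $m_1\le m_2$.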
Our main result related to the Hausdorff dimension of the set $W(K,T,\psi)$ is as follows. \begin{theorem}\label{M1} Let $ W(K,T,\psi)$ be the set as defined in (\ref{e2}) and $\tau_1 \ge 0$. Suppose that $\dim_{\mathrm{B}} K = \dim_{\mathrm{H}} K$, i.e. $N_i=N$ for all $i$.
$(1)$ If $\log_{m_1} m_2 > 1 + \tau_1$, then
\begin{align*}
\dim_{\mathrm{H}} W(K,T,\psi)=
\min \left\{\frac{\log_{m_1} M + \log_{m_2} N}{1+\tau_2},
\frac{\log_{m_1} M}{1+\tau_1} + \log_{m_2} N\right\}.
\end{align*}
$(2)$ If $\log_{m_1} m_2 \le 1 + \tau_1$, then
\begin{align*}
\dim_{\mathrm{H}} W(K,T,\psi) = \min\left\{
\frac{\log_{m_1} M +\log_{m_2}N}{1+\tau_2}, \
\frac{\log_{m_1} M +\log_{m_1} N}{1+\tau_1} \right\},
\end{align*}
where $\tau_{i}$, $i=1,2$, is defined in (\ref{E1}). \end{theorem} We note that in Theorem \ref{M1}, if $\tau_1<0$, then $\dim_{\mathrm{H}} W(K,T,\psi)=\dim_{\mathrm{H}} K$. Besides, the cases $\tau_1=\infty$ and $\tau_1=0$ follow immediately from the case $0 < \tau_1 < \infty$, which is therefore always assumed in what follows. \begin{remark}
The set $W(K,T,\psi)$ can be interpreted as a quantitative recurrent set as defined in \eqref{eq:quantitative_recurrent_set}, with the maximum norm on $\mathbb{R}^2$. Nevertheless, Theorem \ref{M1} implies that the dimension of the recurrent set is invariant under all equivalent metrics, including the one induced by the Euclidean norm.
\section{Proof of the lower bound}\label{S2}
In this section, we prove the lower bound for $\dim_{\mathrm{H}} W(K,T,\psi)$; to this end, we establish Lemmas \ref{lem:local_dimension}--\ref{L5}.
In the following, we outline our strategy of proof. Let $p \in [0,1]^{A}$ be a probability vector indexed by $A$. For every $\mathtt{a} \in A$, let $p_{\mathtt{a}^{(1)}}=\sum_{\mathtt{b}\in A, \ \mathtt{b}^{(1)}=\mathtt{a}^{(1)}}p_{\mathtt{b}}$ and $A^{(1)}=\{\mathtt{a}^{(1)}: \mathtt{a}=(\mathtt{a}^{(1)}, \mathtt{a}^{(2)}), \ \mathtt{a} \in A\}$.
We take an increasing sequence of natural numbers $n_i$ such that
\begin{align}\label{E30}
2^i \sum_{j=1}^{i}n_j\ll n_{i+1} \quad \text{ and } \quad \lim_{i \to \infty} \tau_1(n_i) = \tau_1.
\end{align}
Define a collection of Borel measurable maps $X_n: A^{\mathbb{N}} \to A$, $n \in \mathbb{N}$, as $X_n(\mathtt{x}):=\mathtt{x}_n$, together with the following probability measure $\mu$ on $A^{\mathbb{N}}$. For $\mathtt{x} \in A^{\mathbb{N}}$ and $i \in \mathbb{N}$, consider the condition
\begin{equation}\label{E13}
\begin{cases}
\mathtt{x}_{j}^{(1)}=\mathtt{x}_{j+n_i}^{(1)} & \text{for all } 1 \le j \le \hat{\ell}_{1}(n_i); \\
\mathtt{x}_{j}^{(2)}=\mathtt{x}_{j+n_i}^{(2)} & \text{for all } 1 \le j \le \hat{\ell}_{2}(n_i).
\end{cases}
\end{equation}
For every $i_0 \in \mathbb{N}$, we then set
\begin{align*}
&\mu\big(X_j=\mathtt{x}_j,\ 1 \le j \le n_{i_0}+\hat{\ell}_{1}(n_{i_0})\big) := \begin{cases}
\prod_{i=1}^{i_0}\prod_{j=n_{i-1}+\hat{\ell}_{1}(n_{i-1})+1}^{n_i} p_{\mathtt{x}_j} \cdot \prod_{j=n_i+\hat{\ell}_{2}(n_i)+1}^{n_i+\hat{\ell}_{1}(n_i)} \frac{p_{\mathtt{x}_j}}{p_{\mathtt{x}^{(1)}_j}} & \text{if } \eqref{E13} \text{ holds for all } 1 \le i \le i_0; \\
0 & \text{otherwise},
\end{cases}
\end{align*}
where $\hat{\ell}_{i}$, $i=1,2$, is defined in (\ref{E8}) and $p_{\mathtt{x}_j}/p_{\mathtt{x}^{(1)}_j}$ is defined to be $0$ if $p_{\mathtt{x}^{(1)}_j} = 0$. With this definition, $\mu$ is a pre-measure on the algebra generated by the cylinder sets of length $n_i$. According to the Carath\'{e}odory extension theorem, this pre-measure can be uniquely extended to a Borel probability measure on $A^{\mathbb{N}}$, which we also denote by $\mu$ by abuse of notation. It then follows from this definition that $\text{supp} (\mu \circ \pi^{-1})\subseteq W(K,T,\psi)$. We note that our choice of measure is very similar to a piecewise Bernoulli measure considered in \cite{Barany2018ShrinkingTO}, and by writing
\begin{align}\label{E21}
F(\mathtt{x},k)= \mu\bigg(\big[(\mathtt{x}^{(1)}_{1:k},\mathtt{x}^{(2)}_{1: \lceil k \log_{m_2} m_1\rceil})\big]\bigg),
\end{align}
we recover in Lemma \ref{lem:local_dimension} an analogue of \cite[Lemma 4.2]{Barany2018ShrinkingTO}:
\begin{align*}
\liminf_{k \to \infty} \frac{-\log_{m_1} F(\mathtt{x},k)}{k} = \liminf_{k \to \infty} \mathbb{E}_{p} \left[\frac{-\log_{m_1} F(X,k)}{k}\right] \quad \mu \text{-a.e.},
\end{align*}
where $\mathbb{E}_{p}$ denotes the mathematical expectation with respect to $\mu$. It is noteworthy that the above limit coincides with the lower local dimension function almost everywhere (see for example \cite{K_enm_ki_2013}), i.e.
\[
\liminf_{k \to \infty} \frac{-\log_{m_1} F(\mathtt{x},k)}{k} =\liminf_{r\rightarrow 0} \frac{\log \mu \circ \pi^{-1}(B(\pi(\mathtt{x}),r))}{\log r} \quad \mu\text{-a.e.}.
\]
We then apply \cite[Proposition 2.3]{falconer1997techniques} to conclude that
\[
\dim_{\mathrm{H}}(W(K,T,\psi)) \ge \sup_{p} \left[ \liminf_{k \to \infty} \mathbb{E}_{p} \left[\frac{-\log_{m_1} F(X,k)}{k}\right] \right],
\]
and the problem of lower bound is then reduced to the maximization of the right-hand side of the inequality. By writing
\begin{equation} \label{E22}
H(p)=\mathbb{E}_{p}\left[-\log p_{X_i}\right]=\sum\limits_{\mathtt{a} \in A} - p_{\mathtt{a}} \log p_{\mathtt{a}},
\end{equation}
\begin{equation} \label{E9}
H_1(p)=\mathbb{E}_{p}\left[-\log p_{X^{(1)}_i}\right]=\sum\limits_{\mathtt{a}^{(1)} \in A^{(1)}}- p_{\mathtt{a}^{(1)}} \log p_{\mathtt{a}^{(1)}},
\end{equation}
and
\begin{equation} \label{E18}
H_2(p)=\mathbb{E}_{p}\left[-\log \frac{p_{X_i}}{p_{X^{(1)}_{i}}}\right]=\sum\limits_{\mathtt{a} \in A}- p_{\mathtt{a}}\log \frac{p_{\mathtt{a}}}{p_{\mathtt{a}^{(1)}}},
\end{equation}
we have that when $\log_{m_1} m_2 > 1 + \tau_1$,
\begin{align*}
\liminf_{k \to \infty} \mathbb{E}_{p} \left[\frac{-\log_{m_1} F(X,k)}{k}\right] \ge \min \Bigg\{ &\frac{H_1(p)\log_{m_2} m_1+H_2(p)}{(1+\tau_2)\log m_1},\frac{H_1(p)}{\log m_2} + \frac{H_2(p)}{(1+\tau_1)\log m_1}\Bigg\}
\end{align*}
by Lemma \ref{L3}, and that when $\log_{m_1} m_2 \leq 1 + \tau_1$,
\begin{align*}
\liminf_{k \to \infty} \mathbb{E}_{p} \left[\frac{-\log_{m_1} F(X,k)}{k}\right] \ge \min\Bigg\{
& \frac{H_1(p)\log_{m_2} m_1+H_2(p)}{(1+\tau_2)\log m_1},\frac{H_1(p) + H_2(p)}{(1+\tau_{1})\log m_1}\Bigg\}
\end{align*}
by Lemma \ref{L5}, both of which turn out to admit an obvious maximum point.
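We also note that $H(p)=H_1(p)+H_2(p)$, which is the chain rule for Shannon entropy ($H_1$ is the entropy of the first coordinate of $X$ and $H_2$ the conditional entropy of $X$ given its first coordinate); this identity is used in the case analysis below. The following short Python sketch, with a hypothetical five-element alphabet $A$ and probability vector $p$, merely illustrates \eqref{E22}, \eqref{E9} and \eqref{E18} and the identity numerically; it plays no role in the proofs.
\begin{verbatim}
import math

# hypothetical alphabet A of pairs (a1, a2) with a probability vector p on A
p = {(0, 0): 0.2, (0, 1): 0.1, (1, 0): 0.3, (1, 1): 0.15, (2, 0): 0.25}

def H(p):    # entropy of X, cf. (E22)
    return -sum(q * math.log(q) for q in p.values() if q > 0)

def marginal(p):  # distribution of the first coordinate X^(1)
    m = {}
    for (a1, _), q in p.items():
        m[a1] = m.get(a1, 0.0) + q
    return m

def H1(p):   # entropy of X^(1), cf. (E9)
    return -sum(q * math.log(q) for q in marginal(p).values() if q > 0)

def H2(p):   # conditional entropy of X given X^(1), cf. (E18)
    m = marginal(p)
    return -sum(q * math.log(q / m[a1]) for (a1, _), q in p.items() if q > 0)

print(H(p), H1(p) + H2(p))  # the two numbers agree (chain rule)
\end{verbatim}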
We now prove Lemmas \ref{lem:local_dimension}--\ref{L5}.
\begin{lemma} \label{lem:local_dimension}
Let $F(X,k)$ be as defined in (\ref{E21}) and suppose $0 \le \tau_1 < \infty$. Then,
\[
\liminf_{k \to \infty} \frac{-\log_{m_1} F(X,k)}{k} = \liminf_{k \to \infty} \mathbb{E}_{p}\left[\frac{-\log_{m_1} F(X,k)}{k}\right] \quad \mu \text{-a.e.}.
\] \end{lemma} \begin{proof}
According to the definition of $\mu$, the random variables $\{X_j: j \in S\}$ are independent when $S$ is chosen to be
\begin{equation} \label{eq:independent_set}
S=\bigcup_{i \in \mathbb{N}} \left(n_i \mathbb{Z} + \big(n_{i-1}+\hat{\ell}_{1}(n_{i-1}),n_i\big] \right) \cap \left(n_{i-1}+\hat{\ell}_{1}(n_{i-1}),n_{i}+\hat{\ell}_{1}(n_{i})\right].
\end{equation}
Moreover, it can be seen from the definition of $\mu$ and $p$ that $S$ satisfies
\[
\underline{d}(S):=\liminf_{k \to \infty} \frac{\# \big(S \cap [1,k]\big)}{k} = 1.
\]
By virtue of these, we shall consider in the following argument, instead of $F(\mathtt{x},k)$, the function
\[
F_{\ast}(\mathtt{x},k)= \sum_{\mathtt{w} \in \Sigma_{m_1}^k \times \Sigma_{m_2}^{\lceil k \log_{m_2} m_1\rceil}: \forall j \in S, \mathtt{w}_j=\mathtt{x}_j}\mu([\mathtt{w}]),
\]
since it follows from $\underline{d}(S)=1$ that
\[
\liminf_{k \to \infty} \frac{-\log_{m_1} F(X,k)}{k} = \liminf_{k \to \infty} \frac{-\log_{m_1} F_{\ast}(X,k)}{k} \quad \mu\text{-a.e.}.
\]
In the following we prove the lemma for the case $1+\tau_1 \ge \log_{m_2} m_1$. Let $R_{3 i} = n_i + \hat{\ell}_{1}(n_i)$, $R_{3 i + 1} = n_{i+1}$, and $R_{3 i + 2} = n_{i+1} + \hat{\ell}_{2}(n_{i+1})$, and let $0 \le r_{i,j} \le s_{i,k_i}$ be such that $r_{i,j} \equiv R_{3i+j} \pmod{n_i}$ for $j=1,2$. Now for all sufficiently large $i$ let
\[
R_{3i}= s_{i,1} < \cdots < s_{i,k_i-1}<s_{i,k_i}=R_{3i+1}
\]
be a sequence such that (a) $r_{i,1},r_{i,2}$ are included if they lie in $(s_{i,1},s_{i,k_i}]$, and (b) $s_{i,j+1}-s_{i,j}=2^i+j$ except for at most $C_0:=2 \cdot \#\{r_{i,1},r_{i,2},R_{3i+1}\} = 6$ different $j$'s, for which we only require $2^i+j \ge s_{i,j+1}-s_{i,j} \ge 2^{i-1}$. For convenience, we also define $s_{i,j+k_i}=s_{i,j}+n_i$ for $j \ge n_k$ if $s_{i,j}+n_i$ does not exceed $R_{3i+3}$. We now show the claim that given any $\epsilon >0$, for $\mu$-a.e.~$\mathtt{x}$, there exists $L = L(\mathtt{x},\epsilon) \in \mathbb{N}$ such that for all $i \ge L$ and all $j \not\equiv 0 \pmod{k_i}$,
\[
\Bigg|\frac{1}{s_{i,j+1}-s_{i,j}}\sum_{k=s_{i,j}+1}^{s_{i,j+1}} \log{p_{\mathtt{x}_k}} - H(p)\Bigg| < \epsilon
\]
and
\[ \Bigg|\frac{1}{s_{i,j+1}-s_{i,j}}\sum_{k=s_{i,j}+1}^{s_{i,j+1}} \log{p_{\mathtt{x}^{(1)}_k}} - H_1(p)\Bigg| < \epsilon.
\]
Here we only prove the case of $H(p)$ since the other is similar. By virtue of the choice of $s_{i,j}$ we have
\[
\mathbb{E}_{p}\left[\frac{1}{s_{i,j+1}-s_{i,j}}\sum_{k=s_{i,j}+1}^{s_{i,j+1}} \log{p_{\mathtt{x}_k}}\right] = H(p), \quad \text{if } j \not\equiv 0\pmod{k_i}.
\]
Hence, by Chebyshev's inequality,
\[
\mu\left(\Bigg|\frac{1}{s_{i,j+1}-s_{i,j}}\sum_{k=s_{i,j}+1}^{s_{i,j+1}} \log{p_{\mathtt{x}_k}} - H(p)\Bigg| > \epsilon \right) < \frac{C}{\epsilon^2(s_{i,j+1}-s_{i,j})^2},
\]
where $C$ is a constant depending only on $p$. Hence, properties (a) and (b) allow us to give the estimate
\begin{align*}
&\mu\left(\Bigg|\frac{1}{s_{i,j+1}-s_{i,j}}\sum_{k=s_{i,j}+1}^{s_{i,j+1}} \log{p_{\mathtt{x}_k}} - H(p)\Bigg| > \epsilon \text{ for i.~m.~} i \in \mathbb{N}, j \ne 0 \pmod{k_i} \right) \\
\le & \liminf_{L \to \infty} \sum_{i=L}^{\infty} \sum_{j=1}^{k_i}\frac{C}{\epsilon^2 (2^i+j)^2} + \left(\frac{1}{2^{i-1}}\right)^2 \cdot C_0 = 0,
\end{align*}
where `i.~m.' means `infinitely many'. The claim is then proved. At this point, we see that the union of all intervals $(s_{i,j},s_{i,j+1}]$ coincides with $S \cap [L',\infty)$ for some $L' \in \mathbb{N}$. Furthermore, if we denote by $t_{k}$ the sequence formed by $s_{i,j}$, i.e. $\{t_k\}_{k \in \mathbb{N}}=\{s_{i,j}\}_{i,j}$, then property (b) implies $\frac{t_{k+1}}{t_{k}}$ tends to $1$ since we have $2^i \ll R_{3 i}$ in \eqref{E30}. Therefore, our claim implies
\begin{align*}
\liminf_{k \to \infty} \frac{-\log_{m_1} F(X,k)}{k} =&\liminf_{k \to \infty} \frac{-\log_{m_1} F_{\ast}(X,k)}{k} = \liminf_{k \to \infty} \frac{-\log_{m_1} F_{\ast}(X,t_k)}{t_k} \\
&=\liminf_{k \to \infty} \mathbb{E}_{p}\left[\frac{-\log_{m_1} F_{\ast}(X,t_k)}{t_k}\right] = \liminf_{k \to \infty} \mathbb{E}_{p}\left[\frac{-\log_{m_1} F_{\ast}(X,k)}{k}\right].
\end{align*}
Finally, by exploiting the fact $\underline{d}(S)=1$ again we derive
\[
\liminf_{k \to \infty} \frac{-\log_{m_1} F(X,k)}{k} = \liminf_{k \to \infty} \mathbb{E}_{p}\left[\frac{-\log_{m_1} F(X,k)}{k}\right]
\]
and complete the proof. \end{proof}
\begin{lemma}\label{L3} Let $ F(X,k)$ be defined in (\ref{E21}). If $\log_{m_1} m_2 > 1 + \tau_1$, where $\tau_1$ is defined in (\ref{E1}), then
\begin{align*}
\liminf_{k \to \infty} \mathbb{E}_{p} \left[\frac{-\log_{m_1} F(X,k)}{k}\right] \ge \min \Bigg\{&\frac{H_1(p)\log_{m_2} m_1+H_2(p)}{(1+\tau_2)\log m_1},\frac{H_1(p)}{\log m_2} + \frac{H_2(p)}{(1+\tau_{1})\log m_1}\nonumber\Bigg\}.
\end{align*} \end{lemma} \begin{proof}
According to our choice of $n_i$ (see \eqref{E30}), there exists $G\in \mathbb{N}$ such that
\begin{align}\label{E31}
n_j+n_j\tau_1(n_j)<n_j\log_{m_1}m_2 \quad \text{for all $j\geq G$}.
\end{align}
In the following, we calculate the value of $\liminf_{k \to \infty} \mathbb{E}_{p}\left[\frac{-\log_{m_1} F(X,k)}{k}\right]$ according to the classification of $k$. Let $C=\min\{ p_{\mathtt{a}}:\mathtt{a} \in A, p_{\mathtt{a}} \ne 0 \}$.
\textbf{Case 1:} $\lceil(n_i+ \hat{\ell}_{1}(n_i))\log_{m_1} m_2\rceil < k \le n_{i+1}$ ($i\ge G$). If $\mathtt{x}\in \mathrm{supp} (\mu)$, it is derived via computation that
\begin{align*}
F(\mathtt{x},k)=& \mu(X_j=\mathtt{x}_j, 1\leq j\leq n_{i-1}+ \hat{\ell}_{1}(n_{i-1})) \cdot \left(\prod_{j=1}^{n_{i-1}+\hat{\ell}_{1}(n_{i-1})} p_{\mathtt{x}_j}\right)^{-1}\\
& \hspace{1em} \cdot \left(\prod_{j=1}^{n_{i}} p_{\mathtt{x}_j} \cdot \prod_{j=n_{i}+\hat{\ell}_{2}(n_i)+1}^{n_{i}+\hat{\ell}_{1}(n_i)} \frac{p_{\mathtt{x}_j}}{p_{\mathtt{x}^{(1)}_j}} \cdot \prod_{j=n_{i}+\hat{\ell}_{1}(n_{i})+1}^{\lceil k \log_{m_2} m_1\rceil}p_{\mathtt{x}_j} \prod_{j=\lceil k \log_{m_2} m_1\rceil+1}^{k}p_{\mathtt{x}^{(1)}_j}\right).
\end{align*}
Note that since $2^i\sum_{j=1}^{i}n_j\ll n_{i+1}$ by \eqref{E30}, we have the following estimates for the first two terms:
\begin{align*}
0 \ge \mathbb{E}_{p}\left[\frac{-\log_{m_1} \mu(X_j, 1\leq j\leq n_{i-1}+ \hat{\ell}_{1}(n_{i-1}))}{k}\right] \ge \frac{n_{i-1}+\hat{\ell}_{1}(n_{i-1})}{n_i} \log_{m_1} C,
\end{align*}
\begin{align*}
0 \ge \mathbb{E}_{p}\left[\frac{-\log_{m_1} \left(\prod_{j=1}^{n_{i-1}+\hat{\ell}_{1}(n_{i-1})} p_{\mathtt{x}_j}\right)}{k}\right] \ge \frac{n_{i-1}+\hat{\ell}_{1}(n_{i-1})}{n_i} \log_{m_1} C.
\end{align*}
As for the third term, we note that for every $j$ there exists a sequence $j_1 < j_2 < \cdots < j_{\ell} = j$ and $i_1 < i_2 < \cdots < i_{\ell}$ such that $n_{i_1-1}+\ell_{1}(n_{i_1-1}) < j_1 \le n_{i_1}$ and $n_{i_k} < j_{k} \le n_{i_{k}} + \ell_{1}(n_{i_{k}})$ with $j_k \equiv j_{k-1} \pmod{n_{i_k}}$ for all $k \ge 2$. Therefore, $X_j$ has the same distribution as $X_{j_1}$ and, as a consequence, we can express the third term, in terms of \eqref{E22}, \eqref{E9} and \eqref{E18}, as
\begin{align*}
&\mathbb{E}_{p}\left[ \frac{-1}{k}
\log_{m_1}\Big(\prod_{j=1}^{n_{i}} p_{\mathtt{x}_j}\prod_{j=n_i+\hat{\ell}_{2}(n_i)+1}^{n_i+\hat{\ell}_{1}(n_i)}\frac{p_{\mathtt{x}_j}}{p_{\mathtt{x}^{(1)}_j}} \cdot \prod_{j=n_{i}+\hat{\ell}_{1}(n_{i})+1}^{\lceil k \log_{m_2} m_1\rceil}p_{\mathtt{x}_j} \prod_{j=\lceil k \log_{m_2} m_1\rceil+1}^{k}p_{\mathtt{x}^{(1)}_j} \Big) \right] \\
=&\ \alpha (\tau_1(n_i) - \tau_2(n_i)) \frac{H_1(p)}{\log m_1} + (1-\log_{m_2} m_1) \frac{H_2(p)}{\log m_1} + (\log_{m_2} m_1 - \alpha \tau_1(n_i)) \frac{ H(p)}{\log m_1} \\
=&\ (\log_{m_2} m_1 - \alpha \tau_2(n_i)) \frac{ H_1(p)}{\log m_1} +(1 - \alpha \tau_1(n_i)) \frac{ H_2(p)}{\log m_1},
\end{align*}
where $\alpha := \frac{n_i}{k}$.
This implies that
\begin{align*}
\liminf_{k\to \infty} \mathbb{E}_{p}\left[\frac{-\log_{m_1}F(X,k)}{k}\right] &=\liminf_{k\to \infty}\left(
(\log_{m_2} m_1 - \alpha \tau_2(n_i)) \frac{ H_1(p)}{\log m_1} +(1 - \alpha \tau_1(n_i)) \frac{ H_2(p)}{\log m_1} \right)\\
&\ge \liminf_{k\to \infty}\left( \frac{1+\tau_1(n_i)-\tau_2(n_i)}{1 + \tau_1(n_i) }\bigg(\frac{H_1(p)}{\log m_2} + \frac{H_2(p)}{\log m_1}\bigg)\right)\\
&=\frac{1+\tau_1-\tau_2}{1 + \tau_1 }\bigg(\frac{H_1(p)}{\log m_2} + \frac{H_2(p)}{\log m_1}\bigg)
\end{align*}
where the inequality is due to the fact that $k\in [(n_i+\hat{\ell}_{1}(n_i))\log_{m_1} m_2, \ n_{i+1}]$ and the last equality follows from the definitions of $\tau_1$ and $\tau_2$.
\textbf{Case 2:} $\lceil(n_i+\hat{\ell}_{2}(n_i))\log_{m_1} m_2 \rceil < k \le \lceil(n_i+\hat{\ell}_{1}(n_i))\log_{m_1} m_2\rceil$ ($i\ge G$). Analysis similar to that in Case 1 shows that
\begin{align*}
\liminf_{k\to \infty} \mathbb{E}_{p}\left[\frac{-\log_{m_1}F(X,k)}{k}\right]
=&\liminf_{k\to \infty}\left( \frac{\alpha H(p)}{\log m_1} + (\log_{m_2} m_1 - \alpha - \alpha \tau_2(n_i)) \frac{H_1(p)}{\log m_1} + (1-\alpha-\alpha\tau_1(n_i)) \frac{H_2(p)}{\log m_1}\right) \\
=&\liminf_{k\to \infty}\left( \ (\log_{m_2} m_1 - \alpha \tau_2(n_i)) \frac{H_1(p)}{\log m_1} +(1 - \alpha \tau_1(n_i)) \frac{H_2(p)}{\log m_1}\right)\\
\ge&\liminf_{k\to \infty}\left( \frac{\log_{m_2} m_1}{1+\tau_2(n_i)} \frac{H_1(p)}{\log m_1} +\frac{ H_2(p)}{\log m_1}\frac{1}{1+\tau_2(n_i)}\right)\\
=&\frac{\log_{m_2} m_1}{1+\tau_2} \frac{H_1(p)}{\log m_1} +\frac{ H_2(p)}{\log m_1}\frac{1}{1+\tau_2}
\end{align*}
where $\alpha = \frac{n_i}{k}$.
\textbf{Case 3:} $ \lceil n_i\log_{m_1} m_2 \rceil < k \le \lceil(n_i+\hat{\ell}_{2 }(n_i))\log_{m_1} m_2\rceil$ ($i\ge G$).
Analysis similar to that in Case 1 shows, by writing $\alpha = \frac{n_i}{k}$, that
\begin{align*}
\liminf_{k\to \infty} \mathbb{E}_{p}\left[-\frac{\log_{m_1}F(X,k)}{k}\right] &=\liminf_{k\to \infty} \left( \alpha \frac{H(p)}{\log m_1} + (1-\alpha-\alpha\tau_1(n_i)) \frac{H_2(p)}{\log m_1} \right)\\
& = \liminf_{k\to \infty}\left(\alpha \frac{H_1(p)}{\log m_1} + (1-\alpha\tau_1(n_i)) \frac{H_2(p)}{\log m_1}\right)\\
& \ge \min\Bigg\{ \frac{H_1(p)}{(1+\tau_2)\log m_2} + \frac{H_2(p)}{(1+\tau_2)\log m_1},\frac{H_1(p)}{\log m_2} + (1-\tau_2) \frac{H_2(p)}{\log m_1}\Bigg\}.
\end{align*}
Since $n\tau_1(n)=\ell_{1}(n)$ by (\ref{E1}), inequality (\ref{E31}) guarantees that the range of $k$ in the following case is nonempty.
\textbf{Case 4:} $ n_{i}+\hat{\ell}_{1}(n_i) < k \le \lceil n_i\log_{m_1} m_2 \rceil$ ($i\ge G$). Similarly, we have, by writing $\alpha = \frac{n_i}{k}$, that
\begin{align*}
\liminf_{k\to \infty} \mathbb{E}_{p}\left[\frac{-\log_{m_1}F(X,k)}{k}\right] &=\liminf_{k\to \infty} \left(\frac{H_1(p) \log_{m_2}m_1}{\log m_1} + (1-\alpha\tau_1(n_i)) \frac{H_2(p)}{\log m_1}\right)\\
&\ge\liminf_{k\to \infty} \left( \frac{H_1(p)}{\log m_2} + \frac{1}{1+\tau_1(n_i)}\frac{H_2(p)}{\log m_1}\right)\\
&=\frac{H_1(p)}{\log m_2} + \frac{1}{1+\tau_1}\frac{H_2(p)}{\log m_1},
\end{align*}
where the inequality follows from the value scope of $k$.
\textbf{Case 5:} $n_i < k \le n_{i}+\hat{\ell}_{1}(n_i)$ ($i\ge G$). It follows by the same method as in Case 1, writing $\alpha = \frac{n_i}{k}$, that
\begin{align*}
\liminf_{k\to \infty} \mathbb{E}_{p}\left[-\frac{\log_{m_1}F(X,k)}{k}\right] & =\liminf_{k\to \infty} \left(\frac{H(p)\log_{m_2} m_1}{\log m_1} + (\alpha-\log_{m_2}m_1) \frac{H_2(p)}{\log m_1} \right)\\
& =\liminf_{k\to \infty} \left( \frac{H_1(p)}{\log m_2} + \alpha \frac{H_2(p)}{\log m_1}\right) \\
&\ge\liminf_{k\to \infty} \left(\frac{H_1(p)}{\log m_2} +\frac{1}{1+\tau_1(n_i)} \frac{H_2(p)}{\log m_1}\right)\\
&=\frac{H_1(p)}{\log m_2} +\frac{1}{1+\tau_1} \frac{H_2(p)}{\log m_1},
\end{align*}
where the equality is due to the fact that $H(p)= H_1(p)+H_2(p)$ and the inequality is from the value scope of $k$.
Combining the above cases, we can obtain that
\begin{align}\label{E14}
\liminf_{k \to \infty}\mathbb{E}_{p}\left[ -\frac{\log_{m_1}F(X,k)}{k}\right]
\ge & \min \Bigg\{ \frac{1+\tau_1-\tau_2}{1 + \tau_1 }\bigg(\frac{H_1(p)}{\log m_2} + \frac{H_2(p)}{\log m_1}\bigg),\frac{H_1(p)\log_{m_2} m_1+H_2(p)}{(1+\tau_2)\log m_1}, \\
& \frac{H_1(p)}{\log m_2} + (1-\tau_2) \frac{H_2(p)}{\log m_1}, \frac{H_1(p) + H_2(p)}{(1+\tau_{1})\log m_1},\frac{H_1(p)}{\log m_2} + \frac{H_2(p)}{(1+\tau_{1})\log m_1}\Bigg\}. \nonumber
\end{align}
By the definition of $\tau_1$ and $\tau_2$, we have
\begin{align}\label{e6}
\frac{1+\tau_1-\tau_2}{1 + \tau_1 }>\frac{1}{1+\tau_2},\ 1-\tau_2\ge \frac{1}{1 + \tau_1 }.
\end{align}
Note that $\frac{1}{1 + \tau_1 }>\log_{m_2} m_1$ by (\ref{E31}) which implies that
\begin{align}\label{E3}
\frac{H_1(p) + H_2(p)}{(1+\tau_{1})\log m_1}\ge
\frac{H_1(p)}{\log m_2} + \frac{H_2(p)}{(1+\tau_{1})\log m_1}.
\end{align}
By (\ref{E14}-\ref{E3}), we conclude that
\begin{align*}
\liminf_{k \to \infty}\mathbb{E}_{p}\left[ -\frac{\log_{m_1}F(X,k)}{k}\right]\ge \min \Bigg\{&\frac{H_1(p)\log_{m_2} m_1+H_2(p)}{(1+\tau_2)\log m_1},\frac{H_1(p)}{\log m_2} + \frac{H_2(p)}{(1+\tau_{1})\log m_1}\nonumber\Bigg\}.
\end{align*}
This completes the proof of Lemma \ref{L3}. \end{proof}
The following proof of Lemma \ref{L5} is similar to the one above. \begin{lemma}\label{L5} Let $ F(X,k)$ be defined in (\ref{E21}). If $\log_{m_1} m_2 \le 1 + \tau_1$, where $\tau_1$ is defined in (\ref{E1}), then
\begin{align}\label{E7}
\liminf_{k \to \infty} \mathbb{E}_{p} \left[\frac{-\log_{m_1} F(X,k)}{k}\right] \ge \min\Bigg\{
& \frac{H_1(p)\log_{m_2} m_1+H_2(p)}{(1+\tau_2)\log m_1},\frac{H_1(p) + H_2(p)}{(1+\tau_{1})\log m_1}\Bigg\} .
\end{align} \end{lemma}
\begin{proof}
Since the proof is similar to the proof of Lemma \ref{L3} we will simply list the results according to the classification of $k$.
\textbf{Case 1:} $\lceil (n_i+\hat{\ell}_{1}(n_i))\log_{m_1} m_2\rceil < k \le n_{i+1}$. If $\mathtt{x} \in \mathrm{supp} (\mu)$, let $\alpha = \frac{n_i}{k}$ so that
\begin{align*}
\liminf_{k\to \infty}\mathbb{E}_{p}\left[-\frac{\log_{m_1} F(X,k)}{k}\right] & = \liminf_{k\to \infty} \left((\log_{m_2} m_1 - \alpha \tau_2(n_i)) \frac{H_1(p) }{\log m_1}+(1 - \alpha \tau_1(n_i)) \frac{H_2(p)}{\log m_1}\right),\\
&\ge \frac{1+\tau_1-\tau_2}{1 + \tau_1}\big(\frac{H_1(p)}{\log m_2} + \frac{H_2(p)}{\log m_1}\big).
\end{align*}
\textbf{Case 2:} $\lceil(n_i+\hat{\ell}_{2}(n_i))\log_{m_1} m_2\rceil < k \le \lceil (n_i+\hat{\ell}_{1}(n_i)) \log_{m_1} m_2\rceil$. Let $\alpha = \frac{n_i}{k}$ so that
\begin{align*}
\liminf_{k\to \infty}\mathbb{E}_{p}\left[-\frac{\log_{m_1} F(X,k)}{k}\right] & = \liminf_{k\to \infty}\left( (\log_{m_2} m_1 - \alpha \tau_2(n_i)) \frac{H_1(p)}{\log m_1} +(1 - \alpha \tau_1(n_i)) \frac{H_2(p)}{\log m_1}\right)\\
&\ge \frac{\log_{m_2} m_1}{1+\tau_2} \frac{H_1(p)}{\log m_1} +\frac{ H_2(p)}{(1+\tau_2)\log m_1}.
\end{align*}
\textbf{Case 3:} $\lceil n_{i}+\hat{\ell}_{1}(n_i)\rceil < k \le \lceil(n_i+\hat{\ell}_{2 }(n_i)) \log_{m_1} m_2\rceil$. Let $\alpha = \frac{n_i}{k}$ so that
\begin{align*}
\liminf_{k\to \infty}\mathbb{E}_{p}\left[-\frac{\log_{m_1} F(X,k)}{k}\right] &=\liminf_{k\to \infty}\left(\alpha \frac{H_1(p)}{\log m_1} + (1-\alpha\tau_1(n_i)) \frac{H_2(p)}{\log m_1}\right)\\
& \ge \min\Bigg\{ \frac{H_1(p)+H_2(p)}{(1+\tau_1)\log m_1}, \frac{H_1(p) \log_{m_2} m_1+H_2(p)}{(1+\tau_2)\log m_1}\Bigg\}.
\end{align*}
\textbf{Case 4:} $ \lfloor n_i\log_{m_1} m_2 \rfloor < k \le \lceil n_{i}+\hat{\ell}_{1}(n_i)\rceil$. By writing $\alpha = \frac{n_i}{k}$, we have
\begin{align*}
\liminf_{k\to \infty} \mathbb{E}_{p}\left[-\frac{\log_{m_1} F(X,k)}{k}\right] =\liminf_{k\to \infty}\alpha \frac{H(p)}{\log m_1} = \liminf_{k\to \infty} \alpha \frac{H_1(p) + H_2(p)}{\log m_1} \ge \frac{H_1(p) + H_2(p)}{(1+\tau_{1})\log m_1}.
\end{align*}
\textbf{Case 5:} $ n_i < k \le \lfloor n_i\log_{m_1} m_2 \rfloor$. Let $\alpha = \frac{n_i}{k}$ so that
\begin{align*}
\liminf_{k\to \infty} \mathbb{E}_{p}\left[-\frac{\log_{m_1} F(X,k)}{k}\right] &= \liminf_{k\to \infty} \left(\frac{H(p)}{\log m_2} + (\alpha-\log_{m_2}m_1) \frac{H_2(p)}{\log m_1} \right)\\
& = \liminf_{k\to \infty}\left( \frac{H_1(p) }{\log m_2}+ \alpha \frac{H_2(p)}{\log m_1}\right)\\
&\ge \frac{H_1(p) }{\log m_2} + \frac{ H_2(p)}{\log m_2}.
\end{align*}
Combining the above cases, we can obtain that
\begin{align*}
\liminf_{k \to \infty} \mathbb{E}_{p}\left[-\frac{\log_{m_1} F(X,k)}{k}\right]
\ge & \min \Bigg\{ \frac{1+\tau_1-\tau_2}{1 + \tau_1 }\bigg(\frac{H_1(p)}{\log m_2} + \frac{H_2(p)}{\log m_1}\bigg),\frac{H_1(p)\log_{m_2} m_1+H_2(p)}{(1+\tau_2)\log m_1}, \\
&\hspace{10em}\frac{H_1(p) + H_2(p)}{(1+\tau_{1})\log m_1},\frac{H_1(p)}{\log m_2} + \frac{H_2(p)}{\log m_2}\Bigg\}.
\end{align*}
By the definition of $\tau_1$ and $\tau_2$, we have $\frac{1+\tau_1-\tau_2}{1 + \tau_1 }>\frac{1}{1+\tau_2}$.
Note that $\frac{1}{1 + \tau_1 }\le \log_{m_2} m_1$, which implies that
\begin{align*}
\frac{H_1(p) + H_2(p)}{(1+\tau_{1})\log m_1}\le
\frac{H_1(p)}{\log m_2} + \frac{H_2(p)}{\log m_2}.
\end{align*}
Therefore, we obtain (\ref{E7}). \end{proof} Finally, we wrap up this section by noting that the lower bound on $\dim_{\mathrm{H}} W(K,T,\psi)$ in Theorem \ref{M1} follows from Lemmas \ref{L3} and \ref{L5}: we simply take $p_{\mathtt{a}}=\frac{1}{\# A}$ for all $\mathtt{a} \in A$, so that $H_1(p)=\log M$ and $H_2(p)=\log N$.
\section{Proof of the upper bound}\label{S3}
In this section, we give the rest of the proof of Theorem \ref{M1}. We note that it suffices to prove the case when $\lim_{n \to \infty}-\frac{\log \psi(n)}{n}$ exists, since for those $\psi(n)$ without this property we may consider $\phi(n):=\max\{\psi(n),m_1^{-\tau_1 \cdot n}\}$ so that $\dim_{\mathrm{H}} W(K,T,\psi) \le \dim_{\mathrm{H}} W(K,T,\phi)$ and we may apply the aforementioned result.
Note that the set $W(K,T,\psi)$ can be written as a limsup set
\[ W(K,T,\psi)=\limsup_{n \to \infty} W_n(K,T,\psi),
\]
where
\[
W_n(K,T,\psi):=\left\{x \in K: \begin{cases} |x^{(1)}-T^n_{m_1}(x^{(1)})| < \psi(n) \\
|x^{(2)}-T^n_{m_2}(x^{(2)})| < \psi(n)
\end{cases}\right\}.
\]
We note that the set above can further be written as the following union:
\[
W_n(K,T,\psi)=\bigcup_{\mathtt{w} \in A^n} J(\mathtt{w}) := \bigcup_{\mathtt{w} \in A^n} W_{n}(K,T,\psi) \cap I(\mathtt{w}).
\]
Now for sufficiently large $n$, the set $J(\mathtt{w})$ is contained in the interior of a rectangle of length $4\psi(n)m_1^{-n}$ and width $4\psi(n)m_2^{-n}$, since for any $\pi(\mathtt{x}) \in J(\mathtt{w})$,
\begin{equation} \label{eq:recurrence_set_diameter}
\begin{aligned}
|\pi(\mathtt{x})^{(i)}-\pi(\mathtt{w}^{\infty})^{(i)}| &\ge |T^n(\pi(\mathtt{x}))^{(i)}-T^n(\pi(\mathtt{w}^{\infty}))^{(i)}|-|\pi(\mathtt{x})^{(i)}-T^n(\pi(\mathtt{x})^{(i)})| \\
&\ge m_i^n |\pi(\mathtt{x})^{(i)}-\pi(\mathtt{w}^{\infty})^{(i)}| - \psi(n).
\end{aligned}
\end{equation}
Now for any given $\delta>0$, we can choose $G$ large enough so that $4\psi(n)m_i^{-n}<\delta$ for any $n\geq G$. For simplicity, we write $L_{i,n}=\lceil-\log_{m_1} 4\psi(n)m_i^{-n}\rceil$ and denote by $\mathcal{N}_{i,n}$ the smallest number of $L_{i,n}$-th level approximate squares needed to cover $W_n(K,T,\psi)$. Then the $s$-dimensional Hausdorff measure admits the following estimate:
\begin{align*}
\mathcal{H}_{\delta}^s(W(K,T,\psi))\leq \sum_{n=G}^{\infty} \mathcal{N}_{i,n} \cdot (4\psi(n)m_i^{-n})^s.
\end{align*}
\textbf{Case 1:} $i=2$. We note that for any $\mathtt{w} \in A^n$, $J(\mathtt{w})$ can be covered by nine neighboring rectangles $\pi(\mathtt{v}^{\mathtt{w},j})$, $j=1,\cdots,9$, for which $\mathtt{v}^{\mathtt{w},j} \in \Sigma_{m_1}^{\lceil n+\ell_1(n)-\log_{m_1} 4 \rceil} \times \Sigma_{m_2}^{\lceil n+\ell_2(n)-\log_{m_1} 4 \rceil}$ satisfies
\begin{equation} \label{eq:rectangle_selection}
\pi([\mathtt{v}^{\mathtt{w},j}]) \cap \pi([(\mathtt{w}^{(1)})^{\lceil n+\ell_1(n)-\log_{m_1} 4 \rceil/n},(\mathtt{w}^{(2)})^{\lceil n+\ell_2(n)-\log_{m_1} 4 \rceil/n}]) \ne \emptyset,
\end{equation}
since each of the rectangles has length greater than $4\psi(n)m_1^{-n}$ and width greater than $4\psi(n)m_2^{-n}$. Now note that each of the rectangles contains at most $M^{(\log_{m_1} m_2-1) n}$ approximate squares of level $L_{2,n}$, since
\begin{align*}
\#\{\mathtt{u} \in \Sigma_{m_1}^{L_{2,n}} \times \Sigma_{m_2}^{\lceil \log_{m_2} m_1 \cdot L_{2,n}\rceil}: \pi([\mathtt{u}]) \subset \pi([\mathtt{v}^{\mathtt{w},j}]) \subset K\}
\le M^{L_{2,n} - \lceil n+\ell_2(n)-\log_{m_1} 4 \rceil} \le M^{(\log_{m_1} m_2-1) n}.
\end{align*}
Therefore,
\[
\mathcal{N}_{2,n} \le 9 M^{(\log_{m_1} m_2-1) n} \cdot \#A^{n}= 9 M^{(\log_{m_1} m_2-1) n} (MN)^n
\]
and
\begin{align}\label{E15}
\mathcal{H}_{\delta}^s(W(K,T,\psi))&\leq \sum_{n=G}^{\infty} 9 M^{(\log_{m_1} m_2-1) n}(MN)^n(4\psi(n)m_2^{-n})^s= C \sum_{n=G}^{\infty}m_1^{n h_n},
\end{align}
where $C:=9 \cdot 4^{s}$ is a constant and
\begin{align*}
h_n=\log_{m_1}M\cdot \log_{m_1}m_2+\log_{m_1}N+s(n^{-1}\log_{m_1}\psi(n)-\log_{m_1}m_2).
\end{align*} Since we assume $\lim_{n \to \infty}-\frac{\log \psi(n)}{n}$ exists, it is clear that $\sum_{n=G}^{\infty}m_1^{n \cdot h_n}$ converges as long as $\lim_{n \to \infty}h_n<0$ and that this is equivalent to the condition that \begin{align*}
s> \lim_{n \to \infty}\frac{\log_{m_1}M\cdot \log_{m_1}m_2+\log_{m_1}N}{\log_{m_1}m_2-n^{-1}\log_{m_1}\psi(n)}= \lim_{n \to \infty} \frac{\log_{m_1}M+\log_{m_2}N}{1-n^{-1}\log_{m_2}\psi(n)}=\frac{\log_{m_1}M+\log_{m_2}N}{1+\tau_2}. \end{align*} Combining this with (\ref{E15}), we get \begin{align*}
0\leq \mathcal{H}^s(W(K,T,\psi))=\lim_{\delta \to 0} \mathcal{H}_{\delta}^s(W(K,T,\psi))\leq \lim_{G \to \infty}C \sum_{n=G}^{\infty} m_1^{n \cdot h_n}=0, \end{align*} which implies that $\dim_{\mathrm{H}}W(K,T,\psi)\leq \frac{\log_{m_1}M+\log_{m_2}N}{1+\tau_2}$.
\textbf{Case 2:} $i=1$. The discussion is divided into the following two sub-cases. If $\lceil \log_{m_2} m_1 \cdot L_{1,n}\rceil \le n$, then for each $J(\mathtt{w})$ we may choose nine rectangles $\mathtt{v}^{\mathtt{w},j}$ just as in Case 1. We note that $(\mathtt{v}^{\mathtt{w},j})^{(i)} = \mathtt{w}^{(i)}$ for $i=1,2$. Furthermore, as a result of \eqref{eq:rectangle_selection}, if $\mathtt{w}^{(1)}=\mathtt{w'}^{(1)}$, then $\{(\mathtt{v}^{\mathtt{w},j})^{(1)}\} = \{(\mathtt{v}^{\mathtt{w}',j})^{(1)}\}$. Therefore,
\begin{align*}
\mathcal{N}_{1,n} \le & \# \bigcup_{\mathtt{w} \in A^{n}} \{\mathtt{u} \in \Sigma_{m_1}^{L_{1,n}} \times \Sigma_{m_2}^{\lceil \log_{m_2} m_1 \cdot L_{1,n}\rceil}: \pi([\mathtt{v}^{\mathtt{w},j}]) \subset \pi([\mathtt{u}]) \subset K\} \\
= & \# \bigcup_{\mathtt{w} \in A^n} \{((\mathtt{v}^{\mathtt{w},j})^{(1)},\mathtt{w}_{1:\lceil \log_{m_2} m_1 \cdot L_{1,n}\rceil}^{(2)}):j=1,\cdots,9\} \\
= & 3 (M N)^{L_{1,n}} M^{n-L_{1,n}} \le 3 (M N)^{\log_{m_2} m_1 \cdot n + \ell_{2}(n)} M^{(1-\log_{m_2} m_1) n - \ell_{2}(n)}
\end{align*}
If $\lceil \log_{m_2} m_1 \cdot L_{1,n}\rceil > n$, then similarly we have
\begin{align*}
\mathcal{N}_{1,n} \le & \# \bigcup_{\mathtt{w} \in A^{n}} \{\mathtt{u} \in \Sigma_{m_1}^{L_{1,n}} \times \Sigma_{m_2}^{\lceil \log_{m_2} m_1 \cdot L_{1,n}\rceil}: \pi([\mathtt{v}^{\mathtt{w},j}]) \subset \pi([\mathtt{u}]) \subset K\} \\
= & \# \bigcup_{\mathtt{w} \in A^n} \{((\mathtt{v}^{\mathtt{w},j})^{(1)},(\mathtt{v}^{\mathtt{w},j})^{(2)}_{1:\lceil \log_{m_2} m_1 \cdot L_{1,n}\rceil}):j=1,\cdots,9\} \\
\le & 9 (M N)^{n}
\end{align*}
Since we assume $\lim_{n \to \infty}-\frac{\log \psi(n)}{n}$ exists, we can adopt a similar argument as in Case 1 to conclude that $\dim_{\mathrm{H}}W(K,T,\psi)\leq \frac{\log_{m_1}M+\log_{m_1}N}{1+\tau_1}$ if $\log_{m_1} m_2 \le 1 + \tau_1$ and $\dim_{\mathrm{H}} W(K,T,\psi) \le \frac{\log_{m_1} M}{1+\tau_1} + \log_{m_2} N$ if $\log_{m_1} m_2 > 1 + \tau_1$.
Combining Cases 1 and 2 yields the upper bound in Theorem \ref{M1}, and the proof is complete.
\section{Applications}\label{S4} In this section we present some examples. Example \ref{T3} is a special case of Theorem \ref{M1} for which $m_1=M$ and $m_2=N$. \begin{example}\label{T3} Let $T: [0,1]^2 \to [0,1]^2 $ be an integer diagonal matrix transformation of $[0,1]^2$, i.e., \[T(x)=(T_{m_1}(x^{(1)}), T_{m_2}(x^{(2)})):=\left(m_1 x^{(1)}\ (\bmod\ 1),m_2 x^{(2)}\ (\bmod\ 1)\right),\] with $2\leq m_1\leq m_2$. Let $\psi: \mathbb{N}^{+}\to \mathbb{R}^{+}$ be a positive function. Let \begin{align*}
W(T,\psi):=\left\{x\in [0,1]^2:
T^n(x) \in B(x, \psi(n)) \ \text{for infinitely many } n
\right\}. \end{align*}
Then
$(1)$ If $\log_{m_1} m_2 > 1 + \tau_1$,
then
\begin{align*}
\dim_{\mathrm{H}} W(T,\psi)=
\min \left\{\frac{2}{1+\tau_2},
\frac{1}{1+\tau_1} + 1\right\}.
\end{align*}
$(2)$ If $\log_{m_1} m_2 \le 1 + \tau_1$, then
\begin{align*}
\dim_{\mathrm{H}} W(T,\psi) = \min\left\{
\frac{2}{1+\tau_2}, \
\frac{1 +\log_{m_1} m_2}{1+\tau_1} \right\},
\end{align*}
where $\tau_{i}$, $i=1,2$, is defined in (\ref{E1}). \end{example}
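The formulas in Example \ref{T3} are easy to evaluate for concrete data. The Python sketch below uses the hypothetical choice $T(x)=(2x^{(1)} \bmod 1,\,8x^{(2)} \bmod 1)$ and $\psi(n)=e^{-cn}$, for which $\tau_i=c/\log m_i$; it simply evaluates the two expressions of the example and selects the relevant case.
\begin{verbatim}
import math

def dim_W(m1, m2, c):
    """Example T3 with psi(n) = exp(-c*n), so tau_i = c / log(m_i)."""
    tau1 = c / math.log(m1)
    tau2 = c / math.log(m2)
    if math.log(m2, m1) > 1 + tau1:                       # case (1)
        return min(2 / (1 + tau2), 1 / (1 + tau1) + 1)
    return min(2 / (1 + tau2), (1 + math.log(m2, m1)) / (1 + tau1))  # case (2)

print(dim_W(2, 8, 1.0))   # a value strictly between 1 and 2
print(dim_W(2, 8, 0.0))   # c = 0 gives the full dimension 2 of the square
\end{verbatim}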
The following example illustrates the case when $K$ is a product of Cantor sets. \begin{example} Let $\mathcal{C}_{\frac{1}{3}} $ denote the middle third Cantor set and $\mathcal{C}_{\frac{1}{4}} $ be the attractor of the IFS $\{f_1,f_2,f_3\}$ on $[0,1]$, where $ f_1(x)=\frac{1}{4}x, f_2(x)=\frac{1}{4}x+\frac{1}{4}$ and $f_3(x)=\frac{1}{4}x+\frac{3}{4}$. Define $T: \mathcal{C}_{\frac{1}{3}}\times \mathcal{C}_{\frac{1}{4}} \to \mathcal{C}_{\frac{1}{3}}\times \mathcal{C}_{\frac{1}{4}} $ as \[T(x)=\big(T_{3}(x^{(1)}), T_{4}(x^{(2)})\big):=\left(3 x^{(1)}\ (\bmod\ 1), 4 x^{(2)}\ (\bmod\ 1)\right).\] Let $\psi: \mathbb{N}^{+}\to \mathbb{R}^{+}$ be a positive function. Let \begin{align*}
W_1(T,\psi):=\left\{x\in \mathcal{C}_{\frac{1}{3}}\times \mathcal{C}_{\frac{1}{4}}: T^n(x) \in B(x, \psi(n)) \ \text{for infinitely many } n \right\}. \end{align*}
Then
$(1)$ If $\log_{3} 4 > 1 + \tau_1$,
then
\begin{align*}
\dim_{\mathrm{H}} W_1(T,\psi)=
\min \left\{\frac{\log_{3} 2 + \log_{4} 3}{1+\tau_2},
\frac{\log_{3} 2}{1+\tau_1} + \log_{4} 3\right\}.
\end{align*}
$(2)$ If $\log_3 4 \le 1 + \tau_1$, then
\begin{align*}
\dim_{\mathrm{H}} W_1(T,\psi) = \min\left\{
\frac{\log_{3} 2 + \log_{4} 3}{1+\tau_2}, \
\frac{\log_{3} 2 +1}{1+\tau_1} \right\},
\end{align*}
where $\tau_{i}$, $i=1,2$, is defined in (\ref{E1}). \end{example}
\noindent \textbf{Acknowledgements.} The authors would like to thank Professor Bing Li for his guidance and suggestions. The authors would also like to thank Professor Meng Wu for drawing our attention to these problems and reading an early draft of the manuscript. The work is supported by the China Scholarship Council and the Academy of Finland (No. 318217).
\end{document} |
\begin{document}
\baselineskip17pt
\title[Competition]{Does a population with the highest turnover coefficient win competition?} \author[R. Rudnicki]{Ryszard Rudnicki} \address{R. Rudnicki, Institute of Mathematics, Polish Academy of Sciences, Bankowa 14, 40-007 Katowice, Poland.} \email{[email protected]} \keywords{nonlinear Leslie model, competitive exclusion, periodic cycle, population dynamics} \subjclass[2010]{Primary: 92D25; Secondary: 37N25, 92D40}
\begin{abstract} We consider a discrete time competition model. Populations compete for common limited resources but they have different fertility and mortality rates. We compare dynamical properties of this model with its continuous counterpart. We give sufficient conditions for competitive exclusion and the existence of periodic solutions related to the classical logistic, Beverton-Holt and Ricker models. \end{abstract}
\maketitle
\section{Introduction} \label{s:int}
It is well known that if species compete for common limited resources, then usually they cannot coexist in the long term. This law was introduced by Gause \cite{Gause} and it is called \textit{the principle of competitive exclusion}. There are a lot of papers where the problem of competitive exclusion or coexistence is discussed. Most of them are described by continuous time models, but there is also a number of discrete time models devoted to this subject (see \cite{AS,CLCH} and the references given there). If we have continuous and discrete time versions of a similar competition model,
it is interesting to compare the properties of both versions of the model, especially to check if they are dynamically consistent, i.e., if they possess the same dynamical properties such as stability or chaos. In this paper we consider a discrete time competition model with overlapping generations. We prove a sufficient condition for competitive exclusion and compare it with its continuous counterpart.
The model considered here is the following. A population consists of $k$ different individual strategies. The state of the population at time $t$ is given by the vector $\mathbf x(t)=[x_{1}(t),\dots ,x_{k}(t)]$, where $x_{i}(t)$ is the size of subpopulation with the phenotype $i$. Individuals with different phenotypes do not mate and each phenotype $i$ is characterized by per capita reproduction $b_i$ and mortality $d_i$. We assume that the juvenile survival rate depends on the state $\mathbf x$ and it is given by a function $f\colon \mathbb R^k_+ \to [0,1]$. Therefore, $f$ describes the suppression of growth driven, for example, by competition for food or free nest sites for newborns. The time evolution of the state of population is given by the system \begin{equation} \label{d-m} x_i(t+1)=x_i(t)-d_ix_i(t)+ b_i x_i(t)f(\mathbf x(t)). \end{equation} We assume that $0<d_i\le 1$, $d_i<b_i$ for $i=1,\dots,k$ and $f(\mathbf x)>0$ for $\mathbf x\ne 0$. The model is similar in spirit to that presented in \cite{AB} (a continuous version) and in \cite{AR} (a discrete version) but in those papers $f$ has a special form strictly connected with competition for free nest sites for newborns. A simplified Leslie/Gower model \cite{AlSR} is also of the form~ (\ref{d-m}). The suppression function $f$ can be quite arbitrary. Usually, it is of the form $f(\mathbf x)=\varphi (w_1x_1+\dots+w_kx_k)$, where $\varphi$ is a decreasing function and $w_1,\dots,w_k$ are positive numbers, but e.g. in \cite{AB} it is of the form \[ f(\mathbf x)= \begin{cases} 1,&\textrm{if $\,(1+b_1)x_1+\dots+ (1+b_k)x_k\le K$},\\
\dfrac{K-x_1-\dots-x_k}{b_1x_1+\dots+b_kx_k}, &\textrm{if $\,(1+b_1)x_1+\dots +(1+b_k)x_k> K$}. \end{cases} \]
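Before turning to the continuous counterpart, we note that system (\ref{d-m}) is straightforward to simulate. The Python sketch below iterates (\ref{d-m}) for $k=2$ strategies with the simple suppression function $f(\mathbf x)=\max(0,1-x_1-x_2)$; the coefficients are hypothetical and are chosen only so that $b_1/d_1>b_2/d_2$, in order to illustrate the competitive exclusion discussed in the next sections.
\begin{verbatim}
# Minimal simulation of (d-m) with k = 2 and f(x) = max(0, 1 - x1 - x2).
# The coefficients below are hypothetical; here L1 = b1/d1 = 5 > L2 = b2/d2 = 3.
b = (1.0, 1.5)   # per capita reproduction b_i
d = (0.2, 0.5)   # mortality d_i (0 < d_i <= 1, d_i < b_i)

def f(x1, x2):
    return max(0.0, 1.0 - x1 - x2)

x = [0.3, 0.6]   # initial sizes of the two subpopulations
for t in range(2000):
    s = f(x[0], x[1])
    x = [x[i] - d[i] * x[i] + b[i] * x[i] * s for i in range(2)]

print(x)  # for these values x[1] decays to 0 and x[0] settles near 1 - d1/b1 = 0.8
\end{verbatim}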
Now we present some motivation for studying model (\ref{d-m}). We begin with a continuous time version of it. The time evolution of the state of population is described by the system \begin{equation} \label{c-m} x_i'(t)=-d_ix_i(t)+ b_i x_i(t)f(\mathbf x(t)), \quad i=1,\dots,k. \end{equation} We assume that $0<d_i< b_i$, $f$ has values in the interval $[0,1]$, and \begin{equation} \label{b:f} f(\mathbf x) \le \min\bigg\{\frac{d_i}{b_i} \colon \,\, i=1,\dots,k\bigg\}
\quad\textrm{if $|\mathbf x|\ge M$}, \end{equation}
where $|\mathbf x|=x_1+\dots+x_k$. From the last condition it follows that the total size $|\mathbf x(t)|$
of the population is bounded by $\max(M,|\mathbf x(0)|)$. We also assume that $f$ is a sufficiently smooth function to have the existence and uniqueness of the solutions of (\ref{c-m}), for example it is enough to assume that $f$ satisfies a local Lipschitz condition.
We denote by $L_i=b_i/d_i$ the turnover coefficient for the strategy $i$. We assume that \begin{equation} \label{ineq} L_1>L_2\ge\dots \ge L_k. \end{equation} It is well known that \begin{equation} \label{goto0} \lim_{t\to\infty} x_i(t)=0 \textrm{ \ for $i\ge 2$}. \end{equation} Indeed, from (\ref{c-m}) it follows that \begin{equation} \label{goto0-2} (b_i^{-1}\ln x_i(t))'= -L_i^{-1}+f(\mathbf x(t)). \end{equation} Thus \begin{equation} \label{goto0-3} (b_1^{-1}\ln x_1(t)-b_i^{-1}\ln x_i(t))'= L_i^{-1}-L_1^{-1}. \end{equation} Therefore \begin{equation} \label{goto0-4} \frac{d}{dt} \ln \bigg( \frac{x_1(t)^{b_i}}{x_i(t)^{b_1}}\bigg) = b_1b_i(L_i^{-1}-L_1^{-1})>0 \end{equation} and, consequently, \begin{equation} \label{goto0-5} \lim_{t\to\infty}\frac{x_1(t)^{b_i}}{x_i(t)^{b_1}}=\infty. \end{equation} Since $x_1(t)$ is a bounded function, from (\ref{goto0-5}) it follows that (\ref{goto0}) holds.
Now we return to a discrete time version of model (\ref{c-m}).
From (\ref{b:f}) it follows immediately that $x_i(t+1)\le x_i(t)$ if $|\mathbf x(t)|\ge M$ and, therefore, the sequence $(x_i(t))$ is bounded. Moreover, since $d_i \le 1$ we have $x_i(t)>0$ if $x_i(0)>0$. It is of interest to know whether (\ref{goto0}) holds also for discrete model (\ref{d-m}). Observe that (\ref{d-m}) can be written as \begin{equation} \label{d-m1} \frac{x_i(t+1)-x_i(t)}{b_ix_i(t)}= -L_i^{-1}+ f(\mathbf x(t)). \end{equation} Now (\ref{goto0-3}) takes the form \begin{equation} \label{d-m2}
\frac{1}{b_1}D_l\, x_1(t)-\frac{1}{b_i}D_l\, x_i(t) = L_i^{-1}-L_1^{-1}. \end{equation} In the last formula the \textit{logarithmic derivative} $x_i'/x_i$ was replaced by its discrete version \[ D_l\, x_i(t) :=\dfrac{x_i(t+1)-x_i(t)}{x_i(t)}. \] Let $\alpha=b_1/b_i$ and $\beta=b_1(L_i^{-1}-L_1^{-1})=\alpha d_i-d_1$. Then $0<\beta<\alpha$ and
(\ref{d-m2}) can be written in the following way \begin{equation} \label{d-m3} D_l\, x_1(t)= \alpha D_l\, x_i(t) +\beta. \end{equation} We want to find a sufficient condition for (\ref{goto0}). In order to do it we formulate the following general question, which can be investigated independently of the above biological models.
\begin{problem} \label{prob1} Find parameters $\alpha$ and $\beta$, $0<\beta<\alpha$, such that the following condition holds:\\ (C) \ if $(u_n)$ and $(v_n)$ are arbitrary bounded sequences of positive numbers
satisfying \begin{equation} \label{p1} \frac{u_{n+1}-u_n}{u_n}= \alpha \frac{v_{n+1}-v_n}{v_n} +\beta, \end{equation} for $n\in\mathbb{N}$, then $\lim\limits_{n\to\infty} v_n=0$. \end{problem}
In the case when the model has the property of competitive exclusion (\ref{goto0}) one can ask if the dynamics of the $k$-dimensional model is the same as that of its restriction to the one-dimensional model. The answer to this question is positive for the continuous version, because the one-dimensional model has a very simple dynamics. In Section~\ref{ss:one} we also show that both dynamics are similar if the one-dimensional model has the shadowing property. A more interesting question is what can happen when condition (C) does not hold. One can expect that then subpopulations with different strategies can coexist even if condition (\ref{ineq}) holds. But we do not have a coexistence equilibrium (i.e. a positive stationary solution of (\ref{d-m})), which makes the problem more difficult. In Section~\ref{ss:periodic} we check that two-dimensional systems with $f$ related to the classical logistic, Beverton-Holt and Ricker models can have periodic solutions even in the case
when one-dimensional versions of these models have stationary globally stable solutions and the two-dimensional model has a locally stable boundary equilibrium $(x_1^*,0)$.
\section{Competitive exclusion} \label{s:mr} The solution of Problem~\ref{prob1} is formulated in the following theorem.
\begin{theorem} \label{th1} If \ $\alpha\le 1+\beta$ then condition (C) is fulfilled. If $\alpha> 1+\beta$ then we can find periodic sequences $(u_n)$ and $(v_n)$ of period two with positive elements which satisfy $(\ref{p1})$. \end{theorem}
\begin{lemma} \label{l:c} Consider the function \begin{equation} \label{l-1} g_n (x_1,\dots,x_n)=(\alpha x_1+\gamma)(\alpha x_2+\gamma)\cdots(\alpha x_n+\gamma) \end{equation} defined on the set $S_{n,m}=\{\mathbf x\in \mathbb R^n_+\colon \,x_1x_2\cdots x_n=m\}$, where $\alpha>0$, $\gamma\ge 0$, $m>0$ and $n$ is a positive integer. Then \begin{equation} \label{l-2} g_n (x_1,\dots,x_n)\ge \left(\alpha m^{1/n}+\gamma \right)^n. \end{equation} \begin{proof} The case $\gamma=0$ is obvious, so we assume that $\gamma>0$. We use the standard technique of the Lagrange multipliers for investigating problems on conditional extrema. Let \[ L(x_1,\dots,x_n)=g_n (x_1,\dots,x_n)+\lambda(m-x_1x_2\cdots x_n). \] Then \[ \frac{\partial L(x_1,\dots,x_n)}{\partial x_i} =\frac{\alpha g_n (x_1,\dots,x_n)}{\alpha x_i+\gamma} -\frac{\lambda x_1x_2\cdots x_n}{x_i}. \] Observe that if $\dfrac{\partial L(x_1,\dots,x_n)}{\partial x_i}=0$ for $i=1,\dots,n$, then $x_1=\dots=x_n=m^{1/n}$. It means that the function $g_n$
has only one local conditional extremal point, and this point is the global minimum because $g_n(\mathbf x)$ tends to infinity as $\|\mathbf x\|\to \infty$ on $S_{n,m}$. \end{proof}
\end{lemma} \begin{proof}[Proof of Theorem~\ref{th1}] Equation (\ref{p1}) can be written in the following form \begin{equation} \label{p2} \frac{u_{n+1}}{u_n}= \alpha \frac{v_{n+1}}{v_n} +\gamma, \end{equation} where $\gamma=\beta+1-\alpha$. Consider the case $\alpha\le 1+\beta$. Then $\gamma\ge 0$ and $\alpha+\gamma=\beta+1>1$. We show that if $(v_n)$ is a sequence of positive numbers such that $\limsup\limits_{n\to\infty} v_n>0$ and $(u_n)$ is a sequence of positive numbers such that (\ref{p2}) holds, then the sequence $(u_n)$ is unbounded. Indeed, then we can find an $\overline{m}>0$ and a subsequence $(v_{n_i})$ of $(v_n)$ such that $v_{n_i}\ge \overline m$ for $i\in\mathbb N$. We set $v_0=1$ and $x_i=v_{i}/v_{i-1}$ for $i\in\mathbb N$. Then $v_{n}=x_1\cdots x_n$ and $u_n=u_0g_n(x_1,\dots,x_n)$, where $u_0=u_1/(\alpha v_1+\gamma)$. If $m=x_1\cdots x_{n_i}$, then $m\ge \overline m$ and from Lemma~\ref{l:c} it follows that \[ u_{n_i}=u_0g_{n_i}(x_1,\dots,x_{n_i})\ge u_0\left(\alpha m^{1/n_i}+\gamma \right)^{n_i}\ge u_0\left(\alpha \overline m^{1/n_i}+\gamma \right)^{n_i}. \] Since $\lim\limits_{i\to\infty}\overline m^{1/n_i}=1$ and $\alpha+\gamma>1$ we obtain $\lim\limits_{i\to\infty} u_{n_i}=\infty$, which proves the first part of the theorem. Now we assume that $\alpha> 1+\beta$. Then $\gamma<0$. First we check that there exists $\theta>1$ such that \begin{equation} \label{per1} (\alpha \theta+\gamma)(\alpha \theta^{-1}+\gamma)=1. \end{equation} Equation (\ref{per1}) can be written in the following form \begin{equation} \label{per3}
\theta+\theta^{-1}=L, \textrm{ \, where $L=\frac{\alpha^2+\gamma^2-1}{\alpha|\gamma|}$}. \end{equation} Since $\alpha+\gamma=\beta+1>1$ we have
$\alpha^2+\gamma^2-1> 2\alpha|\gamma|$, which gives $L>2$ and implies that there exists $\theta>1$ such that (\ref{per1}) holds. Now we put $u_{2n-1}=c_1$, $u_{2n}=c_1(\alpha \theta+\gamma)$, $v_{2n-1}=c_2$, $v_{2n}=c_2\theta$ for $n\in\mathbb N$, where $c_1$ and $c_2$ are any positive constants. Then \[ \frac{u_{2n}}{u_{2n-1}}= \alpha \theta+\gamma=\alpha\frac{v_{2n}}{v_{2n-1}}+\gamma, \] and using (\ref{per1}) we obtain \[ \frac{u_{2n+1}}{u_{2n}}= \frac1{\alpha \theta+\gamma} =\alpha \theta^{-1}+\gamma=\alpha\frac{v_{2n+1}}{v_{2n}}+\gamma. \qedhere \] \end{proof} \begin{remark} \label{r:1} We have proved a slightly stronger condition than (C) in the case $\alpha\le 1+\beta$. Namely, if $(u_n)$ is a bounded sequence of positive numbers, $(v_n)$ is a sequence of positive numbers and they satisfy (\ref{p1}), then $\lim\limits_{n\to\infty} v_n=0$. In the proof of condition (C) we have not used the preliminary assumption that $\beta<\alpha$. \end{remark}
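The period-two construction in the second part of the proof can be checked numerically. The sketch below uses hypothetical values $\alpha=4$, $\beta=1/2$ (so $\alpha>1+\beta$), solves (\ref{per3}) for $\theta$, builds the sequences $(u_n)$, $(v_n)$ from the proof and verifies relation (\ref{p1}) for the first few indices.
\begin{verbatim}
import math

alpha, beta = 4.0, 0.5              # hypothetical values with alpha > 1 + beta
gamma = beta + 1.0 - alpha          # gamma < 0 in this regime

# theta > 1 solving (alpha*theta + gamma)*(alpha/theta + gamma) = 1, cf. (per1), (per3)
L = (alpha**2 + gamma**2 - 1.0) / (alpha * abs(gamma))
theta = (L + math.sqrt(L * L - 4.0)) / 2.0

c1, c2 = 1.0, 1.0                   # arbitrary positive constants
u = [c1 if n % 2 == 0 else c1 * (alpha * theta + gamma) for n in range(10)]
v = [c2 if n % 2 == 0 else c2 * theta for n in range(10)]

for n in range(9):                  # (p1): (u_{n+1}-u_n)/u_n = alpha*(v_{n+1}-v_n)/v_n + beta
    lhs = (u[n + 1] - u[n]) / u[n]
    rhs = alpha * (v[n + 1] - v[n]) / v[n] + beta
    print(abs(lhs - rhs))           # zero up to rounding errors
\end{verbatim}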
\section{Applications} \label{s:appl} Now we return to the model given by (\ref{d-m}). We assume that $f\colon \mathbb R^k_+\to [0,1]$ is a continuous function which satisfies (\ref{b:f}). From (\ref{b:f}) it follows that there exists $\overline{M}>0$ such that the set \[ X=\{\mathbf x\in\mathbb R_+^k\colon x_1+\dots+x_k\le \overline M\} \] is invariant under (\ref{d-m}), i.e., if $\mathbf x(0)\in X$ then $\mathbf x(t)\in X$ for $t>0$. We restrict the domain of the model to the set $X$. Let $T\colon X\to X$ be the transformation given by $T_i(\mathbf x)=(1-d_i)x_i+b_if(\mathbf x)x_i$, for $i=1,\dots,k$.
\subsection{Persistence} \label{ss:persistence} First we check that if $f(\mathbf 0)=1$ then the population is \textit{persistent}, i.e.,
$\liminf_{n \to\infty}\|T^n(\mathbf x)\|\ge \varepsilon_1 >0$ for all $\mathbf x\ne \mathbf 0$. This is a standard result from persistence theory but we check it to make the paper self-contained.
Since $b_i>d_i$ for $i=1,\dots,k$ we find $\varepsilon>0$ and $\delta>0$ such that \begin{equation} \label{ej-f-p} T_i(\mathbf x)\ge (1+\delta)x_i \quad\textrm{for $i=1,\dots,k$ and $\mathbf x \in B(\mathbf 0,\varepsilon)$}, \end{equation} where $B(\mathbf 0,\varepsilon)$ denotes the open ball in $X$ with center $\mathbf 0$ and radius $\varepsilon$. Moreover, since $T(\mathbf x)\ne \mathbf 0$ for $\mathbf x\ne 0$, the closed set $T(X\setminus B(\mathbf 0,\varepsilon))$ is disjoint from some ball $B(\mathbf 0,\varepsilon_1)$ with $\varepsilon_1\in (0,\varepsilon)$. Using (\ref{ej-f-p}), for each $\mathbf x \ne \mathbf 0$ we then find an integer $n_0(\mathbf x)$ such that $T^n(\mathbf x)\notin B(\mathbf 0,\varepsilon_1)$ for $n\ge n_0(\mathbf x)$.
\subsection{Convergence to one-dimensional dynamics} \label{ss:one} Now we present some corollaries of Theorem~\ref{th1} concerning the long-time behaviour of the population. The inequality $0<\alpha\le 1+\beta$ can be written in terms of birth and death coefficients as \begin{equation} \label{a:1} b_1(1-d_i)\le b_i(1-d_1)\quad\textrm{for $i=2,\dots,k$.} \end{equation} This means that if (\ref{ineq}) and (\ref{a:1}) hold, then all strategies except the first one become extinct. It suggests that the model should behave asymptotically, as $t\to\infty$, like a one-dimensional model corresponding to a population consisting of only the first strategy. This reduced model is given by the recurrence formula \begin{equation} \label{d-m-a} y(t+1)=S(y(t)), \end{equation} where $S(y)=y-d_1y+ b_1yf(y,0,\dots,0)$.
In order to check that the model given by (\ref{d-m}) has the same asymptotic behaviour as the transformation $S$, we need some auxiliary definitions. A sequence $(y_k)$ is called an $\eta$-\textit{pseudo-orbit} of a transformation $S$ if
$|S(y_k)- y_{k+1}|<\eta$ for all $k\ge 1$. The transformation $S$ is called \textit{shadowing} if for every $\delta>0$ there exists $\eta>0$ such that for each $\eta$-pseudo-orbit $(y_k)$ of $S$ there is a point $y$ such that
$|y_k -S^k(y)|<\delta$ for all $k\ge 1$.
\begin{theorem} \label{th-as} Assume that $f(\mathbf 0)=1$ and that conditions $(\ref{ineq})$, $(\ref{a:1})$ hold. Then \[ \lim_{t\to\infty} x_i(t)=0 \ \textrm{ for $\,i=2,\dots,k$}. \] If $S$ is shadowing then for each $\delta>0$ and for each initial point $\mathbf x(0)=(x_1,\dots,x_k)$ with $x_1>0$ there exists $t_0\ge 0$ such that $y(t_0)>0$ and \begin{equation} \label{wn-sh}
\big| x_1(t) - y(t)
\big| <\delta \textrm{ \ for $t\ge t_0$}. \end{equation} \end{theorem}
\begin{proof} Let us fix a $\delta>0$ and let $\eta>0$ be a constant from the shadowing property of $S$. From the uniform continuity of the function $f$ there is an $\varepsilon>0$ such that \begin{equation} \label{a:t1}
\overline M b_1\big|f(x_1,\dots,x_k)-f(x_1,0,\dots,0)
\big| <\eta \textrm{ if $\,\mathbf x\in X$, $x_2+\dots+x_k<\varepsilon$.} \end{equation} Since all strategies except the first one become extinct, there exists $t_0\ge 0$ such that $x_2(t)+\dots+x_k(t)<\varepsilon$ for $t\ge t_0$. From (\ref{a:t1}) it follows that \[
|x_1(t+1)-S(x_1(t))|<\eta \textrm{ \ for $t\ge t_0$} \] and, consequently,
the sequence $x_1(t_0),x_1(t_0+1),\dots$ is
an $\eta$-pseudo-orbit. Since $S$ is shadowing we have (\ref{wn-sh}). \end{proof}
The shadowing property has been intensively studied over the last thirty years and there are many results concerning the shadowing property for one-dimensional maps (cf. a survey paper by Ombach and Mazur \cite{OmbachMazur}). It is obvious that if $S$ has an asymptotically stable periodic orbit then $S$ is shadowing on the basin of attraction of this orbit. Moreover, for a continuous one-dimensional transformation the convergence of all iterates to a unique fixed point $x$ implies its global stability
\cite{Sedeghat}.
Thus, as a simple consequence of Theorem~\ref{th-as} we obtain \begin{corollary} \label{cor-as} Assume that $f(\mathbf 0)=1$ and that conditions $(\ref{ineq})$, $(\ref{a:1})$ hold. If $S$ has a fixed point $x_*>0$ and $\lim\limits_{n\to\infty}S^n(x)=x_*$ for all $x>0$, then for each initial point $\mathbf x(0)=(x_1,\dots,x_k)$ with $x_1>0$, we have $\lim\limits_{t\to\infty}\mathbf x(t)=(x_*,0,\dots,0)$. \end{corollary} Some applications of shadowing to semelparous populations similar to Theorem~\ref{th-as} and Corollary~\ref{cor-as} can be found in \cite{RudnickiWieczorek2010}. The interested reader will also find there some observations concerning the chaotic behaviour of such models. In particular, the model given by (\ref{d-m}) can exhibit chaotic behaviour if the suppression function is of the form $f(\mathbf x)=1-x_1-\dots-x_n$, i.e., it is a generalization of the logistic model.
\begin{remark}[Dynamical consistence] \label{r:d-c} If we replace $x'_i(t)$ with $(x_i(t+h)-x_i(t))/h$ in (\ref{c-m}) then we get \begin{equation} \label{d-mh} x_i(t+h)=x_i(t)-d_ihx_i(t)+ b_ih x_i(t)f(\mathbf x(t)),\quad i=1,\dots,k. \end{equation} One can ask if this scheme is dynamically consistent with (\ref{c-m}). Observe that inequalities (\ref{ineq}) also hold if we replace $b_i$ with $b_ih$ and $d_i$ with $d_ih$. The difference equation (\ref{d-mh}) is said to be \textit{dynamically consistent} with (\ref{c-m}) if they possess the same dynamical behavior such as local stability, bifurcations, and chaos \cite{LE}, or more specifically \cite{Mickens,RG} if they have the same given property, e.g. if the competitive exclusion takes place in both discrete and continuous models. The model (\ref{d-mh}) is biologically meaningful only if the death coefficients are $\le 1$, i.e., if \begin{equation} \label{w:h} 0<h\le h_{\max{}}=\min \{d_1^{-1},\dots,d_k^{-1}\}. \end{equation} We assume that $b_i$ and $d_i$ satisfy (\ref{ineq}), i.e., $b_1d_i>b_id_1$ for $i=2,\dots,k$. Let $b_{i,h}=b_ih$, $d_{i,h}=d_ih$. Now, (\ref{a:1}) applied to $b_{i,h}$ and $d_{i,h}$ gives \begin{equation} \label{a:1-r} b_1-b_i\le (b_1d_i-b_id_1)h\quad\textrm{for $i=2,\dots,k$}. \end{equation} In particular if (\ref{ineq}) holds and $b_i\ge b_1$ for $i=2,\dots,k$ then for all $h$ satisfying (\ref{w:h}) all strategies except the first one become extinct, i.e., the difference equation (\ref{d-mh}) is dynamically consistent with (\ref{c-m}) with respect to this property. We cannot expect ``full'' dynamical consistence if the above conditions hold, because in the case of the logistic map,
i.e., if $f(\mathbf x)=1-x_1-x_2$, the stationary point $\mathbf x_1^*=((b_1-d_1)b_1^{-1},0)$ of (\ref{c-m}) is globally stable but in the numerical scheme
(\ref{d-mh}) this point loses stability when $b_1h>2+d_1h$. \end{remark}
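As a quick illustration of this last point, the sketch below iterates the one-dimensional restriction of (\ref{d-mh}) with logistic suppression $f(y)=1-y$ for two hypothetical step sizes, one below and one above the threshold $b_1h=2+d_1h$: the first orbit settles at the fixed point $1-d_1/b_1$, while the second ends up on a $2$-cycle.
\begin{verbatim}
# One strategy, scheme (d-mh) with f(y) = 1 - y; coefficients are hypothetical.
b1, d1 = 3.0, 0.5                # fixed point: 1 - d1/b1 = 5/6
for h in (0.6, 0.95):            # stability threshold: b1*h < 2 + d1*h, i.e. h < 0.8
    y = 0.4
    for _ in range(5000):
        y = y - d1 * h * y + b1 * h * y * (1.0 - y)
    print(h, y)                  # h = 0.6: close to 5/6;  h = 0.95: point of a 2-cycle
\end{verbatim}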
\subsection{Periodic solutions} \label{ss:periodic} Theorem~\ref{th1} can be also useful if we look for periodic oscillation in the model given by (\ref{d-m}). We restrict our investigation to the two-dimensional model. We recall that if $\alpha> 1+\beta$, then the periodic sequences given by $u_{2n-1}=c_1$, $u_{2n}=c_1(\alpha \theta+\gamma)$, $v_{2n-1}=c_2$, $v_{2n}=c_2 \theta$
for $n\in\mathbb N$, satisfy $(\ref{p1})$. Here $c_1$ and $c_2$ are any positive constants, $\theta>1$ is a solution of the equation \[ (\alpha \theta+\gamma)(\alpha \theta^{-1}+\gamma)=1, \] $\alpha=b_1/b_2$, $\beta=\alpha d_2-d_1>0$, and $\gamma=1+\beta-\alpha=\alpha(d_2-1)+(1-d_1)<0$. Under these assumptions we are looking for $c_1, c_2>0$ such that \begin{equation} \left\{ \label{ukl-2'} \begin{aligned} &\theta=1-d_2+b_2f(c_1,c_2), \\ &1=\theta(1-d_2)+b_2 \theta f(c_1(\alpha \theta+\gamma),c_2\theta). \end{aligned} \right. \end{equation} This system is equivalent to \begin{equation} \label{ukl-3'} \left\{ \begin{aligned} &f(c_1,c_2)=\left(\theta+d_2-1\right)b_2^{-1} \\ &f(c_1(\alpha \theta+\gamma),c_2\theta) =\left(\theta^{-1}+d_2-1\right)b_2^{-1}. \end{aligned} \right. \end{equation} Since $f(\mathbf x)\in (0,1)$ for $\mathbf x\in X\setminus\{\mathbf 0\}$, we have the following necessary condition for the existence of positive solution of the system (\ref{ukl-3'}) :
\begin{equation} \label{e:wko} \theta<1+b_2-d_2\quad\textrm{and}\quad \theta<(1-d_2)^{-1}. \end{equation}
Let $f(\mathbf x)=\varphi(x_1+x_2)$, where $\varphi$ is a strictly decreasing function defined on the interval $[0,K)$, $0<K\le \infty$, such that $\varphi(0)=1$ and $\lim_{x\to K}\varphi(x)=0$. Define $m_1=\left( \theta+d_2-1\right)b_2^{-1}$,
$m_2=\left(\theta^{-1}+d_2-1\right)b_2^{-1}$, $p_1=\varphi^{-1}(m_1)$,
and $p_2=\varphi^{-1}(m_2)$. If (\ref{e:wko}) holds then the constants $p_1$, $p_2$
are well defined and $0<p_1<p_2$. Thus, we find a positive solution of system (\ref{ukl-3'}) if and only if (\ref{e:wko}) holds and
\begin{equation} \label{e:wkw} c_1+c_2=p_1 \quad\textrm{and}\quad c_1(\alpha \theta+\gamma)+c_2\theta=p_2. \end{equation} System (\ref{e:wkw}) has a unique solution
\begin{equation} \label{e:wkw2} c_1=
\frac{p_2-p_1\theta} {\alpha \theta+\gamma-\theta}, \quad c_2=
\frac{p_1(\alpha \theta+\gamma)-p_2} {\alpha \theta+\gamma-\theta}. \end{equation} Since $\alpha>1$, $\theta>1$, and $\beta>0$ we have \[ \alpha \theta+\gamma-\theta=\alpha \theta+1+\beta-\alpha-\theta=(\alpha-1)(\theta-1)+\beta>0. \] Thus system (\ref{ukl-3'}) has a positive solution if and only if (\ref{e:wko}) holds and \begin{equation} \label{e:wkw3} p_1\theta< p_2<p_1(\alpha \theta+\gamma). \end{equation}
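The construction above is explicit enough to be carried out numerically. The sketch below follows (\ref{per3}), (\ref{ukl-3'}) and (\ref{e:wkw2}) for the logistic suppression $\varphi(x)=1-x$ (i.e. $K=1$) with hypothetical coefficients for which (\ref{e:wko}) and (\ref{e:wkw3}) hold, and then iterates the two-dimensional map twice to confirm that $(c_1,c_2)$ is a period-two point.
\begin{verbatim}
import math

# hypothetical coefficients with b1/d1 > b2/d2 and alpha > 1 + beta
b1, b2, d1, d2 = 3.2, 0.8, 0.399, 0.1
alpha = b1 / b2
beta = alpha * d2 - d1
gamma = beta + 1.0 - alpha

# theta > 1 from (per3)
L = (alpha**2 + gamma**2 - 1.0) / (alpha * abs(gamma))
theta = (L + math.sqrt(L * L - 4.0)) / 2.0

# phi(x) = 1 - x on [0, 1], so phi^{-1}(m) = 1 - m, cf. (ukl-3')
p1 = 1.0 - (theta + d2 - 1.0) / b2
p2 = 1.0 - (1.0 / theta + d2 - 1.0) / b2

den = alpha * theta + gamma - theta          # cf. (e:wkw2)
c1 = (p2 - p1 * theta) / den
c2 = (p1 * (alpha * theta + gamma) - p2) / den
print("c1, c2 =", c1, c2)                    # both positive for these coefficients

def T(x1, x2):                               # the map (d-m) with f(x) = 1 - x1 - x2
    s = 1.0 - x1 - x2
    return (x1 - d1 * x1 + b1 * x1 * s, x2 - d2 * x2 + b2 * x2 * s)

print("after two steps:", T(*T(c1, c2)))     # returns to (c1, c2) up to rounding
\end{verbatim}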
Now we show how to find parameters $b_1,b_2,d_1,d_2$ such that (\ref{e:wko}) and (\ref{e:wkw3}) hold.
Assume that $\beta$ is sufficiently small. Since $\gamma=\beta+1-\alpha$, from (\ref{per3}) it follows \[ \theta+\theta^{-1}=\frac{2\alpha(\alpha-1)-2(\alpha-1)\beta+\beta^2}{\alpha(\alpha-1)-\alpha\beta}= 2+\frac{2\beta}{\alpha(\alpha-1)}+O(\beta^2). \] Let $\theta=1+\varepsilon$. Then
$\varepsilon=\sqrt{2/(\alpha^2-\alpha)}\,\sqrt{\beta}+O(\beta) $
and we can assume that $\varepsilon$ is also sufficiently small. Hence $\theta^{-1}=1-\varepsilon+O(\varepsilon^2)$, $\alpha\theta+\gamma=1+\alpha\varepsilon+O(\varepsilon^2)$, $m_1=(d_2+\varepsilon)b_2^{-1}$, and $m_2=(d_2-\varepsilon)b_2^{-1}+O(\varepsilon^2)$. Assume that $\varphi^{-1}$ is a $C^2$-function in a neighbourhood of the point $\bar x=d_2b_2^{-1}$. Then \begin{equation} \label{e:wkw4} p_1= A+Bb_2^{-1}\varepsilon+O(\varepsilon^2), \quad p_2= A-Bb_2^{-1}\varepsilon+O(\varepsilon^2), \end{equation} where $A=\varphi^{-1}(\bar x)$ and $B=(\varphi^{-1})'(\bar x)=1/\varphi'(A)$. Substituting (\ref{e:wkw4}) to (\ref{e:wkw3}) we rewrite (\ref{e:wkw3}) as \begin{equation} \label{e:wkw5} A+O(\varepsilon)<-2 Bb_2^{-1}<\alpha A+O(\varepsilon). \end{equation} Taking sufficiently small $\beta$, we are also able to check condition (\ref{e:wko}). Thus, if $A<-2 Bb_2^{-1}$, $\beta$ is sufficiently small and $\alpha$ is sufficiently large, both conditions (\ref{e:wko}) and (\ref{e:wkw3}) are fulfilled and a non-trivial periodic solution exists.
\begin{example} \label{ex1} We consider the two-dimensional model (\ref{d-m}) related to the logistic map, i.e., with $f(\mathbf x)=\varphi(x_1+x_2)$ and $\varphi(x)=1-x/K$ for $x\in [0,K]$ and $\varphi(x)=0$ for $x>K$. Then $\varphi^{-1}(x)=K(1-x)$, and so $A=K(1-d_2/b_2)$, $B=-K$. From (\ref{e:wkw5}) it follows that if $2b_2/b_1<b_2-d_2<2$, $d_1=b_1d_2/b_2-\beta$ and $\beta$ is sufficiently small, then there exists a periodic solution. Let us consider a special example with the following coefficients $b_1=2.02$, $b_2=0.505$, $d_1=0.0399$, $d_2=0.01$. Then $b_1/d_1>b_2/d_2$, $\alpha= b_1/b_2=4$, $\beta=\alpha d_2-d_1=0.0001$, $\gamma=-2.9999$, and $\theta\approx 1.00408$. Then all conditions hold and a positive periodic solution exists. If $K=1$ then the periodic sequence is given by $x_1(2n-1)\approx 0.8482$, $x_1(2n)\approx 0.8622$, $x_2(2n-1)\approx 0.1099$, $x_2(2n)\approx 0.1103$
for $n\in\mathbb N$.
It is interesting that in this case, one-dimensional models (i.e., with the birth and death coefficients $b_1,d_1$ or $b_2,d_2$) have positive and globally stable fixed points because $b_i<2+d_i$ for $i=1,2$ (see Section~\ref{ss:loc-stab}). Hence the two-dimensional model has a locally stable fixed point $(1-d_1/b_1,0)$ but this point is not globally stable. \end{example}
\begin{example} \label{ex2} We consider now the two-dimensional Beverton-Holt model with harvesting, i.e., a model of type (\ref{d-m}) with $f(\mathbf x)=\varphi(x_1+x_2)$ and $\varphi(x)=c/(c+x)$ for $x\in [0,\infty)$, $c>0$.
A one-dimensional version of this model always has one positive fixed point and this point is globally asymptotically stable (see Section~\ref{ss:loc-stab}). We have $\varphi^{-1}(x)=c/x-c$, and so $A=c(b_2/d_2-1)$, $B=-cb_2^2/d_2^2$. Inequality (\ref{e:wkw5}) takes the form \[ (b_2-d_2)+O(\varepsilon)<2b_2/d_2<\alpha (b_2-d_2)+O(\varepsilon). \] The first inequality is automatically fulfilled for sufficiently small $\beta$. The second inequality holds if $b_1>\dfrac{2b_2^2}{(b_2-d_2)d_2}$ and $\beta$ is sufficiently small and then a positive periodic solution exists. \end{example}
\begin{example} \label{ex3} We consider now the two-dimensional model (\ref{d-m}) related to the Ricker map, i.e., with $f(\mathbf x)=\varphi(x_1+x_2)$ and $\varphi(x)=e^{-cx}$ for $x\in [0,\infty)$. We have $\varphi^{-1}(x)=- c^{-1}\ln x$, $(\varphi^{-1})'(x)=- (cx)^{-1}$ and so $A=c^{-1}\ln(b_2/d_2)$, $B=-b_2/(cd_2)$. Inequality (\ref{e:wkw5}) takes the form \[ \ln(b_2/d_2) +O(\varepsilon)<2/d_2<\alpha \ln(b_2/d_2)+O(\varepsilon). \] Thus if $d_2e^{2/(\alpha d_2)}<b_2<d_2e^{2/d_2}$ and $\beta$ is sufficiently small, then a positive periodic solution exists. Now we give an example when $T$ has a positive periodic point and both one-dimensional models have globally stable fixed points, i.e., $b_r<d_re^{2/d_r}$ holds (see Section~\ref{ss:loc-stab}).
Let $b_1=1.0001e^2$, $b_2=b_1/4$, $d_1=0.9999$, $d_2=0.25$. The coefficients $\alpha= b_1/b_2=4$, $\beta=\alpha d_2-d_1=0.0001$, $\gamma=-2.9999$ are the same as in Example~\ref{ex1}. Thus $\theta\approx 1.00408$, $\theta^{-1}\approx 0.99594$, and $\alpha\theta+\gamma\approx 1.01642$. For $c=1$ we have $p_i=-\ln m_i$ for $i=1,2$ and we can check that the periodic sequence is given by $x_1(2n-1)\approx 1.49009$, $x_1(2n)\approx 1.51455$, $x_2(2n-1)\approx 0.000868$, $x_2(2n)\approx 0.000871$
for $n\in\mathbb N$. \end{example} \begin{remark} \label{r:per} We have restricted the examples to $f$ of the form $f(\mathbf x)=\varphi(x_1+x_2)$, with the typical $\varphi$ used in classic competition models, in order to show that these models can lack a coexistence equilibrium and yet have a positive periodic solution. Formula (\ref{ukl-3'}) can be used to find periodic solutions of models with other $f$'s. \end{remark}
\subsection{Stability of fixed points} \label{ss:loc-stab} In the previous sections we used some results concerning local and global stability of the transformation $T$; to make our exposition self-contained, we devote this section to that subject. First we look for fixed points of the transformation $T$ and check their local stability. We assume that $f(\mathbf 0)=1$ and \begin{equation} \label{strict} L_1>L_2>\dots>L_k. \end{equation} Let $\mathbf x^*$ be a fixed point of $T$, i.e., $T({\mathbf x}^*)={\mathbf x}^*$. Then, for each $i=1,\dots,k$, either $x^*_i=0$ or $f(\mathbf x^*)=d_i/b_i=L_i^{-1}$. From (\ref{strict}) it follows that $\mathbf x^*$ is a fixed point of $T$ if and only if
$\mathbf x^*=\mathbf x^*_0=\mathbf 0$ or $\mathbf x^*=\mathbf x^*_r=(0,\dots,x^*_r,\dots,0)$,
where $r\in\{1,\dots,k\}$ and $f(\mathbf x^*_r)=L_r^{-1}$.
We assume that the functions $x\mapsto f(0,\dots,x,\dots,0)$ are strictly decreasing. Then $T$ has exactly $k+1$ fixed points $ \mathbf x^*_r$, $r=0,\dots,k$. Let $A_r$ be the matrix with
$a_{ij}^r=\dfrac{\partial T_i}{\partial x_j}({\mathbf x_r^*})$. We have \begin{equation} \label{wzpoch}
\dfrac{\partial T_i}{\partial x_j}({\mathbf x}) =\delta_{ij}(1-d_i+b_if(\mathbf x))+b_ix_i\dfrac{\partial f}{\partial x_j}(\mathbf x), \end{equation} where $\delta_{ii}=1$ and $\delta_{ij}=0$ if $i\ne j$. Since $f(\mathbf 0)=1$ we obtain $a_{ii}^0=1-d_i+b_i>1$ and $a_{ij}^0=0$ if $i\ne j$, and therefore $\mathbf x_0^*=\mathbf 0$ is a repulsive fixed point. Now we consider a point $\mathbf x_r^*$ with $r>0$. Then $f(\mathbf x_r^*)=d_r/b_r$ and from (\ref{wzpoch}) we obtain
\[
a_{ij}^r=\dfrac{\partial T_i}{\partial x_j}(\mathbf x_r^*)=
\begin{cases}
\delta_{ij}(1-d_i+b_id_r/b_r), \quad \textrm{if $i\ne r$},\\
\delta_{ij}+b_rx_r\dfrac{\partial f}{\partial x_j}(\mathbf x_r^*),
\quad \textrm{if $i= r$}. \end{cases} \] Since $A_r$ differs from a diagonal matrix only in its $r$-th row, it has the $k$ eigenvalues $\lambda_i=a^r_{ii}$, $i=1,\dots,k$.
We have \[ \begin{aligned} \lambda_i&=1-d_i+b_id_r/b_r=1 +b_i(L_r^{-1}-L_i^{-1}),\textrm{ if $i\ne r$},\\ \lambda_r&=1+b_rx_r\dfrac{\partial f}{\partial x_r}(\mathbf x_r^*). \end{aligned} \]
Observe that if $r=1$ then $\lambda_i\in (0,1)$ for $i>1$. If
we assume that $-2<b_1x_1\dfrac{\partial f}{\partial x_1}(\mathbf x_1^*)<0$, then also $\lambda_1\in (-1,1)$ and the fixed point $\mathbf x_1^*$ is locally asymptotically stable.
If $r>1$ then $\lambda_i>1$ for $i<r$ and, consequently,
the fixed point $\mathbf x_r^*$ is not asymptotically stable.
But if
$-2<b_rx_r\dfrac{\partial f}{\partial x_r}(\mathbf x_r^*)<0$,
the point $\mathbf x_r^*$ is locally \textit{semi-stable}, i.e., is stable for the transformation $T$ restricted to the set \[ X_r=\{{\mathbf x}\in X \colon \,x_1=\dots=x_{r-1}=0\}. \]
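The following minimal numerical sketch (with hypothetical sample coefficients and the logistic suppression function treated below) assembles $A_r$ entrywise from formula (\ref{wzpoch}) and confirms that its spectrum agrees with the eigenvalue formulas above.
\begin{verbatim}
import numpy as np

# hypothetical sample coefficients for k = 3 subpopulations (b_i > d_i > 0)
b = np.array([1.8, 1.2, 0.9])
d = np.array([0.3, 0.4, 0.5])
K = 10.0
k = len(b)

f = lambda x: 1.0 - x.sum() / K        # logistic suppression function
df = lambda x, j: -1.0 / K             # its partial derivative w.r.t. x_j

for r in range(k):
    # boundary fixed point x_r^*: only the r-th coordinate is non-zero,
    # determined by f(x_r^*) = d_r/b_r, i.e. x_r = K(1 - d_r/b_r)
    x = np.zeros(k)
    x[r] = K * (1.0 - d[r] / b[r])

    # Jacobian entries a_{ij}^r according to formula (wzpoch)
    A = np.array([[(i == j) * (1.0 - d[i] + b[i] * f(x))
                   + b[i] * x[i] * df(x, j)
                   for j in range(k)] for i in range(k)])

    # predicted eigenvalues: 1 - d_i + b_i d_r/b_r for i != r,
    # and 1 + b_r x_r * (partial f / partial x_r) for i = r
    lam = 1.0 - d + b * d[r] / b[r]
    lam[r] = 1.0 + b[r] * x[r] * df(x, r)

    assert np.allclose(np.sort(np.linalg.eigvals(A).real), np.sort(lam))
\end{verbatim}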
In the case of the logistic map $f(\mathbf x)=1-(x_1+\dots +x_k)/K$ we have $x_r=K(1-d_r/b_r)$ and $\dfrac{\partial f}{\partial x_r}(\mathbf x_r^*)=-1/K$, so $b_rx_r\dfrac{\partial f}{\partial x_r}(\mathbf x_r^*)=-(b_r-d_r)$ and the conditions for stability (or semi-stability) of $\mathbf x_r^*$ reduce to $b_r<2+d_r$ (here $b_r>d_r$, since $x_r>0$). If the positive fixed point $x^*$ of a one-dimensional logistic map is locally asymptotically stable,
then this point is globally stable, i.e., $T^n(x)\to x^*$ for $x\in (0,K)$. Thus, Example~\ref{ex1} shows that the behaviour of a $k$-dimensional logistic map
and its one-dimensional restrictions can differ: the $k$-dimensional map can have a locally stable fixed point $\mathbf x_1^*$ which is nevertheless not globally asymptotically stable.
Consider a model with the Beverton-Holt birth rate \[ f(\mathbf x)=\dfrac{c}{c+x_1+\dots +x_k}. \] Then we have $x_r=c\left(\dfrac{b_r}{d_r}-1\right)$ and $\dfrac{\partial f}{\partial x_r}(\mathbf x_r^*)=-\dfrac{c}{(c+x_r)^2}$. The conditions for stability (or semi-stability) of $\mathbf x_r^*$ reduce to the inequality $b_rx_rc< 2(c+x_r)^2$ or, equivalently, to $b_r^2/d_r-b_r<2b_r^2/d_r^2$, which always holds because $0< d_r\le 1$. The positive fixed point $x^*$ of the one-dimensional map $T$ is globally stable because $x<T(x)<x^*$ for $x\in (0,x^*)$ and $T(x)<x$ for $x>x^*$. Example~\ref{ex2} shows that the two-dimensional map can have a locally stable fixed point $\mathbf x_1^*$ which is not globally asymptotically stable.
In the case of the Ricker map $f(\mathbf x)=e^{-c(x_1+\dots +x_k)}$ we have $x_r=\dfrac 1c\ln\dfrac{b_r}{d_r}$ and $\dfrac{\partial f}{\partial x_r}(\mathbf x_r^*)=-c\dfrac{d_r}{b_r}$, so $b_rx_r\dfrac{\partial f}{\partial x_r}(\mathbf x_r^*)=-cd_rx_r$ and the conditions for stability (or semi-stability) of $\mathbf x_r^*$ reduce to $cd_rx_r<2$, which holds precisely when $b_r<d_re^{2/d_r}$. The last inequality is also sufficient for global stability of the fixed point (see e.g. \cite[Th.\ 9.16]{Thieme}).
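As a complementary check, the following sketch (with hypothetical random coefficients) verifies numerically, for all three suppression functions above, that the condition $-2<b_rx_r\frac{\partial f}{\partial x_r}(\mathbf x_r^*)<0$ is equivalent to the stated closed-form thresholds.
\begin{verbatim}
import numpy as np

K, c = 10.0, 1.0   # hypothetical carrying capacity and scaling constant

# for each phi: fixed-point coordinate x_r, derivative phi'(x_r), threshold on b
models = {
    "logistic":      (lambda b, d: K * (1 - d / b),
                      lambda b, d, x: -1.0 / K,
                      lambda b, d: b < 2 + d),
    "Beverton-Holt": (lambda b, d: c * (b / d - 1),
                      lambda b, d, x: -c / (c + x) ** 2,
                      lambda b, d: True),
    "Ricker":        (lambda b, d: np.log(b / d) / c,
                      lambda b, d, x: -c * d / b,
                      lambda b, d: b < d * np.exp(2 / d)),
}

rng = np.random.default_rng(0)
for name, (xr, dphi, threshold) in models.items():
    for _ in range(10000):
        d = rng.uniform(0.05, 1.0)
        b = d + rng.uniform(0.01, 30.0)          # b > d, so x_r > 0
        x = xr(b, d)
        stable = -2 < b * x * dphi(b, d, x) < 0  # (semi-)stability condition
        assert stable == threshold(b, d), (name, b, d)
\end{verbatim}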
\section{Conclusion} In this paper we consider a discrete-time strong competition model. While in its continuous counterpart the population with the maximal turnover coefficient drives the other subpopulations to extinction, the discrete-time model need not have this property. We give sufficient conditions for competitive exclusion in the discrete model. Although this model does not have a coexistence equilibrium, it can have a positive periodic solution. It is interesting that this periodic solution can exist even when the one-dimensional restrictions of the model have globally stable stationary solutions. Theorem~\ref{th1} can also be applied to models in which the suppression function $f$ depends on other factors; for example, $f$ can include the resource density. It would be interesting to generalize Theorem~\ref{th1} to models with weaker competition, i.e., when the suppression function is not identical for all subpopulations, or to discrete-continuous hybrid models \cite{GHL,ML}, or to equations on time scales \cite{BP}.
\section*{Acknowledgments} The author is grateful to Dr. Magdalena Nockowska for several helpful discussions while this work was in progress. This research was partially supported by the National Science Centre (Poland) Grant No. 2014/13/B/ST1/00224.
\end{document}
\begin{document}
\begin{abstract}
We present a completely new structure theoretic approach to the dilation theory of linear operators. Our main result is the following theorem: if $X$ is a super-reflexive Banach space and $T$ is contained in the weakly closed convex hull of all invertible isometries on $X$, then $T$ admits a dilation to an invertible isometry on a Banach space $Y$ with the same regularity as $X$. The classical dilation theorems of Sz.-Nagy and Akcoglu-Sucheston are easy consequences of our general theory. \end{abstract}
\title{A Toolkit for Constructing Dilations on Banach Spaces}
\section{Introduction} \label{sec:introduction}
Consider a bounded linear operator $T$ on a Banach space $X$. We say that $T$ has a dilation to a Banach space $Y$ if there exist linear contractions $J\colon X \to Y$ and $Q\colon Y \to X$ and an invertible linear isometry $U\colon Y \to Y$ such that \begin{equation}
T^n = QU^nJ \qquad \text{ for all } n\in \mathbb{N}_0. \label{eq:dilation} \end{equation} It follows from this definition that $T$ is necessarily contractive. Constructing a dilation for a given operator $T$ is a very subtle endeavor since the questions whether such a dilation exists and, granted that it exists, whether it is useful in applications depend crucially on the choice of $Y$.
In concrete applications one is usually interested in $Y$ having the same regularity as the original space $X$. If, for instance, $X$ is a Hilbert space or an $L^p$-space, then $Y$ should belong to the same class. One therefore requires $Y$ to belong to a prescribed class of Banach spaces $\mathcal{X}$. The question whether a dilation to such a space exists is a delicate one, and for some operators it may not be possible to find a dilation in the class $\mathcal{X}$. For instance, by a result of Sz.-Nagy every Hilbert space contraction has a dilation in the class of Hilbert spaces~\cite[Theorem~1.1]{Pis01}, whereas for $p \in (1, \infty) \setminus \{2\}$, as a direct consequence of~\cite[Theorem~3]{Pel81}, there exist contractions on $L^p$ that do not have a dilation in the class of all $L^p$-spaces.
The applications of dilation results are plenty and profound. For example, Sz.-Nagy's dilation, in conjunction with the spectral theorem for normal operators, implies von Neumann's inequality: every Hilbert space contraction $T \in \mathcal{L}(H)$ satisfies \begin{equation*}
\norm{p(T)}_{\mathcal{L}(H)} \le \sup_{\abs{z} = 1} \abs{p(z)} \end{equation*} for all complex polynomials $p$. A second celebrated dilation theorem due to Akcoglu and Sucheston~\cite{AkcSuc77} states that every positive contraction on a reflexive $L^p$-space has a dilation to a positive invertible isometry on another $L^p$-space for the same $p$. As a consequence one obtains for positive contractions on $L^p$ the almost everywhere convergence of ergodic means~\cite{Akc75} and the validity of Matsaev's inequality~\cite[Theorem~9]{Pel81}. Further, Akcoglu and Sucheston's theorem is used to deduce fundamental properties of both discrete and continuous positive contractive semigroups on reflexive $L^p$-spaces such as the boundedness of their $H^{\infty}$-calculus (\cite[Remark~4.9c)]{Wei01} and~\cite[Theorem~8.3]{Mer14}) or maximal $L^q$-regularity~\cite[Theorem~1.3]{Blu01b}.
\subsection*{A structure theoretic approach}
Usually, proofs of dilation theorems are explicitly adapted to the spaces and operators under consideration and make massive use of their particular properties. As a concrete instance of this observation, we refer to the detailed presentation of Akcoglu and Sucheston's dilation theorem given in~\cite[Chapters~2~\&~3]{Fendler1998}. This makes the proof difficult to understand at an abstract level and, moreover, the proof does not really clarify the exact role of the geometry of the underlying Banach space $X$. Furthermore, although at least three different proofs for Akcoglu and Sucheston's theorem are nowadays known and have been known for several decades, namely those in \cite{AkcSuc77}, \cite{Pel81} and~\cite{NagPal82}, no general technique seems to be available to construct dilations on more general spaces. To the best of our knowledge, no dilation result is known for any non-trivial class of contractions on general super-reflexive or UMD Banach spaces.
In order to address those issues we develop a new, structure theoretic approach to dilation problems. We introduce the concept of simultaneous dilations for a set of operators, which turns out to be the right framework for proving general dilation theorems. Based on this concept, we develop a toolkit for constructing dilations on general classes of reflexive Banach spaces. This toolkit essentially consists of topological and algebraic closedness results. Given a set of simultaneously dilating operators, our main result asserts that its convex hull also admits a simultaneous dilation to a Banach space with the same regularity. Moreover, it is easy to see that the existence of dilations is preserved by strong operator limits. As a consequence, the weakly closed convex hull of all invertible isometries on a Banach space simultaneously dilates to a space with similar regularity (Corollary~\ref{cor:convex-combinations-of-isometries}). Hence, proving dilation theorems reduces to the task of finding approximation theorems on Banach spaces, and it is only here that the particular geometry of the Banach space comes into play. As a consequence, we achieve the following three goals:
\begin{enumerate}
\item[(i)] We show that every convex combination of invertible isometries on a super-reflexive or UMD space dilates to an invertible isometry on another space of the same class (Theorem~\ref{thm:super-reflexive-spaces} and Corollary~\ref{cor:umd-spaces}).
\item[(ii)] We give a new proof of Akcoglu and Sucheston's dilation theorem (Subsection~\ref{subsec:L-p}).
\item[(iii)] We generalize their theorem by establishing \emph{simultaneous} dilations (Theorem~\ref{thm:akcoglu-simultaneously}). \end{enumerate}
\subsection*{Dilation and regularity}
As pointed out above, the existence of a dilation crucially depends on the considered class of Banach spaces. Let us demonstrate by a simple construction that, for \emph{every} contraction $T$ on a Banach space $X$, there exists a Banach space $Y$ such that $T$ dilates to an invertible isometry on $Y$.
\begin{construction} \label{constr:simple-dilation-ell-1}
Let $T\colon X \to X$ be a contractive linear operator on a Banach space $X$. Choose $Y \coloneqq \ell^{1}(\mathbb{Z};X)$, let $U\colon \ell^1(\mathbb{Z};X) \to \ell^1(\mathbb{Z};X)$ be the right shift $(x_n)_{n \in \mathbb{Z}} \mapsto (x_{n-1})_{n \in \mathbb{Z}}$, let $J\colon X \to Y$ be the injection into the zeroth component, meaning that $Jx = (\ldots, 0, x, 0 , \ldots)$ for all $x \in X$, and define $Q\colon Y \to X$ by \begin{align*}
Q(x_n)_{n \in \mathbb{Z}} = \sum_{n = 0}^\infty T^n x_n \end{align*} for all $(x_n)_{n \in \mathbb{Z}} \in Y$. Then $U$ is an invertible isometry on $Y$, the operators $J$ and $Q$ are contractive and~\eqref{eq:dilation} holds. \end{construction}
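The identity~\eqref{eq:dilation} for Construction~\ref{constr:simple-dilation-ell-1} can also be checked numerically. The following sketch is a hypothetical finite truncation with $X = \mathbb{R}^d$: it replaces $\ell^1(\mathbb{Z};X)$ by sequences indexed by $0,\dots,M$, which suffices to test $T^n = QU^nJ$ for $n \le M$ (the truncated shift is, of course, no longer an invertible isometry; that part of the construction genuinely requires the full two-sided sequence space).
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)
d, M = 4, 6

T = rng.standard_normal((d, d))
T /= 2 * np.linalg.norm(T, 2)      # scale T to a (Euclidean) contraction

def J(x):                          # injection into the zeroth component
    y = np.zeros((M + 1, d)); y[0] = x; return y

def U(y):                          # right shift (x_n) -> (x_{n-1})
    z = np.zeros_like(y); z[1:] = y[:-1]; return z

def Q(y):                          # Q(x_n) = sum_{n >= 0} T^n x_n
    return sum(np.linalg.matrix_power(T, n) @ y[n] for n in range(M + 1))

x = rng.standard_normal(d)
for n in range(M + 1):
    y = J(x)
    for _ in range(n):
        y = U(y)
    assert np.allclose(Q(y), np.linalg.matrix_power(T, n) @ x)
\end{verbatim}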
A similar -- and somewhat dual -- construction can be found on $Y = \ell^{\infty}(\mathbb{Z};X)$; see also \cite{Stroescu1973} for a related construction. The above construction demonstrates why dilation theory is all about the choice of the space $Y$. It is easy to construct a dilation to \emph{some} Banach space $Y$; this space, though, does not inherit any regularity properties such as reflexivity from $X$.
In this context it is also worthwhile pointing out that, in case $X$ is an $L^1$-space, the space $Y$ from Construction~\ref{constr:simple-dilation-ell-1} is an $L^1$-space, too. However, if the underlying measure space of $X$ is finite, say a probability space, the underlying measure space of $Y$ is only $\sigma$-finite. This drawback prevents us, for instance, from applying Construction~\ref{constr:simple-dilation-ell-1} in probability theory.
\subsection*{Outline of the paper}
In Section~\ref{sec:tool-kit} we introduce our framework and then state our main result, Theorem~\ref{thm:main-result}, and an important consequence, Corollary~\ref{cor:convex-combinations-of-isometries}. In Section~\ref{sec:elementary-properties} we show some simple stability properties of dilations. In Sections~\ref{sec:convex-combinations} and~\ref{sec:convex-combinations-simultaneous} we prove that dilations are well-behaved with respect to convex combinations. We consciously chose to introduce a bit of redundancy in the Sections~\ref{sec:convex-combinations} and~\ref{sec:convex-combinations-simultaneous} to make the quite technical proof of Theorem~\ref{thm:convex-combinations-simultaneous} more transparent. In Section~\ref{sec:super-properties} we use our main result to deduce dilation results on Banach spaces satisfying certain regularity properties such as super-reflexive spaces and UMD-spaces. In Section~\ref{sec:L-p} we discuss how our main result can be used to reprove the Akcoglu--Sucheston dilation theorem on $L^p$-spaces and to even obtain a generalization of it; we also show that the Sz.-Nagy dilation theorem on Hilbert spaces is a direct consequence of our approach. We conclude with an outlook in Section~\ref{sec:outlook}. Appendix~\ref{appendix:group-theory} contains a few simple results from group theory used in the proofs of Theorems~\ref{thm:convex-combination} and~\ref{thm:convex-combinations-simultaneous}.
\subsection*{Related literature}
For information on the Sz.-Nagy's dilation theorem on Hilbert spaces we refer the reader to \cite{SFBK10}. For several predecessors of Akcoglu and Sucheston's result we refer the reader to the references in~\cite{AkcSuc77}. For a certain class, the Ritt operators, the existence of dilations can be characterized (see~\cite[Theorem~4.1]{ArhFacMer17} and~\cite[Theorem~4.8]{ArhMer14}). A dilation theorem on rearrangement invariant Banach function spaces due to Peller can be found in \cite[Section~6, Theorem~7]{Pel81}. Dilations on $L^1$-spaces can, for example, be constructed by using methods from the theory of Markov processes; see for instance \cite{Kern1977}; moreover, the dilations constructed in this way are well-behaved with respect to spectral properties \cite[Section~5]{Kern1977}. Dilation results on non-commutative spaces such as $W^*$-algebras are of importance in quantum physics; a systematic treatment of them was initiated by K\"ummerer in the 1980s; see for example~\cite{Kuemmerer1985a}. The situation on non-commutative $L^p$-spaces is for instance discussed in~\cite{Junge2007} and in the recent paper \cite{ArhancetPreprint}.
\subsection*{Preliminaries}
Throughout the paper the underlying scalar field of all Banach spaces is allowed to be either $\mathbb{R}$ or $\mathbb{C}$, but we assume that it is the same field for all occurring Banach spaces. If $X$ is a Banach space, then we denote the space of all bounded linear operators from $X$ to $X$ by $\mathcal{L}(X)$. For the product of finitely many operators $T_1,\dots,T_n \in \mathcal{L}(X)$ we use the notation \begin{align*}
\prod_{k=1}^n T_k \coloneqq T_1 \dots T_n, \end{align*} i.e.\ the operator with the lowest index is placed leftmost in the product. Moreover, we use the common convention that $\prod_{k=1}^0 T_k \coloneqq \Id_X$, i.e.\ the empty product is defined to be the identity operator.
\section{The dilation toolkit and main results} \label{sec:tool-kit}
In this section we introduce the framework for our theory, we present our main results in Theorem~\ref{thm:main-result} and Corollary~\ref{cor:convex-combinations-of-isometries} and we give some fundamental characterizations of dilation properties.
\subsection*{The framework}
Let $I$ be a non-empty index set, let $(X_i)_{i \in I}$, $(Y_i)_{i\in I}$ be two families of Banach spaces and let $(T_i)_{i \in I}$ be a family with $T_i \in \mathcal{L}(X_i,Y_i)$ for all $i \in I$. For every ultrafilter $\mathcal{U}$ on $I$ we denote the ultraproducts of the families $(X_i)_{i \in I}$ and $(Y_i)_{i \in I}$ along $\mathcal{U}$ by $\prod_{\mathcal{U}} X_i$ and $\prod_{\mathcal{U}} Y_i$, respectively; the ultraproduct of the operator family $(T_i)_{i \in I}$ along $\mathcal{U}$ is denoted by $\prod_{\mathcal{U}} T_i$. Let $x_i \in X_i$ for each index $i \in I$. Then we denote the equivalence class of the family $(x_i)_{i \in I}$ within $\prod_{\mathcal{U}} X_i$ by $(x_i)_\mathcal{U}$. If $X_i = X$ for a Banach space $X$ and all $i \in I$, then we use the abbreviation $X^\mathcal{U} \coloneqq \prod_\mathcal{U} X_i$; in this case the ultraproduct $X^\mathcal{U}$ is called an ultrapower of $X$. In a canonical way every operator $T \in \mathcal{L}(X)$ induces an operator on $X^\mathcal{U}$ which we denote by $T^\mathcal{U}$. For an introduction to the theory of ultraproducts we refer the reader to~\cite{Hei80} and~\cite[Section~8]{DJT95}.
Let $p \in (1,\infty)$ and $n \in \mathbb{N}$. For a Banach space $X$ we denote the space $X^n$, endowed with the norm \begin{align*}
\|(x_1,\dots,x_n)\|_p \coloneqq \big(\sum_{k=1}^n \|x_k\|^p\big)^{1/p}, \end{align*} by $\ell^p_n(X)$. Throughout we often consider classes of Banach spaces that fulfill a set of conditions adapted to our constructions. Fix a number $p \in (1,\infty)$.
\begin{assumptions} \label{ass:framework}
We say that a class of Banach spaces $\mathcal{X}$ fulfills Assumptions~\ref{ass:framework} if the following conditions hold.
\begin{enumerate}
\item[(a)] The class $\mathcal{X}$ is stable with respect to finite $\ell^p$-powers, i.e.\ for every $X \in \mathcal{X}$ and every $n \in \mathbb{N}$ we have $\ell^p_n(X) \in \mathcal{X}$.
\item[(b)] The class $\mathcal{X}$ is ultra-stable, i.e.\ for every family of spaces $(X_i)_{i \in I}$ in $\mathcal{X}$ and every ultrafilter $\mathcal{U}$ on $I$ we have $\prod_{\mathcal{U}} X_i \in \mathcal{X}$.
\item[(c)] Every space $X \in \mathcal{X}$ is reflexive.
\end{enumerate} \end{assumptions}
Let us mention two simple but important classes which fulfill Assumptions~\ref{ass:framework}.
\begin{example}\label{ex:class_hilbert_space}
Let $p = 2$. Then the class of all Hilbert spaces fulfills Assumptions~\ref{ass:framework}. \end{example}
\begin{example}
Fix $p \in (1,\infty)$. Then the class of all $L^p$-spaces (over arbitrary measure spaces) fulfills Assumptions~\ref{ass:framework}. \end{example}
\subsection*{Super-reflexive spaces}
Recall that a Banach space $Z$ is called \emph{super-reflexive} if every ultrapower of $Z$ is reflexive. If a class of Banach spaces $\mathcal{X}$ fulfills Assumptions~\ref{ass:framework} then, as a consequence of~(b) and~(c), all spaces in $\mathcal{X}$ are super-reflexive. Conversely, we now show that, for every super-reflexive Banach space $Z$, there exists a class $\mathcal{X}_Z$ of Banach spaces that contains $Z$ and fulfills Assumptions~\ref{ass:framework}. We need the following terminology.
\begin{definition}\label{def:finitely_representable}
A Banach space $X$ is \emph{finitely representable} in a second Banach space $Z$, and we write $X \xhookrightarrow{f} Z$ for this, if for every finite dimensional subspace $E \subseteq X$ and every $\epsilon > 0$ there exists a subspace $F \subseteq Z$ and an isomorphism $u\colon E \to F$ with $\norm{u} \normalnorm{u^{-1}} \le 1 + \epsilon$. \end{definition}
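For instance, $X \xhookrightarrow{f} Z$ whenever $X$ is isometrically isomorphic to a subspace of $Z$, and every ultrapower of $Z$ is finitely representable in $Z$ (see \cite[Lemma~11.66]{Pis16}).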
The following elementary observation is quite useful for our purposes.
\begin{lemma}\label{lem:ell_2_finitely_repres}
Let $X$ and $Z$ be Banach spaces with $X \xhookrightarrow{f} Z$. Then one has $\ell^2_n(X) \xhookrightarrow{f} \ell^2(Z)$ for all $n \in \mathbb{N}$. \end{lemma} \begin{proof}
Let $n \in \mathbb{N}$, $\epsilon > 0$ and let $E \subseteq \ell^2_n(X)$ be a finite dimensional subspace. Choose a basis $y_1, \ldots, y_m$ of $E$. We write $y_k = (x_{k1}, \ldots, x_{kn})$ for all $k = 1, \ldots, m$ and we set
\begin{equation*}
\tilde{E} = \linspan \big\{ x_{kl}: k \in \{ 1, \ldots, m \}, \, l \in \{ 1, \ldots, n \} \big\} \subseteq X.
\end{equation*}
By assumption, there exist a subspace $\tilde{F} \subseteq Z$ and an isomorphism $\tilde{u}\colon \tilde{E} \to \tilde{F}$ with $\norm{\tilde{u}} \normalnorm{\tilde{u}^{-1}} \le 1 + \epsilon$. Let $z_k = (\tilde{u}(x_{k1}), \ldots, \tilde{u}(x_{kn}),0, \ldots) \in \ell^2(Z)$ for $k = 1, \ldots, m$ and $F = \linspan \{ z_1, \ldots, z_m \} \subseteq \ell^2(Z)$. Then $u\colon \sum_{k=1}^m a_k y_k \mapsto \sum_{k=1}^m a_k z_k$ is an isomorphism from $E$ onto $F$ with $\norm{u} \normalnorm{u^{-1}} \le 1 + \epsilon$. \end{proof}
It is well-known that a Banach space $Z$ is super-reflexive if and only if every Banach space $X$ that is finitely representable in $Z$ is reflexive. We can now show that every super-reflexive Banach space is contained in a class of spaces which fulfill the Assumptions~\ref{ass:framework}.
\begin{proposition}\label{prop:X_Z_satisfies_assumptions}
Let $Z$ be a super-reflexive Banach space and define the class
\begin{equation*}
\mathcal{X}_Z \coloneqq \left\{ X \text{ Banach space}: X \xhookrightarrow{f} \ell^2(Z) \right\}.
\end{equation*}
Then $\mathcal{X}_{Z}$ contains $Z$ and fulfills Assumptions~\ref{ass:framework} for $p=2$. \end{proposition} \begin{proof}
Since $Z$ is isometrically isomorphic to a subspace of $\ell^2(Z)$ we clearly have $Z \in \mathcal{X}_Z$. Let us now show that $\mathcal{X}_Z$ fulfills Assumptions~\ref{ass:framework}.
Since $Z$ is super-reflexive, so is $\ell^2(Z)$. Hence, every space $X \in \mathcal{X}_Z$ is reflexive, which shows~(c). For (a) let $n \in \mathbb{N}$ and $X \in \mathcal{X}_Z$. By Lemma~\ref{lem:ell_2_finitely_repres} one has $\ell^2_n(X) \xhookrightarrow{f} \ell^2(\ell^2(Z)) = \ell^2(Z)$, where the last equality holds in the sense of isometric isomorphisms. The remaining property~(b) of Assumptions~\ref{ass:framework} follows from the fact that every ultra power of a Banach space is finitely representable in the space itself \cite[Lemma~11.66]{Pis16}. \end{proof}
In Section~\ref{sec:super-properties} we combine Proposition~\ref{prop:X_Z_satisfies_assumptions} with so-called \emph{super-properties} of Banach spaces to find classes of Banach spaces which fulfill Assumptions~\ref{ass:framework} and which have, at the same time, further regularity properties.
\subsection*{Simultaneously dilating operators} Let us now define sets of \emph{simultaneously dilating} operators.
\begin{definition} \label{def:dilations}
Let $\mathcal{X}$ be a class of Banach spaces and let $X \in \mathcal{X}$.
\begin{enumerate}
\item[(a)] An operator $T \in \mathcal{L}(X)$ has a \emph{dilation} in $\mathcal{X}$ if there exist a space $Y \in \mathcal{X}$, linear contractions $J\colon X \to Y$, $Q\colon Y \to X$ and a linear invertible isometry $U \in \mathcal{L}(Y)$ such that for all $n \in \mathbb{N}_0$
\begin{align}
\label{eq:single-dilation}
T^n = QU^nJ.
\end{align}
\item[(b)] A set of operators $\mathcal{T} \subseteq \mathcal{L}(X)$ has a \emph{simultaneous dilation} in $\mathcal{X}$ if there exist a space $Y \in \mathcal{X}$, linear contractions $J\colon X \to Y$, $Q\colon Y \to X$ and invertible isometries $U_T \in \mathcal{L}(Y)$ (for $T \in \mathcal{T}$) such that the equality
\begin{align}
\label{eq:simultaneous-dilation}
T_1 \cdots T_n = Q \, U_{T_1} \cdots U_{T_n} \, J
\end{align}
holds for all $n \in \mathbb{N}_0$ and all $T_1,\dots, T_n \in \mathcal{T}$.
\end{enumerate} \end{definition}
For a proper reading of part~(b) of the above definition it is important to recall our convention that the empty product is the identity operator.
If $T$ has a dilation, then it follows from the equality $T^n = QU^nJ$ for $n=0$ that $QJ = \Id_X$. Hence, $J$ is automatically an isometry (indeed, $\norm{x} = \norm{QJx} \le \norm{Jx} \le \norm{x}$ for all $x \in X$) and $JQ$ is a contractive projection on $Y$ with range $J(X)$. Thus, $X$ may be considered as a subspace of $Y$ (via $J$) and $Q$ may be considered as a projection from $Y$ onto this subspace. Moreover, $T = QUJ$ implies that $T$ is contractive, i.e.\ only contractive operators can have a dilation in our sense. Similar observations hold for simultaneous dilations.
We point out that our notion of a dilation differs slightly from the definition which is, for instance, used by Akcoglu and Sucheston in~\cite{AkcSuc77}. Yet, it is easy to see that if an operator has a dilation in the sense of Definition~\ref{def:dilations}(a), then it also has a dilation in the sense of \cite{AkcSuc77}.
If $X$ is a Banach space taken from a given class $\mathcal{X}$, then the set of all invertible isometries always has a simultaneous dilation in $\mathcal{X}$.
\begin{example} \label{ex:ismoetries-have-simultaneous-dilation}
Let $\mathcal{X}$ be a class of Banach spaces, let $X \in \mathcal{X}$ and $\mathcal{T} \subseteq \mathcal{L}(X)$ be a set of linear invertible isometries. Then $\mathcal{T}$ has a simultaneous dilation in $\mathcal{X}$. Indeed, simply take $Y = X$, $J = Q = \Id$ and $U_T = T$ for all $T \in \mathcal{T}$. \end{example}
The whole point of our approach is to first find a (not too small) set of operators which admits a simultaneous dilation and then to construct, out of this set, a larger set that has a dilation by proving stability results. Example~\ref{ex:ismoetries-have-simultaneous-dilation} shows that we can always start with the set of invertible isometries; proving stability results is much more involved. Our main result, Theorem~\ref{thm:main-result}, is of this type.
\subsection*{The main result}
Theorem~\ref{thm:main-result} below is our main result; in conjunction with Example~\ref{ex:ismoetries-have-simultaneous-dilation} it yields simultaneous dilations for large sets of operators.
\begin{theorem} \label{thm:main-result}
Fix $p \in (1,\infty)$ and let $\mathcal{X}$ be a class of Banach spaces which fulfills Assumptions~\ref{ass:framework}. Let $X \in \mathcal{X}$ and let $\mathcal{T} \subseteq \mathcal{L}(X)$ be a set of operators that has a simultaneous dilation in $\mathcal{X}$. Then the weak operator closure of the convex hull $\conv(\mathcal{T})$ also has a simultaneous dilation in $\mathcal{X}$. \end{theorem}
As a consequence of Example~\ref{ex:ismoetries-have-simultaneous-dilation} and Theorem~\ref{thm:main-result} we immediately obtain the following corollary.
\begin{corollary} \label{cor:convex-combinations-of-isometries}
Fix $p \in (1,\infty)$ and let $\mathcal{X}$ be a class of Banach spaces which fulfils Assumptions~\ref{ass:framework}. Let $X \in \mathcal{X}$ and let $\mathcal{T} \subseteq \mathcal{L}(X)$ denote the weak operator closure of the convex hull of all linear invertible isometries on $X$. Then $\mathcal{T}$ has a simultaneous dilation in $\mathcal{X}$. In particular, every $T \in \mathcal{T}$ has a dilation in $\mathcal{X}$. \end{corollary}
The above corollary demonstrates how our approach uncouples dilation theory from any geometric considerations. The dilation result in Corollary~\ref{cor:convex-combinations-of-isometries} holds for all classes of Banach spaces that fulfill the rather mild Assumptions~\ref{ass:framework}, regardless of the particular choice of the spaces in $\mathcal{X}$. However, in order to apply Corollary~\ref{cor:convex-combinations-of-isometries} to concrete operators one has to determine the weak operator closure of the convex hull of all invertible isometries. This is a purely approximation-theoretic task, and it is here that the special geometry of the Banach space $X$ comes into play.
\begin{remarks} \label{rem:positive-morphisms}
(a) Sometimes, it is desirable to restrict not only the class of Banach spaces $\mathcal{X}$ out of which the space $Y$ in Definition~\ref{def:dilations} is taken, but also the choice of the operators $J$, $Q$ and $U_T$. A typical situation of this type is as follows:
Assume that all spaces in $\mathcal{X}$ are Banach lattices and that $\mathcal{T}$ consists of positive operators only. Then we would like to construct a \emph{positive} dilation for an operator $T \in \mathcal{T}$ or, in greater generality, a \emph{positive} simultaneous dilation of $\mathcal{T}$. By this we mean that the operators $J$ and $Q$ from Definition~\ref{def:dilations} should not only be contractive, but also positive, and that the operators $U_T$ (for $T \in \mathcal{T}$) should not only be invertible isometries, but also lattice isomorphisms.
Under the assumption that all spaces in $\mathcal{X}$ are Banach lattices and that all operators in $\mathcal{T}$ are positive, all our results yield \emph{positive} (simultaneous) dilations instead of only dilations -- provided that the assumptions are adjusted in the obvious way. For instance, in Theorem~\ref{thm:main-result} one has to assume that $\mathcal{T}$ has a simultaneous positive dilation in $\mathcal{X}$ and in Corollary~\ref{cor:convex-combinations-of-isometries} one has to assume that $\mathcal{T}$ is the weak operator closure of the convex hull of all positive invertible isometries.
(b) In view of remark~(a) notice that an invertible isometry $T \in \mathcal{L}(X)$ on a Banach lattice $X$ is automatically a lattice isomorphism if it is positive; this is a theorem of Abramovich, see for instance \cite{Abramovich1988} or \cite[Theorem~2.2.16]{Emelyanov2007}.
(c) One could also formalize the idea discussed in~(a) by using the language of category theory. One then considers a class of Banach spaces $\mathcal{X}$ which fulfills Assumptions~\ref{ass:framework} and a class $\mathcal{M}$ of morphisms that satisfies certain stability conditions. In Definition~\ref{def:dilations}, we would require the operators $T \in \mathcal{T}$, as well as $J$, $Q$ and $U_T$, to be in $\mathcal{M}$.
If we chose $\mathcal{M}$ to be the class of all linear contractions, we would obtain the dilations introduced in Definition~\ref{def:dilations}; if we chose $\mathcal{X}$ to be a class of Banach lattices and $\mathcal{M}$ to be the class of positive contractions, we would obtain the notion of a \emph{positive dilation} as discussed in~(a).
One could even go further and consider two classes of morphisms $\mathcal{M}_1$ and $\mathcal{M}_2$, where the operators $J$ and $Q$ are required to be contained in $\mathcal{M}_2$ while the operators in $\mathcal{T}$ and the operators $U_T$ are in $\mathcal{M}_1$. If we chose $\mathcal{X}$ to be a class of Banach lattices, $\mathcal{M}_2$ to be the class of all positive contractions and $\mathcal{M}_1$ to be the class of all regular operators with regular norm at most $1$, we would thus obtain a more general class of dilations. These are of relevance in a version of the Akcoglu--Sucheston theorem on $L^p$-spaces; see for
instance \cite[p.~58ff.]{CoiRocWei78} or \cite[Section~3]{Pel81}.
We shall however not pursue this category theoretical approach here since we wish to state all our results in a concrete and easily accessible way. The interested reader won't find it difficult to restate our results in the language of category theory. \end{remarks}
Next we note that one can replace the convex hull in Theorem~\ref{thm:main-result} by somewhat larger sets.
\begin{remarks} \label{rem:subconvex-and-absolutely-convex-hull}
(a) Let $V$ be a vector space over the field $\mathbb{R}$ or $\mathbb{C}$. The \emph{absolutely convex hull} of a subset $C \subseteq V$ is given by
\begin{align*}
\bigg\{\sum_{k=1}^n \lambda_k v_k: \; n \in \mathbb{N}, \; v_1,\dots,v_n \in C, \; \lambda_1,\dots,\lambda_n \in \mathbb{K} \text{ and } \sum_{k=1}^n \lvert \lambda_k \rvert \le 1\bigg\}.
\end{align*}
It coincides with the convex hull of the set $\{\lambda v: \; v \in C, \; \lambda \in \mathbb{K} \text{ and } \lvert \lambda \rvert = 1 \}$. On the other hand it is easy to see that, under the assumptions of Theorem~\ref{thm:main-result}, the set
\begin{align*}
\big\{ \lambda T: \; T \in \mathcal{T}, \; \lambda \in \mathbb{K} \text{ and } \lvert \lambda \rvert = 1 \big\}
\end{align*}
has a simultaneous dilation in $\mathcal{X}$; here $\mathbb{K}$ is the underlying scalar field of the spaces in $\mathcal{X}$. Hence, Theorem~\ref{thm:main-result} implies that the weak operator closure of the absolutely convex hull of $\mathcal{T}$ has a simultaneous dilation in $\mathcal{X}$.
(b) The arguments given in~(a) no longer apply if one is looking for positive dilations as discussed in Remark~\ref{rem:positive-morphisms}(a). Indeed, if a non-zero operator $T$ has a positive dilation in some Banach lattice, then its negative $-T$ cannot have a positive dilation. However, one can still combine Theorem~\ref{thm:main-result} with Proposition~\ref{prop:zero-operator} below. This way one obtains that the weak operator closure of the \emph{subconvex hull}
\begin{align*}
\bigg\{ \sum_{k=1}^n \lambda_k T_k: \; n \in \mathbb{N},\; T_1, \dots, T_n \in \mathcal{T}, \; \lambda_1,\dots,\lambda_n \in [0,1] \text{ and } \sum_{k=1}^n \lambda_k \le 1 \bigg\}
\end{align*}
admits a positive simultaneous dilation in $\mathcal{X}$ if $\mathcal{T}$ does so. This works since the proof of Proposition~\ref{prop:zero-operator} does not destroy the positivity structure of the dilation. In particular, one obtains the following version of Corollary~\ref{cor:convex-combinations-of-isometries}:
Let $\mathcal{X}$ be a class of Banach lattices that fulfills Assumptions~\ref{ass:framework}. Let $X \in \mathcal{X}$ and let $\mathcal{T} \subseteq \mathcal{L}(X)$ be the set of all positive, invertible isometries. Then the weak operator closure of the subconvex hull of $\mathcal{T}$ has a positive simultaneous dilation in $\mathcal{X}$. \end{remarks}
The rest of Section~\ref{sec:tool-kit}, as well as Sections~\ref{sec:elementary-properties}, \ref{sec:convex-combinations} and~\ref{sec:convex-combinations-simultaneous}, are devoted to the proof of Theorem~\ref{thm:main-result}. We start with two useful characterizations of simultaneous dilations in the remaining part of Section~\ref{sec:tool-kit}: a ``finitary characterization'' for simultaneous dilations which shows that it is actually sufficient to consider finite sets of operators and a result which shows that it suffices to establish the dilation equality only for powers/monomials of bounded degree.
In Section~\ref{sec:elementary-properties} we proceed with our preparations for the proof of Theorem~\ref{thm:main-result}. Note it suffices to show that simultaneous dilations behave well with respect to strong operator closures and with respect to taking convex combinations since the weak and the strong operator closure of a convex set coincide \cite[Corollary VI.1.5]{DunSch1958}. The stability result for strong closures is rather simple and will be given in Proposition~\ref{prop:stop-limits}. In Section~\ref{sec:elementary-properties} we also discuss a few further elementary properties of simultaneous dilations such as stability with respect to operator multiplication. The stability result for convex combinations is, however, much more involved. Therefore, we first prove in Section~\ref{sec:convex-combinations} that, if a set $\mathcal{T}$ of operators has a simultaneous dilation in a class $\mathcal{X}$, then every single convex combination of operators from $\mathcal{T}$ also has a dilation in $\mathcal{X}$. In Section~\ref{sec:convex-combinations-simultaneous} we then prove that the convex hull of $\mathcal{T}$ actually has a simultaneous dilation. All results of Section~\ref{sec:convex-combinations} can be seen as special cases of results from Section~\ref{sec:convex-combinations-simultaneous}, so we introduce a bit of redundancy by considering those two cases separately. However, given the rather technical computations in those sections, we think that the reader might benefit from this redundancy.
\subsection*{A finitary characterization of dilations}
The following proposition shows that, when dealing with simultaneous dilations, one can restrict to finite sets of operators.
\begin{proposition} \label{prop:finitary}
Fix $p \in (1,\infty)$ and let $\mathcal{X}$ be a class of Banach spaces that fulfills Assumptions~\ref{ass:framework}. Let $X \in \mathcal{X}$ and let $\mathcal{T} \subseteq \mathcal{L}(X)$. Then the following are equivalent:
\begin{enumerate}
\item[(i)] $\mathcal{T}$ has a simultaneous dilation in $\mathcal{X}$.
\item[(ii)] Every finite subset of $\mathcal{T}$ has a simultaneous dilation in $\mathcal{X}$.
\end{enumerate} \end{proposition}
To keep the proof of Proposition~\ref{prop:finitary} as transparent as possible we first show the following lemma which is, in a way, a more abstract version of the proposition.
\begin{lemma} \label{lem:extend-dilations-along-a-net}
Fix $p \in (1,\infty)$, let $\mathcal{X}$ be a class of Banach spaces that fulfills Assumptions~\ref{ass:framework} and let $X \in \mathcal{X}$. Consider a net $(\mathcal{T}_i)_{i \in I}$ of subsets of $\mathcal{L}(X)$ and assume that this net is monotone, i.e.\ $\mathcal{T}_j \supseteq \mathcal{T}_i$ whenever $j \ge i$. If each set $\mathcal{T}_i$ has a simultaneous dilation in $\mathcal{X}$, then $\bigcup_{i \in I} \mathcal{T}_i$ has a simultaneous dilation in $\mathcal{X}$. \end{lemma} \begin{proof}
Choose an ultrafilter $\mathcal{U}$ on $I$ that contains the filter base $\big\{ \{j \in I: \, j \ge i\} : \; i \in I \big\}$.
For each $i \in I$ we can find a Banach space $Y_i \in \mathcal{X}$, contractions $J_i\colon X \to Y_i$ and $Q_i\colon Y_i \to X$ and invertible isometries $U_{i,T} \in \mathcal{L}(Y_i)$ (for $T \in \mathcal{T}_i$) with
\begin{align*}
T_1 \cdots T_n = Q_i \, U_{i,T_1} \cdots U_{i,T_n} J_i
\end{align*}
for all $n \in \mathbb{N}_0$ and all $T_1,\dots,T_n \in \mathcal{T}_i$. Now, define $Y \coloneqq \prod_\mathcal{U} Y_i$ as well as $J \coloneqq \prod_\mathcal{U} J_i \colon X^\mathcal{U} \to Y$ and $Q \coloneqq \prod_\mathcal{U} Q_i \colon Y \to X^\mathcal{U}$. Moreover, we define $U_T \coloneqq \prod_\mathcal{U} \tilde{U}_{i,T} \in \mathcal{L}(Y)$ for each $T \in \bigcup_{i \in I} \mathcal{T}_i$, where $\tilde{U}_{i,T} = U_{i,T}$ if $T \in \mathcal{T}_i$ and $\tilde{U}_{i,T} = \Id_{Y_i}$ otherwise. Then the diagram
\begin{center}
\begin{tikzcd}
Y \arrow{rrr}{U_{T_1} \dots U_{T_n}} & & & Y \arrow{d}{Q} \\
X^\mathcal{U} \arrow{u}{J} \arrow{rrr}{(T_1 \cdots T_n)^\mathcal{U}} & & & X^\mathcal{U} \arrow{d} \\
X \arrow{u} \arrow{rrr}{T_1 \cdots T_n} & & & X
\end{tikzcd}
\end{center}
commutes for every $n \in \mathbb{N}_0$ and for all $T_1,\dots,T_n \in \cup_{i \in I} \mathcal{T}_i$; here, the mapping $X \to X^\mathcal{U}$ denotes the canonical injection and $X^\mathcal{U} \to X$ the mapping induced by the weak limit along $\mathcal{U}$ (which exists since $X$ is reflexive). This proves the assertion. \end{proof}
\begin{proof}[Proof of Proposition~\ref{prop:finitary}]
The implication ``(i) $\Rightarrow$ (ii)'' is obvious. So assume that~(ii) holds. If we denote the family of all finite subsets of $\mathcal{T}$ by $\mathfrak{F}$ and apply Lemma~\ref{lem:extend-dilations-along-a-net} to the net $(\mathcal{F})_{\mathcal{F} \in \mathfrak{F}}$, we obtain~(i). \end{proof}
\subsection*{\texorpdfstring{$N$}{N}-dilations}
For an operator $T \in \mathcal{L}(X)$, consider the dilation equality \begin{align*}
T^n = Q \, U^n J \end{align*} from Definition~\ref{def:dilations}(a). For a dilation, we need a Banach space $Y$ and operators $J$, $Q$ and $U$ for which the equality is satisfied for all $n \in \mathbb{N}_0$. However, we shall see in Sections~\ref{sec:convex-combinations} and~\ref{sec:convex-combinations-simultaneous} that it is much easier to achieve this for the first $N$ powers only. In this subsection we show that this is, in a sense, sufficient to obtain a dilation of $T$.
\begin{definition} \label{def:N-dilations}
Let $\mathcal{X}$ be a class of Banach spaces, let $X \in \mathcal{X}$ and $N \in \mathbb{N}$.
\begin{enumerate}
\item[(a)] An operator $T \in \mathcal{L}(X)$ has an \emph{$N$-dilation} in $\mathcal{X}$ if there exist a space $Y \in \mathcal{X}$, linear contractions $J\colon X \to Y$, $Q\colon Y \to X$ and a linear invertible isometry $U \in \mathcal{L}(Y)$ such that for all $n \in \{0,\dots,N\}$
\begin{align*}
T^n = QU^nJ.
\end{align*}
\item[(b)] A set of operators $\mathcal{T} \subseteq \mathcal{L}(X)$ has a \emph{simultaneous $N$-dilation} in $\mathcal{X}$ if there exist a space $Y \in \mathcal{X}$, linear contractions $J\colon X \to Y$, $Q\colon Y \to X$ and invertible isometries $U_T \in \mathcal{L}(Y)$ (for $T \in \mathcal{T}$) such that the equality
\begin{align*}
T_1 \dots T_n = Q \, U_{T_1} \dots U_{T_n} \, J
\end{align*}
holds for all $n \in \{0,\dots,N\}$ and all $T_1,\dots, T_n \in \mathcal{T}$.
\end{enumerate} \end{definition}
Note that, if $T$ or $\mathcal{T}$ has a (simultaneous) $N$-dilation in $\mathcal{X}$, then it also has a (simultaneous) $M$-dilation in $\mathcal{X}$ for every $M \le N$.
For single operators on Hilbert spaces, $N$-dilations have been studied in various contexts in the literature, for instance in \cite{Neunzert1963}, \cite{Thompson1982}, \cite[Section~3]{Nagy2013} and~\cite[Sections~1--3]{Levy2014}. Moreover, there is a concept that one might call a \emph{commutative simultaneous $N$-dilation} of a set of commuting operators; this has also been studied on finite dimensional Hilbert spaces, see e.g.~\cite{McCarthy2013} and \cite[Sections~4--5]{Levy2014}.
For our purposes, $N$-dilations are essential for the construction of dilations for convex combinations of given operators in Sections~\ref{sec:convex-combinations} and~\ref{sec:convex-combinations-simultaneous}. The following proposition shows that, in order to construct a (simultaneous) dilation, it is actually sufficient to construct a (simultaneous) $N$-dilation for each $N$.
\begin{proposition} \label{prop:N-dilations}
Fix $p \in (1,\infty)$ and let $\mathcal{X}$ be a class of Banach spaces which fulfils Assumptions~\ref{ass:framework}. Let $X \in \mathcal{X}$ and consider an operator $T \in \mathcal{L}(X)$ and a set of operators $\mathcal{T} \subseteq \mathcal{L}(X)$.
\begin{enumerate}
\item[(a)] If $T$ has an $N$-dilation in $\mathcal{X}$ for each $N \in \mathbb{N}$, then $T$ has a dilation in $\mathcal{X}$.
\item[(b)] If $\mathcal{T}$ has a simultaneous $N$-dilation in $\mathcal{X}$ for each $N \in \mathbb{N}$, then $\mathcal{T}$ has a simultaneous dilation in $\mathcal{X}$.
\end{enumerate} \end{proposition} \begin{proof}
The proof is not too different from the proof of Lemma~\ref{lem:extend-dilations-along-a-net}. First note that (a) follows from~(b) by setting $\mathcal{T} = \{T\}$, so it suffices to prove~(b). By assumption there exist, for every $N \in \mathbb{N}$, a space $Y_N \in \mathcal{X}$, linear contractions $J_N\colon X \to Y_N$ and $Q_N\colon Y_N \to X$ and linear invertible isometries $U_{N,T} \in \mathcal{L}(Y_N)$ (for $T \in \mathcal{T}$) such that for all $n \in \{0,\dots,N\}$ and all $T_1,\dots,T_n \in \mathcal{T}$ one has the dilation equality
\begin{align*}
T_1 \cdots T_n = Q_N \, U_{N,T_1} \cdots U_{N,T_n} J_N.
\end{align*}
Fix a free ultrafilter $\mathcal{U}$ on $\mathbb{N}$; we define $Y \coloneqq \prod_{\mathcal{U}} Y_N$, $J \coloneqq \prod_\mathcal{U} J_N \colon X^\mathcal{U} \to Y$ and $Q \coloneqq \prod_\mathcal{U} Q_N \colon Y \to X^\mathcal{U}$.
Also set $U_T \coloneqq \prod_\mathcal{U} U_{N,T}$ for every $T \in \mathcal{T}$. The operators $U_T$ are invertible isometries on $Y$ and according to Assumptions~\ref{ass:framework} we have $Y \in \mathcal{X}$. Moreover, the following diagram commutes for every $n \in \mathbb{N}_0$ and all $T_1,\dots,T_n \in \mathcal{T}$:
\begin{center}
\begin{tikzcd}
Y \arrow{rrr}{U_{T_1} \cdots U_{T_n}} & & & Y \arrow{d}{Q} \\
X^\mathcal{U} \arrow{u}{J} \arrow{rrr}{(T_1 \cdots T_n)^\mathcal{U}} & & & X^\mathcal{U} \arrow{d} \\
X \arrow{u} \arrow{rrr}{T_1 \cdots T_n} & & & X
\end{tikzcd}
\end{center}
Here, $X \to X^\mathcal{U}$ denotes the canonical embedding of $X$ into its ultrapower and $X^\mathcal{U} \to X$ denotes the operator induced by the weak limit along $\mathcal{U}$ (which exists since $X$ is reflexive). The diagram shows that $\mathcal{T}$ has a simultaneous dilation. \end{proof}
\section{Elementary properties of simultaneous dilations} \label{sec:elementary-properties}
\subsection*{Products of dilating operators} If two operators $T$ and $S$ on a Banach space $X$ have a dilation in a class of Banach spaces $\mathcal{X}$, then it is by no means clear whether the product $ST$ has a dilation in $\mathcal{X}$, too. If however $T$ and $S$ have a simultaneous dilation, then obviously so does $ST$. Let us note this -- in a slightly more general form -- in the following proposition.
\begin{proposition} \label{prop:product-of-dilating-operators}
Let $\mathcal{X}$ be a class of Banach spaces, let $X \in \mathcal{X}$ and assume that $\mathcal{T} \subseteq \mathcal{L}(X)$ has a simultaneous dilation in $\mathcal{X}$. Then the \emph{multiplicative semigroup generated by $\mathcal{T}$}, i.e.\ the set $\{T_1 \cdots T_n: \, n \in \mathbb{N}, \; T_1,\dots,T_n \in \mathcal{T}\}$, has a simultaneous dilation in $\mathcal{X}$, too (and in fact, to the same space as $\mathcal{T}$). \end{proposition}
\subsection*{Strong operator limits} The next proposition shows that simultaneous dilations are preserved by strong operator limits; this proves the topological part of Theorem~\ref{thm:main-result}.
\begin{proposition} \label{prop:stop-limits}
Fix $p \in (1,\infty)$ and let $\mathcal{X}$ be a class of Banach spaces which fulfills Assumptions~\ref{ass:framework}. Let $X \in \mathcal{X}$ and let $\mathcal{T} \subseteq \mathcal{L}(X)$ be a set of operators which has a simultaneous dilation in $\mathcal{X}$. Then the strong operator closure of $\mathcal{T}$ has a simultaneous dilation in $\mathcal{X}$, too. \end{proposition} \begin{proof}
There exist a space $Y \in \mathcal{X}$, linear contractions $J\colon X \to Y$ and $Q\colon Y \to X$ and invertible isometries $U_T \in \mathcal{L}(Y)$ (for $T \in \mathcal{T}$) such that the equality $\prod_{k=1}^n T_k = Q\, \prod_{k=1}^n U_{T_k} \; J$ holds for each $n \in \mathbb{N}$ and all $T_1,\dots,T_n \in \mathcal{T}$.
Let $\mathcal{S}$ denote the strong operator closure of $\mathcal{T}$ in $\mathcal{L}(X)$. We can find a directed set $I$ such that, for each $S \in \mathcal{S}$, there exists a net $(T_{i,S})_{i \in I}$ in $\mathcal{T}$ that converges strongly to $S$; for instance, we can take $I$ to be the neighborhood filter of $0$ in $\mathcal{L}(X)$ with respect to the strong operator topology (endowed with the canonical order). Choose an ultrafilter $\mathcal{U}$ on $I$ which contains the filter base
\begin{align*}
\big\{ \{j \in I: \, j \ge i\} : \; i \in I \big\}.
\end{align*}
Let $\tilde J \colon Y \to Y^\mathcal{U}$ be the canonical injection and let $\tilde Q \colon Y^\mathcal{U} \to Y$ be the operator induced by the weak limit along $\mathcal{U}$ (which exists since $Y$ is reflexive). For each $S \in \mathcal{S}$ we choose a net $(T_{i,S})_{i \in I}$ that converges strongly to $S$ and we define $\tilde U_S \coloneqq \prod_\mathcal{U} U_{T_{i,S}} \in \mathcal{L}(Y^\mathcal{U})$.
Now, let $n \in \mathbb{N}$ and $S_1,\dots,S_n \in \mathcal{S}$. We show that $S_1 \cdots S_n = Q \tilde Q\tilde U_{S_1} \cdots \tilde U_{S_n} \tilde J J$. Indeed, we have on the one hand
\begin{align*}
\prod_{k=1}^n S_k & = \prod_{k=1}^n \stoplim_{i \to \mathcal{U}} T_{i,S_k} = \stoplim_{i \to \mathcal{U}} \prod_{k=1}^n T_{i,S_k} = \stoplim_{i \to \mathcal{U}} Q \, \prod_{k=1}^n U_{T_{i,S_k}} \; J,
\end{align*}
where $\stoplim$ denotes the limit with respect to the strong operator topology; the first equality follows from the choice of the ultrafilter $\mathcal{U}$ and for the second equality we used that operator multiplication is jointly continuous with respect to the strong operator topology on bounded sets. On the other hand, we have for every $x \in X$
\begin{align*}
Q \tilde Q \, \prod_{k=1}^n \tilde U_{S_k} \; \tilde J Jx & = Q \tilde Q \big(\prod_{k=1}^n U_{T_{i,S_k}} \; Jx\big)_\mathcal{U} \\
& = Q \wlim_{i \to \mathcal{U}} \big( \prod_{k=1}^n U_{T_{i,S_k}} \; Jx \big) = \wlim_{i \to \mathcal{U}} \big( Q \, \prod_{k=1}^n U_{T_{i,S_k}} \; Jx \big),
\end{align*}
where $\wlim$ denotes the weak limit. This proves the assertion. \end{proof}
We point out that the above proof does not work for the weak operator closure of $\mathcal{T}$ since operator multiplication is in general not jointly continuous with respect to the weak operator topology (not even on bounded sets of operators). However, one can show the following result by a similar technique as in the above proof: let $(T_i)_{i \in I}$ be a net in $\mathcal{L}(X)$, let $T \in \mathcal{L}(X)$ and assume that, for each power $n \in \mathbb{N}$, the net $\big((T_i)^n\big)_{i \in I}$ converges to $T^n$ with respect to the weak operator topology. If each operator $T_i$ has a dilation in $\mathcal{X}$ (not necessarily a simultaneous dilation for all $T_i$), then $T$ has a dilation in $\mathcal{X}$, too (provided that $\mathcal{X}$ fulfills Assumptions~\ref{ass:framework}). See also~\cite{Pel81} and the discussion in Remark~\ref{rem:peller-power-approximation-vs-our-approach}.
\subsection*{The zero operator} We can add the zero operator to a given set of simultaneously dilating operators. In conjunction with Theorem~\ref{thm:convex-combinations-simultaneous} this shows that simultaneous dilations are stable with respect to \emph{subconvex combinations}, i.e.\ linear combinations with positive coefficients adding up to at most $1$; see Remark~\ref{rem:subconvex-and-absolutely-convex-hull}(b).
\begin{proposition} \label{prop:zero-operator}
Fix $p \in (1,\infty)$ and let $\mathcal{X}$ be a class of Banach spaces which fulfills Assumptions~\ref{ass:framework}. Let $X \in \mathcal{X}$ and assume that $\mathcal{T} \subseteq \mathcal{L}(X)$ has a simultaneous dilation in $\mathcal{X}$. Then $\mathcal{T} \cup \{0\}$ has a simultaneous dilation in $\mathcal{X}$. \end{proposition} \begin{proof}
By assumption we can find a space $Y \in \mathcal{X}$, linear contractions $J\colon X \to Y$ and $Q\colon Y \to X$ and invertible isometries $U_T \in \mathcal{L}(Y)$ (for $T \in \mathcal{T}$) such that
\begin{align*}
T_1 \cdots T_n = Q \, U_{T_1} \cdots U_{T_n} J
\end{align*}
for every $n \in \mathbb{N}_0$ and all $T_1,\dots, T_n \in \mathcal{T}$. Define $U_0 \coloneqq 0 \in \mathcal{L}(Y)$ (which is of course neither invertible, nor an isometry). It suffices to show that the set of operators $\mathcal{U} \coloneqq \big\{U_T: T \in \mathcal{T} \cup \{0\}\big\}$ has a simultaneous dilation in $\mathcal{X}$ because then the diagram
\begin{center}
\begin{tikzcd}
\tilde Y \arrow{rrr}{V_{U_{T_1}} \cdots V_{U_{T_n}}} & & & \tilde Y \arrow{d}{\tilde Q} \\
Y \arrow{rrr}{U_{T_1} \cdots U_{T_n}} \arrow{u}{\tilde J} & & & Y \arrow{d}{Q} \\
X \arrow{u}{J} \arrow{rrr}{T_1 \cdots T_n} & & & X
\end{tikzcd}
\end{center}
commutes for an appropriate choice of $\tilde Y$, $\tilde J$, $\tilde Q$ and $V_U \in \mathcal{L}(\tilde Y)$ (for $U \in \mathcal{U}$) and for all $n \in \mathbb{N}_0$ and $T_1, \dots, T_n \in \mathcal{T} \cup \{0\}$.
Fix an arbitrary $N \in \mathbb{N}$. In order to show that $\big\{U_T: T \in \mathcal{T} \cup \{0\}\big\}$ has a simultaneous dilation in $\mathcal{X}$ it suffices, according to Proposition~\ref{prop:N-dilations}, to construct a simultaneous $N$-dilation for $\mathcal{U}$ in $\mathcal{X}$. To this end, choose $\tilde Y \coloneqq \ell^p_{N+1}(Y) \in \mathcal{X}$ and set $\tilde Jx = (x,0,\dots,0) \in \tilde Y$ for all $x \in Y$; for all $(x_1,\dots,x_{N+1}) \in \tilde Y$ we define $\tilde Q(x_1,\dots,x_{N+1}) = x_1 $ and
\begin{align*}
V_U(x_1,\dots,x_{N+1}) =
\begin{cases}
(U x_1,\dots,U x_{N+1}) \quad & \text{if } U \not= 0, \\
(x_2,\dots,x_{N+1},x_1) \quad & \text{if } U = 0.
\end{cases}
\end{align*}
Then one easily checks that for all $n \in \{0,\dots, N\}$ and all $U_1,\dots, U_n \in \mathcal{U}$
\begin{align*}
U_1 \cdots U_n = \tilde Q \, V_{U_1} \cdots V_{U_n} \tilde J.
\end{align*}
Hence, $\mathcal{U}$ indeed has a simultaneous $N$-dilation in $\mathcal{X}$. \end{proof}
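The combinatorial identity underlying the above proof can be tested numerically. The following sketch is a hypothetical finite-dimensional instance with $Y = \mathbb{R}^d$, where the isometries $U_T$ are replaced by signed permutation matrices (isometries of $\ell^p_d$ for every $p$) and $\tilde Y = Y^{N+1}$ is stored as an $(N+1)\times d$ array.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(2)
d, N = 3, 5

def signed_permutation():
    return np.eye(d)[rng.permutation(d)] * rng.choice([-1.0, 1.0], size=d)

pool = [signed_permutation() for _ in range(4)] + [np.zeros((d, d))]  # U_0 := 0

Jt = lambda x: np.vstack([x, np.zeros((N, d))])   # \tilde J x = (x, 0, ..., 0)
Qt = lambda y: y[0]                               # \tilde Q = first component

def V(U, y):
    if U.any():                         # U != 0: apply U to every block
        return y @ U.T
    return np.roll(y, -1, axis=0)       # U = 0: cyclic shift of the blocks

x = rng.standard_normal(d)
for _ in range(2000):
    n = int(rng.integers(0, N + 1))     # word length n in {0, ..., N}
    word = [pool[i] for i in rng.integers(0, len(pool), size=n)]

    prod = np.eye(d)                    # the product U_1 U_2 ... U_n
    for U in word:
        prod = prod @ U

    y = Jt(x)
    for U in reversed(word):            # V_{U_n} acts first on \tilde J x
        y = V(U, y)
    assert np.allclose(Qt(y), prod @ x)
\end{verbatim}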
\begin{remark}
It is worthwhile pointing out that the reduction to $N$-dilations used in the proof of Proposition~\ref{prop:zero-operator} (by employing Proposition~\ref{prop:N-dilations}) is not as essential as it might seem at first glance. Indeed, if we add to Assumptions~\ref{ass:framework} the mild condition that, for each $X \in \mathcal{X}$, the vector-valued $\ell^p$-space $\ell^p(X) \coloneqq \ell^p(\mathbb{Z};X)$ is contained in $\mathcal{X}$, then we can define $\tilde Y \coloneqq \ell^p(\mathbb{Z};Y)$ and choose $V_0$ to be the left (or right) shift operator and immediately obtain a simultaneous dilation instead of only a simultaneous $N$-dilation of the set $\mathcal{U}$ (where $\tilde J$, $\tilde Q$ and $V_U$ for $U \not= 0$ should be defined similarly as in the above proof of Proposition~\ref{prop:zero-operator}). Nevertheless, $N$-dilations and Proposition~\ref{prop:N-dilations} will be an indispensable tool for us in Sections~\ref{sec:convex-combinations} and~\ref{sec:convex-combinations-simultaneous}.
\section{Dilation of convex combinations} \label{sec:convex-combinations}
The goal of this section is to prove the following theorem.
\begin{theorem} \label{thm:convex-combination}
Fix $p \in (1,\infty)$ and let $\mathcal{X}$ be a class of Banach spaces which fulfills Assumptions~\ref{ass:framework}. Let $X \in \mathcal{X}$ and assume that $\mathcal{T} \subseteq \mathcal{L}(X)$ has a simultaneous dilation in $\mathcal{X}$. If $T$ is in the convex hull of $\mathcal{T}$, then $T$ has a dilation in $\mathcal{X}$. \end{theorem}
In Section~\ref{sec:convex-combinations-simultaneous} we show the more general result that the convex hull of $\mathcal{T}$ has a simultaneous dilation in $\mathcal{X}$, which, in conjunction with Proposition~\ref{prop:stop-limits}, proves our main result Theorem~\ref{thm:main-result}. The next result, which is not immediate to prove with bare hands, is a direct consequence of Theorem~\ref{thm:convex-combination} and Proposition~\ref{prop:zero-operator}.
\begin{corollary} \label{cor:multiple-of-a-single-operator}
Fix $p \in (1,\infty)$ and let $\mathcal{X}$ be a class of Banach spaces which fulfills Assumptions~\ref{ass:framework}. Let $X \in \mathcal{X}$ and assume that $T \in \mathcal{L}(X)$ has a dilation in $\mathcal{X}$. Then, for every $\lambda \in [0,1]$, the operator $\lambda T$ has a dilation in $\mathcal{X}$. \end{corollary}
It is a consequence of Proposition~\ref{prop:zero-operator} and the more general Theorem~\ref{thm:convex-combinations-simultaneous} below that $\{\lambda T: \; \lambda \in [0,1]\}$ even has a simultaneous dilation in $\mathcal{X}$.
\subsection*{Main ideas}
Before we give the proof of Theorem~\ref{thm:convex-combination}, we explain some of the main ideas. According to Proposition~\ref{prop:N-dilations} it suffices to show that $T$ has an $N$-dilation in $\mathcal{X}$ for every $N \in \mathbb{N}$. To give the reader an idea of how this can be accomplished, let us first consider the following special case of Theorem~\ref{thm:convex-combination}: let $T_1,T_2 \in \mathcal{L}(X)$ be two invertible isometries and let $\lambda_1,\lambda_2 \in [0,1]$ with $\lambda_1 + \lambda_2 = 1$. We want to show that $T \coloneqq \lambda_1T_1 + \lambda_2T_2$ has an $N$-dilation in $\mathcal{X}$ for $N \in \mathbb{N}$.
For $N = 1$ this can be accomplished as follows. Set $Y = \ell_2^p(X) = X^2$ and define \begin{align}
\label{eq:1-dilation-of-convex-combination}
U \coloneqq
\begin{pmatrix}
T_1 & 0 \\
0 & T_2
\end{pmatrix} \in \mathcal{L}(Y). \end{align} This is obviously an invertible isometry on $Y$ since $T_1$ and $T_2$ were assumed to be invertible isometries on $X$. Moreover, we define $J\colon X \to Y$ and $Q\colon Y \to X$ by \begin{align*}
Jx =
\begin{pmatrix}
\lambda_1^{1/p}x \\ \lambda_2^{1/p}x
\end{pmatrix},
\qquad
Q
\begin{pmatrix}
x_1 \\ x_2
\end{pmatrix}
=
\lambda_1^{1/q}x_1 + \lambda_2^{1/q}x_2 \end{align*} for all $x,x_1,x_2 \in X$, where $q$ is the H\"older conjugate of $p$, i.e.\ $1/p + 1/q = 1$. Clearly, $J$ is an isometry and $Q$ is a contraction by H\"older's inequality. Moreover, for $x \in X$ \begin{align*}
Q \, U^0 Jx = QJx = \lambda_1^{1/q}\lambda_1^{1/p}x + \lambda_2^{1/q}\lambda_2^{1/p}x = x = T^0 x \end{align*} and \begin{align*}
Q \, U^1 J x =
Q
\begin{pmatrix}
\lambda_1^{1/p} T_1 x \\
\lambda_2^{1/p} T_2 x
\end{pmatrix}
= \lambda_1 T_1x + \lambda_2 T_2x = Tx. \end{align*} Hence, we have constructed a $1$-dilation of $T$ in $\mathcal{X}$. In order to construct a $2$-dilation, one can proceed as follows. Let $Y = \ell^p_4(X) = X^4$ and define $U \in \mathcal{L}(Y)$ by \begin{align}
\label{eq:2-dilation-of-convex-combination}
U =
\begin{pmatrix}
T_1 & & & \\
& T_2 & & \\
& & & T_1 \\
& & T_2 &
\end{pmatrix}, \end{align} where the empty entries are understood to be the zero operator. Moreover, we define $J\colon X \to Y$ and $Q\colon Y \to X$ by \begin{align*}
Jx = ((\lambda_1 \lambda_1)^{1/p}x,
(\lambda_2 \lambda_2)^{1/p}x,
(\lambda_1 \lambda_2)^{1/p}x,
(\lambda_2 \lambda_1)^{1/p}x)^T \end{align*} for all $x \in X$ and \begin{align*}
Q(x_1, \ldots, x_4)^T = (\lambda_1 \lambda_1)^{1/q}x_1 + (\lambda_2 \lambda_2)^{1/q}x_2 + (\lambda_1 \lambda_2)^{1/q}x_3 + (\lambda_2 \lambda_1)^{1/q}x_4 \end{align*} for all $x_1,\dots,x_4 \in X$. Again, $J$ and $Q$ are contractions and $U$ is an invertible isometry. Moreover, it is easy to check that $Q\, U^n J = T^n$ for all $n \in \{0,1,2\}$. We have thus constructed a $2$-dilation of $T$ in $\mathcal{X}$.
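For the reader's convenience, here is the case $n = 2$ of this verification spelled out (a routine computation, included only for illustration):
\begin{align*}
U^2 (x_1,x_2,x_3,x_4)^T & = (T_1^2 x_1,\, T_2^2 x_2,\, T_1T_2 x_3,\, T_2T_1 x_4)^T, \\
Q \, U^2 J x & = \lambda_1^2\, T_1^2 x + \lambda_2^2\, T_2^2 x + \lambda_1\lambda_2\, T_1T_2 x + \lambda_2\lambda_1\, T_2T_1 x = (\lambda_1 T_1 + \lambda_2 T_2)^2 x = T^2 x.
\end{align*}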
At first glance, this is where the story ends, since there is no obvious way to generalize the above constructions in order to obtain a $3$-dilation of $T$. Indeed, formulas~\eqref{eq:1-dilation-of-convex-combination} and~\eqref{eq:2-dilation-of-convex-combination} suggest that, in order to construct an $N$-dilation of $T$, we should choose $Y = X^{2^N}$ and define $U$ as some kind of permutation matrix whose entries are the operators $T_1$ and $T_2$ instead of ones. Yet, it does not seem to be clear what permutation matrix we should choose and which entries shall be chosen to be $T_1$ and which to be $T_2$. This suggests that we should make some changes to the above construction in order to obtain a structure which also works for $N$-dilations.
To this end, we proceed as follows. Fix $N \in \mathbb{N}$ and consider the $N$-cycle $\sigma \coloneqq (1 \dots N)$, i.e.\ the permutation on the set $\{1,\dots,N\}$ that maps $1$ to $2$, $2$ to $3$, \dots, and $N$ to $1$. We intend to take the $N \times N$-permutation matrix induced by $\sigma$ and to replace the ones in the matrix with the operators $T_1$ and $T_2$. Since there are only two operators $T_1$ and $T_2$ but $N$ entries that we have to replace, there is no canonical choice of which entry should become which operator. We thus follow the folklore rule of thumb that, if there is no canonical choice to make, then it is best to make all possible choices simultaneously: let $\mathcal{A}$ denote the set of all mappings from $\{1,\dots,N\}$ to $\{1,2\}$. For each $\alpha \in \mathcal{A}$ we consider the sequence of $N$ operators $T_{\alpha(1)}, \dots, T_{\alpha(N)}$ and replace the ones in the permutation matrix induced by $\sigma$ with those operators; this yields an invertible isometry $U_\alpha \in \mathcal{L}(X^{N})$. Finally, we define $U \coloneqq \bigoplus_{\alpha \in \mathcal{A}} U_\alpha \in \mathcal{L}(Y)$ where $Y \coloneqq X^{N \lvert \mathcal{A} \rvert} = X^{N 2^N}$.
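To make this concrete, consider the case $N = 2$ (an illustration only; the general construction, including the precise choice of $J$ and $Q$, is carried out in the next subsection). Writing $\alpha \in \mathcal{A}$ as the pair $(\alpha(1),\alpha(2))$, so that $U_\alpha(x_1,x_2) = (T_{\alpha(1)}x_2,\, T_{\alpha(2)}x_1)$, the four blocks are
\begin{align*}
U_{(1,1)} =
\begin{pmatrix}
 & T_1 \\
T_1 &
\end{pmatrix},
\quad
U_{(1,2)} =
\begin{pmatrix}
 & T_1 \\
T_2 &
\end{pmatrix},
\quad
U_{(2,1)} =
\begin{pmatrix}
 & T_2 \\
T_1 &
\end{pmatrix},
\quad
U_{(2,2)} =
\begin{pmatrix}
 & T_2 \\
T_2 &
\end{pmatrix},
\end{align*}
where empty entries denote the zero operator, and $U$ is the direct sum of these four blocks, acting on $Y = X^{2 \cdot 2^2} = X^8$.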
It turns out that, with appropriate choices of $J$ and $Q$, this really yields an $N$-dilation of $T$, though a few cumbersome computations are needed to verify this. In the next subsection we make the above construction precise and give all necessary details. Note that there we have to consider convex combinations of finitely many operators instead of only two.
We should point out once again that, for $N = 2$, the construction that we have just described is more complex than the construction of $U$ in~\eqref{eq:2-dilation-of-convex-combination}: we now obtain a $2$-dilation on the space $X^{2 \cdot 2^2} = X^8$ instead of $X^4$.
\subsection*{The proof of Theorem~\ref{thm:convex-combination}}
Having explained the main ideas behind the proof of Theorem~\ref{thm:convex-combination}, we now give the proof in detail.
\begin{proof}[Proof of Theorem~\ref{thm:convex-combination}] Since $\mathcal{T}$ admits a simultaneous dilation in $\mathcal{X}$ we may, and will, assume that $\mathcal{T}$ consists of invertible isometries. There exist $m \in \mathbb{N}$, operators $T_1, \dots, T_m \in \mathcal{T}$ and numbers $\lambda_1,\dots,\lambda_m \in [0,1]$ such that $\sum_{k=1}^m \lambda_k = 1$ and $\sum_{k=1}^m \lambda_k T_k = T$.
Fix $N \in \mathbb{N}$. According to Proposition~\ref{prop:N-dilations}(a) it suffices to show that $T$ possesses an $N$-dilation in $\mathcal{X}$. To this end, let $\mathcal{A}$ denote the set of all mappings from $\{1,\dots,N\}$ to $\{1,\dots,m\}$. It is convenient to abbreviate $\lambda \coloneqq (\lambda_1, \dots, \lambda_m)$ and to define $\lvert \lambda \rvert_\alpha \coloneqq \prod_{k=1}^N \lambda_{\alpha(k)}$ for each $\alpha \in \mathcal{A}$. Note that \begin{align}
\label{eq:sum-of-all-lambda-alpha}
\sum_{\alpha \in \mathcal{A}} \lvert \lambda \rvert_\alpha = \big(\sum_{k=1}^m \lambda_k \big)^N = 1. \end{align} Now, we define the space $Y$ and the mappings $J\colon X \to Y$ and $Q\colon Y \to X$. We set \begin{align*}
Y \coloneqq \ell^p_{N m^N}(X) = \ell^p_{N \lvert \mathcal{A} \rvert}(X) = (X^N)^{\mathcal{A}}, \end{align*} where the latter space is endowed with the $p$-norm. For each $\alpha \in \mathcal{A}$ we set \begin{align*}
J_\alpha\colon X \to X^N, \qquad J_\alpha x = \big(\frac{\lvert \lambda\rvert_\alpha}{N}\big)^{1/p} \big( x, \dots, x\big) \end{align*} and we define $J \colon X \to Y$ by $Jx = (J_\alpha x)_{\alpha \in \mathcal{A}}$ for all $x \in X$. It readily follows from formula~\eqref{eq:sum-of-all-lambda-alpha} that $J$ is isometric. Moreover, for each $y = (x_{k,\alpha})_{k \in \{1,\dots,N\}, \, \alpha \in \mathcal{A}} \in Y = (X^N)^{\mathcal{A}}$ we define \begin{align*}
Qy \coloneqq \sum_{\alpha \in \mathcal{A}} \big(\frac{\lvert \lambda \rvert_\alpha}{N}\big)^{1/q} \sum_{k=1}^N x_{k,\alpha}, \end{align*} where $q \in (1,\infty)$ denotes the conjugate index of $p$, meaning that $1/p + 1/q = 1$. It follows from H\"older's inequality that $Q$ is contractive. Indeed, if $y$ is as above, then \begin{align*}
\lVert Qy\rVert & \le \sum_{\alpha \in \mathcal{A}} \sum_{k=1}^N \big( \frac{|\lambda|_\alpha}{N} \big)^{1/q} \lVert x_{k,\alpha}\rVert \le \\
& \le \big( \sum_{\alpha \in \mathcal{A}} \sum_{k=1}^N \frac{|\lambda|_\alpha}{N} \, \big)^{1/q} \cdot \big( \sum_{\alpha \in \mathcal{A}} \sum_{k=1}^N \lVert x_{k,\alpha}\rVert^p \, \big)^{1/p} = \lVert y \rVert. \end{align*} Finally, we have to construct an invertible isometry $U \in \mathcal{L}(Y)$ such that $Q \, U^n J = T^n$ for all $n \in \{0,\dots,N\}$. For each $\alpha \in \mathcal{A}$ we define $U_\alpha\colon X^N \to X^N$ by \begin{align*}
U_\alpha (x_k)_{k \in \{1,\dots,N\}} = \big( T_{\alpha(k)}x_{\sigma(k)} \big)_{k \in \{1,\dots,N\}} \end{align*} for every $(x_k)_{k \in \{1,\dots,N\}} \in X^N$; as noted above, $\sigma\colon \{1,\dots,N\} \to \{1,\dots,N\}$ denotes the $N$-cycle $(1 \dots N)$. We point out that $U_\alpha$ can be written in matrix form as \begin{align*}
U_\alpha =
\begin{pmatrix}
& T_{\alpha(1)} & & & \\
& & T_{\alpha(2)} & & \\
& & & \ddots & \\
& & & & T_{\alpha(N-1)} \\
T_{\alpha(N)} & & & &
\end{pmatrix}. \end{align*} We set $U \coloneqq \bigoplus_{\alpha \in \mathcal{A}} U_\alpha\colon Y \to Y$. Clearly, $U$ is an invertible isometry since every operator $T_1,\dots,T_m$ is assumed to be an invertible isometry. The only remaining point is to prove that $U$ fulfills the equality $Q \, U^n J = T^n$ for all $n \in \{0,\dots,N\}$.
First observe that we can explicitly compute the powers $(U_\alpha)^n$ of $U_\alpha$ for $n \in \{0,\dots,N\}$. Indeed, for each such $n$, every $\alpha \in \mathcal{A}$ and every $(x_k)_{k \in \{1,\dots,N\}} \in X^N$ we have \begin{equation}
\label{eq:formula_power_single_convex}
\begin{split}
(U_\alpha)^n (x_k)_{k \in \{1,\dots,N\}} & = \big( \prod_{j=1}^n T_{\alpha(\sigma^{j-1}(k))} \; x_{\sigma^n(k)} \big)_{k \in \{1,\dots,N\}} \\
& = \big( \prod_{j=1}^n T_{\alpha(\sigma^{k-1}(j))} \; x_{\sigma^n(k)} \big)_{k \in \{1,\dots,N\}}.
\end{split} \end{equation} The first equality can be seen by induction over $n$ (and holds for all $n \in \mathbb{N}_0$). The second equality follows from the fact that $\sigma^{j-1}(k) = \sigma^{k-1}(j)$ for all $j,k \in \{1,\dots,N\}$ (both sides equal the unique element of $\{1,\dots,N\}$ which is congruent to $j+k-1$ modulo $N$). Using~\eqref{eq:formula_power_single_convex}, we obtain for $n \in \{0,\dots,N\}$ and $x \in X$ \begin{align*}
Q \, U^n Jx & = \sum_{\alpha \in \mathcal{A}} \big(\frac{\lvert \lambda \rvert_\alpha}{N}\big)^{1/q} \sum_{k=1}^N \big(\frac{\lvert \lambda\rvert_\alpha}{N}\big)^{1/p} \big((U_\alpha)^n(x, \dots, x)\big)_k \\
& = \sum_{\alpha \in \mathcal{A}} \frac{\lvert \lambda\rvert_\alpha}{N} \sum_{k=1}^N \prod_{j=1}^n T_{\alpha(\sigma^{k-1}(j))} \; x. \end{align*} On the other hand, a short computation shows that \begin{align*}
T^n = \sum_{\alpha \in \mathcal{A}} \lvert \lambda \rvert_\alpha \prod_{k=1}^n T_{\alpha(k)}, \end{align*} so it only remains to prove the equality \begin{align}
\label{eq:critical-equation-for-dilation-equality}
\sum_{\alpha \in \mathcal{A}} \frac{\lvert \lambda\rvert_\alpha}{N} \sum_{k=1}^N \prod_{j=1}^n T_{\alpha(\sigma^{k-1}(j))} = \sum_{\alpha \in \mathcal{A}} \lvert \lambda\rvert_\alpha \prod_{j=1}^n T_{\alpha(j)}. \end{align} To show~\eqref{eq:critical-equation-for-dilation-equality} we need a bit of group theory. Let $S_N$ denote the symmetric group over $N$ elements, i.e.\ the set of all bijections on $\{1,\dots,N\}$, and let $C_N$ denote the cyclic subgroup of $S_N$ generated by the $N$-cycle $\sigma$. The group $C_N$ operates on the set $\mathcal{A}$ via the group action \begin{align*}
C_N \times \mathcal{A} & \to \mathcal{A}, \\
(\tau, \alpha) & \mapsto \alpha \circ \tau. \end{align*}
We call two elements $\alpha,\beta \in \mathcal{A}$ \emph{equivalent} and denote this by $\alpha \sim \beta$ if they lie in the same orbit under this group action, i.e.\ if there exists a permutation $\tau \in C_N$ such that $\alpha = \beta \circ \tau$. Clearly, $\sim$ is an equivalence relation on $\mathcal{A}$. Note that we have $|\lambda|_\alpha = |\lambda|_\beta$ whenever $\alpha \sim \beta$ (but the converse is of course false).
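For instance (a small illustrative case): for $N = 3$ and $m = 2$, writing $\alpha$ as the tuple $(\alpha(1),\alpha(2),\alpha(3))$, the mapping $(1,1,2)$ has the equivalence class $\{(1,1,2),(1,2,1),(2,1,1)\}$ of cardinality $3$, while the constant mapping $(1,1,1)$ forms an equivalence class of cardinality $1$; in both cases $\lvert \lambda \rvert_\alpha$ is constant on the class ($\lambda_1^2\lambda_2$ and $\lambda_1^3$, respectively) and the cardinality of the class divides $N$, in accordance with Proposition~\ref{prop:group-operation}.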
Consider a fixed equivalence class $A \subseteq \mathcal{A}$ of $\sim$. Since, for $\alpha \in A$, the number $\lvert \lambda\rvert_\alpha$ does not depend on the choice of $\alpha$, it suffices to prove \begin{align}
\label{eq:critical-equation-for-dilation-equality-2}
\sum_{\alpha \in A} \sum_{k=1}^N \prod_{j=1}^n T_{\alpha(\sigma^{k-1}(j))} = N \sum_{\alpha \in A} \prod_{j=1}^n T_{\alpha(j)} \end{align} in order to show~\eqref{eq:critical-equation-for-dilation-equality}. Fix an $\alpha_0 \in A$. The equivalence class $A$ is given by $A = \{\alpha_0 \circ \tau: \; \tau \in C_N\}$ and thus, we can replace the summation on both sides of~\eqref{eq:critical-equation-for-dilation-equality-2} with a summation over $C_N$. Yet, we have to be a bit careful here since the surjective mapping $C_N \ni \tau \to \alpha_0 \circ \tau \in A$ might not be injective. To account for this, we use Proposition~\ref{prop:group-operation} from the appendix which tells us that, for each $\alpha \in A$, there exist exactly $N/\lvert A\rvert$ elements $\tau \in C_N$ with $\alpha_0 \circ \tau = \alpha$. Thus, the left hand side of~\eqref{eq:critical-equation-for-dilation-equality-2} becomes \begin{align*}
\frac{\lvert A\rvert}{N} \sum_{\tau \in C_N} \sum_{k=1}^N \prod_{j=1}^n T_{(\alpha_0 \circ \tau \circ \sigma^{k-1})(j)} = \frac{\lvert A\rvert}{N} \sum_{\tau,\rho \in C_N} \prod_{j=1}^n T_{(\alpha_0 \circ \tau \circ \rho)(j)} \end{align*} and the right hand side of~\eqref{eq:critical-equation-for-dilation-equality-2} becomes \begin{align*}
\frac{\lvert A\rvert}{N} \; N \sum_{\tau \in C_N} \prod_{j=1}^n T_{(\alpha_0 \circ \tau)(j)}. \end{align*} The mapping $C_N \times C_N \ni (\tau, \rho) \mapsto \tau \circ \rho \in C_N$ hits each element in $C_N$ exactly $N$ times (see Proposition~\ref{prop:count-group-elements} in the appendix), so both the left and the right side of~\eqref{eq:critical-equation-for-dilation-equality-2} coincide. This proves~\eqref{eq:critical-equation-for-dilation-equality-2}, hence~\eqref{eq:critical-equation-for-dilation-equality} and thus the theorem. \end{proof}
\section{Simultaneous dilation of convex combinations} \label{sec:convex-combinations-simultaneous}
The following theorem generalizes Theorem~\ref{thm:convex-combination} to simultaneous dilations.
\begin{theorem} \label{thm:convex-combinations-simultaneous}
Fix $p \in (1,\infty)$ and let $\mathcal{X}$ be a class of Banach spaces that fulfills Assumptions~\ref{ass:framework}. Let $X \in \mathcal{X}$ and assume that $\mathcal{T} \subseteq \mathcal{L}(X)$ has a simultaneous dilation in $\mathcal{X}$. Then the convex hull $\operatorname{conv} \mathcal{T}$ of $\mathcal{T}$ has a simultaneous dilation in $\mathcal{X}$. \end{theorem}
Our main result, Theorem~\ref{thm:main-result}, is an immediate consequence of Theorem~\ref{thm:convex-combinations-simultaneous}, Proposition~\ref{prop:stop-limits} and of the fact that, for convex sets of operators, the strong and the weak operator closure coincide \cite[Corollary VI.1.5]{DunSch1958}.
The proof of Theorem~\ref{thm:convex-combinations-simultaneous} is similar to the proof of Theorem~\ref{thm:convex-combination}, but technically more involved. The major obstacle is that, in the proof of Theorem~\ref{thm:convex-combination}, the maps $J$ and $Q$ depend on the convex coefficients $\lambda_1,\dots,\lambda_m$. If we want to dilate several operators $T^{(1)},\dots, T^{(r)} \in \conv \mathcal{T}$ instead of only one operator $T$, we obtain different sets of convex coefficients and thus -- if we want to use the same technique as for Theorem~\ref{thm:convex-combination} -- different maps $J$ and $Q$ for each operator $T^{(1)}, \dots, T^{(r)}$. This contradicts the definition of a simultaneous dilation. Fortunately, there is a trick to circumvent this problem; this trick is explained in the first part of the proof of Theorem~\ref{thm:convex-combinations-simultaneous}. The rest of the proof is quite similar to the proof of Theorem~\ref{thm:convex-combination}.
\begin{proof}[Proof of Theorem~\ref{thm:convex-combinations-simultaneous}]
Let $\mathcal{S}$ denote the set of all convex combinations of $\mathcal{T}$ with rational convex coefficients.
Then $\conv \mathcal{T}$ is contained in the strong operator closure of $\mathcal{S}$, so it suffices by Proposition~\ref{prop:stop-limits} to show that $\mathcal{S}$ has a simultaneous dilation in $\mathcal{X}$.
To this end, it suffices in turn to prove that every finite subset $\mathcal{F}$ of $\mathcal{S}$ has a simultaneous dilation in $\mathcal{X}$, see Proposition~\ref{prop:finitary}.
So let $\mathcal{F}$ be a finite subset of $\mathcal{S}$. Since every operator in $\mathcal{F}$ can be written as a convex combination of finitely many operators in $\mathcal{T}$ with rational convex coefficients we can find a number $m \in \mathbb{N}$ and, for each $F \in \mathcal{F}$, operators $T_{1,F},\dots, T_{m,F} \in \mathcal{T}$ such that
\begin{align}
\label{eq:multiple-convex-combinations}
F = \sum_{k=1}^m \frac{1}{m} T_{k,F}.
\end{align}
Since the operators $T_{k,F}$ (for $k \in \{1,\dots, m\}$ and $F \in \mathcal{F}$) have a simultaneous dilation in $\mathcal{X}$ we may, and will, assume from now on that each operator $T_{k,F}$ is an invertible isometry. The point of the above manipulations is that we have represented the operators $F \in \mathcal{F}$, which we wish to dilate, with the same convex coefficients for each $F$.
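To illustrate this reduction (a toy case only, with hypothetical operators $T_a, T_b \in \mathcal{T}$): if $F = \tfrac13 T_a + \tfrac23 T_b$, one may take $m = 3$ and $T_{1,F} = T_a$, $T_{2,F} = T_{3,F} = T_b$, so that $F = \sum_{k=1}^3 \tfrac1m T_{k,F}$ is of the form~\eqref{eq:multiple-convex-combinations}; for several operators in $\mathcal{F}$ one passes to a common denominator $m$ of all the rational coefficients involved.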
Fix $N \in \mathbb{N}$. We show that $\mathcal{F}$ has a simultaneous dilation in $\mathcal{X}$, and to this end it suffices due to Proposition~\ref{prop:N-dilations} to prove that $\mathcal{F}$ has a simultaneous $N$-dilation in $\mathcal{X}$. We first construct the space $Y$ and the mappings $J \colon X \to Y$ and $Q \colon Y \to X$ used in Definition~\ref{def:N-dilations}(b). As in the proof of Theorem~\ref{thm:convex-combination} we denote the set of all mappings from $\{1,\dots,N\}$ to $\{1,\dots,m\}$ by $\mathcal{A}$ and we let
\begin{align*}
Y \coloneqq \ell^p_{N m^N}(X) = \ell^p_{N \lvert \mathcal{A} \rvert}(X) = X^{N m^N} = (X^N)^{\mathcal{A}}.
\end{align*}
Analogously to the proof of Theorem~\ref{thm:convex-combination}, we define
\begin{align*}
J\colon X \to Y, \qquad Jx = \big(\frac{1}{N m^N} \big)^{1/p} \big( x, \dots, x\big)
\end{align*}
and
\begin{align*}
Q\colon Y \to X, \qquad Q (x_{k,\alpha})_{k \in \{1,\dots,N\}, \, \alpha \in \mathcal{A}} = \big( \frac{1}{N m^N} \big)^{1/q} \sum_{\alpha \in \mathcal{A}} \sum_{k=1}^N x_{k,\alpha},
\end{align*}
where $q$ is the H\"older conjugate of $p$. Then $J$ is isometric and $Q$ is contractive.
Finally, we construct $(U_{F})_{F \in \mathcal{F}}$ in a similar way as we defined the operator $U$ in the proof of Theorem~\ref{thm:convex-combination}. For $F \in \mathcal{F}$ and $\alpha \in \mathcal{A}$ we define $U_{\alpha, F} \in \mathcal{L}(X^N)$ by
\begin{align*}
U_{\alpha,F} (x_k)_{k \in \{1,\dots,N\}} = \big( T_{\alpha(k),F}x_{\sigma(k)} \big)_{k \in \{1,\dots,N\}}
\end{align*}
for all $(x_k)_{k \in \{1,\dots,N\}}$ and we set $U_F \coloneqq \bigoplus_{\alpha \in \mathcal{A}} U_{\alpha,F} \in \mathcal{L}(Y)$ for each $F \in \mathcal{F}$. Clearly, each operator $U_F$ is an invertible isometry on $Y$ and we only have to verify
\begin{align*}
Q \, U_{F_1} \cdots U_{F_n} J = F_1 \cdots F_n
\end{align*}
for each $n \in \{0,\dots,N\}$ and all $F_1, \dots, F_n \in \mathcal{F}$. So fix $n \in \{0,\dots,N\}$ and $F_1,\dots,F_n \in \mathcal{F}$.
We note that, similarly as in the proof of Theorem~\ref{thm:convex-combination}, the formula
\begin{align*}
\begin{split}
\prod_{j=1}^n U_{\alpha,F_j} \; (x_k)_{k \in \{1,\dots,N\}} & = \big( \prod_{j=1}^n T_{\alpha(\sigma^{j-1}(k)), F_j} \; x_{\sigma^n(k)} \big)_{k \in \{1,\dots,N\}} \\
& = \big( \prod_{j=1}^n T_{\alpha(\sigma^{k-1}(j)), F_j} \; x_{\sigma^n(k)} \big)_{k \in \{1,\dots,N\}}
\end{split}
\end{align*}
holds for all $\alpha \in \mathcal{A}$ and all $(x_k)_{k \in \{1,\dots,N\}} \in X^N$. Using this, we obtain for each $x \in X$
\begin{align*}
Q \, U_{F_1} \cdots U_{F_n} J x = \frac{1}{N m^N} \sum_{\alpha \in \mathcal{A}} \sum_{k=1}^N \prod_{j=1}^n T_{\alpha(\sigma^{k-1}(j)), F_j} \; x.
\end{align*}
On the other hand, using~\eqref{eq:multiple-convex-combinations} and the fact that $n \le N$, one easily checks that
\begin{align*}
F_1 \cdots F_n x = \frac{1}{m^N} \sum_{\alpha \in \mathcal{A}} \prod_{j=1}^n T_{\alpha(j), F_j} \; x
\end{align*}
for each $x \in X$. Hence, we merely have to prove that the right hand sides of the previous two equations coincide. We denote by $\sim$ the same equivalence relation on $\mathcal{A}$ as in the proof of Theorem~\ref{thm:convex-combination} and we fix an equivalence class $A \subseteq \mathcal{A}$. To conclude the proof, it suffices to show that
\begin{align*}
\sum_{\alpha \in A} \sum_{k=1}^N \prod_{j=1}^n T_{\alpha(\sigma^{k-1}(j)), F_j} = N \sum_{\alpha \in A} \prod_{j=1}^n T_{\alpha(j), F_j}.
\end{align*}
This follows by exactly the same arguments as in the proof of Theorem~\ref{thm:convex-combination}. \end{proof}
\section{Application to Banach spaces with super properties} \label{sec:super-properties}
In this section we apply our approach to classes of Banach spaces which fulfill certain regularity properties. Let us begin with the class of super-reflexive Banach spaces. This class is not stable with respect to ultra-products and thus, it does not fulfill Assumptions~\ref{ass:framework}. Nevertheless, we can apply our theory by employing Proposition~\ref{prop:X_Z_satisfies_assumptions}.
\begin{theorem} \label{thm:super-reflexive-spaces}
Let $Z$ be a super-reflexive Banach space and let $\mathcal{T}$ denote the weakly closed convex hull of all invertible isometries in $\mathcal{L}(Z)$. Then $\mathcal{T}$ has a simultaneous dilation in the class of all super-reflexive Banach spaces. \end{theorem} \begin{proof}
Let $\mathcal{X}_Z$ be the class of Banach spaces defined in Proposition~\ref{prop:X_Z_satisfies_assumptions}. Then, according to this proposition, $\mathcal{X}_Z$ contains the space $Z$ and fulfills the Assumptions~\ref{ass:framework}. Hence, $\mathcal{T}$ has a simultaneous dilation in $\mathcal{X}_Z$ according to Corollary~\ref{cor:convex-combinations-of-isometries}. Since $\mathcal{X}_Z$ consists of super-reflexive spaces (as an immediate consequence of the Assumptions~\ref{ass:framework}), the assertion follows. \end{proof}
In case that $Z$ is not only super-reflexive, but satisfies an additional regularity property, it is natural to (try to) construct dilations on spaces that enjoy the same regularity property. This can be done by using the concept of super-properties which we recall next.
\begin{definition}
Consider a property $(P)$ of Banach spaces which is invariant under isometric isomorphisms. We say that a Banach space $Z$ has \emph{super-$(P)$} if every Banach space $X$ finitely representable in $Z$ has $(P)$. If $(P)$ and super-$(P)$ are the same property, then we call $(P)$ a \emph{super-property}. \end{definition}
The question whether a Banach space $Z$ has super-$(P)$ is closely related to the question whether all ultra powers of $Z$ have $(P)$. For more information on super-properties we refer the interested reader to~\cite[Chapter~11]{Pis16} and~\cite[Chapter~8]{DJT95}.
\begin{theorem} \label{thm:super-property}
Let $(P)$ be a super-property and let $Z$ be a super-reflexive Banach space such that $\ell^2(Z)$ has $(P)$. Further, let $\mathcal{T} \subseteq \mathcal{L}(Z)$ be the weakly closed convex hull of all invertible isometries in $\mathcal{L}(Z)$. Then $\mathcal{T}$ has a simultaneous dilation in the class of all super-reflexive Banach spaces having property $(P)$. \end{theorem} \begin{proof}
Consider the class $\mathcal{X}_Z$ defined in Proposition~\ref{prop:X_Z_satisfies_assumptions}. The proposition implies that $\mathcal{X}_Z$ contains $Z$ and satisfies the Assumptions~\ref{ass:framework}. Hence, according to Corollary~\ref{cor:convex-combinations-of-isometries} the set $\mathcal{T}$ has a simultaneous dilation in $\mathcal{X}_Z$. Moreover, since $(P)$ is a super-property, every element of $\mathcal{X}_Z$ has $(P)$. As every element of $\mathcal{X}_Z$ is also super-reflexive, this proves the assertion. \end{proof}
This result applies to a rich collection of important super-properties. Among them, we explicitly mention uniform convexity, the UMD-property (see~\cite{HytNeeVer16}) and having prescribed type and cotype (see~\cite{DJT95}). As an example, we state the following dilation result for UMD spaces concretely.
\begin{corollary} \label{cor:umd-spaces}
Let $Z$ be a UMD Banach space and let $\mathcal{T}$ denote the weakly closed convex hull of all invertible isometries in $\mathcal{L}(Z)$. Then $\mathcal{T}$ has a simultaneous dilation in the class of all UMD Banach spaces. \end{corollary}
\section{Application to \texorpdfstring{$L^p$-spaces}{Lebesgue spaces} and to Hilbert spaces} \label{sec:L-p}
\subsection{Dilations on \texorpdfstring{$L^p$-spaces}{Lebesgue spaces}} \label{subsec:L-p}
In this subsection we discuss how our toolkit gives the dilation theorem of Akcoglu--Sucheston on $L^p$-spaces. In fact, we obtain even a bit more, namely a simultaneous dilation of all positive contractions.
\begin{theorem} \label{thm:akcoglu-simultaneously}
Let $p \in (1,\infty)$, let $(\Omega,\mu)$ be an arbitrary measure space and let $\mathcal{T}$ denote the set of all positive linear contractions on $L^p(\Omega,\mu)$. Then $\mathcal{T}$ has a simultaneous dilation in the class of all $L^p$-spaces. Moreover, the mappings $J$ and $Q$ from Definition~\ref{def:dilations}(b) can be chosen positive and the isometries $U_T$ from Definition~\ref{def:dilations}(b) can be chosen to be lattice isomorphisms. \end{theorem}
The proof of Theorem~\ref{thm:akcoglu-simultaneously} relies on a non-canonical reduction procedure: first, we prove the theorem for $\Omega = [0,1]$, endowed with the Lebesgue measure; then we prove it for $\Omega = \{1,\dots,n\}$, endowed with the counting measure; and finally, we prove it for arbitrary measure spaces.
\begin{lemma} \label{lem:akcoglu-lp-0-1}
Theorem~\ref{thm:akcoglu-simultaneously} is true if $\Omega = [0,1]$ and if $\mu$ is the Lebesgue measure. \end{lemma} \begin{proof}
First note that every positive invertible isometry on $L^p(\Omega,\mu)$ is in fact a lattice isomorphism (this is actually true on every Banach lattice, see \cite{Abramovich1988} or \cite[Theorem~2.2.16]{Emelyanov2007}). According to~\cite[Theorem~2]{Grz90}, the set of positive invertible isometries on $L^p([0,1],\mu)$ is dense with respect to the weak operator topology in the set of all positive contractions on the same space. The assertion thus follows from Corollary~\ref{cor:convex-combinations-of-isometries} and from Remark~\ref{rem:positive-morphisms}(a). \end{proof}
At first glance, it seems that dilations of convex combinations -- which constitute the most significant part of the present work -- do not play a role in the proof of Lemma~\ref{lem:akcoglu-lp-0-1} since the set of positive invertible isometries itself (and not only its convex hull) is weakly dense in the set of all positive contractions. However, the situation is not quite that simple: the weak operator closure of a set of invertible isometries might not have a simultaneous dilation in general (see the discussion after Proposition~\ref{prop:stop-limits}), but the strong operator closure has a simultaneous dilation according to Proposition~\ref{prop:stop-limits}. Thus, we need the convex hull to pass from the weak operator closure to the strong operator closure.
\begin{remark} \label{rem:peller-power-approximation-vs-our-approach}
One can even prove more than the density result \cite[Theorem~2]{Grz90} used above. In fact, Peller observed in~\cite[Section~4, Theorem~4 and Remark~3]{Pel81} that, for every regular operator $T$ on $L^p([0,1])$ with regular norm at most one, there exists a sequence of invertible isometries $(T_k)_{k \in \mathbb{N}}$ on $L^p([0,1])$ such that all powers $T_k^n$ converge weakly to $T^n$ ($n \in \mathbb{N}_0$). This implies the Akcoglu--Sucheston dilation theorem on $L^p([0,1])$ (see the discussion after Proposition~\ref{prop:stop-limits}), and from this result one can deduce the theorem on general $L^p$-spaces (by the techniques used below). We find it important to compare this argument with our approach in more detail:
As pointed out in the introduction, the main feature of our approach is that it splits dilation theorems into a purely dilation theoretic part which works on general Banach spaces (and which we have worked out in this paper) and into an approximation theoretic part. If we follow Peller's approach instead, proving dilation theorems seems to come down to an entirely approximation theoretic task. The price for this is that one has to show approximation results in a stronger topology than the weak operator topology and that one is not allowed to use convex combinations for the approximation.
Such a stronger approximation giving the weak convergence of all powers works on $L^p([0,1])$ and -- to a certain extent -- on a class of rearrangement invariant Banach function spaces (see \cite[Section~6, Theorem~8]{Pel81}). However, the need to prove such stronger approximation results might turn out to be an obstacle if one intends to find dilation theorems on other classes of Banach spaces (compare also Open Problem~\ref{open-problem:lp-lq}). Besides, we point out that this approach does not yield simultaneous dilations. \end{remark}
\begin{lemma} \label{lem:akcoglu-lp-n}
Theorem~\ref{thm:akcoglu-simultaneously} is true if $\Omega = \{1,\dots, n\}$ for $n \in \mathbb{N}$ and if $\mu$ is the counting measure. \end{lemma} \begin{proof}
Fix $n \in \mathbb{N}$. We write $\ell^p_n \coloneqq L^p(\Omega,\mu)$ and we use the abbreviation $L^p([0,1])$ for the $L^p$-space over $[0,1]$ endowed with the Lebesgue measure.
There exist an $n$-dimensional vector sublattice $F \subseteq L^p([0,1])$, a positive contractive projection $P \in \mathcal{L}(L^p([0,1]))$ with range $F$ and an isometric lattice homomorphism $J\colon \ell^p_n \to L^p([0,1])$ with range $F$ (one concrete choice: $F$ is the span of the indicator functions $\mathbf{1}_{[(i-1)/n,\,i/n)}$, $J$ maps the $i$-th unit vector to $n^{1/p}\mathbf{1}_{[(i-1)/n,\,i/n)}$, and $P$ is the conditional expectation with respect to the partition $\{[(i-1)/n,i/n):\, i = 1,\dots,n\}$). We define $Q \coloneqq J^{-1}P: L^p([0,1]) \to \ell^p_n$ and we set $S_T \coloneqq J T Q \in \mathcal{L}(L^p([0,1]))$ for each $T \in \mathcal{T}$. Then $Q$ and $J$ are positive, each operator $S_T$ is a positive contraction on $L^p([0,1])$, and the diagram
\begin{center}
\begin{tikzcd}
L^p([0,1]) \arrow{rrr}{S_{T_1} \cdots S_{T_k}} & & & L^p([0,1]) \arrow{d}{Q} \\
\ell^p_n \arrow{u}{J} \arrow{rrr}{T_1 \cdots T_k} & & & \ell^p_n
\end{tikzcd}
\end{center}
commutes for all $k \in \mathbb{N}_0$ and $T_1,\dots,T_k \in \mathcal{T}$. Thus, the assertion follows from Lemma~\ref{lem:akcoglu-lp-0-1}. \end{proof}
\begin{remark}
The reduction of Lemma~\ref{lem:akcoglu-lp-n} to Lemma~\ref{lem:akcoglu-lp-0-1} we used in the above proof is a bit curious: recall that Lemma~\ref{lem:akcoglu-lp-0-1} mainly relies on the fact that the positive invertible isometries on $L^p([0,1])$ are weakly dense in the set of positive contractions. On the other hand, on the finite dimensional spaces $\ell^p_n$ and for $p\not=2$ this is not even true for the subconvex hull of all positive (invertible) isometries. Indeed, every isometric positive matrix on such a space is a permutation matrix (this follows for instance from the fact that a linear isometry between two $L^p$-spaces is always disjointness preserving in case that $p \neq 2$, see \cite{Lamperti1958}).
Hence, every operator $T$ in the subconvex hull of those matrices maps the vector $e = (1, \ldots, 1)$ to a vector dominated by $e$, and so does the adjoint of $T$. There are, however, positive contractions on $\ell^p_n$ which do not behave this way. From a different viewpoint, the set of all positive contractions on $\ell_n^p$ has a rich collection of extreme points. A complete characterization of these extreme points can be found in~\cite[Theorem~3]{Grz85}.
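For an explicit example of the latter (included only for illustration): on $\ell^p_2$ the operator $T(x_1,x_2) \coloneqq 2^{-1/q}\,(x_1+x_2,\,0)$, where $q$ denotes the conjugate exponent of $p$, is a positive contraction by H\"older's inequality, yet $Te = (2^{1/p},0)$ is not dominated by $e$.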
Hence, in order to apply Corollary~\ref{cor:convex-combinations-of-isometries} to $\ell^p_n$-spaces, we have to take a detour via the diffuse space $L^p([0,1])$; this is indeed quite surprising since in the proof of Theorem~\ref{thm:akcoglu-simultaneously} we prove the Akcoglu--Sucheston dilation theorem for general $L^p$-spaces by reducing it to the case of $\ell^p_n$-spaces - which is, in a sense, converse to the reduction of Lemma~\ref{lem:akcoglu-lp-n} to Lemma~\ref{lem:akcoglu-lp-0-1}. \end{remark}
The technique that we now use in the proof of Theorem~\ref{thm:akcoglu-simultaneously} is nowadays standard and goes back to Peller and W.B.~Johnson. It was, for instance, used by Akcoglu and Sucheston in~\cite[Section~4]{AkcSuc77}. Since we deal with simultaneous dilations here instead of dilations of a single operator, we think it is worthwhile to include the details.
\begin{proof}[Proof of Theorem~\ref{thm:akcoglu-simultaneously}]
Throughout the proof, we use the abbreviation $L^p \coloneqq L^p(\Omega,\mu)$ and we let $\ell^p_n$ denote the space $\mathbb{R}^n$ (or $\mathbb{C}^n$) endowed with the $p$-norm.
We call a finite collection of pairwise disjoint measurable subsets of $\Omega$ of strictly positive and finite measure a semi-partition of $\Omega$. The set $\mathcal{P}$ of all semi-partitions of $\Omega$ is a directed set with respect to refinement; we write $\alpha \ge \beta$ if $\alpha$ is finer than $\beta$. For each semi-partition $\alpha \in \mathcal{P}$ we can define its conditional expectation $\mathbb{E}_{\alpha}$. Then the net $(\mathbb{E}_{\alpha})_{\alpha \in \mathcal{P}}$ converges to the identity with respect to the strong operator topology.
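For concreteness, let us record the standard formula (included only for the reader's convenience): if $\alpha = \{A_1,\dots,A_r\}$, then
\begin{align*}
\mathbb{E}_\alpha f = \sum_{i=1}^{r} \Big( \frac{1}{\mu(A_i)} \int_{A_i} f \, \mathrm{d}\mu \Big) \mathbf{1}_{A_i} \qquad (f \in L^p),
\end{align*}
which defines a positive contractive projection onto the linear span of $\mathbf{1}_{A_1},\dots,\mathbf{1}_{A_r}$.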
Fix $\alpha \in \mathcal{P}$. Since the range of $\mathbb{E}_\alpha$ is a finite-dimensional vector sublattice of $L^p$, there exists an integer $n_\alpha \in \mathbb{N}$ and an isometric lattice homomorphism $J_\alpha \colon \ell^p_{n_\alpha} \to L^p$ whose range coincides with the range of $\mathbb{E}_\alpha$. We set $Q_\alpha \coloneqq J_\alpha^{-1} \mathbb{E}_\alpha\colon L^p \to \ell^p_{n_\alpha}$ and we define $S_{T,\alpha} \coloneqq Q_\alpha TJ_\alpha$ for each $T \in \mathcal{T}$. Then the diagram
\begin{center}
\begin{tikzcd}
\ell^p_{n_\alpha} \arrow{rrrr}{S_{T_1,\alpha} \cdots S_{T_k,\alpha}} & & & & \ell^p_{n_\alpha} \arrow{d}{J_\alpha} \\
L^p \arrow{u}{Q_\alpha} \arrow{rrrr}{ \mathbb{E}_\alpha \, \cdot \, (\mathbb{E}_\alpha T_1 \mathbb{E}_\alpha) \, \cdots \, (\mathbb{E}_\alpha T_k \mathbb{E}_\alpha)} & & & & L^p
\end{tikzcd}
\end{center}
commutes for each $k \in \mathbb{N}_0$ and all $T_1,\dots,T_k \in \mathcal{T}$. Note that we need the additional operator $\mathbb{E}_\alpha$ on the very left of the lower horizontal arrow to ensure that the diagram also commutes in case that $k = 0$; indeed, the lower arrow equals $\mathbb{E}_\alpha$ in this case (instead of $\Id_{L^p}$ which would be false).
According to Lemma~\ref{lem:akcoglu-lp-n}, we can find an $L^p$-space $X_\alpha$, positive contractions $\tilde J_\alpha \colon \ell^p_{n_\alpha} \to X_\alpha$ and $\tilde Q_\alpha \colon X_\alpha \to \ell^p_{n_\alpha}$ and isometric lattice isomorphisms $U_{\alpha, S} \in \mathcal{L}(X_\alpha)$ (for each positive contraction $S$ on $\ell^p_{n_\alpha}$) such that
\begin{align*}
S_{1} \cdots S_{k} = \tilde Q_\alpha U_{\alpha, S_1} \cdots U_{\alpha, S_k} \tilde J_\alpha
\end{align*}
for all $k \in \mathbb{N}_0$ and all positive contractions $S_{1} ,\dots, S_{k}$ on $\ell^p_{n_\alpha}$. Choose an ultrafilter $\mathcal{U}$ on $\mathcal{P}$ containing the filter base $\big\{ \{\alpha \in \mathcal{P}: \, \alpha \ge \beta\}: \; \beta \in \mathcal{P} \big\}$. Then the diagram
\begin{center}
\begin{tikzcd}
\prod_\mathcal{U} X_\alpha \arrow{rrrrr}{ (\prod_\mathcal{U} U_{\alpha, S_{T_1,\alpha}}) \, \cdots \, (\prod_\mathcal{U} U_{\alpha, S_{T_k,\alpha}}) } & & & & & \prod_\mathcal{U} X_\alpha \arrow{d}{\prod_\mathcal{U} \tilde Q_\alpha} \\
\prod_\mathcal{U} \ell^p_{n_\alpha} \arrow{u}{\prod_\mathcal{U} \tilde J_\alpha} \arrow{rrrrr}{(S_{T_1,\alpha})^\mathcal{U} \cdots (S_{T_k,\alpha})^\mathcal{U}} & & & & & \prod_\mathcal{U} \ell^p_{n_\alpha} \arrow{d}{\prod_\mathcal{U} J_\alpha} \\
(L^p)^\mathcal{U} \arrow{u}{\prod_\mathcal{U} Q_\alpha} \arrow{rrrrr}{\prod_\mathcal{U} \big(\mathbb{E}_\alpha \, \cdot \, (\mathbb{E}_\alpha T_1 \mathbb{E}_\alpha) \, \cdots \, (\mathbb{E}_\alpha T_k \mathbb{E}_\alpha) \big)} & & & & & (L^p)^\mathcal{U} \arrow{d}{} \\
L^p \arrow{u}{} \arrow{rrrrr}{T_1 \cdots T_k} & & & & & L^p
\end{tikzcd}
\end{center}
commutes for each $k \in \mathbb{N}_0$ and all $T_1,\dots,T_k \in \mathcal{T}$. Here, the mapping $L^p \to (L^p)^\mathcal{U}$ between the first and the second line (counted from below) is the canonical injection and $(L^p)^\mathcal{U} \to L^p$ between the second and the first line is the mapping induced by the weak limit along $\mathcal{U}$ (which exists since $L^p$ is reflexive). We note that the diagram commutes between the first and the second line since the operator net $\big(\mathbb{E}_\alpha \, \cdot \, (\mathbb{E}_\alpha T_1 \mathbb{E}_\alpha) \, \cdots \, (\mathbb{E}_\alpha T_k \mathbb{E}_\alpha)\big)_{\alpha \in \mathcal{P}}$ converges strongly to $T_1 \cdots T_k$ and since the ultrafilter $\mathcal{U}$ is adapted to the order on $\mathcal{P}$. The diagram shows that $\mathcal{T}$ has a simultaneous dilation with the required properties. \end{proof}
It would be interesting to have a similar result as in Theorem~\ref{thm:akcoglu-simultaneously} -- or, say, at least Lemma~\ref{lem:akcoglu-lp-0-1} -- available on $L^p(L^q)$-spaces, too. The class of all $L^p(L^q)$-spaces itself is not ultra-stable, but the class of all bands in $L^p(L^q)$-spaces is ultra-stable (this follows from \cite[Corollary~8.8]{HLR91}) and thus fulfills Assumptions~\ref{ass:framework}. Hence, in order to apply our main result and its corollaries, it would be desirable to understand the weakly closed convex hull of all positive invertible isometries on such spaces.
\begin{open_problem} \label{open-problem:lp-lq}
Let $p,q \in (1,\infty)$ and let $\mathcal{T}$ denote the weak operator closure of the convex hull of all positive invertible isometries on $L^p([0,1]; L^q([0,1]))$. Does $\mathcal{T}$ coincide with the set of all positive contractions? If not, can a good characterization of the elements of $\mathcal{T}$ be given? \end{open_problem}
\subsection*{Dilations on Hilbert spaces} \label{subsec:hilbert}
In the previous subsection we considered a dilation result for positive operators in the $L^p$-setting. On Hilbert spaces, on the other hand, one gets results for arbitrary contractions. For single operators, this is the well-known dilation theorem of Sz.-Nagy. We note that a standard proof of this result even yields a simultaneous dilation of all contractions on a given Hilbert space, as for example pointed out in \cite[Section~1.5.8]{Nik02}. Although this construction is not particularly difficult, we find it worthwhile to show that the same result can be obtained as a consequence of Corollary~\ref{cor:convex-combinations-of-isometries}. This emphasizes the universality of our approach.
\begin{theorem} \label{thm:hilbert-space-dilations-simultaneous}
Let $H$ be a Hilbert space and $\mathcal{T} \subseteq \mathcal{L}(H)$ the set of all contractions on $H$. Then $\mathcal{T}$ has a simultaneous dilation in the class of all Hilbert spaces. \end{theorem} \begin{proof}
By a similar reduction argument as used in the proof of Theorem~\ref{thm:akcoglu-simultaneously} it suffices to establish the result if $H$ is finite dimensional. In this case, however, the convex hull of all (invertible) isometries in $\mathcal{L}(H)$ coincides with the set of all contractions in $\mathcal{L}(H)$; this is an easy consequence of the polar decomposition theorem for matrices. Hence, the assertion follows from Corollary~\ref{cor:convex-combinations-of-isometries}. \end{proof}
\section{Outlook}\label{sec:outlook}
Our techniques do not work without adjustments to obtain non-trivial results on $L^1$-spaces: we require all our Banach spaces to be reflexive. However, as pointed out in Construction~\ref{constr:simple-dilation-ell-1}, it is not particularly difficult to find a dilation on a ``large'' $L^1$-space. We leave it to future research to find out whether our techniques can be adapted to $L^1$-spaces. Moreover, no attempt has been made to apply our results to non-commutative $L^p$-spaces; we also leave this as a task for the future.
In view of our definition of a \emph{simultaneous dilation} (Definition~\ref{def:dilations}) it is worthwhile pointing out that there is a distinct interest in \emph{commutative} simultaneous dilations in the literature, especially in the Hilbert space case; see for instance \cite{Ando1963, Gacspar1969, Popescu1986, Stochel2001, Opela2006, SauPreprint} as well as \cite[Chapter~I]{SFBK10} and \cite[Section~4]{Levy2014} for this and related topics. Our approach does not yield commutative simultaneous dilations of commuting operators; we do not know whether commutative dilation theorems can be derived from our simultaneous dilations results.
A related question concerns the task of dilating a $C_0$-semigroup of operators instead of a single operator only; this question has been studied by Fendler for $L^p$-spaces~\cite{Fendler1997} and by Konrad for $L^1$-spaces~\cite{Konrad2015}. Once a dilation theorem for single operators on a class of uniformly convex Banach spaces is established (as we do in the present work), one can mimic Fendler's general argument to obtain semigroup dilations.
\appendix
\section{Some observations from group theory} \label{appendix:group-theory}
In this appendix we explicitly write down a few simple observations from the theory of groups which are needed in the proofs of Theorems~\ref{thm:convex-combination} and~\ref{thm:convex-combinations-simultaneous}.
\begin{proposition} \label{prop:count-group-elements}
Let $G$ be a finite abelian group, let $\varphi: G \times G \to G$, $(g_1,g_2) \mapsto g_1 g_2$. Then $\varphi$ is surjective and the preimage of each element $g \in G$ contains exactly $|G|$ elements. \end{proposition} \begin{proof}
Obviously $\varphi$ is surjective. Since $G$ is abelian, $\varphi$ is a group homomorphism, and its kernel clearly consists of $|G|$ elements. Now, let $g \in G$. Since $g$ is contained in the range of $\varphi$ we have $|\varphi^{-1}(\{g\})| = |\ker \varphi| = |G|$. \end{proof}
\begin{proposition} \label{prop:group-operation}
Let $G$ be a finite group which operates on a finite set $X$. Fix $x \in X$ and denote the orbit of $x$ under $G$ by $G(x)$. Then $|G(x)|$ divides $|G|$. Moreover, if we define
\begin{align*}
G_y := \{g \in G: g(x) = y\}
\end{align*}
for each $y \in G(x)$, then the family $(G_y)_{y \in G(x)}$ is a partition of $G$ into $|G(x)|$ disjoint subsets and each set $G_y$ has the cardinality $\frac{|G|}{|G(x)|}$. \end{proposition} \begin{proof}
Clearly, the sets $G_y$ (for $y \in G(x)$) are disjoint and form a partition of $G$ into $|G(x)|$ subsets, so it remains to show that all sets $G_y$ have the same cardinality. Let $y \in G(x)$ and fix an element $g_0 \in G_y$. Then the mapping
\begin{align*}
G_x & \to G_y \\
g & \mapsto g_0 \, g
\end{align*}
is a bijection between $G_x$ and $G_y$. Hence, all sets $G_y$ have the same cardinality. \end{proof}
\end{document} |
\begin{document}
\centerline{}
\selectlanguage{english} \title{Failure of Wiener's property for positive definite periodic functions}
\selectlanguage{english} \author[A. Bonami]{Aline Bonami} \email{[email protected]} \author[S. Revesz]{Szil\'ard Gy. R\'ev\'esz} \email{[email protected]}
\address[A. Bonami]{F\'ed\'eration Denis Poisson. MAPMO-UMR 6628, D\'epartement de Math\'ematiques, Universit\'e d'Orl\'eans, 45067 Orl\'eans Cedex 2, France} \address[S. Revesz]{
R\'enyi Institute of Mathematics, Hungarian Academy of Sciences,
Budapest, P.O.B. 127, 1364 Hungary.}
\begin{abstract} \selectlanguage{english}
We say that Wiener's property holds for the exponent $p>0$ if, whenever a positive definite function $f$ belongs to $L^p(-\varepsilon,\varepsilon)$ for some $\varepsilon>0$, the function $f$ necessarily belongs to $L^p(\mathbb T)$, too. This holds true for $p\in 2\mathbb N$ by a classical result of Wiener.
Recently various concentration results were proved for idempotents and positive definite functions on measurable sets on the torus. These new results enable us to prove a sharp version of the failure of Wiener's property for $p\notin 2\mathbb N$. Thus we obtain strong extensions of results of Wainger and Shapiro, who proved the negative answer to Wiener's problem for $p\notin 2\mathbb N$.
\selectlanguage{francais}
\noindent{\bf Contre-exemples \`a la propri\'et\'e de Wiener pour les fonctions p\'eriodiques d\'efinies-positives.}
\noindent{\bf R\'esum\'e.} On dit que l'exposant $p$ poss\`ede la propri\'et\'e de Wiener si toute fonction p\'eriodique d\'efinie-positive qui est de puissance $p$-i\`eme int\'egrable au voisinage de $0$ l'est sur un intervalle de p\'eriode. C'est le cas des entiers pairs, d'apr\`{e}s un r\'{e}sultat classique de Wiener.
Nous avons r\'ecemment obtenu des ph\'enom\`enes de concentration des polyn\^omes idempotents ou d\'efinis-positifs sur un ensemble mesurable du tore qui nous permettent de donner une version forte du fait que les exposants $p\notin 2\mathbb N$ n'ont pas la propri\'et\'e de Wiener, am\'eliorant ainsi les r\'esultats de Wainger et Shapiro.
\end{abstract}
\selectlanguage{english}
\maketitle
\section{Introduction}\label{sec:intro}
Let $f$ be a periodic integrable function which is positive definite, that is, has non-negative Fourier coefficients. If it is bounded (in $\|\cdot\|_\infty$) in a neighborhood of $0$, then it necessarily belongs to $L^\infty(\mathbb T)$, too. In fact, its maximum is attained at $0$ and, as $f(0)=\sum_k \widehat{f}(k)$, $f$ has an absolutely convergent Fourier series.
The same question can be formulated in any $L^p$ space. Actually, the following question was posed by Wiener in a lecture, after he proved the $L^2$ case. We refer to \cite{Sh} for the story of this conjecture; see also \cite{L} and \cite{W}.
\begin{problem}[Wiener]\label{Wienerproblem} Let $1\le p<\infty$. Is it true, that if for some $\varepsilon>0$ a positive definite function $f\in L^p(-\varepsilon,\varepsilon)$, then we necessarily have $f\in L^p(\mathbb T)$, too? \end{problem}
The observation that the answer is positive if $p\in 2\mathbb N$ has been given by Wainger \cite{Wa}, as well as by Erd\H os and Fuchs \cite{EF}. We refer to Shapiro \cite{Sh} for the proof, since the constant given by his proof is in some sense optimal, see \cite{L,L2}. Generalizations in higher dimension may be found in \cite{Hl}, for instance. It was shown by Shapiro \cite{Sh} and Wainger \cite{Wa} that the answer is negative for all other values of $p$. Negative results were obtained for groups in e.g. \cite{F} and \cite{L}.
There is even more evidence that the Wiener property must hold when $p=2$ and we prescribe large gaps in the Fourier series of
$f$. Indeed, in this case by well-known results of Wiener and Ingham, see e.g. \cite{W,Z}, we necessarily have an essentially uniform distribution of the $L^2$ norm on intervals longer than the reciprocal of the gap, even without the assumption that $f$ be positive definite. As Zygmund pointed out, see the Notes to Chapter~V, \S 9, page 380 in \cite{Z}, Ingham type theorems were not known for $p\ne 2$; nevertheless, one would feel that prescribing large gaps in the Fourier series should lead to better control of the global behavior by means of having control on some subset like e.g. $(-\varepsilon,\varepsilon)$. So the analogous Wiener question can be posed restricting to positive definite functions having gaps tending to $\infty$. However, we answer this question in the negative as well. In this strong form the question, to the best of our knowledge, has not been dealt with yet. We are also able to replace the interval $(-\varepsilon, +\varepsilon)$ by any measurable symmetric subset $E$ of the torus of measure $|E|<1$. Neither extension can be obtained by a straightforward use of the methods of Shapiro and Wainger.
\section{$L^2$ results and concentration of integrals}\label{sec:concentration}
We use the notation $\mathbb T:=\mathbb R/\mathbb Z$ for the torus. Then $e(t):=e^{2\pi i t}$ is the usual exponential function adjusted to interval length $1$, and we denote by $e_h$ the function $e(hx)$. The set of positive definite trigonometric polynomials is the set \begin{equation}\label{eq:posdefpol} {\mathcal T}^{+}:=\left\{ \sum_{k\in H}a_k e_k ~:~ H\subset \mathbb Z \quad (\textrm{or}~~ \mathbb N), ~~ \# H< \infty, \quad a_k\geq 0 ~(k\in H) \right\}. \end{equation} For obvious reasons of being convolution idempotents, the set \begin{equation}\label{eq:idempotents} \PP:=\left\{ \sum_{h\in H}e_h ~:~ H\subset \mathbb Z \quad (\textrm{or} ~~\mathbb N), ~~ \# H< \infty \right\} \end{equation} is called the set of \emph{(convolution-)idempotent exponential (or trigonometric) polynomials}, or just \emph{idempotents} for short.
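For instance (a toy example), $e_0+e_3+e_7$ belongs to $\PP$ (and hence to ${\mathcal T}^{+}$), while $2e_0+e_5$ belongs to ${\mathcal T}^{+}$ but not to $\PP$.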
Note that multiplying a polynomial by an exponential $e_K$ does not change its absolute value, and the property of belonging to $\PP$ or ${\mathcal T}^{+}$ is not changed either. Therefore, it suffices to consider polynomials with nonnegative spectrum, i.e. $H\subset \mathbb N$ in \eqref{eq:posdefpol} and \eqref{eq:idempotents}.
Also note that for a positive definite function $f$ the function $|f|$ is necessarily even. This is why we consider $0$-symmetric (or just symmetric, for short) intervals or sets; alternatively, we could have chosen to restrict to $[0,1/2)$ instead of $\mathbb T$.
Let us first state the theorem on positive definite functions in $L^2$. Recall that the direct part is attributed to Wiener, with the constant given by Shapiro in \cite{Sh}. The converse seems to be well known (see \cite{L,L2}), except, maybe, for the fact that counter-examples may be given by idempotents. The fact that the Wiener property fails for arbitrary measurable sets is, to the best of our knowledge, new.
\begin{theorem}[Wiener, Shapiro] \label{th:shapiro} For $p$ an even integer, for $0<a<1/2$ and for $f\in {\mathcal T}^{+}$, we have the inequality \begin{equation}\label{shapiro}
\frac 1{2a}\int_{-a}^{+a}|f|^p\geq \frac 12
\int_{-1/2}^{+1/2}|f|^p. \end{equation} Moreover, the constant $1/2$ cannot be replaced by a smaller one, even when restricting to idempotents. Indeed, for each integer $k>2$, for $a<1/k$ and for $b>1/k$, there exits an idempotent $f$
and such that $\int_{-a}^{+a}|f|^p\leq b\times \int_{-1/2}^{+1/2}|f|^p$. \end{theorem} \begin{proof} We refer to Shapiro for the proof of the inequality \eqref{shapiro}.
To show sharpness of the constant, let us now give an example, inspired by the examples of \cite{DPQ}. We take $f:=D_n*\mu_k$, where $D_n$ is the Dirichlet kernel, defined here as \begin{equation}\label{eq:Dndef} D_n(x):=\sum_{\nu=0}^{n-1} e(\nu x) = e^{\pi i(n-1)x} \frac{\sin(\pi n x)}{\sin(\pi x)}, \end{equation} and $\mu_k$ is the average of the Dirac masses at the $k$-th roots of unity. Both have Fourier coefficients $0$ or $1$, so that $f$ is an idempotent; explicitly, $f = D_n*\mu_k = \sum_{0\le \nu < n,\ k\mid \nu} e_\nu$. Only one of the point masses of $\mu_k$ lies inside the interval $(-a,+a)$ and one can see that the ratio between
$\int_{-a}^{+a}|f|^p$ and $\int_{-1/2}^{+1/2}|f|^p$ tends to $1/k$ when $n$ tends to infinity. \end{proof}
\begin{remark} The interval $(-a,+a)$ cannot be replaced by a measurable set $E$ having $0$ as a density point, even if $|E|$ is arbitrarily close to $1$. Indeed, assume that the complement of $E$ is the union (modulo $1$) of all intervals of radius $1/l^3$ around all irreducible rational numbers $k/l$, with $k$ different from $0$ and $l>L$. Then $E$ has the required properties, while,
for the same idempotent $f:=D_n*\mu_l$, the ratio between $\int_E|f|^p$ and
$\int_{-1/2}^{+1/2}|f|^p$ tends to $1/l$ when $n$ tends to infinity.
We get our conclusion noting that $l$ may be arbitrarily
large.
\end{remark}
Let us now consider the $p$-concentration problem, which comes from the following definition.
\begin{definition} Let $p>0$ and let $\mathcal F$ be a class of functions on $\mathbb T$. We say that for the class $\mathcal F$ there is $p$-concentration if there exists a constant $c>0$ so that for any symmetric measurable set $E$ of positive measure one can find a function $f\in{\mathcal F}$ with \begin{equation}\label{eq:Lpconcentration}
\int_E |f|^p \geq c \int_\mathbb T |f|^p. \end{equation} \end{definition}
The problem of $p$-concentration on the torus for idempotent polynomials has been considered in \cite{DPQ}, \cite{DPQ2}, \cite{CRMany}. It was essentially solved recently in \cite{BR}. Also, the weaker question of concentration of $p^{\textrm th}$ integrals of positive definite functions has been dealt with starting with the works \cite{DPQ,DPQ2}. In this respect we have proved the following result, see \cite[Theorem 48]{BR}. We will only state that part of the theorems of \cite{BR} that we will use.
\begin{theorem}\label{th:concentration} For all $0<p<\infty$, $p$ not an even integer, whenever a $0$-symmetric measurable set $E$
of positive measure $|E|>0$ is given, then for all $\varepsilon>0$ there exists some positive definite trigonometric polynomial $f\in{\mathcal T}^{+}$
so that \begin{equation}\label{eq:concentration}
\int_{^cE} |f|^p \leq\varepsilon \int_\mathbb T |f|^p. \end{equation} Moreover, $f$ can be taken with arbitrarily large prescribed gaps between frequencies of its Fourier series. \end{theorem}
\begin{remark} The same result is also proved for open symmetric sets and idempotents, and for measurable sets and idempotents when $p>1$. \end{remark}
Theorem \ref{th:concentration} shows immediately that there is no inequality like \eqref{shapiro} for $p$ not an even integer. What is new, compared to the results of Shapiro and Wainger, is the fact that this is also the case if $f$ has arbitrarily large gaps, and that we can replace intervals $(-a,+a)$ by arbitrary measurable sets of measure less than $1$. We will give a different statement in the next section for $E$ an open set, and also show a strong version of the negative answer to Wiener's problem.
\section{Negative results in Wiener's problem}\label{sec:results}
Let us start with somewhat strengthening the previous theorem for open sets, which we obtain by an improvement of the methods of Shapiro in \cite{Sh}. \begin{theorem}\label{th:strong-conc} For all $0<q\leq p<2$, whenever a $0$-symmetric open set $E$
of positive measure $|E|>0$ is given, then for all $\varepsilon>0$ there exists some positive definite trigonometric polynomial $f\in{\mathcal T}^{+}$
so that \begin{equation}\label{eq:strong-conc}
\int_{^cE} |f|^p \leq\varepsilon \left (\int_\mathbb T
|f|^q\right)^{p/q}. \end{equation} The same is valid for $q<p$ with $p$ not an even integer, provided that $q$ is sufficiently close to $p$, that is $q>q(p)$, where $q(p)<p$. \end{theorem}
The construction is closely related to the failure of the Hardy--Littlewood majorant property. We do not know whether, for $p>2$ not an even integer, that is $2k<p<2k+2$, we can take $q(p)=2k$. Due to Theorem \ref{th:shapiro}, we cannot take $q(p)<2k$. We do not know either whether the next statement is valid for functions with arbitrarily large gaps.
\begin{proof} Let us first assume that $p<2$. Then, for $D_n$ the Dirichlet kernel with $n$ sufficiently large depending on $\varepsilon$, there exists a choice of $\eta_k=\pm 1$ such that
$$\|D_n\|_p\leq \varepsilon \|\sum_{k=0}^n \eta_k e_k\|_q.$$ Indeed, if this were not the case, taking the $q$-th power, averaging over all possible signs and using Khintchine's inequality, we would find that $c\varepsilon\sqrt n \leq \|D_n\|_p\leq Cn^{1-\frac 1p}$ ($1<p<2$), $c\varepsilon\sqrt n \leq \|D_n\|_1\leq C \log n$ ($p=1$) and $c\varepsilon\sqrt n \leq \|D_n\|_p\leq C$ ($0<p<1$), which leads to a contradiction for $n$ large.
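For the reader's convenience, we recall the form of Khintchine's inequality used in this step (here $c=c_q>0$ is an absolute constant depending only on $q$): for every fixed $t$, averaging over all choices of signs $\eta=(\eta_0,\dots,\eta_n)$ gives
$$\mathbb{E}_\eta \Big|\sum_{k=0}^n \eta_k e(kt)\Big|^q \;\geq\; c^q \Big(\sum_{k=0}^n |e(kt)|^2\Big)^{q/2} \;=\; c^q\,(n+1)^{q/2},$$
and integrating in $t$ yields $\big(\mathbb{E}_\eta \|\sum_{k=0}^n \eta_k e_k\|_q^q\big)^{1/q}\geq c\sqrt{n+1}$; this is the source of the lower bound $c\varepsilon\sqrt n$ above.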
We assume that $E$ contains $I\cup (-I)$, where $I:=(\frac kN,
\frac{k+1}N)$, and denote $$g(t):=\sum_{k=0}^n \eta_k e_k(t)\hspace{2cm} G(t):=D_n(t).$$ Let $\Delta$ be a triangular function based on the interval $(-\frac 1{2N}, +\frac{1}{2N})$, that is, $\Delta(t):=\left (1-2N|t|\right)_+ $. We finally consider the function $$f(t):=\Delta(t-a)g(2Nt)+\Delta(t+a)g(2Nt)+2\Delta(t)G(2Nt),$$ where $a$ is the center of the interval $I$. Then an elementary computation of Fourier coefficients, using the fact that $\Delta$ has positive Fourier coefficients while the Fourier coefficients of $g$ and $G$ have equal moduli, shows that $f$ is positive definite. Let us prove that one has (\ref{eq:strong-conc}). The left hand side is bounded by $\frac 2N \|G\|_p^p$, while $\int_\mathbb T |f|^q$ is bounded below by $\frac 1{2N}\|g\|_q^q- \frac 2N \|G\|_q^q$. We conclude the proof by choosing $n, N$ sufficiently large.
Let us now consider $p>2$ not an even integer. Mockenhaupt and Schlag in \cite{MS} have given counter-examples to the Hardy-Littlewood majorant conjecture, which are based on the following property: for $j>p/2$ an odd integer, the two trigonometric polynomials $$g_0:=(1+e_j)(1- e_{j+1})\hspace{2cm} G_0:=(1+e_j)(1+ e_{j+1})$$
satisfy the inequality $\|G_0\|_p<\|g_0\|_p$. By continuity, this inequality remains valid when $p$ is replaced by $q$ in the right-hand side, with $q>q(p)$, for some $q(p)<p$. By a standard Riesz product argument, for $K$ large enough, as well as $N_1, N_2,\dots, N_K$, depending on $\varepsilon$, the functions $$g(t):=g_0(t)g_0(N_1t)\cdots g_0(N_Kt)\ \ \mbox{\rm and}\ \ G(t):=G_0(t)G_0(N_1t)\cdots G_0(N_Kt)$$ satisfy the inequality
$$\|G\|_p\leq \varepsilon \|g\|_q.$$ From this point the proof is identical. \end{proof}
We can now state in two theorems the counter-examples that we obtain for the Wiener conjecture when $p$ is not an even integer.
\begin{theorem}\label{th:noWiener} Let $0<p<\infty$, and $p\notin 2\mathbb N$. Then for any symmetric, measurable set $E\subset\mathbb T$ with
$|E|>0$ and any $q<p$, there exists a function $f$ in the Hardy space $H^q(\mathbb T)$ with positive Fourier coefficients, so that its pointwise boundary value $f^*$ is in $L^p(^cE)$ while $f^*\notin L^p(\mathbb T)$. Moreover, $f$ can be chosen with gaps tending to $\infty$ in its Fourier series. \end{theorem}
Here $H^q(\mathbb T)$ denotes the space of periodic distributions $f$ whose negative Fourier coefficients are zero, and such that the functions $f_r$ are uniformly bounded in $L^q(\mathbb T)$ for $0<r<1$, where
$$f_r(t):=\sum_{n }\hat f(n)r^{|n|} e^{2i\pi n t}.$$ Moreover, the norm (or quasi-norm) of $f$ is given by
$$\|f\|_{H^q(\mathbb T)}^q:=\sup_{0<r<1}\int_0^1|f_r|^q.$$
It is well known that, for $f\in H^q(\mathbb T)$, the functions $f_r$ have an a.e.\ limit $f^*$ as $r$ tends to $1$. The function $f^*$, which we call the pointwise boundary value, belongs to $L^q(\mathbb T)$. When $q\geq 1$, then $f$ is the distribution defined by $f^*$, and $H^q(\mathbb T)$ coincides with the subspace of functions in $L^q(\mathbb T)$ whose negative coefficients are zero. In all cases the space $H^q(\mathbb T)$ identifies with the classical Hardy space when identifying the distribution $f$ with the holomorphic function $\sum_{n\geq 0 }\hat f(n)z^n$ on the unit disc. This explains the use of the term boundary value.
The function $f\in H^q$ is said to have gaps (in its Fourier series) tending to $\infty$ whenever the Fourier series of $f$ can be written as $\sum_{k=0}^\infty a_k e^{2i\pi n_k x},$ where $n_k$ is an increasing sequence such that $n_{k+1}-n_k\to \infty$ as $k\to\infty$.
In contrast to this theorem, recall that for a \emph{lacunary} sequence $n_k$, if the Fourier series is in $L^p(E)$ for some measurable set $E$ of positive measure, then the function $f$ belongs to all spaces $L^q(\mathbb T)$, see \cite{Z}. This has been generalized by Miheev \cite{M} to $\Lambda(p)$ sets for $p>2$: if $f$ is in $L^p(E)$, then $f$ is in the space $L^p(\mathbb T)$. See also the expository paper \cite{BD}.
\begin{proof} The key to the proof is Theorem \ref{th:concentration}. Remark that we can assume that $p>q>1$. Indeed, $f^\ell$ is a positive definite function when $f$ is, and counter-examples for some $p>1$
will lead to counter-examples for $p/\ell$. Now, let us take a sequence $E_k$ of disjoint measurable subsets of $E$ of positive measure, such that $|E_k|<2^{-\alpha k}$, with $\alpha$ to be chosen later, and let $f_k$ be a sequence of positive definite trigonometric polynomials such that \begin{equation}\label{first}
\int_{ \mathbb T\setminus E_k} |f_k|^p \leq 2^{-kp } \int_{\mathbb T} |f_k|^p . \end{equation} Moreover, we assume that the $f_k$'s have gaps larger than $k$. Using H\"older's inequality, we obtain \begin{align*}\label{eq:smallernorm}
\int_{\mathbb T}|f_k|^q \leq 2^{-\alpha (1-q/p) k}\left(\int_{E_k}
|f_k|^p\right)^{q/p}+\left(\int_{\mathbb T\setminus E_k}
|f_k|^p\right)^{q/p} \leq 2\times 2^{-{kq}}\left (\int_{\mathbb T}
|f_k|^p\right)^{q/p}, \end{align*}
if $\alpha$ is chosen large enough (it suffices that $\alpha(1-q/p)\geq q$). Finally, we normalize the sequence $f_k$ so that $\|f_k\|_p=2^{\frac k{2}}$, and take \begin{equation}\label{series}
f(x):=\sum_{k\geq 1} e^{2i\pi m_k x}f_k(x), \end{equation} where the $m_k$ are chosen inductively, increasing sufficiently fast, so that the condition on the gaps is satisfied. The series converges in $L^q(\mathbb T)$ and in $L^p(^cE)$, and the limit $f$ has its Fourier series given by \eqref{series}. Now, let us prove that $f$ is not in $L^p(\mathbb T)$. Since the $E_j$'s are disjoint, $$
\| f\|_{p}\geq \| f\|_{L^p(E_k)} \geq \| f_k\|_{p} - \sum_{j} \|
f_j\|_{L^p(^cE_j)} \geq 2^{\frac k2} - \sum_{j>0} 2^{-\frac j2}, $$ which allows us to conclude. \end{proof}
Using Theorem \ref{th:strong-conc} instead of Theorem \ref{th:concentration}, we have the following. \begin{theorem}\label{th:strong-noWiener}\begin{itemize}\item[(i)] Let $p>2$, with $p\notin 2\mathbb N$, and let $\ell\in\mathbb N$ such that $2\ell<p<2(\ell +1)$. Then, for any symmetric open set
$U\subset\mathbb T$ with $|U|>0$ and $q>q(p)$, there exists a positive definite function $f\in L^{2\ell}(\mathbb T)$, whose negative coefficients are zero, such that $f\notin L^q(\mathbb T)$ while $f$ is in $L^p(^cU)$. \item[(ii)] Let $0<p<2$. Then
for any symmetric open set $U\subset\mathbb T$ with $|U|>0$ and any $s<q<p$, there exists a function $f$ in the Hardy space $H^{s}(\mathbb T)$ with non-negative Fourier coefficients, so that $f\notin H^q(\mathbb T)$ while $f^*$ is in $L^p(^cU)$. \end{itemize} \end{theorem}
\begin{proof} Let us first prove $(i)$. We can assume that $^cU$ contains a neighborhood of $0$. So, by Wiener's property, if $f$ is integrable and belongs to $L^p(^cU)$, then $f$ is in $L^{2\ell}(\mathbb T)$. Let us prove that there exists such a function, whose Fourier coefficients satisfy the required properties, and which does not belong to $L^q(\mathbb T)$. The proof follows the same lines as in the previous one. By using Theorem \ref{th:strong-conc}, we can find positive definite polynomials
$f_k$ such that $\|f_k\|_q=2^{k/2}\to \infty$, while $\|
f_k\|_{L^p(^cU_k)}\leq 2^{-k}$ with $U_k\subset U$ disjoint and of sufficiently small measure, so that
$\sum\|f_k\|_{L^p(^cU)}<\infty$. As before, the function $ f:=\sum_{k\geq 1} e_{ m_k}f_k$ will have the required properties.
Let us now consider the case $1\leq p<2$, from which we will deduce
$(ii)$: if $p< 1$, we look for a function of the form $f^{\ell}$, with $f$ satisfying the conclusions for $\ell p$, where $\ell$ is such that $1\leq \ell p<2$. We can assume that $q< 1$. We proceed as before, with $f_k$'s given by Theorem \ref{th:strong-conc}, such that $\|f_k\|_q=2^{k/2}$ and $\| f_k\|_{L^p(^cU_k)}\leq 2^{-k/2}$. The $U_k$'s are assumed to be disjoint and of small measure, so that $\sum_k \|f_k\|_{H^s}^s<\infty$. It follows
that $f\in H^s(\mathbb T)$. Remark that $f$ is not a function, in general, but a distribution. Recall that $f^*$ is the boundary value of the corresponding holomorphic function. We write as before $$
\| f\|_{H^q(\mathbb T)}^q\geq \| f^*\|_{L^q(U_k)}^q \geq \| f_k\|_{q}^q
- \sum_{j} \| f_j\|_{L^q(^cU_j)}^q \geq 2^{\frac {kq}2} - \sum_{j>0} 2^{-\frac {jq}2}, $$ which shows that $f$ is not in $H^q(\mathbb T)$. \end{proof}
\begin{remark} As Wainger did in \cite{W}, we can prove a little more:
the function $f$ may be chosen such that $\sup_{r<1}|f_r|$ is in $L^p(^cU)$. Let us give the proof in the case $(i)$. We can assume that $U$ may be written as $I\cup(-I) $
for some interval $I$. Let $J$ be the interval with the same center and
half the length, and take $f$ constructed as above, but for the open
set $J\cup(-J)$. Finally, write $f=\phi+\psi$, with $\phi:=f\chi_{^c\left(J\cup
(-J)\right)}$. Then using the maximal theorem we know that $\sup_{r<1}|\phi_r|\in
L^p(\mathbb T)$, while the Poisson kernel $P_t(x-y)$ is uniformly bounded
for $x\notin U$ and $y\in J\cup(-J)$, so that $\sup_{r<1}|\psi_r|$ is uniformly bounded outside $U$.
In the case $(ii)$, the proof is more technical, $f$ being only a
distribution. We use the fact that derivatives
of the Poisson kernel $P_t(x-y)$ are also uniformly bounded
for $x\notin U$ and $y\in J\cup(-J)$.
\end{remark}
\end{document} |
\begin{document}
\title{The Cauchy problem for an inviscid and non-diffusive Oldroyd-{B} model in two dimensions}
\author[a]{Yuanzhi Tu}
\author[a]{Yinghui Wang}
\author[a]{Huanyao Wen \thanks{Corresponding author.}}
\affil[a]{School of Mathematics, South China University of Technology, Guangzhou, China}
\date{}
\maketitle
\renewcommand{\thefootnote}{}
\footnote{ {E}-mail: [email protected](Tu); [email protected](Wang); [email protected](Wen).}
\begin{abstract}
A two-dimensional inviscid and diffusive Oldroyd-B model was investigated by [T. M. Elgindi, F. Rousset, Commun. Pure Appl. Math. 68 (2015), 2005--2021], where the global existence and uniqueness of the strong solution were established for arbitrarily large initial data. As pointed out by [A. V. Bhave, R. C. Armstrong, R. A. Brown, J. Chem. Phys., 95 (1991), 2988--3000], the diffusion coefficient is significantly smaller than the other effects, so it is interesting to study the non-diffusive model. In the present work, we obtain the global-in-time existence and uniqueness of the strong solution to the non-diffusive model with small initial data by deriving some uniform regularity estimates and taking vanishing diffusion limits. In addition, the large time behavior of the solution is studied and the optimal time-decay rates for each order of spatial derivatives are obtained. The main challenges lie in the lack of dissipation and regularity effects of the system and in the slower decay in the two-dimensional setting. A combination of spectral analysis and the Fourier splitting method is adopted.
\end{abstract}
{\noindent \textbf{Keywords:} An Oldroyd-B model; global existence and uniqueness; long time behavior; vanishing diffusion limits.}
{\noindent\textbf{AMS Subject Classification (2020):} 76A10, 76B03, 74H40.}
\section{Introduction}
The interest in viscoelastic fluids has increased considerably due to their connections with the applied sciences. The motion of such fluids can be described by the Navier-Stokes equations coupled with constitutive laws of different types, see \cite{Bird_1, Bird_2} for more details. In this paper, we consider the following {O}ldroyd-{B} type model in Eulerian coordinates:
\begin{equation} \label{Oldroyd_B_d}
\begin{cases}
\partial_tu+(u\cdot\nabla) u+\nabla p=K\, {\rm div}\tau,\\
\partial_t\tau+(u\cdot\nabla)\tau+\beta\tau=\alpha\mathbb{D}(u),\\
{\rm div}\,u=0, \\[2mm]
(u,\tau)(x,0)=(u_0,\tau_0)(x),
\end{cases}
\end{equation}
on $\mathbb{R}^2 \times (0,\infty)$. System (\ref{Oldroyd_B_d}) with a diffusion term $-\mu\Delta\tau$ added to the left-hand side of the equation for $\tau$ was investigated by Elgindi and Rousset in \cite{Elgindi Rousset 2015}, where the global existence and uniqueness of the strong solution were established for arbitrarily large initial data. In this paper, we aim to study the global wellposedness and the large time behavior of the non-diffusive model (\ref{Oldroyd_B_d}).
We now give an overview of the study of the model. In fact, it is a simplified version of the following classical incompressible {O}ldroyd-{B} model\footnote{\eqref{Oldroyd_B_d} is the case where $\mu=0$, $\nu=0$ and $Q=0$.}:
\begin{eqnarray} \label{Oldroyd_B}
\begin{cases}
\partial_tu+(u\cdot\nabla) u+\nabla p-\nu\Delta u=K \,{\rm div}\tau,\\
\partial_t\tau+(u\cdot\nabla)\tau-\mu\Delta\tau+\beta\tau=Q(\nabla u,\tau)+\alpha\mathbb{D}(u),\\
{\rm div}\,u=0,
\end{cases}
\end{eqnarray}
where $u=u(x,t)$, $p=p(x,t)$, and $\tau=\tau(x,t)$ denote the velocity field of the fluid, the scalar pressure, and the tangential part of the stress tensor represented by a symmetric matrix, respectively. $\mathbb{D}(u)=\frac12(\nabla u+ \nabla u^T)$ is the symmetric part of the velocity gradient. The nonlinear term $Q(\nabla u,\tau)$ is a bilinear form:
\begin{equation*}
Q(\nabla u,\tau)=\Omega\tau - \tau\Omega + b(\mathbb{D}(u)\tau+\tau\mathbb{D}(u)).
\end{equation*}
$\Omega=\frac12(\nabla u- \nabla u^T)$ is the skew-symmetric part of the velocity gradient and $b\in[-1,1]$. The physical coefficients $\alpha,\beta,\mu,\nu,K$ are constants satisfying $\alpha,\beta, K,\mu, \nu>0.$
As pointed out by Bhave, Armstrong and Brown (\cite{Bhave 1991}), the diffusion coefficient $\mu$ is significantly smaller than the other effects. Thus some early works on the mathematical theory of the system \eqref{Oldroyd_B} focused on the non-diffusive case (i.e. $\nu>0,\mu = 0$ in \eqref{Oldroyd_B}). In this case, the model \eqref{Oldroyd_B} without the diffusive term was first introduced by {O}ldroyd (\cite{Oldroyd 1958}) to describe the behavior of viscoelastic fluids, which consist of both viscous and elastic components, and thus behave as a viscous fluid in some circumstances and as an elastic solid in others. For the initial-boundary value problem, Guillop\'{e} and Saut (\cite{Guillo 1990}) established the local wellposedness of strong solutions in the Sobolev space $H^s$ and obtained the global existence and uniqueness with small initial data and a small coupling parameter $\alpha$. Later, Fern\'{a}ndez-Cara, Guill\'{e}n, and Ortega (\cite{Ortega 1998}) extended the result to the $L^p$ setting. Molinet and Talhouk (\cite{Molinet 2004}) proved that the results obtained in \cite{Guillo 1990} remain true without any restriction on the smallness of the coupling parameter. When considering exterior domains, one needs to overcome the difficulties caused by both the boundary effect and the unboundedness of the domain. Hieber, Naito, and Shibata (\cite{Hieber Naito 2012}) obtained a unique global strong solution with small initial data and a small coupling parameter, see also \cite{Fang Hieber Zi 2013} by Fang, Hieber, and Zi for the non-small coupling parameter case. Chemin and Masmoudi (\cite{Chemin 2001}) studied the global wellposedness in the framework of critical Besov spaces, and some blow-up criteria were also obtained. See also \cite{Chen Miao 2008, Zi 2014} for the case of a non-small coupling parameter in critical Besov spaces. Lions and Masmoudi (\cite{Lions 2000}) considered the case $b=0$ and proved the existence of global weak solutions for arbitrarily large initial data; for the case $b\neq0$, the problem is still open. For some studies of blow-up criteria, please refer to \cite{Lei 2010, Kupferman 2008}. Lei (\cite{Lei 2006}) obtained the global existence of classical solutions via the incompressible limit in periodic domains. Recently, Hieber, Wen, and Zi (\cite{Hieber Wen 2019}) studied the long time behavior of the solutions in three dimensions and obtained the same decay rates as for the heat equation, see also the extension by Huang, Wang, Wen, and Zi (\cite{Huang 2022}). For the case of infinite Weissenberg number, an energetic variational approach was first introduced by Lin, Liu, and Zhang (\cite{Lin-Liu-Zhang}) to understand the physical structure of the related systems (see for instance \cite{Hu-Lin,Hu-Wu,Lai,Lei1,Lei2,Lin} for more progress).
For the diffusive model (i.e. $\mu > 0$ in \eqref{Oldroyd_B}),
Constantin and Kliegl (\cite{Constantin 2012}) proved the global wellposedness of strong solutions for the two-dimensional Cauchy problem with large initial data and $\nu>0$. For the inviscid case, Elgindi and Rousset (\cite{Elgindi Rousset 2015}) proved that the problem \eqref{Oldroyd_B} is globally wellposed in $\mathbb{R}^2$ provided that the initial data are small enough. Later, Elgindi and Liu (\cite{Elgindi Liu 2015}) extended the results to the three-dimensional case. Very recently, Huang, Wang, Wen and Zi (\cite{Huang 2022}) obtained the optimal decay estimates with vanishing viscosity ($\nu\geq 0$) in three dimensions. When $\nu=0$, Deng, Luo and Yin (\cite{Yin_Deng}) obtained the global wellposedness of strong solutions and the $H^1$ time-decay rate $(1+t)^{-\frac12}$ with small initial data in $\mathbb{R}^2$. When $\nu=0$ and $Q=0$, Elgindi and Rousset (\cite{Elgindi Rousset 2015})
established the global existence and uniqueness of strong solutions in $\mathbb{R}^2$.
More precisely, they proved the following result with the diffusion coefficient $\mu>0$.
\begin{pro}[Theorem 1.1, \cite{Elgindi Rousset 2015}]\label{proposition_1}
Assume that the initial data satisfy $(u_0,\tau_0)\in H^{s}(\mathbb{R}^2)$ with $\mathrm{div}\, u_0 = 0$, $\tau_0$ symmetric, and $s>2$. Then there exists a unique global solution $(u,\tau)\in C([0,\infty);H^{s}(\mathbb{R}^2))$ to the initial-value problem of \eqref{Oldroyd_B} with $\nu=0$ and $Q=0$.
\end{pro}
It is interesting to see whether a global solution as in Proposition \ref{proposition_1} still exists in the non-diffusive case.
\subsection{Main results}
Our aim in this paper is to investigate the global-in-time existence and uniqueness and the optimal time-decay rates of the solutions to the initial-value problem of \eqref{Oldroyd_B_d}. The first main result concerning the global existence and uniqueness is stated as follows.
\begin{theorem}\label{wellposedness}
Assume that $(u_0,\tau_0)\in H^3(\mathbb{R}^2)$ with $\mathrm{div} \,u_0 = 0$ and $\tau_0$ symmetric. Then there exists a sufficiently small constant $\epsilon_0 >0$ such that
the Cauchy problem \eqref{Oldroyd_B_d} admits a unique global solution $(u,\tau)\in L^\infty([0,\infty);H^3(\mathbb{R}^2))$ satisfying the following uniform regularity estimate:
\begin{equation*}
\|(u,\tau)(t)\|_{H^3}^2 + \int_0^t (\|\nabla u(s)\|_{H^2}^2 + \|\tau(s)\|_{H^3}^2 ){\rm d}s \leq C\|(u_0,\tau_0)\|_{H^3}^2,
\end{equation*}
provided that $\|(u_0,\tau_0)\|_{H^3} \leq \epsilon_0.$
\end{theorem}
Based on the global existence and uniqueness of the solution, we get the second main result concerning the time-decay estimates.
\begin{theorem}\label{thm_OB_d_decay}
Under the assumptions of Theorem \ref{wellposedness}, assume in addition that $(u_0,\tau_0)\in L^1(\mathbb{R}^2)$. Then the following optimal time-decay estimates of the solution to the problem \eqref{Oldroyd_B_d} hold.
\begin{enumerate}[i)]
\item Upper time-decay estimates of the solutions:
\begin{eqnarray}\label{opti1}
\ \|\nabla^ku(t)\|_{L^2}\le C (1+t)^{-\frac12-\frac{k}{2}},\ k=0,1,2,3,
\end{eqnarray}
and
\begin{eqnarray}\label{opti2}
\ \|\nabla^{k}\tau(t)\|_{L^2}\le C(1+t)^{-1-\frac{k}{2}},\ k=0,1,2,3,
\end{eqnarray}
for all $t>0$, where $C$ is a positive constant independent of time.
\item In addition, assume that $\Big|\int_{\mathbb{R}^2}u_0(x){\rm d}x\Big| = c_2>0.$ Then there exists a positive time $t_1=t_1(\beta)$ such that
\begin{eqnarray}\label{opti3}
\|\nabla^ku(t)\|_{L^2}\ge \frac{1}{C} (1+t)^{-\frac12-\frac{k}{2}},\ k=0,1,2,3,
\end{eqnarray}
and
\begin{eqnarray}\label{opti4}
\|\nabla^{k}\tau(t)\|_{L^2}\ge \frac{1}{C} (1+t)^{-1-\frac{k}{2}},\ k=0,1,2,3,
\end{eqnarray}
for all $t\geq t_1$.
\end{enumerate}
\end{theorem}
\begin{rem}
For any $\mu>0$, Theorem \ref{thm_OB_d_decay} still holds for the system with diffusion.
\end{rem}
\subsection{Main ideas}
In order to establish the global wellposedness result, we choose (\ref{Oldroyd_B_d}) with the diffusive term $-\mu\Delta\tau$ as an approximate system. To obtain regularity estimates that are uniform in $\mu$, the diffusive term cannot play much of a role; instead we make full use of the damping term $\beta\tau$. Combining this with some compactness arguments, the unique global solution of the Cauchy problem for \eqref{Oldroyd_B_d} can be obtained via the vanishing diffusion limit.
To obtain some optimal time-decay estimates of the solution, the main challenge is to derive the sharp decay rate of the solution itself in the $L^2$ norm, due to the lower dimension. In fact, one can see from Lemma \ref{lemma_Greenfunction_7} that the time-decay rates of the low-frequency part of the solution to the linearized system \eqref{Oldroyd_B_d} decrease as the dimension does. Our main strategy is to use spectral analysis together with an energy method in which the low-frequency region is bounded by a fixed radius, in order to get the sharp time-decay rates of the higher order derivatives of the solution. However, this does not seem to work for the lower order. More specifically, inspired by \cite{Dong 2006}, the estimate $$\|u(t)\|_{L^2}\le C (1+t)^{-\frac14}$$ can be derived by using the observation that $$(1+t)^{\frac{1}{2}}\| \nabla u(t)\|_{L^2}\longrightarrow 0 \,\,\,\,\text{as}\,\,\,\,t\rightarrow\infty,$$ together with Lemma \ref{lemma_Greenfunction_8}; see Lemma \ref{lemma_upper_decay} for more details. To get the sharp time-decay rate $$\|u(t)\|_{L^2}\le C (1+t)^{-\frac12},$$ if we replace the upper bound of the low frequency $g_1^2(t)$ in \eqref{new_H1_L2_29} by a constant and use Lemma \ref{lemma_Greenfunction_7}, then an integral like $\int_0^t(1+t-s)^{-\frac12}(1+s)^{-1}{\rm d}s$ appears and it cannot be dominated by $(1+t)^{-\frac12}$. The Fourier splitting method, in which the upper bound of the low-frequency region depends on a function of time, can overcome this difficulty.
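To illustrate the Fourier splitting idea in its simplest form, consider the following schematic sketch (stated for a generic energy inequality with dissipation $\|\nabla u\|_{L^2}^2$; the exponent $m$, the constant $c$ and the radius $\rho(t)$ are chosen only for illustration and are not the quantities used in Section \ref{Section_5}). Suppose that
\begin{equation*}
\frac{\rm d}{{\rm d}t}\|u(t)\|_{L^2}^2 + c\|\nabla u(t)\|_{L^2}^2\le 0.
\end{equation*}
By Plancherel's theorem, for any radius $\rho(t)>0$,
\begin{equation*}
\|\nabla u\|_{L^2}^2\ge \rho(t)^2\int_{|\xi|\ge\rho(t)}|\hat u|^2\,{\rm d}\xi = \rho(t)^2\|u\|_{L^2}^2 - \rho(t)^2\int_{|\xi|\le\rho(t)}|\hat u|^2\,{\rm d}\xi.
\end{equation*}
Choosing $\rho(t)^2=\frac{m}{c(1+t)}$ and multiplying the energy inequality by $(1+t)^m$ gives
\begin{equation*}
\frac{\rm d}{{\rm d}t}\Big((1+t)^m\|u(t)\|_{L^2}^2\Big)\le m(1+t)^{m-1}\int_{|\xi|\le\rho(t)}|\hat u(\xi,t)|^2\,{\rm d}\xi,
\end{equation*}
so that the decay of $\|u(t)\|_{L^2}$ is reduced to estimating the low-frequency part of $\hat u$ over a ball whose radius shrinks in time.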
The rest of the paper is organized as follows. In Section \ref{Section_2} we prove the uniform-in-$\mu$ regularity estimates for the approximate system \eqref{Oldroyd_B_1} and obtain Theorem \ref{approximate solution}. In Section \ref{Section_4} we use the vanishing diffusion limit technique to obtain the unique global solution of system \eqref{Oldroyd_B_d} and finish the proof of Theorem \ref{wellposedness}. In Section \ref{Section_5} we first analyze the linear part of the system \eqref{Oldroyd_B_d} and obtain the corresponding estimates of the Green functions and of the low-frequency part of the solution to \eqref{Oldroyd_B_d}, and then we obtain the optimal time-decay rates for $u$ and $\tau$ and prove Theorem \ref{thm_OB_d_decay}.
Throughout the rest of the paper, let $C$ denote a generic positive constant depending on some known constants but independent of $\mu$, $\delta$, $t$, and $\eta_i$ for $i=1,2,3$.
\section{Uniform regularity}\label{Section_2}
To begin with, we use the following initial-value problem as an approximation of the problem \eqref{Oldroyd_B_d} as $\mu \to 0$, namely,
\begin{eqnarray} \label{Oldroyd_B_1}
\begin{cases}
\partial_tu^\mu+(u^\mu\cdot\nabla) u^\mu+\nabla p^\mu=K \,{\rm div}\tau^\mu,\\
\partial_t\tau^\mu+(u^\mu\cdot\nabla)\tau^\mu-\mu\Delta\tau^\mu+\beta\tau^\mu=\alpha\mathbb{D}(u^\mu),\\
{\rm div}\,u^\mu=0, (u^\mu,\tau^\mu)(x,0) = (u_0,\tau_0).
\end{cases}
\end{eqnarray}
The global wellposedness of problem \eqref{Oldroyd_B_1} for fixed $\mu>0$ was already stated in Proposition \ref{proposition_1}. In this section, we will establish the uniform regularity of the solutions to the problem \eqref{Oldroyd_B_1}, i.e.,
\begin{theorem}\label{approximate solution}
Suppose that $(u_0,\tau_0)\in H^3(\mathbb{R}^2)$ with $\mathrm{div} \,u_0 = 0$ and $\tau_0$ symmetric. Then there exists a sufficiently small constant $\epsilon_0 >0$ independent
of $\mu$ and $t$, such that the solutions to the Cauchy problem \eqref{Oldroyd_B_1} satisfy the following uniform estimates:
\begin{equation*}\label{uniform_estimates}
\|(u^\mu,\tau^\mu)(t)\|_{H^3}^2 + \int_0^t (\|\nabla u^\mu(s)\|_{H^2}^2 + \|\tau^\mu(s)\|_{H^3}^2 + \mu\|\nabla\tau^\mu(s)\|_{H^3}^2){\rm d}s \leq C\|(u_0,\tau_0)\|_{H^3}^2,
\end{equation*}
for all $t>0$, provided that $\|(u_0,\tau_0)\|_{H^3} \leq \epsilon_0.$
\end{theorem}
For simplicity, we use $(u,\tau)$ to represent $(u^\mu,\tau^\mu)$. Before proving Theorem \ref{approximate solution}, we need some reformulations of the original system which are motivated by \cite{Zi 2014} and the references therein. More specifically, applying the Leray projection operator $\mathbb{P}$ to the first equation of \eqref{Oldroyd_B_1} and the operator $\mathbb{P}{\rm div}\,$ to the second equation of \eqref{Oldroyd_B_1} respectively, we obtain that
\begin{eqnarray} \label{OB_d_1}
\begin{cases}
\partial_tu+\mathbb{P}\left(u\cdot\nabla u\right)=K\, \mathbb{P}{\rm div}\tau,\\
\partial_t\mathbb{P}{\rm div}\tau+\mathbb{P}{\rm div}\left(u\cdot\nabla\tau\right)-\mu\,\mathbb{P}{\rm div}\Delta\tau+\beta\,\mathbb{P}{\rm div}\tau=\frac{\alpha}{2}\Delta u.
\end{cases}
\end{eqnarray}
Then, applying $\Lambda^{-1}=(\sqrt{-\Delta})^{-1}$ to (\ref{OB_d_1})$_2$ and denoting by
\begin{eqnarray}\label{sigma}
\sigma := \Lambda^{-1}\mathbb{P}{\rm div}\tau,
\end{eqnarray} we can rewrite (\ref{OB_d_1}) as follows:
\begin{eqnarray} \label{u_sigma_d}
\begin{cases}
\partial_tu-K \Lambda\sigma=\mathcal{F}_1,\\
\partial_t\sigma -\mu\Delta\sigma +\beta\sigma+\frac{\alpha}{2}\Lambda u=\mathcal{F}_2,
\end{cases}
\end{eqnarray} where
\begin{eqnarray*}
\mathcal{F}_1=-\mathbb{P}\left(u\cdot\nabla u\right),\
\mathcal{F}_2=-\Lambda^{-1}\mathbb{P}{\rm div}\left(u\cdot\nabla\tau\right).
\end{eqnarray*}
Here $\hat{\sigma}^j=i\left(\delta_{j,k}-\frac{\xi_j\xi_k}{|\xi|^2}\right)\frac{\xi_l}{|\xi|}\hat{\tau}^{l,k}$ (with summation over the repeated indices $k$ and $l$), where $\hat{f}$ denotes the Fourier transform of $f$.
It is worth noticing that for any $u\in L^2(\mathbb{R}^2)$, there holds
\begin{equation*}
\mathbb{P}(u)_i=u_i-\sum_{k=1}^{2}R_iR_k u_k,
\end{equation*}
where $R_iR_k=(-\Delta)^{-1}\partial_i\partial_k$.
It is not difficult to get that
\begin{equation}\label{bu_7}
\|\mathbb{P}u\|_{L^2}^2\le C \|u\|_{L^2}^2.
\end{equation}
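In fact, a short sketch (recorded here only for the reader's convenience, with the standard $L^2$ normalization of the Fourier transform) gives \eqref{bu_7} with $C=1$: the Fourier symbol of $\mathbb{P}$ is the matrix $\mathrm{Id}-\frac{\xi\otimes\xi}{|\xi|^2}$, the orthogonal projection onto the subspace orthogonal to $\xi$, whose operator norm is at most $1$, so that, by Plancherel's theorem,
\begin{equation*}
\|\mathbb{P}u\|_{L^2}^2=\int_{\mathbb{R}^2}\Big|\Big(\mathrm{Id}-\frac{\xi\otimes\xi}{|\xi|^2}\Big)\hat{u}(\xi)\Big|^2\,{\rm d}\xi\le\int_{\mathbb{R}^2}|\hat{u}(\xi)|^2\,{\rm d}\xi=\|u\|_{L^2}^2.
\end{equation*}
The same reasoning, applied to the symbol of $\Lambda^{-1}\mathbb{P}\,{\rm div}$, which is also bounded by $1$, gives the estimate \eqref{bu_8} below.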
Combining \eqref{sigma} and \eqref{bu_7}, we can estimate $\sigma$ as follows:
\begin{equation}\label{bu_8}
\|\nabla^k\sigma\|_{L^2}^2\le C\|\nabla^k\tau\|_{L^2}^2,
\end{equation}for $k = 0,1,2,3$.
The proof of Theorem \ref{approximate solution} relies heavily on the following proposition.
\begin{pro}\label{Prop2}
Under the conditions of Theorem \ref{approximate solution}, there exist sufficiently small positive constants $\epsilon_0$ and $\delta$ independent of $\mu$ and $T$ such that
if
\begin{equation*}\label{apriori-assum}
\sup_{0\leq s \leq T}\|(u,\tau)(s)\|_{H^3}\leq \delta,
\end{equation*}
for any given $T>0$, there holds
\begin{equation*}\label{apriori-result}
\sup_{0\leq s \leq T}\|(u,\tau)(s)\|_{H^3}\leq \frac{\delta}{2},
\end{equation*}
provided that $\|(u_0,\tau_0)\|_{H^3} \leq \epsilon_0.$
\end{pro}
The proof of Proposition \ref{Prop2} consists of the following Lemmas \ref{lemma_regularity_1}, \ref{lemma_regularity_2} and \ref{lemma_regularity_3}.
\begin{lemma}\label{lemma_regularity_1}
Under the assumptions of Proposition \ref{Prop2}, there exists a sufficiently small positive constant $\eta_1$ independent of $\mu, T$ such that
\begin{equation}\label{est_H1}
\begin{split}
& \udt (\alpha\| u\|_{H^1}^2 + K\|\tau\|_{H^1}^2 + \eta_1\langle\Lambda u, \sigma\rangle) + \frac{\beta K}{2}\|\tau\|_{H^1}^2 +\frac{\eta_1\alpha}{4}\|\Lambda u\|_{L^2}^2 + \mu K\|\nabla\tau\|_{H^1}^2
\leq 0,
\end{split}
\end{equation}
for all $0\leq t \leq T$.
\end{lemma}
\begin{proof}
Multiplying (\ref{Oldroyd_B_1})$_1$ and (\ref{Oldroyd_B_1})$_2$ by $\alpha u$ and $K \tau$, respectively, summing the results up, and using integration by parts, we have
\begin{equation}\label{est_L2}
\begin{split}
&\frac12 \udt (\alpha\|u\|_{L^2}^2 + K\|\tau\|_{L^2}^2) + \beta K\|\tau\|_{L^2}^2 + \mu K\|\nabla\tau\|_{L^2}^2 = 0.
\end{split}
\end{equation}
Similarly, multiplying $\nabla$(\ref{Oldroyd_B_1})$_1$ and $\nabla$(\ref{Oldroyd_B_1})$_2$ by $\alpha\nabla u$ and $K \nabla \tau$, respectively, we have
\begin{equation*}
\begin{split}
&\frac12 \udt (\alpha\|\nabla u\|_{L^2}^2 + K\|\nabla\tau\|_{L^2}^2) + \beta K\|\nabla\tau\|_{L^2}^2 + \mu K\|\nabla^2\tau\|_{L^2}^2\\
=& - \langle K \nabla(u\cdot\nabla\tau),\nabla\tau \rangle
\,\le\, K\|\nabla u\|_{L^\infty}\|\nabla\tau\|_{L^2}^2
\,\le\, C\delta K \|\nabla\tau\|_{L^2}^2.
\end{split}
\end{equation*}
Here we used the two-dimensional Sobolev embedding $H^2(\mathbb{R}^2)\hookrightarrow L^\infty(\mathbb{R}^2)$ and the a priori assumption, so that $\|\nabla u\|_{L^\infty}\le C\| u\|_{H^3}\le C\delta$. Taking $\delta\le \frac{\beta}{2C}$, we obtain
\begin{equation}\label{est_first}
\begin{split}
& \frac{1}{2}\udt (\alpha\|\nabla u\|_{L^2}^2 + K\|\nabla\tau\|_{L^2}^2) + \frac{\beta K}{2}\|\nabla\tau\|_{L^2}^2 + \mu K\|\nabla^2\tau\|_{L^2}^2
\leq 0.
\end{split}
\end{equation}
To derive the dissipative estimate of the velocity gradient, the equation of $\sigma$ plays an important role. More specifically, multiplying $\Lambda$(\ref{u_sigma_d})$_1$ and (\ref{u_sigma_d})$_2$ by $\sigma$ and $\Lambda u$, respectively, summing the results up, and using integration by parts, we have
\begin{equation}\label{bu}
\begin{split}
&\partial_t\langle\Lambda u, \sigma\rangle + \frac{\alpha}{2}\|\Lambda u\|_{L^2}^2\\
= &\Big(K\|\Lambda \sigma\|_{L^2}^2 + \langle \mu\Delta\sigma,\Lambda u\rangle - \langle\beta\sigma,\Lambda u \rangle\Big)\\ &- \Big(\langle \Lambda\mathbb{P}(u\cdot \nabla u),\sigma\rangle + \langle \Lambda^{-1}\mathbb{P}\mathrm{div}(u\cdot \nabla \tau),\Lambda u\rangle\Big)\\
=:&\, I_1 - I_2.
\end{split}
\end{equation}
For $I_1$ and $I_2$, using (\ref{bu_7}), we have that
\begin{equation*}\label{bu_1}
\begin{split}
|I_1|&\le K\|\Lambda \sigma\|_{L^2}^2 + \frac{\alpha}{16}\|\Lambda u\|_{L^2}^2 + \frac{4\mu^2}{\alpha}\|\Delta\sigma\|_{L^2}^2 + \frac{\alpha}{16}\|\Lambda u\|_{L^2}^2 + \frac{4\beta^2}{\alpha}\|\sigma\|_{L^2}^2,\\
|I_2|&\le \frac12\|\Lambda \sigma\|_{L^2}^2
+ \frac12\|\mathbb{P}(u\cdot \nabla u)\|_{L^2}^2 + \frac{\alpha}{16}\|\Lambda u\|_{L^2}^2 + \frac{4}{\alpha}\|\Lambda^{-1}\mathbb{P}\mathrm{div}(u\cdot \nabla \tau)\|_{L^2}^2\\
&\le \frac12\|\Lambda \sigma\|_{L^2}^2 + C\|u\cdot \nabla u\|_{L^2}^2 + \frac{\alpha}{16}\|\Lambda u\|_{L^2}^2 + C\|u\cdot \nabla \tau\|_{L^2}^2.
\end{split}
\end{equation*}
Then, substituting the above inequalities into (\ref{bu}), we obtain that
\begin{equation}\label{est_first_u}
\begin{split}
&\partial_t\langle\Lambda u, \sigma\rangle + \frac{\alpha}{2}\|\Lambda u\|_{L^2}^2\\
\leq & \,(\frac{3\alpha}{16} + C\delta^2)\|\Lambda u\|_{L^2}^2 + (K + \frac{4\beta^2}{\alpha} + \frac12)\|\sigma\|_{H^1}^2 + C\delta^2\|\tau\|_{H^1}^2 + C\mu^2\|\nabla^2\tau\|_{L^2}^2.
\end{split}
\end{equation}
Taking $\delta$ and $\eta_1>0$ small enough, summing \eqref{est_L2}, \eqref{est_first} and $\eta_1$\eqref{est_first_u}, and using \eqref{bu_8}, we get (\ref{est_H1}).
\end{proof}
In a similar way, we can obtain the following higher order estimates.
\begin{lemma}\label{lemma_regularity_2}
Under the assumptions of Proposition \ref{Prop2}, there exists a sufficiently small positive constant $\eta_2=\frac{\eta_1}{4}$ independent of $\mu, T$ such that
\begin{equation}\label{est_H2}
\begin{split}
\udt (\alpha\| u\|_{H^2}^2 &+ K\|\tau\|_{H^2}^2 + \eta_1\langle\Lambda u, \sigma\rangle + \eta_2\langle\Lambda^2 u,\Lambda \sigma\rangle)\\ &+ \frac{\beta K}{4}\|\tau\|_{H^2}^2 + \frac{\eta_2\alpha}{8}\|\Lambda u\|_{H^1}^2 + \mu K\|\nabla\tau\|_{H^2}^2
\leq 0,
\end{split}
\end{equation} for all $0\leq t \leq T$.
\end{lemma}
\begin{proof}
Multiplying $\nabla^2$(\ref{Oldroyd_B_1})$_1$ and $\nabla^2$ (\ref{Oldroyd_B_1})$_2$ by $\alpha \nabla^2 u$ and $K \nabla^2 \tau$, respectively, summing the results up, and using integration by parts, we have
\begin{align}\label{est_second}
\begin{split}
&\frac12 \udt (\alpha\|\nabla^2 u\|_{L^2}^2 + K\|\nabla^2\tau\|_{L^2}^2) + \beta K\|\nabla^2\tau\|_{L^2}^2 + \mu K\|\nabla^3\tau\|_{L^2}^2\\
= & - \,\langle K \nabla^2(u\cdot\nabla\tau),\nabla^2\tau \rangle - \langle\alpha \nabla^2(u\cdot\nabla u),\nabla^2u \rangle\\
\leq & \,C(\|\nabla \tau\|_{L^\infty}\|\nabla^2 u\|_{L^2}\|\nabla^2 \tau\|_{L^2} + \|\nabla u\|_{L^\infty}\|\nabla^2 \tau\|_{L^2}^2 + \|\nabla u\|_{L^\infty}\|\nabla^2 u\|_{L^2}^2)\\
\leq &\, C\delta \|\nabla^2 u\|_{L^2}^2 + C\delta \|\nabla^2\tau\|_{L^2}^2.
\end{split}
\end{align}
Multiplying $\Lambda^2$(\ref{u_sigma_d})$_1$ and $\Lambda$(\ref{u_sigma_d})$_2$ by $\Lambda\sigma$ and $\Lambda^2 u$, respectively, summing the results up, and using integration by parts, we have
\begin{align}\label{bu_2}
\begin{split}
&\partial_t\langle\Lambda^2 u,\Lambda \sigma\rangle + \frac{\alpha}{2}\|\Lambda^2 u\|_{L^2}^2\\
= & \,\Big(K\|\Lambda^2 \sigma\|_{L^2}^2 + \langle \mu\Lambda\,\Delta\sigma,\Lambda^2 u\rangle - \langle\beta\Lambda\sigma,\Lambda^2 u \rangle\Big)\\ &- \Big(\langle \Lambda^2\mathbb{P}(u\cdot \nabla u),\Lambda\sigma\rangle + \langle \mathbb{P}\mathrm{div}(u\cdot \nabla \tau),\Lambda^2 u\rangle\Big)\\
=: & \,I_3 - I_4.
\end{split}
\end{align}
For $I_3$ and $I_4$, using (\ref{bu_7}), we have that
\begin{align}
|I_3|&\le K\|\Lambda^2 \sigma\|_{L^2}^2 + \frac{\alpha}{16}\|\Lambda^2 u\|_{L^2}^2 + \frac{4\mu^2}{\alpha}\|\Lambda\,\Delta\sigma\|_{L^2}^2 + \frac{\alpha}{16}\|\Lambda^2 u\|_{L^2}^2 + \frac{4\beta^2}{\alpha}\|\Lambda\sigma\|_{L^2}^2,\label{I3}\\
\nonumber |I_4|&\le \frac12\|\Lambda^2 \sigma\|_{L^2}^2
+ \frac12\|\Lambda\mathbb{P}(u\cdot \nabla u)\|_{L^2}^2
+ \frac{\alpha}{16}\|\Lambda^2 u\|_{L^2}^2 + \frac{4}{\alpha}\|\mathbb{P}\mathrm{div}(u\cdot \nabla \tau)\|_{L^2}^2\\
&\le \frac12\|\Lambda^2 \sigma\|_{L^2}^2
+ C\|\nabla(u\cdot \nabla u)\|_{L^2}^2 + \frac{\alpha}{16}\|\Lambda^2 u\|_{L^2}^2 + C\|\nabla(u\cdot \nabla \tau)\|_{L^2}^2\label{I4}.
\end{align}
Substituting (\ref{I3}) and (\ref{I4}) into (\ref{bu_2}), we get
\begin{align}\label{est_second_u}
\begin{split}
&\partial_t\langle\Lambda^2 u,\Lambda \sigma\rangle + \frac{\alpha}{2}\|\Lambda^2 u\|_{L^2}^2\\
\leq & \,(\frac{3\alpha}{16} + C\delta^2)\|\Lambda^2 u\|_{L^2}^2 + C\delta^2\|\nabla u\|_{L^2}^2\\
&+ (K + \frac{4\beta^2}{\alpha} + \frac12)\|\nabla\sigma\|_{H^1}^2 + C\delta^2\|\nabla\tau\|_{H^1}^2 + C\mu^2\|\nabla^3\tau\|_{L^2}^2,
\end{split}
\end{align}
where the facts that
\begin{equation*}\label{bu_20}
\begin{split}
\|\nabla(u\cdot \nabla u)\|_{L^2}\le \| \nabla u\|_{L^\infty}\|\nabla u\|_{L^2} + \| u\|_{L^\infty}\|\nabla^2 u\|_{L^2},\\
\|\nabla(u\cdot \nabla \tau)\|_{L^2}\le \|\nabla u\|_{L^\infty}\|\nabla \tau\|_{L^2} + \| u\|_{L^\infty}\|\nabla^2 \tau\|_{L^2},
\end{split}
\end{equation*}
are used.
Taking $\eta_2= \frac14 \eta_1$ and $\delta$ small enough, summing \eqref{est_H1}, \eqref{est_second} and $\eta_2$\eqref{est_second_u}, and using \eqref{bu_8}, we get (\ref{est_H2}).
\end{proof}
Finally, we get the following uniform estimates up to the third order.
\begin{lemma}\label{lemma_regularity_3}
Under the assumptions of Proposition \ref{Prop2}, there holds
\begin{equation}\label{eq_a_priori_est}
\begin{cases}
\|(u,\tau)(t)\|_{H^3}^2 \leq \frac{\delta^2}{4},\\[4mm]
\displaystyle\int_0^t\left(\|\nabla u(s)\|_{H^2}^2 + \|\tau(s)\|_{H^3}^2 + \mu\|\nabla\tau(s)\|_{H^3}^2\right){\rm d}s\le C,
\end{cases}
\end{equation}for all $0\leq t \leq T.$
\end{lemma}
\begin{proof}
Multiplying $\nabla^3$(\ref{Oldroyd_B_1})$_1$ and $\nabla^3$ (\ref{Oldroyd_B_1})$_2$ by $\alpha \nabla^3 u$ and $K \nabla^3 \tau$, respectively, summing the results up, and using integration by parts, we have
\begin{equation}\label{est_third}
\begin{split}
&\frac12 \udt (\alpha\|\nabla^3 u\|_{L^2}^2 + K\|\nabla^3\tau\|_{L^2}^2) + \beta K\|\nabla^3\tau\|_{L^2}^2 + \mu K\|\nabla^4\tau\|_{L^2}^2\\
= & -\, \langle K \nabla^3(u\cdot\nabla\tau),\nabla^3\tau \rangle - \langle\alpha \nabla^3(u\cdot\nabla u),\nabla^3u \rangle\\
\leq & \,C(\|\nabla u\|_{L^\infty}\|\nabla^3 \tau\|_{L^2}^2 + \|\nabla \tau\|_{L^\infty}\|\nabla^3 u\|_{L^2}\|\nabla^3 \tau\|_{L^2} + \|\nabla u\|_{L^\infty}\|\nabla^3 u\|_{L^2}^2 )\\
\leq & \,C\delta \|\nabla^3 u\|_{L^2}^2 + C\delta\|\nabla^3\tau\|_{L^2}^2.
\end{split}
\end{equation}
Similarly, multiplying $\Lambda^3$(\ref{u_sigma_d})$_1$ and $\Lambda^2$(\ref{u_sigma_d})$_2$ by $\Lambda^2\sigma$ and $\Lambda^3 u$, respectively, and using integration by parts, we have
\begin{align}\label{bu_4}
\begin{split}
&\partial_t\langle\Lambda^3 u,\Lambda^2 \sigma\rangle + \frac{\alpha}{2}\|\Lambda^3 u\|_{L^2}^2\\
= & \,\Big(K\|\Lambda^3 \sigma\|_{L^2}^2 + \langle \mu\Lambda^2\Delta\sigma,\Lambda^3 u\rangle - \langle\beta\Lambda^2\sigma,\Lambda^3 u \rangle\Big)\\ &- \Big(\langle \Lambda^3\mathbb{P}(u\cdot \nabla u),\Lambda^2\sigma\rangle + \langle \Lambda\mathbb{P}\mathrm{div}(u\cdot \nabla \tau),\Lambda^3 u\rangle\Big)\\
=: &\, I_5 - I_6.
\end{split}
\end{align}
Using (\ref{bu_7}), we can obtain the estimates of $I_5$ and $I_6$ as follows
\begin{align}
|I_5|&\le K\|\Lambda^3 \sigma\|_{L^2}^2 + \frac{\alpha}{16}\|\Lambda^3 u\|_{L^2}^2 + \frac{4\mu^2}{\alpha}\|\Lambda^2\Delta\sigma\|_{L^2}^2 + \frac{\alpha}{16}\|\Lambda^3 u\|_{L^2}^2 + \frac{4\beta^2}{\alpha}\|\Lambda^2\sigma\|_{L^2}^2,\label{I5}\\
\nonumber|I_6|&\le \frac12\|\Lambda^3 \sigma\|_{L^2}^2
+ \frac12\|\Lambda^2\mathbb{P}(u\cdot \nabla u)\|_{L^2}^2 + \frac{\alpha}{16}\|\Lambda^3 u\|_{L^2}^2 + \frac{4}{\alpha}\|\Lambda\mathbb{P}\mathrm{div}(u\cdot \nabla \tau)\|_{L^2}^2\\
&\le \frac12\|\Lambda^3 \sigma\|_{L^2}^2
+ C\|\nabla^2(u\cdot \nabla u)\|_{L^2}^2 + \frac{\alpha}{16}\|\Lambda^3 u\|_{L^2}^2 + C\|\nabla^2(u\cdot \nabla \tau)\|_{L^2}^2.\label{I6}
\end{align}
Substituting (\ref{I5}) and (\ref{I6}) into (\ref{bu_4}), we can deduce that
\begin{equation}\label{est_third_u}
\begin{split}
&\partial_t\langle\Lambda^3 u,\Lambda^2 \sigma\rangle + \frac{\alpha}{2}\|\Lambda^3 u\|_{L^2}^2\\
\leq & \,(\frac{3\alpha}{16} + C\delta^2)\|\Lambda^3 u\|_{L^2}^2 + C\delta^2\|\nabla^2 u\|_{L^2}^2\\ &+ (K + \frac{4\beta^2}{\alpha} + \frac12)\|\nabla^2\sigma\|_{H^1}^2 + C\delta^2 \|\nabla^2\tau\|_{H^1}^2 + C\mu^2 \|\nabla^4\tau\|_{L^2}^2,
\end{split}
\end{equation}
where the facts that
\begin{equation}\label{bu_21}
\begin{split}
\|\nabla^2(u\cdot \nabla u)\|_{L^2}&\le \| u\|_{L^\infty}\|\nabla^3 u\|_{L^2} + 3\| \nabla u\|_{L^\infty}\|\nabla^2 u\|_{L^2},\\
\|\nabla^2(u\cdot \nabla \tau)\|_{L^2}&\le \| u\|_{L^\infty}\|\nabla^3 \tau\|_{L^2} + 2\|\nabla u\|_{L^\infty}\|\nabla^2 \tau\|_{L^2} + \|\nabla \tau\|_{L^\infty}\|\nabla^2 u\|_{L^2},
\end{split}
\end{equation}
are used.
Letting $\eta_3:= \frac14 \eta_2$ and $\delta$ small enough, summing \eqref{est_H2}, \eqref{est_third} and $\eta_3$\eqref{est_third_u} up, and using \eqref{bu_8}, we obtain that
\begin{equation}\label{est_H3}
\begin{split}
\udt (\alpha\| u\|_{H^3}^2 &+ K\|\tau\|_{H^3}^2 + \sum_{i=1}^{3}\eta_i\langle\Lambda^i u,\Lambda^{i-1}\sigma\rangle)\\ &+ \frac{\beta K}{8}\|\tau\|_{H^3}^2 + \frac{\eta_3\alpha}{16}\|\Lambda u\|_{H^2}^2 + \mu K\|\nabla\tau\|_{H^3}^2
\leq 0.
\end{split}
\end{equation}
Since $\eta_1$, $\eta_2$ and $\eta_3$ are sufficiently small, we have that
\begin{equation}\label{bu_9}
\begin{split}
\frac12(\alpha\| u\|_{H^3}^2 + K\|\tau\|_{H^3}^2)&\le \alpha\| u\|_{H^3}^2 + K\|\tau\|_{H^3}^2 + \sum_{i=1}^{3}\eta_i\langle\Lambda^i u,\Lambda^{i-1}\sigma\rangle\\ &\le 2(\alpha\| u\|_{H^3}^2 + K\|\tau\|_{H^3}^2).
\end{split}
\end{equation}
For all $0\leq t \leq T,$ integrating (\ref{est_H3}) over $[0, t]$ and utilizing (\ref{bu_9}), we have that
\begin{equation}\label{bu_10}
\begin{split}
&\frac12(\alpha\| u(t)\|_{H^3}^2 + K\|\tau(t)\|_{H^3}^2) \\&+\int_0^t\left(\frac{\eta_3\alpha}{16}\|\nabla u(s)\|_{H^2}^2 + \frac{\beta K}{8}\|\tau(s)\|_{H^3}^2 + \mu K\|\nabla\tau(s)\|_{H^3}^2\right){\rm d}s\\
\le &\,2(\alpha\| u_0\|_{H^3}^2 + K\|\tau_0\|_{H^3}^2)\,\le\,(2\alpha + 2K )\epsilon_0^2.
\end{split}
\end{equation}
Letting
\begin{equation*}\label{bu_11}
\begin{split}
\frac{4(\alpha+K)}{\min\{\alpha,K\}}\epsilon_0^2\le \frac{\delta^2}{4},
\end{split}
\end{equation*}
we get (\ref{eq_a_priori_est})$_1$ from (\ref{bu_10}). Using (\ref{bu_10}) again, we get (\ref{eq_a_priori_est})$_2$ for some known positive constant $C$.
\end{proof}
With Lemma \ref{lemma_regularity_3}, we finish the proof of Proposition \ref{Prop2}. Now we come to the proof of Theorem \ref{approximate solution} by using the standard continuity method.
\subsection*{Proof of Theorem \ref{approximate solution}}
For any fixed $\mu>0$, since
\begin{equation*}
\|(u_0,\tau_0)\|_{H^3}\le \epsilon_0\le \frac{\delta}{2},
\end{equation*}
and
\begin{equation*}
\|(u^\mu,\tau^\mu)(t)\|\in C([0,\infty);H^{3}(\mathbb{R}^2)),
\end{equation*}
there exists a time $T=T(\mu)>0$, such that
\begin{equation}\label{conclusion}
\|(u^\mu,\tau^\mu)(t)\|_{H^3}\leq \delta,
\end{equation} for all $t\in[0,T]$.
Let $T^*$ be the maximal time such that (\ref{conclusion}) holds; then $T^*\geq T>0$.
Suppose that $T^*<+\infty$. Then, the continuity of the solution with respect to time yields that (\ref{conclusion}) holds on $[0,T^*]$, i.e.,
\begin{equation}\label{continuity_method}
\sup_{0\le s \le T^*}\|(u^\mu,\tau^\mu)(s)\|_{H^3}\le \delta.
\end{equation}
Then (\ref{continuity_method}) and Proposition \ref{Prop2} conclude that
\begin{equation}\label{apriori-assum1}
\|(u^\mu,\tau^\mu)(t)\|_{H^3} \leq \frac{\delta}{2},
\end{equation} for all $t\in[0,T^*].$
Using (\ref{apriori-assum1}) and the continuity of the solution with respect to time again, we obtain that
$T^*$ in (\ref{continuity_method}) can be replaced by $T^*+\sigma_0$ for a positive constant $\sigma_0$. This is a contradiction with the definition of $T^*$. Therefore $T^*$ must be $+\infty$.
Hence for all $\mu>0$ and $t> 0$, there holds
\begin{equation*}
\begin{split}
\|(u^\mu,\tau^\mu)(t)\|_{H^3}\le\delta.
\end{split}
\end{equation*}
This together with (\ref{bu_10}) finishes the proof of Theorem \ref{approximate solution}.
\section{Global existence and uniqueness}\label{Section_4}
This section aims to complete the proof of Theorem \ref{wellposedness}. Recall the uniform estimates stated in Theorem \ref{approximate solution}, i.e.,
\begin{equation*}
\|(u^\mu,\tau^\mu)(t)\|_{H^3}^2 + \int_0^t (\|\nabla u^\mu(s)\|_{H^2}^2 + \|\tau^\mu(s)\|_{H^3}^2 + \mu\|\nabla\tau^\mu(s)\|_{H^3}^2){\rm d}s \leq C\|(u_0,\tau_0)\|_{H^3}^2.
\end{equation*}
Combining the above inequality with the equation \eqref{Oldroyd_B_1}, we can easily obtain that
\begin{equation*}
\|(\partial_t u^\mu,\partial_t \tau^\mu)(t)\|_{H^2}^2\le C.
\end{equation*}
By virtue of some standard weak (or weak*) convergence results and the Aubin-Lions lemma (see for instance \cite{Simon 1987}), there exists a $(u,\tau)\in L^\infty([0,\infty);H^3(\mathbb{R}^2))$ which is a limit of $(u^\mu,\tau^\mu)$ (taking a subsequence if necessary) in a suitable sense and solves \eqref{Oldroyd_B_d}.
For the uniqueness, we suppose that there are two pairs of solutions $(u_1,\tau_1)$ and $(u_2,\tau_2)$. Denote $w=u_1-u_2$ and $v=\tau_1-\tau_2$, which satisfy
\begin{equation} \label{convergence_6}
\begin{cases}
\partial_tw+(w\cdot\nabla) u_1 +u_2\cdot\nabla w +\nabla (p_1-p_2)=K\, {\rm div}\,v,\\
\partial_tv+(w\cdot\nabla)\tau_1+u_2\cdot\nabla v+\beta v=\alpha\mathbb{D}(w).
\end{cases}
\end{equation}
Multiplying (\ref{convergence_6})$_1$ and (\ref{convergence_6})$_2$ by $\alpha w$ and $K v$, respectively, summing the results up, and using integration by parts, we have
\begin{equation*}
\begin{split}
&\frac12 \udt (\alpha\|w\|_{L^2}^2 + K\|v\|_{L^2}^2) + \beta K\|v\|_{L^2}^2\\
\le&-\alpha\langle w\cdot\nabla u_1,w\rangle- \alpha\langle u_2\cdot\nabla w,w\rangle-K\langle w\cdot\nabla\tau_1,v\rangle-K\langle u_2\cdot\nabla v,v\rangle\\
\le&\,C(\alpha\|w\|_{L^2}^2 + K\|v\|_{L^2}^2),
\end{split}
\end{equation*}
which implies
\begin{equation*}
\alpha\|w(t)\|_{L^2}^2 + K\|v(t)\|_{L^2}^2\le e^{Ct}(\alpha\|w(0)\|_{L^2}^2 + K\|v(0)\|_{L^2}^2)=0.
\end{equation*}
Thus, $w=u_1-u_2=0$ and $v=\tau_1-\tau_2=0$. The proof of the uniqueness is complete.
\section{Decay estimates for the nonlinear system}\label{Section_5}
In this section, we will establish the upper and lower decay estimates for the solutions of the Cauchy problem \eqref{Oldroyd_B_d} and finish the proof of Theorem \ref{thm_OB_d_decay}. We consider $\mu=0$ in \eqref{u_sigma_d}:
\begin{eqnarray} \label{u_sigma_1}
\begin{cases}
\partial_tu-K \Lambda\sigma=\mathcal{F}_1,\\
\partial_t\sigma +\beta\sigma+\frac{\alpha}{2}\Lambda u=\mathcal{F}_2,
\end{cases}
\end{eqnarray} where
\begin{eqnarray*}
\mathcal{F}_1=-\mathbb{P}\left(u\cdot\nabla u\right),\
\mathcal{F}_2=-\Lambda^{-1}\mathbb{P}{\rm div}\left(u\cdot\nabla\tau\right).
\end{eqnarray*}
\subsection{Some estimates of the low-frequency parts}
Consider the linear part of the system \eqref{u_sigma_1}, i.e.,
\begin{equation} \label{Greenfunction_1}
\begin{cases}
\partial_tu-K \Lambda\sigma=0,\\
\partial_t\sigma +\beta\sigma+\frac{\alpha}{2}\Lambda u=0.
\end{cases}
\end{equation}
Note that the 3{D} case of \eqref{Greenfunction_1} with viscosity and diffusion has already been analyzed by Huang, the second author, the third author, and Zi (\cite{Huang 2022}) (see Lemmas 2.1, 4.1, 4.5 and 4.6 and Proposition 2.3 therein). After some slight modifications, we can get similar results in the 2{D} case.
To begin with, applying Fourier transform to system \eqref{Greenfunction_1}, we get that
\begin{equation} \label{Greenfunction_2}
\begin{cases}
\partial_t\hat{u}^j-K|\xi|\hat{\sigma}^j=0,\\
\partial_t\hat{\sigma}^j+\beta\hat{\sigma}^j+\frac{\alpha}{2}|\xi| \hat{u}^j=0.
\end{cases}
\end{equation}
\begin{lemma}\label{lemma_Greenfunction_1}
(Lemma 2.1, \cite{Huang 2022} for the case $\mu, \varepsilon=0$) The system \eqref{Greenfunction_2} can be solved as follows:
\begin{equation*}
\begin{cases}
\hat{u} = \mathcal{G}_3 \hat{u}_0 + K|\xi|\mathcal{G}_1\hat{\sigma}_0,\\
\hat{\sigma} = -\frac{\alpha}{2}|\xi|\mathcal{G}_1 \hat{u}_0 + \mathcal{G}_2\hat{\sigma}_0,
\end{cases}
\end{equation*}
where
\begin{equation}\label{lemma_Greenfunction_3+1}
\begin{split}
\mathcal{G}_1(\xi,t)=\frac{e^{\lambda_+t}-e^{\lambda_-t}}{\lambda_+-\lambda_-}, \ \mathcal{G}_2(\xi,t)&=\frac{\lambda_+e^{\lambda_+t}-\lambda_-e^{\lambda_-t}}{\lambda_+-\lambda_-}, \\ \mathcal{G}_3(\xi,t)=\frac{\lambda_+e^{\lambda_-t}-\lambda_-e^{\lambda_+t}}{\lambda_+-\lambda_-},\,
\lambda_{\pm}&=\frac{-\beta\pm\sqrt{\beta^2-2\alpha K|\xi|^2}}{2}.
\end{split}
\end{equation}
\end{lemma}
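For the reader's convenience, we briefly indicate where the exponents $\lambda_{\pm}$ come from (this is the standard computation; the matrix $A(\xi)$ below is introduced only for this sketch). For each fixed $\xi$ and each component $j$, \eqref{Greenfunction_2} is a linear system of ODEs for $(\hat{u}^j,\hat{\sigma}^j)$ with coefficient matrix
\begin{equation*}
A(\xi)=\begin{pmatrix} 0 & K|\xi| \\[1mm] -\frac{\alpha}{2}|\xi| & -\beta \end{pmatrix},
\end{equation*}
whose eigenvalues solve the characteristic equation $\lambda^2+\beta\lambda+\frac{\alpha K}{2}|\xi|^2=0$, i.e. they are exactly the $\lambda_{\pm}$ in \eqref{lemma_Greenfunction_3+1}. The formulas for $\mathcal{G}_1$, $\mathcal{G}_2$ and $\mathcal{G}_3$ then follow by diagonalizing $A(\xi)$ when $\lambda_+\neq\lambda_-$.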
Due to the explicit expression of the solution, we can easily get some estimates as follows.
\begin{lemma}\label{lemma_Greenfunction_4}
There exist positive constants $R=R(\alpha,\beta,K)$, $\theta=\theta(\alpha,\beta,K)$ and $C=C(\alpha,\beta,K)$, such that, for any $|\xi|\leq R$ and $t>0$, it holds that
\begin{equation*}\label{lemma_Greenfunction_5}
\begin{split}
&\left|\mathcal{G}_1(\xi,t)\right|,\left|\mathcal{G}_3(\xi,t)\right|\leq Ce^{-\theta|\xi|^2t},\\
&|\mathcal{G}_2(\xi,t)|
\leq C\left(|\xi|^2 e^{-\theta|\xi|^2t} + e^{-\frac{\beta t}{2}}\right).
\end{split}
\end{equation*}
\end{lemma}
\begin{rem}
The proof of Lemma \ref{lemma_Greenfunction_4} is similar to the proofs of Proposition $2.3$ and Lemma $4.5$ in \cite{Huang 2022}, the only difference being the dimension.
\end{rem}
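The rates appearing below come from the following elementary scaling computation in two dimensions, which we record as a sketch (the positive constants $c$ and $C$ depend only on $k$, $\theta$, $\eta$ and $R$). For $t\geq 1$ and $k\geq 0$, substituting $\zeta=\sqrt{t}\,\xi$,
\begin{equation*}
\int_{|\xi|\leq R}|\xi|^{2k}e^{-2\theta|\xi|^2t}\,{\rm d}\xi = t^{-1-k}\int_{|\zeta|\leq R\sqrt{t}}|\zeta|^{2k}e^{-2\theta|\zeta|^2}\,{\rm d}\zeta \leq C\,t^{-1-k},
\end{equation*}
while for $0<t\leq 1$ the left-hand side is bounded by a constant; hence it is bounded by $C(1+t)^{-1-k}$ for all $t>0$, and taking square roots gives the rates $(1+t)^{-\frac12-\frac{k}{2}}$ below. In the other direction, for $t\geq 1$,
\begin{equation*}
\int_{|\xi|\leq R}|\xi|^{2k}e^{-2\eta|\xi|^2t}\,{\rm d}\xi \geq t^{-1-k}\int_{|\zeta|\leq R}|\zeta|^{2k}e^{-2\eta|\zeta|^2}\,{\rm d}\zeta \geq c\,(1+t)^{-1-k},
\end{equation*}
which is the type of bound used for the lower estimates in Lemma \ref{lemma_Greenfunction_11}.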
Consequently, we can obtain the upper bound of the low-frequency part of the solution satisfying \eqref{u_sigma_1}.
\begin{lemma} \label{lemma_Greenfunction_7}
Assume that $(u_0,\tau_0)\in L^1(\mathbb{R}^2)$. Then it holds for the solution to \eqref{u_sigma_1} that
\begin{equation*} \label{lemma_Greenfunction_8}
\begin{split}
\left(\int_{|\xi|\leq R}
|\xi|^{2k}|\hat{u}(t)|^2
{\rm d}\xi\right)^\frac{1}{2}
\le&\,C(1+t)^{-\frac12-\frac{k}{2}} + C\int_0^t (1+t-s)^{-\frac12-\frac{k}{2}}(\|\hat{\mathcal{F}}_1 \|_{L^\infty} + \|\hat{\mathcal{F}}_2 \|_{L^\infty})
{\rm d}s,\\
\left(\int_{|\xi|\leq R}
|\xi|^{2k}|\hat{\sigma}(t)|^2
{\rm d}\xi\right)^\frac{1}{2}
\le &\,C(1+t)^{-1-\frac{k}{2}} + C\int_0^t (1+t-s)^{-1-\frac{k}{2}}(\|\hat{\mathcal{F}}_1 \|_{L^\infty} + \|\hat{\mathcal{F}}_2 \|_{L^\infty})
{\rm d}s.
\end{split}
\end{equation*}
\end{lemma}
\begin{proof}
From Duhamel's principle, we have that
\begin{align}
\label{Greenfunction_13-1} \hat{u}(t) =& \,\mathcal{G}_3 \hat{u}_0 + K|\xi|\mathcal{G}_1\hat{\sigma}_0 + \int_0^t\mathcal{G}_3(t-s)\hat{\mathcal{F}}_1 (s) + K|\xi|\mathcal{G}_1(t-s)\hat{\mathcal{F}}_2 (s)
{\rm d}s,\\
\label{Greenfunction_13-2} \hat{\sigma}(t) =& -\frac{\alpha}{2}|\xi|\mathcal{G}_1 \hat{u}_0 + \mathcal{G}_2\hat{\sigma}_0 + \int_0^t-\frac{\alpha}{2}|\xi|\mathcal{G}_1(t-s)\hat{\mathcal{F}}_1 (s) + \mathcal{G}_2(t-s)\hat{\mathcal{F}}_2 (s){\rm d}s.
\end{align}
It follows from Lemma \ref{lemma_Greenfunction_4} and Minkowski's inequality that
\begin{align*}
&\left(\int_{|\xi|\leq R}|\xi|^{2k}|\hat{u}(t)|^2{\rm d}\xi\right)^{\frac12}\\
\le & ~C(\|\hat{u}_0\|_{L^\infty} + \|\hat{\tau}_0\|_{L^\infty})\left(\int_{|\xi|\leq R}|\xi|^{2k}e^{-2\theta|\xi|^2t}{\rm d}\xi\right)^{\frac12}\\
&+ ~C\left(\int_{|\xi|\leq R}|\xi|^{2k}\Big|\int_0^t\mathcal{G}_3(t-s)\hat{\mathcal{F}}_1 (\xi,s) + K|\xi|\mathcal{G}_1(t-s)\hat{\mathcal{F}}_2 (\xi,s){\rm d}s\Big|^2{\rm d}\xi\right)^\frac12\\
\le & ~C(1+t)^{-\frac12-\frac{k}{2}} + C\int_0^t\Big(\int_{|\xi|\leq R}|\xi|^{2k}e^{-2\theta|\xi|^2(t-s)}(|\hat{\mathcal{F}}_1 (\xi,s)|^2 + |\hat{\mathcal{F}}_2 (\xi,s)|^2){\rm d}\xi\Big)^\frac12{\rm d}s\\
\le &~C(1+t)^{-\frac12-\frac{k}{2}} + C\int_0^t(1+t-s)^{-\frac12-\frac{k}{2}}(\|\hat{\mathcal{F}}_1 (\cdot,s)\|_{L^\infty} + \|\hat{\mathcal{F}}_2 (\cdot,s)\|_{L^\infty}){\rm d}s.
\end{align*}
By similar calculations, we obtain that
\begin{align*}
&\left(\int_{|\xi|\leq R}
|\xi|^{2k}|\hat{\sigma}(t)|^2{\rm d}\xi\right)^\frac{1}{2}\\
\le& ~C\|\hat{u}_0\|_{L^\infty}\left(\int_{|\xi|\leq R}|\xi|^{2k+2}e^{-2\theta|\xi|^2t}{\rm d}\xi\right)^{\frac12} + C\|\hat{\tau}_0\|_{L^\infty}\left(\int_{|\xi|\leq R} |\xi|^{2k+4}e^{-2\theta|\xi|^2 t}+|\xi|^{2k}e^{-\beta t}{\rm d}\xi \right)^\frac12\\
&+ ~C\left(\int_{|\xi|\leq R}|\xi|^{2k}\Big|\int_0^t-\frac{\alpha}{2}|\xi|\mathcal{G}_1(t-s)\hat{\mathcal{F}}_1 (\xi,s) + \mathcal{G}_2(t-s)\hat{\mathcal{F}}_2 (\xi,s){\rm d}s\Big|^2{\rm d}\xi\right)^\frac12\\
\le& C(1+t)^{-1-\frac{k}{2}} + C\int_0^t (1+t-s)^{-1-\frac{k}{2}}(\|\hat{\mathcal{F}}_1(\cdot,s)\|_{L^\infty} + \|\hat{\mathcal{F}}_2(\cdot,s) \|_{L^\infty})
{\rm d}s.
\end{align*}
The proof of Lemma \ref{lemma_Greenfunction_7} is complete.
\end{proof}
Next, we consider the lower bound estimates of $\mathcal{G}_1(\xi,t)$ and $\mathcal{G}_3(\xi,t)$.
\begin{lemma}\label{lemma_Greenfunction_10}
Let $R$ be the constant chosen in Lemma \ref{lemma_Greenfunction_4}. There exist three positive constants $\eta=\eta(\alpha,\beta,K)$, $C=C(\alpha,\beta,K)$ and $t_1=t_1(\beta)$, such that
\begin{equation}\label{Greenfunction_16}
|\mathcal{G}_1(\xi,t)| \geq \frac{1}{C} e^{-\eta |\xi|^2 t},~~|\mathcal{G}_3(\xi,t)| \geq \frac{1}{C} e^{-\eta |\xi|^2 t}, \ {\rm for}\ {\rm all}\ |\xi| \leq R \ {\rm and}\ t\geq t_1.
\end{equation}
\end{lemma}
\begin{proof}
From Lemma \ref{lemma_Greenfunction_4}, there holds
\begin{equation} \label{Greenfunction_17}
\frac{\sqrt{2}}{2}\beta\leq \lambda_+ - \lambda_-=\sqrt{\beta^2-2\alpha K|\xi|^2}\leq \beta,
\end{equation} for all $|\xi|\leq R$, where $R$ is sufficiently small.
Noticing that
\begin{equation*}\label{Greenfunction_18}
\lambda_+ =\frac{-\alpha K|\xi|^2}{\beta+\sqrt{\beta^2-2\alpha K|\xi|^2}} \geq - \frac{\alpha K}{\beta}|\xi|^2 =:-\eta|\xi|^2,
\end{equation*}
there exists a time $t_1 = \frac{\sqrt{2}\ln 2}{\beta},$ such that
\begin{equation} \label{Greenfunction_19}
|e^{\lambda_+t}-e^{\lambda_-t}| = \left|e^{\lambda_+ t}\big(1 - e^ {-(\lambda_+-\lambda_-)t}\big)\right| \geq \frac{1}{2}e^{-\eta |\xi|^2 t},
\end{equation}
and
\begin{equation} \label{Greenfunction_20}
|\lambda_+e^{\lambda_-t}-\lambda_-e^{\lambda_+t}| = |e^{\lambda_+ t}\big(\lambda_+ e^ {-(\lambda_+-\lambda_-)t} - \lambda_-\big)| \geq (\lambda_+-\lambda_-)e^{-\eta |\xi|^2t},
\end{equation} for any $t\geq t_1$.
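Indeed (we record this elementary check only for completeness), for $t\geq t_1=\frac{\sqrt{2}\ln 2}{\beta}$, the lower bound in \eqref{Greenfunction_17} gives
\begin{equation*}
(\lambda_+-\lambda_-)\,t\;\geq\;\frac{\sqrt{2}}{2}\beta\cdot\frac{\sqrt{2}\ln 2}{\beta}\;=\;\ln 2,\qquad\text{hence}\qquad e^{-(\lambda_+-\lambda_-)t}\leq \frac12,
\end{equation*}
which yields \eqref{Greenfunction_19} since $\lambda_+\geq-\eta|\xi|^2$; the estimate \eqref{Greenfunction_20} follows in the same way, using $|\lambda_+|\,e^{-(\lambda_+-\lambda_-)t}\leq \frac12|\lambda_+|$ and $|\lambda_-|-\frac{1}{2}|\lambda_+|\geq |\lambda_-|-|\lambda_+| = \lambda_+-\lambda_-$.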
Then (\ref{lemma_Greenfunction_3+1}) combined with (\ref{Greenfunction_17}), (\ref{Greenfunction_19}), and (\ref{Greenfunction_20}) yields (\ref{Greenfunction_16}). Hence we finish the proof of Lemma \ref{lemma_Greenfunction_10}.
\end{proof}
With Lemmas \ref{lemma_Greenfunction_4} and \ref{lemma_Greenfunction_10}, the lower bounds of the linear part of the solution can be estimated as follows.
\begin{lemma}\label{lemma_Greenfunction_11}
Under the assumptions of Lemma \ref{lemma_Greenfunction_10}, and assuming in addition that $\Big|\int_{\mathbb{R}^2}u_0(x) {\rm d}x\Big| = c_2>0,$ there exists a positive generic constant $C = C(\alpha,\beta,K,c_2,\|\tau_0\|_{L^1})$ such that
\begin{equation}\label{lemma_Greenfunction_12}
\begin{split}
\||\xi|^k\left(\mathcal{G}_3 \hat{u}_0 + K|\xi|\mathcal{G}_1\hat{\sigma}_0\right)\|_{L^2} \geq \frac{1}{C}(1 + t)^{-\frac12-\frac{k}{2}},
\end{split}
\end{equation}
\begin{equation}\label{lemma_Greenfunction_13}
\,\,\,\,\,\||\xi|^k\left(-\frac{\alpha}{2}|\xi|\mathcal{G}_1 \hat{u}_0 + \mathcal{G}_2\hat{\sigma}_0\right)\|_{L^2} \geq \frac{1}{C}(1 + t)^{-1-\frac{k}{2}},
\end{equation}for all $t\geq t_1$ and $k=0, 1, 2, 3 $.
\end{lemma}
\begin{proof}
First of all, since $u_0\in L^1(\mathbb{R}^2)$, we have $\hat{u}_0\in C(\mathbb{R}^2)$. There exists a constant $R'>0$ such that
\begin{equation*}
|\hat{u_0}(\xi)|\ge \frac{c_2}{2},\text{ for all } 0\le|\xi|\leq R'.
\end{equation*}
For simplicity, we assume $R'\le R$. Then we have
\begin{equation}\label{Greenfunction_21}
\begin{split}
&\||\xi|^k\left(\mathcal{G}_3 \hat{u}_0 + K|\xi|\mathcal{G}_1\hat{\sigma}_0\right)\|_{L^2}\\
=&
\left\||\xi|^k\mathcal{G}_3(\xi,t)\hat{u}_0(\xi) + K|\xi|^{k+1}\mathcal{G}_1(\xi,t)\hat{\sigma}_0(\xi)\right\|_{L^2}\\
\geq & \left(\int_{|\xi|\leq R'}|\xi|^{2k}\big|\mathcal{G}_3(\xi,t)\hat{u}_0(\xi) \big|^2 {\rm d}\xi\right)^\frac12 -\left(\int_{|\xi|\leq R'} K^2|\xi|^{2k+2}|\mathcal{G}_1(\xi,t)\hat{\sigma}_0(\xi)|^2 {\rm d}\xi\right)^\frac12
\\ =:&\,
K_1-K_2.
\end{split}
\end{equation}
From Lemma \ref{lemma_Greenfunction_10}, we have that
\begin{equation}\label{Greenfunction_22}
\begin{split}
K_1 \geq \frac{1}{C}\left(\int_{|\xi|\leq R'}|\xi|^{2k}e^{-2\eta |\xi|^2t}{\rm d} \xi\right)^\frac12
\ge \frac{1}{C}(1+t)^{-\frac12-\frac{k}{2}},
\end{split}
\end{equation} for all $t\geq t_1$.
On the other hand, Lemma \ref{lemma_Greenfunction_4} yields
\begin{equation}\label{Greenfunction_23}
\begin{split}
K_2 \leq & ~C\|\hat{\sigma}_0\|_{L^\infty}\left( \int_{|\xi|\leq R'} |\xi|^{2k+2} e^{-2\theta|\xi|^2t} {\rm d} \xi\right)^\frac12
\le
C(1+t)^{-1-\frac{k}{2}}.
\end{split}
\end{equation}
(\ref{Greenfunction_21}), (\ref{Greenfunction_22}), and (\ref{Greenfunction_23}) yield (\ref{lemma_Greenfunction_12}) for all $t\ge t_1$.
Next, notice that
\begin{equation}\label{Greenfunction_24}
\begin{split}
&\||\xi|^k\left(-\frac{\alpha}{2}|\xi|\mathcal{G}_1 \hat{u}_0 + \mathcal{G}_2\hat{\sigma}_0\right)\|_{L^2}
\\= & \left\|-\frac{\alpha}{2}|\xi|^{k+1}\mathcal{G}_1(\xi,t)\hat{u}_0(\xi) + |\xi|^k \mathcal{G}_2(\xi,t)\hat{\sigma}_0(\xi)\right\|_{L^2}\\
\ge&\frac{\alpha}{2}\left(\int_{|\xi|\leq R'}|\xi|^{2k+2}|\mathcal{G}_1(\xi,t)|^2|\hat{u}_0(\xi)|^2 {\rm d}\xi\right)^\frac12
- \left(\int_{|\xi|\leq R'}|\xi|^{2k}|\mathcal{G}_2(\xi,t)|^2|\hat{\sigma}_0(\xi)|^2 {\rm d}\xi\right)^\frac12\\
=:&\, K_3 - K_4.
\end{split}
\end{equation}
Similar to the analysis of $K_1$ and $K_2$, we have
\begin{equation}\label{Greenfunction_25}
K_3 \ge \frac{1}{C}(1 + t)^{-1-\frac{k}{2}},
\end{equation}
and
\begin{equation}\label{Greenfunction_26}
\begin{split}
K_4 \le& \|\hat{\sigma}_0\|_{L^\infty} \left(\int_{|\xi|\leq R'} |\xi|^{2k}|\mathcal{G}_2(\xi,t)|^2{\rm d} \xi\right)^\frac12
\\ \le & C\left(\int_{|\xi|\leq R'} |\xi|^{4+2k}e^{-2\theta|\xi|^2 t}+|\xi|^{2k}e^{-\beta t}{\rm d}\xi \right)^\frac12\\
\le & \,C(1 + t)^{-\frac{3}{2}-\frac{k}{2}},
\end{split}
\end{equation} for all $t\geq t_1$.
It follows from \eqref{Greenfunction_24}, (\ref{Greenfunction_25}), and \eqref{Greenfunction_26} that \eqref{lemma_Greenfunction_13} holds for all $t\ge t_1$. Therefore letting $t\ge t_1$, we finish the proof of Lemma \ref{lemma_Greenfunction_11}.
\end{proof}
\subsection{Upper time-decay estimates}To begin with, we define that
\begin{equation*}
\begin{split}
\mathcal{H}_1(t) & := \alpha\| u\|_{H^1}^2 + K\| \tau\|_{H^1}^2 + \eta_1\langle\Lambda u,\sigma\rangle = O(\|(u,\tau)\|_{H^1}^2),\\
\mathcal{H}_2(t) & := \alpha\|\nabla u\|_{H^1}^2 + K\|\nabla \tau\|_{H^1}^2 + \eta_2\langle\Lambda^2 u,\Lambda\sigma\rangle = O(\|\nabla(u,\tau)\|_{H^1}^2),\\
\mathcal{H}_3(t) & := \alpha\|\nabla^2 u\|_{H^1}^2 + K\|\nabla^2 \tau\|_{H^1}^2 + \eta_3\langle\Lambda^3 u,\Lambda^2\sigma\rangle = O(\|\nabla^2(u,\tau)\|_{H^1}^2).
\end{split}
\end{equation*}
\begin{lemma}\label{lemma_upper_decay}
Under the assumptions of Theorem \ref{thm_OB_d_decay}, we have
\begin{equation*}\label{utauH1-1}
\| u(t)\|_{H^1}^2 + \| \tau(t)\|_{H^1}^2\le C(1+t)^{-\frac{1}{2}},
\end{equation*} for all $t>0.$
\end{lemma}
\begin{proof}
Recalling from \eqref{est_first} ($\mu=0$) that
\begin{equation*}
\begin{split}
\udt (\alpha\|\nabla u(t)\|_{L^2}^2 + K\|\nabla\tau(t)\|_{L^2}^2) \leq 0,
\end{split}
\end{equation*}
then we have
\begin{equation}\label{nau}
\begin{split}
\alpha\|\nabla u(t)\|_{L^2}^2 + K\|\nabla\tau(t)\|_{L^2}^2\le \alpha\|\nabla u(s)\|_{L^2}^2 + K\|\nabla\tau(s)\|_{L^2}^2,
\end{split}
\end{equation}for $t\ge s\ge 0$.
By virtue of \eqref{uniform_estimates} with $\mu=0$, there holds
\begin{equation*}
\int_0^{+\infty} (\|\nabla u(s)\|_{H^2}^2 + \|\tau(s)\|_{H^3}^2){\rm d}s \leq C.
\end{equation*}
This combined with (\ref{nau}) yields
\begin{equation*}\label{new_H1_L2_7}
\begin{split}
\frac{t}{2}\alpha\|\nabla u(t)\|_{L^2}^2 + \frac{t}{2}K\|\nabla\tau(t)\|_{L^2}^2\le \int_{\frac{t}{2}}^t (\alpha\|\nabla u(s)\|_{L^2}^2 + K\|\nabla\tau(s)\|_{L^2}^2){\rm d}s\longrightarrow 0\,\,\,\,\text{as}\,\,\,\,t\rightarrow+\infty.
\end{split}
\end{equation*}
Namely, we have
\begin{equation}\label{new_H1_L2_19}
\begin{split}
\varphi(t):=(1+t)^{\frac12}\|\nabla u(t)\|_{L^2}\longrightarrow 0 \,\,\,\,\text{and}\,\,\,\, \psi(t):=(1+t)^{\frac12}\|\nabla \tau(t)\|_{L^2}\longrightarrow 0 \,\,\,\,\text{as}\,\,\,\,t\rightarrow\infty.
\end{split}
\end{equation}
Next, from \eqref{est_H1} ($\mu=0$), we have
\begin{equation}\label{new_H1_L2}
\begin{split}
\udt \mathcal{H}_1(t) + \frac{\beta K}{2}\|\tau\|_{H^1}^2 +\frac{\eta_1\alpha}{4}\|\Lambda u\|_{L^2}^2
\leq 0.
\end{split}
\end{equation}
Notice that
\begin{equation*}\label{notice}
\begin{split}
\|\Lambda u\|_{L^2}^2 = \|\nabla u\|_{L^2}^2 &= \frac12 \|\nabla u\|_{L^2}^2 + \frac12 \int_{|\xi|\ge R_{1}}
|\xi|^2|\hat{u}|^2 {\rm d}\xi + \frac12 \int_{|\xi|\le R_{1}}
|\xi|^2|\hat{u}|^2 {\rm d}\xi\\
&\ge \frac12 \|\nabla u\|_{L^2}^2 + \frac12 R_{1}^{2}\int_{|\xi|\ge R_{1}}
|\hat{u}|^2 {\rm d}\xi,
\end{split}
\end{equation*}
where $R_{1}:=\min\{1, R\}$. Without loss of generality, we may assume that $\frac{\eta_1}{8}\le \frac{\beta}{2}$; then \eqref{new_H1_L2} can be rewritten as
\begin{equation}\label{dtH1}
\begin{split}
\udt \mathcal{H}_1(t) + \frac{\eta_1 R_{1}^{2}}{16}(2\alpha\| u\|_{H^1}^2 + 2K\| \tau\|_{H^1}^2)\le \frac{\eta_1\alpha}{8}\int_{|\xi|\le R_{1}}
|\hat{u}|^2 {\rm d}\xi.
\end{split}
\end{equation}
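For clarity, we record the elementary arithmetic behind \eqref{dtH1}: by the splitting above, $R_1\le 1$ and the assumption $\frac{\eta_1}{8}\le\frac{\beta}{2}$,
\begin{equation*}
\frac{\eta_1\alpha}{4}\|\Lambda u\|_{L^2}^2\ge \frac{\eta_1\alpha R_{1}^{2}}{8}\|u\|_{H^1}^2-\frac{\eta_1\alpha}{8}\int_{|\xi|\le R_{1}}|\hat{u}|^2 {\rm d}\xi
\qquad\text{and}\qquad
\frac{\beta K}{2}\|\tau\|_{H^1}^2\ge\frac{\eta_1 K R_{1}^{2}}{8}\|\tau\|_{H^1}^2,
\end{equation*}
and inserting these two bounds into \eqref{new_H1_L2} gives exactly the coefficient $\frac{\eta_1 R_{1}^{2}}{16}\big(2\alpha\| u\|_{H^1}^2 + 2K\| \tau\|_{H^1}^2\big)$ appearing in \eqref{dtH1}.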
Substituting \eqref{bu_9} into (\ref{dtH1}), we obtain
\begin{equation} \label{new1}
\begin{split}
\udt \mathcal{H}_1(t) + \frac{\eta_1 R_{1}^{2}}{16}\mathcal{H}_1(t)
\le \frac{\eta_1\alpha}{8}\int_{|\xi|\le R_{1}}
|\hat{u}|^2 {\rm d}\xi.
\end{split}
\end{equation}
From Lemma \ref{lemma_Greenfunction_7} ($k=0$), we have that
\begin{equation} \label{new2}
\begin{split}
\left(\int_{|\xi|\leq R_{1}}
|\hat{u}|^2
{\rm d}\xi\right)^\frac{1}{2}&\le\left(\int_{|\xi|\leq R}
|\hat{u}|^2
{\rm d}\xi\right)^\frac{1}{2}\\
&\le C(1+t)^{-\frac12} + C\int_0^t(1+t-s)^{-\frac12}\|u\|_{L^{2}}(\|\nabla u\|_{L^2} + \|\nabla\tau\|_{L^{2}})
{\rm d}s.
\end{split}
\end{equation}
Then, (\ref{new1}) and (\ref{new2}) yield
\begin{equation}\label{new_H1_L2_6}
\begin{split}
\udt \mathcal{H}_1(t) &+ \frac{\eta_1 R_{1}^{2}}{16}\mathcal{H}_1(t)
\le C (1+t)^{-1}\\ &+ C \left(\int_0^t(1+t-s)^{-\frac12}\|u\|_{L^{2}}(\|\nabla u\|_{L^2} + \|\nabla\tau\|_{L^{2}})
{\rm d}s\right)^{2}.
\end{split}
\end{equation}
Substituting \eqref{new_H1_L2_19} into \eqref{new_H1_L2_6}, we obtain
\begin{equation}\label{new_H1_L2_20}
\begin{split}
\udt \mathcal{H}_1(t) &+ \frac{\eta_1 R_{1}^{2}}{16}\mathcal{H}_1(t)
\le C (1+t)^{-1}\\ &+ C \left(\int_0^t(1+t-s)^{-\frac12}(1+s)^{-\frac12}\|u(s)\|_{L^{2}}(\varphi(s) + \psi(s))
{\rm d}s\right)^{2}.
\end{split}
\end{equation}
Define that
\begin{equation}\label{M}
\mathcal{M}(t): = \sup_{0\leq s \leq t}(1+s)^{\frac{1}{2}}\mathcal{H}_1(s).
\end{equation}
Notice that $\mathcal{M}(t)$ is non-decreasing and for all $t\ge 0$,
$$\mathcal{H}_1(t) \le (1+t)^{-\frac{1}{2}}\mathcal{M}(t)\,\,\,\,\, \text{and}\,\,\,\,\, \|u(t)\|_{L^{2}} \le C(1+t)^{-\frac{1}{4}}\mathcal{M}(t)^{\frac12}.$$
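Indeed, the first bound is immediate from the definition \eqref{M}, while the second follows from the comparability of $\mathcal{H}_1$ with the squared norms noted after its definition:
\begin{equation*}
\alpha\|u(t)\|_{L^2}^2\le \alpha\|u(t)\|_{H^1}^2\le 2\mathcal{H}_1(t)\le 2(1+t)^{-\frac12}\mathcal{M}(t),
\end{equation*}
so that $\|u(t)\|_{L^{2}} \le C(1+t)^{-\frac{1}{4}}\mathcal{M}(t)^{\frac12}$.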
Then, \eqref{new_H1_L2_20} and (\ref{M}) immediately yield
\begin{equation}\label{new_H1_L2_21}
\begin{split}
\udt \mathcal{H}_1(t) &+ \frac{\eta_1 R_{1}^{2}}{16}\mathcal{H}_1(t)
\le C (1+t)^{-1}\\ &+ C \mathcal{M}(t)\left(\int_0^t(1+t-s)^{-\frac12}(1+s)^{-\frac34}(\varphi(s) + \psi(s))
{\rm d}s\right)^{2}.
\end{split}
\end{equation}
Motivated by Dong and Chen \cite{Dong 2006}, we define
\begin{equation*}
\mathcal{J}(t):= (1+t)^{\frac14}\int_0^t(1+t-s)^{-\frac12}(1+s)^{-\frac34}(\varphi(s) + \psi(s)){\rm d}s.
\end{equation*}
Owing to \eqref{new_H1_L2_19}, for any given small constant $\epsilon>0$, there exists $T_\epsilon> 0$, such that
$$\varphi(s) + \psi(s)\le\epsilon \quad \text{for all}\ s\ge T_\epsilon.$$
Then, we have
\begin{equation*}\label{new_H1_L2_22}
\begin{split}
\mathcal{J}(t) = &\,(1+t)^{\frac14}\int_0^{T_\epsilon}(1+t-s)^{-\frac12}(1+s)^{-\frac34}(\varphi(s) + \psi(s)){\rm d}s \\&+ (1+t)^{\frac14}\int_{T_\epsilon}^t(1+t-s)^{-\frac12}(1+s)^{-\frac34}(\varphi(s) + \psi(s)){\rm d}s\\
\le&\, (1+t)^{\frac14}\Big(C(T_\epsilon)\int_0^{T_\epsilon}(1+t-s)^{-\frac12}(1+s)^{-\frac34}{\rm d}s + \epsilon\int_{0}^t(1+t-s)^{-\frac12}(1+s)^{-\frac34}{\rm d}s\Big)\\
\le& \,C(T_\epsilon)(1+t)^{-\frac14} +C\epsilon,
\end{split}
\end{equation*}
for $t\ge 2T_\epsilon.$ Letting $t\rightarrow+\infty$, and using the fact that $\epsilon$ is arbitrarily small, we have
\begin{equation}\label{new_H1_L2_23}
\begin{split}
\mathcal{J}(t)\longrightarrow 0 \,\,\,\,\text{as}\,\,\,\,t\rightarrow +\infty.
\end{split}
\end{equation}
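In the estimate of $\mathcal{J}(t)$ above we used the elementary convolution bound, obtained by splitting the integral at $s=\frac{t}{2}$:
\begin{equation*}
\int_0^t(1+t-s)^{-\frac12}(1+s)^{-\frac34}{\rm d}s
\le C(1+t)^{-\frac12}\int_0^{\frac{t}{2}}(1+s)^{-\frac34}{\rm d}s
+C(1+t)^{-\frac34}\int_{\frac{t}{2}}^{t}(1+t-s)^{-\frac12}{\rm d}s
\le C(1+t)^{-\frac14}.
\end{equation*}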
Returning to \eqref{new_H1_L2_21}, applying Gronwall's inequality and using \eqref{new_H1_L2_23}, with $T_1>0$ chosen so large that $\mathcal{J}(s)^2$ is sufficiently small for all $s\ge T_1$, we have
\begin{equation*}\label{new_H1_L2_24}
\begin{split}
\mathcal{H}_1(t)&\le e^{-Ct}\mathcal{H}_1(0)+ C\int_0^t e^{-C(t-s)}\left((1+s)^{-1} + \mathcal{M}(s)(1+s)^{-\frac12} \mathcal{J}(s)^2 \right) {\rm d}s\\
&\le C (1+t)^{-1} + C(T_1)\int_0^{T_1} e^{-C(t-s)}(1+s)^{-\frac12} {\rm d}s + C\int_{T_1}^t e^{-C(t-s)}\mathcal{M}(s)(1+s)^{-\frac12} \mathcal{J}(s)^2 {\rm d}s\\
&\le C(T_1) (1+t)^{-\frac{1}{2}} + \frac12(1+t)^{-\frac12}\sup_{0\leq s \leq t}(1+s)^{\frac{1}{2}}\mathcal{H}_1(s),
\end{split}
\end{equation*} for any $t\ge T_2:=2T_1$. Namely, for $t\ge T_2,$ there holds
\begin{equation}\label{new_H1_L2_26}
\begin{split}
(1+t)^{\frac12}\mathcal{H}_1(t)\le C(T_1) + \frac12\sup_{0\leq s \leq t}(1+s)^{\frac{1}{2}}\mathcal{H}_1(s).
\end{split}
\end{equation}
Taking the supremum of \eqref{new_H1_L2_26} over $[T_2, t]$ and using the boundedness of $(1+s)^{\frac12}\mathcal{H}_1(s)$ on $[0,T_2]$, we get
\begin{equation*}\label{new_H1_L2_27}
\begin{split}
\frac12\sup_{T_2\leq s \leq t}(1+s)^{\frac{1}{2}}\mathcal{H}_1(s)\le C,
\end{split}
\end{equation*}
which yields
\begin{equation}\label{new_H1_L2_28}
\begin{split}
\mathcal{H}_1(t)\le C (1+t)^{-\frac12},
\end{split}
\end{equation} for $t\ge T_2$. Since $\mathcal{H}_1(t)$ is bounded for all $t\le T_2$, (\ref{new_H1_L2_28}) in fact holds for all $t>0$. The proof of Lemma \ref{lemma_upper_decay} is complete.
\end{proof}
To obtain a sharper decay rate for the quantities on the left-hand side of (\ref{utauH1}), we employ the Fourier splitting method (see \cite{Schonbek 1985}).
\begin{lemma}\label{lemma_upper_decay+1}
Under the assumptions of Theorem \ref{thm_OB_d_decay}, we have
\begin{equation}\label{utauH1}
\| u(t)\|_{H^1}^2 + \| \tau(t)\|_{H^1}^2\le C(1+t)^{-1},
\end{equation} for all $t>0.$
\end{lemma}
\begin{proof}
Decomposing the term $\|\Lambda u\|_{L^2}^2$ again, there holds
\begin{equation}\label{new_H1_L2_29}
\begin{split}
\|\Lambda u\|_{L^2}^2 = \|\nabla u\|_{L^2}^2 &= \frac12 \|\nabla u\|_{L^2}^2 + \frac12 \int_{|\xi|\ge g_1(t)}
|\xi|^2|\hat{u}|^2 {\rm d}\xi + \frac12 \int_{|\xi|\le g_1(t)}
|\xi|^2|\hat{u}|^2 {\rm d}\xi\\
&\ge \frac12 \|\nabla u\|_{L^2}^2 + \frac12 g_1(t)^{2}\int_{|\xi|\ge g_1(t)}
|\hat{u}|^2 {\rm d}\xi,
\end{split}
\end{equation}
where $g_1^2(t)=\frac{24}{\eta_1}(1+t)^{-1}$. Then $g_1^2(t)\le 1$ for all $t\ge \frac{24}{\eta_1}-1$.
Substituting \eqref{new_H1_L2_29} into \eqref{new_H1_L2}, we get
\begin{equation}\label{new_H1_L2_12}
\begin{split}
\udt \mathcal{H}_1(t) + \frac{3}{2}(1+t)^{-1}\mathcal{H}_1(t)
\le C(1+t)^{-1}\int_{|\xi|\le g_1(t)}
|\hat{u}|^2 {\rm d}\xi,
\end{split}
\end{equation}for all $t\ge \max\{\frac{24}{\eta_1}-1,\frac{6}{\beta}-1\}=:t_2$.
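As a brief arithmetic check of the coefficient $\frac{3}{2}(1+t)^{-1}$ in \eqref{new_H1_L2_12}, note that $\frac{\eta_1\alpha}{8}g_1^2(t)=\frac{3\alpha}{1+t}$ and $\frac{\beta K}{2}\ge\frac{3K}{1+t}$ for $t\ge\frac{6}{\beta}-1$; hence, for $t\ge t_2$ (so that also $g_1^2(t)\le1$), \eqref{new_H1_L2_29} combined with \eqref{new_H1_L2} yields
\begin{equation*}
\udt \mathcal{H}_1(t)+\frac{3}{1+t}\big(\alpha\| u\|_{H^1}^2+K\| \tau\|_{H^1}^2\big)\le \frac{3\alpha}{1+t}\int_{|\xi|\le g_1(t)}|\hat{u}|^2 {\rm d}\xi,
\end{equation*}
which gives \eqref{new_H1_L2_12} once $\mathcal{H}_1(t)\le 2\alpha\| u\|_{H^1}^2+2K\| \tau\|_{H^1}^2$ is used (this is how \eqref{bu_9} enters above).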
Recalling (\ref{Greenfunction_13-1}), we have
\begin{equation} \label{duhamel}
\hat{u}(t) = \,\mathcal{G}_3 \hat{u}_0 + K|\xi|\mathcal{G}_1\hat{\sigma}_0 + \int_0^t\mathcal{G}_3(t-s)\hat{\mathcal{F}}_1 (s) + K|\xi|\mathcal{G}_1(t-s)\hat{\mathcal{F}}_2 (s)
{\rm d}s,
\end{equation}
where
\begin{eqnarray*}
\mathcal{F}_1=-\mathbb{P}\left(u\cdot\nabla u\right),\
\mathcal{F}_2= -\Lambda^{-1}\mathbb{P}{\rm div}\left(u\cdot\nabla\tau\right).
\end{eqnarray*}
It is easy to see that
\begin{equation*}
g_1^2(t)=\frac{24}{\eta_1}(1+t)^{-1}\le R^2 \doteq \frac{\beta^2}{4\alpha K},
\end{equation*}
for all $t\ge \max\{\frac{96\alpha K}{\eta_1\beta^2}-1, t_2\}=:t_3$.
By virtue of Lemma \ref{lemma_Greenfunction_4}, (\ref{duhamel}) can be estimated as below:
\begin{align}\label{hatu}
\nonumber |\hat{u}|\le \,&Ce^{-\theta|\xi|^2t}|\hat{u}_0| + C |\xi| e^{-\theta|\xi|^2t}|\hat{\sigma}_0|
+ C\int_0^t e^{-\theta|\xi|^2(t-s)}|\xi||\widehat{u\otimes u}(s)| {\rm d}s
\\ \nonumber &+ C\int_0^t |\xi| e^{-\theta|\xi|^2(t-s)}|\xi||\widehat{u \otimes\tau}(s)| {\rm d}s\\
\le \,&C + C|\xi|\int_0^t \|u\|_{L^2}^2{\rm d}s + C|\xi|^2\int_0^t \|u\|_{L^2}\|\tau\|_{L^2}{\rm d}s,
\end{align}for all $|\xi|\le g_1(t)\le R$ and all $t\ge t_3$.
By virtue of \eqref{new_H1_L2_28} and (\ref{hatu}), we get
\begin{equation}\label{u_L00_low_2}
\begin{split}
|\hat{u}(\xi,t)|\le C,
\end{split}
\end{equation} for $t\ge t_3$ and $|\xi|\le g_1(t)$.
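Explicitly, for $|\xi|\le g_1(t)\le C(1+t)^{-\frac12}$, Lemma \ref{lemma_upper_decay} gives $\|u(s)\|_{L^2}^2+\|\tau(s)\|_{L^2}^2\le C(1+s)^{-\frac12}$, and therefore
\begin{equation*}
|\xi|\int_0^t \|u\|_{L^2}^2\,{\rm d}s\le C(1+t)^{-\frac12}\int_0^t(1+s)^{-\frac12}{\rm d}s\le C,\qquad
|\xi|^2\int_0^t \|u\|_{L^2}\|\tau\|_{L^2}\,{\rm d}s\le C(1+t)^{-1}(1+t)^{\frac12}\le C,
\end{equation*}
which, together with (\ref{hatu}), yields \eqref{u_L00_low_2}.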
Substituting \eqref{u_L00_low_2} into \eqref{new_H1_L2_12}, we get
\begin{equation}\label{new_H1_L2_15}
\begin{split}
\udt \mathcal{H}_1(t) + \frac{3}{2}(1+t)^{-1}\mathcal{H}_1(t)
\le C(1+t)^{-2},
\end{split}
\end{equation} for all $t\ge t_3$.
Multiplying \eqref{new_H1_L2_15} by $(1+t)^{\frac32}$, we can deduce that
\begin{equation*}\label{new_H1_L2_13}
\begin{split}
\udt ((1+t)^{\frac32}\mathcal{H}_1(t))\le C(1+t)^{-\frac12},
\end{split}
\end{equation*}for all $t\ge t_3$, which yields
\begin{equation}\label{new_H1_L2_14}
\begin{split}
\mathcal{H}_1(t)\le C (1+t)^{-1}.
\end{split}
\end{equation}
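Indeed, integrating the previous differential inequality over $[t_3,t]$ gives
\begin{equation*}
(1+t)^{\frac32}\mathcal{H}_1(t)\le (1+t_3)^{\frac32}\mathcal{H}_1(t_3)+C\int_{t_3}^{t}(1+s)^{-\frac12}{\rm d}s\le C(1+t)^{\frac12},
\end{equation*}
which is \eqref{new_H1_L2_14} for all $t\ge t_3$.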
Similarly, since $\mathcal{H}_1(t)$ is bounded for all $t\le t_3$, (\ref{new_H1_L2_14}) also holds for all $t>0$.
\end{proof}
Next, we will develop a way to capture the optimal time-decay rates for
the higher-order derivatives of the solution.
\begin{lemma}\label{lemma_upper_decay_2}
Under the assumptions of Theorem \ref{thm_OB_d_decay}, we have
\begin{equation}\label{upper decay2}
\|\nabla u(t)\|_{H^2}^2 + \|\nabla \tau(t)\|_{H^2}^2\le C (1+t)^{-2},
\end{equation}
for all $t>0$.
\end{lemma}
\begin{proof}
Summing up \eqref{est_first}, \eqref{est_second} and $\eta_2\times$\eqref{est_second_u} (with $\mu=0$), we obtain
\begin{equation}\label{new_H2_L2_1}
\begin{split}
\udt \mathcal{H}_2(t) + \frac{\beta K}{2}\|\nabla\tau\|_{H^1}^2 +\frac{\eta_2\alpha}{8}\|\Lambda^2 u\|_{L^2}^2 \le C\eta_2\big(\|\nabla u\|_{L^\infty}^{2}+\|\nabla \tau\|_{L^\infty}^{2}\big)\|\nabla u\|_{L^2}^2.
\end{split}
\end{equation}Then, \eqref{new_H2_L2_1} yields
\begin{equation}\label{new_H2_L2_2}
\begin{split}
\udt \mathcal{H}_2(t) + \bar{c}_0\mathcal{H}_2(t) \le C\|\nabla u\|_{L^2}^2,
\end{split}
\end{equation} for some positive constants $\bar{c}_0$ and $C$.
By virtue of \eqref{utauH1}, \eqref{new_H2_L2_2} yields
\begin{equation}\label{new_H2_L2_3}
\begin{split}
\mathcal{H}_2(t)\le &C(1+t)^{-1},
\end{split}
\end{equation}for all $t>0$.
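Here, and in several similar steps below, we use the elementary fact that for constants $c>0$ and $m>0$,
\begin{equation*}
\int_0^t e^{-c(t-s)}(1+s)^{-m}{\rm d}s
\le e^{-\frac{ct}{2}}\int_0^{\frac{t}{2}}(1+s)^{-m}{\rm d}s
+\Big(1+\frac{t}{2}\Big)^{-m}\int_{\frac{t}{2}}^{t}e^{-c(t-s)}{\rm d}s
\le C(1+t)^{-m};
\end{equation*}
applied to \eqref{new_H2_L2_2} with $\|\nabla u(s)\|_{L^2}^2\le C(1+s)^{-1}$ from \eqref{utauH1}, this gives \eqref{new_H2_L2_3}.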
Similarly, summing up \eqref{est_second}, \eqref{est_third} and $\eta_3\times$\eqref{est_third_u} (with $\mu=0$), we obtain
\begin{equation} \label{new5}
\udt \mathcal{H}_3(t) + \frac{\beta K}{2}\|\nabla^2\tau\|_{H^1}^2 +\frac{\eta_3\alpha}{8}\|\Lambda^3 u\|_{L^2}^2 \le C\big(\|\nabla u\|_{L^\infty}+\|\nabla \tau\|_{L^\infty}\big)\|\nabla^2 u\|_{L^2}^2,
\end{equation}which yields
\begin{equation}\label{new_H3_L2_2}
\begin{split}
\udt \mathcal{H}_3(t) + \bar{c}_1\mathcal{H}_3(t) \le C\|\nabla^2 u\|_{L^2}^2,
\end{split}
\end{equation}for some positive constants $\bar{c}_1$ and $C$.
Combining \eqref{new_H2_L2_3} with \eqref{new_H3_L2_2}, we get
\begin{equation} \label{new3}
\mathcal{H}_3(t)\le C(1+t)^{-1},
\end{equation} for all $t>0$.
Similar to \eqref{new_H1_L2_29}, we have
\begin{equation}\label{na2u}
\|\Lambda^2 u\|_{L^2}^2 = \|\nabla^2 u\|_{L^2}^2 \ge \frac12 \|\nabla^2 u\|_{L^2}^2 + \frac12 g_2^2(t)\int_{|\xi|\ge g_2(t)}
|\xi|^2|\hat{u}|^2 {\rm d}\xi,
\end{equation}
where $g_2(t)>0$ is to be determined.
Substituting (\ref{na2u}) into \eqref{new_H2_L2_1}, and using (\ref{new_H2_L2_3}) and (\ref{new3}), we have
\begin{equation}\label{new_H2_L2_4}
\begin{split}
&\udt \mathcal{H}_2(t) + \frac{\beta K}{2}\|\nabla\tau\|_{H^1}^2 +\frac{\eta_2\alpha}{16}\|\nabla^2 u\|_{L^2}^2 + \frac{\eta_2\alpha}{16}g_2^2(t)\|\nabla u\|_{L^2}^2\\
\le\,&\frac{\eta_2\alpha}{16}g_2^2(t)\int_{|\xi|\le g_2(t)}
|\xi|^2|\hat{u}|^2 {\rm d}\xi + C\eta_2\big(\|\nabla u\|_{L^\infty}^{2}+\|\nabla \tau\|_{L^\infty}^{2}\big)\|\nabla u\|_{L^2}^2\\
\le\,&\frac{\eta_2\alpha}{16}g_2^2(t)\int_{|\xi|\le g_2(t)}
|\xi|^2|\hat{u}|^2 {\rm d}\xi +\frac{C\eta_2}{1+t}\|\nabla u\|_{L^2}^2.
\end{split}
\end{equation}
Here we take $g_2^2(t)=\frac{160}{\eta_2}(1+t)^{-1}$, so that $g_2^2(t)\le 1$ for all $t\ge \frac{160}{\eta_2}-1=:t_4$. In addition, letting $\eta_2 \le \min \{16\beta, \frac{5\alpha}{C}\}$, \eqref{new_H2_L2_4} yields
\begin{equation}\label{new4}
\begin{split}
\udt \mathcal{H}_2(t) + \frac{\eta_2}{64}g_2^2(t)\,(2\alpha\|\nabla u\|_{H^1}^2 + 2K\| \nabla\tau\|_{H^1}^2)
\le C(1+t)^{-3},
\end{split}
\end{equation}where we have used the boundedness of $|\hat{u}|$ for all $|\xi|\le g_2(t)$, which is similar to (\ref{u_L00_low_2}).
Combining (\ref{new4}) with \eqref{bu_9}, we obtain
\begin{equation}\label{new_H2_L2_6}
\begin{split}
\udt \mathcal{H}_2(t) + \frac{\eta_2}{64}g_2^2(t)\mathcal{H}_2(t)
\le C(1+t)^{-3}.
\end{split}
\end{equation}
Multiplying \eqref{new_H2_L2_6} by $(1+t)^\frac52$, we can deduce that for all $t\ge t_4$,
\begin{equation}\label{new_H2_L2_7}
\begin{split}
\mathcal{H}_2(t)\le &C(1+t)^{-2}.
\end{split}
\end{equation}
Substituting \eqref{new_H2_L2_7} into \eqref{new_H3_L2_2}, we can also deduce that for all $t\ge t_4$,
\begin{equation}\label{new_H3_L2_8}
\begin{split}
\mathcal{H}_3(t)\le &C(1+t)^{-2}.
\end{split}
\end{equation}
Since $\mathcal{H}_2(t)$ and $\mathcal{H}_3(t)$ are bounded for all $t\le t_4$, \eqref{new_H2_L2_7} and \eqref{new_H3_L2_8} also hold for all $t>0$.
\end{proof}
\begin{corollary}\label{cor1}
Under the assumptions of Theorem \ref{thm_OB_d_decay}, we have
\begin{equation}\label{new_tau_L2_2}
\begin{split}
\|\tau(t)\|_{L^2}^2\le C(1+t)^{-2},
\end{split}
\end{equation}for all $t>0.$
\end{corollary}
\begin{proof}
Applying $\nabla^k$ ($k=0,1,2$) to (\ref{Oldroyd_B_d})$_2$, multiplying the result by $\nabla^k \tau$, and integrating with respect to $x$, we have that
\begin{equation}\label{new_tau_L2_1}
\begin{split}
&\frac{1}{2}\frac{\mathrm{d}}{\mathrm{d}t} \|\nabla^k \tau\|_{L^2}^2 + \frac{\beta}{2}\|\nabla^k \tau\|_{L^2}^2 \\&\le C \|\nabla^{k+1}u\|_{L^2}^2 + C\|\nabla^{k}(u\cdot \nabla\tau)\|_{L^2}^2\\
&\le C\|\nabla^{k+1}u\|_{L^2}^2 + C\|\nabla^{k+1}\tau\|_{L^2}^2\|u\|_{L^\infty}^2 + \|\nabla \tau\|_{L^\infty}^2\|\nabla^{k}u\|_{L^2}^2.
\end{split}
\end{equation}
The cases $k=1,2$ of (\ref{new_tau_L2_1}) will be used later; to prove \eqref{new_tau_L2_2}, it suffices to take $k=0$ in \eqref{new_tau_L2_1}. Then, by virtue of (\ref{new_H2_L2_7}) and \eqref{new_H3_L2_8}, the inequality \eqref{new_tau_L2_2} holds.
\end{proof}
\begin{lemma}\label{lemma_upper_decay_3}
Under the assumptions of Theorem \ref{thm_OB_d_decay}, we have
\begin{equation}\label{na2udecay}
\|\nabla^2 u(t)\|_{H^1}^2 + \|\nabla^2 \tau(t)\|_{H^1}^2\le C(1+t)^{-3},
\end{equation} for all $t>0.$
\end{lemma}
\begin{proof} By virtue of (\ref{upper decay2}) and (\ref{new5}), we have
\begin{equation*}\label{new_H3_L2_2+1}
\begin{split}
\udt \mathcal{H}_3(t) + \bar{c}_2\mathcal{H}_3(t) \le& C\big(\|\nabla u\|_{L^\infty}+\|\nabla \tau\|_{L^\infty}\big)\|\nabla^2 u\|_{L^2}^2+C\int_{|\xi|\leq R}|\xi|^{4}|\hat{u}(t)|^2{\rm d}\xi\\ \le& C\big(\|\nabla u\|_{H^2}^3+\|\nabla \tau\|_{H^2}^3\big) +C\int_{|\xi|\leq R}|\xi|^{4}|\hat{u}(t)|^2{\rm d}\xi\\ \le& C(1+t)^{-3} +C\int_{|\xi|\leq R}|\xi|^{4}|\hat{u}(t)|^2{\rm d}\xi,
\end{split}
\end{equation*}for some positive constants $\bar{c}_2$ and $C$, which together with Lemma \ref{lemma_Greenfunction_7} yields
\begin{equation}\label{new_H3_L2_2+2}
\begin{split}
\udt \mathcal{H}_3(t) + \bar{c}_2\mathcal{H}_3(t) \le& C(1+t)^{-3}.
\end{split}
\end{equation}
Then (\ref{na2udecay}) can be obtained by using (\ref{new_H3_L2_2+2}). The proof of Lemma \ref{lemma_upper_decay_3} is complete.
\end{proof}
\begin{lemma}\label{lemma_upper_decay_4}
Under the assumptions of Theorem \ref{thm_OB_d_decay}, we have
\begin{equation}\label{na3udecay}
\begin{split}
\|\nabla^3 u(t)\|_{L^2}^2 + \|\nabla^3 \tau(t)\|_{L^2}^2\le C (1+t)^{-4},
\end{split}
\end{equation}for all $t>0.$
\end{lemma}
\begin{proof}
Here, we choose the standard cut-off function $0\le \varphi_0(\xi)\le 1$ in $C_c^\infty(\mathbb{R}^2)$ such that
\begin{equation*}
\varphi_0(\xi) =
\begin{cases}
1, \,&\text{for}\ |\xi|\le \frac{R}{2},\\
0, \,&\text{for}\ |\xi|\ge R,
\end{cases}
\end{equation*}
where $R$ is defined in Lemma \ref{lemma_Greenfunction_4}. The low-high-frequency decomposition $(f^l(x), f^h(x))$ for a function $f(x)$ is stated as follows:
\begin{equation*}
f^l(x):=\mathcal{F}^{-1}(\varphi_0(\xi)\hat{f}(\xi))\,\,\,\ \text{and} \,\,\,\,f^h(x):=f(x)-f^l(x).
\end{equation*}
Multiplying $\Lambda^3$(\ref{u_sigma_1})$_1$ and $\Lambda^2(\ref{u_sigma_1})^l_2$ by $-\Lambda^2\sigma^l$ and $-\Lambda^3 u$, respectively, summing the results up, and using integration by parts, we have
\begin{align}\label{bu_15}
\begin{split}
&-\partial_t\langle\Lambda^3 u,\Lambda^2 \sigma^l\rangle\\
= & \,-\Big(K\langle\Lambda^3\sigma, \Lambda^3 \sigma^l\rangle - \langle\beta\Lambda^2\sigma^l,\Lambda^3 u \rangle - \frac{\alpha}{2}\langle\Lambda^3 u, \Lambda^3 u^l\rangle \Big)\\ &+ \Big(\langle \Lambda^3\mathbb{P}(u\cdot \nabla u),\Lambda^2\sigma^l\rangle + \langle\big(\Lambda \mathbb{P}\mathrm{div}(u\cdot \nabla \tau)\big)^l,\Lambda^3 u\rangle\Big)\\
=: &\,I_7 + I_8.
\end{split}
\end{align}
For $I_7$ and $I_8$, using (\ref{bu_7}), we have
\begin{align}
|I_7|\le\, &K\|\Lambda^3 \sigma\|_{L^2}^2 + \frac{\alpha}{32}\|\Lambda^3 u\|_{L^2}^2 + \frac{8\beta^2}{\alpha}\|\Lambda^2\sigma\|_{L^2}^2 + \frac{\alpha}{2}\|\Lambda^3 u^l\|_{L^2}^2 + \frac{\alpha}{8}\|\Lambda^3 u\|_{L^2}^2,\label{I7}\\
\nonumber|I_8|\le\, &\frac12\|\Lambda^3 \sigma\|_{L^2}^2
+ \frac12\|\Lambda^2\mathbb{P}(u\cdot \nabla u)\|_{L^2}^2
+ \frac{\alpha}{32}\|\Lambda^3 u\|_{L^2}^2 + \frac{8}{\alpha}\|\Lambda\mathbb{P}\mathrm{div}(u\cdot \nabla \tau)\|_{L^2}^2\\
\le\, &\frac12\|\Lambda^3 \sigma\|_{L^2}^2
+ C\|\nabla^2(u\cdot \nabla u)\|_{L^2}^2 + \frac{\alpha}{32}\|\Lambda^3 u\|_{L^2}^2 + C\|\nabla^2(u\cdot \nabla \tau)\|_{L^2}^2.\label{I8}
\end{align}
Substituting (\ref{I7}) and (\ref{I8}) into (\ref{bu_15}), we get
\begin{equation}\label{new_H5_L2_1}
\begin{split}
\begin{split}
&-\partial_t\langle\Lambda^3 u,\Lambda^2 \sigma^l\rangle\\
\leq \,& \frac{\alpha}{2}\|\Lambda^3 u^l\|_{L^2}^2 + \big(\frac{3\alpha}{16} + C\| u\|_{L^\infty}^{2}\big)\|\Lambda^3 u\|_{L^2}^2 + (K + \frac{8\beta^2}{\alpha} + \frac12)\|\nabla^2\sigma\|_{H^1}^2\\
\, &+ C\big(\|u\|_{L^\infty}^{2}+\|\nabla u\|_{L^\infty}^{2}+\|\nabla \tau\|_{L^\infty}^{2}\big)\big(\|\nabla^2\tau\|_{H^1}^2 + \|\nabla^2 u\|_{L^2}^2\big),
\end{split}
\end{split}
\end{equation}
where \eqref{bu_21} is used.
Summing \eqref{est_third_u} ($\mu=0$) and \eqref{new_H5_L2_1} up, and using the smallness of the solution stated in Theorem \ref{wellposedness}, we get
\begin{equation}\label{new_H5_L2_2}
\begin{split}
&\partial_t\langle\Lambda^3 u,\Lambda^2 \sigma^h\rangle + \frac{\alpha}{16}\|\Lambda^3 u\|_{L^2}^2\\
\leq \, &\frac{\alpha}{2}\|\Lambda^3 u^l\|_{L^2}^2 + (2K + \frac{12\beta^2}{\alpha} + 1)\|\nabla^2\sigma\|_{H^1}^2 \\
& + C\big(\|u\|_{L^\infty}^{2}+\|\nabla u\|_{L^\infty}^{2}+\|\nabla \tau\|_{L^\infty}^{2}\big)\big(\|\nabla^2\tau\|_{H^1}^2 + \|\nabla^2 u\|_{L^2}^2\big).
\end{split}
\end{equation}
Letting $\eta_4>0$ be small enough and summing up $2\times$\eqref{est_third} (with $\mu=0$) and $\eta_4\times$\eqref{new_H5_L2_2}, we have
\begin{equation}\label{new_H5_L2_4}
\begin{split}
&\udt \mathcal{H}_4(t) + \frac{\eta_4\alpha}{16}\|\Lambda^3 u\|_{L^2}^2 + \frac{\beta K}{100}\|\nabla^3\tau\|_{L^2}^2\\
\leq\, & C\big(\|\nabla u\|_{L^\infty}+\|\nabla \tau\|_{L^\infty}\big)\|\nabla^3 u\|_{L^2}^2 + \eta_4\Big(\frac{\alpha}{2} \|\Lambda^3 u^l\|_{L^2}^2
+ (2K + \frac{12\beta^2}{\alpha} + 1)\|\nabla^2\sigma\|_{H^1}^2 \\
\, &+ C\big(\|u\|_{L^\infty}^{2}+\|\nabla u\|_{L^\infty}^{2}+\|\nabla \tau\|_{L^\infty}^{2}\big)\big(\|\nabla^2\tau\|_{H^1}^2 + \|\nabla^2 u\|_{L^2}^2\big)\Big),
\end{split}
\end{equation} where
\begin{equation*}
\mathcal{H}_4:=\alpha\|\nabla^3 u\|_{L^2}^2 + K\|\nabla^3\tau\|_{L^2}^2+\eta_4\langle\Lambda^3 u,\Lambda^2 \sigma^h\rangle=O(\|\nabla^3 u\|_{L^2}^2+\|\nabla^3 \tau\|_{L^2}^2).
\end{equation*}
By virtue of (\ref{bu_7}), (\ref{bu_8}) and (\ref{upper decay2}), together with the smallness of the solution and of $\eta_4$, (\ref{new_H5_L2_4}) yields
\begin{equation}\label{new_H5_L2_5}
\begin{split}
\begin{split}
&\udt \mathcal{H}_4(t) + \frac{\eta_4\alpha}{32}\|\Lambda^3 u\|_{L^2}^2 + \frac{\beta K}{100}\|\nabla^3\tau\|_{L^2}^2\\
\leq \,& \eta_4\Big(\frac{\alpha}{2} \|\Lambda^3 u^l\|_{L^2}^2
+ (2K + \frac{12\beta^2}{\alpha} + 1)\|\nabla^2\sigma\|_{L^2}^2\\
\,&+ C\big(\|u\|_{L^\infty}^{2}+\|\nabla u\|_{L^\infty}^{2}+\|\nabla \tau\|_{L^\infty}^{2}\big)\big(\|\nabla^2\tau\|_{L^2}^2 + \|\nabla^2 u\|_{L^2}^2\big)\Big).
\end{split}
\end{split}
\end{equation}
Using \eqref{bu_8}, we have that
\begin{equation}\label{bu_17}
\frac{\beta K}{200}\|\nabla^3\tau\|_{L^2}^2\ge \frac{1}{C}\|\nabla^3\sigma\|_{L^2}^2 \ge \frac{1}{C}\,\frac{R^{2}}{4}\int_{|\xi|\ge \frac{R}{2}}
|\xi|^4|\hat{\sigma}|^2 {\rm d}\xi\ge \frac{1}{C}\|\nabla^2\sigma^h\|_{L^2}^2.
\end{equation}
In addition, letting
\begin{equation}\label{bu_18}
\eta_4 \le \frac{C}{2K + \frac{12\beta^2}{\alpha} + 1},
\end{equation}
substituting \eqref{bu_17} and \eqref{bu_18} into \eqref{new_H5_L2_5}, and using (\ref{utauH1}), \eqref{upper decay2} and \eqref{na2udecay}, we get
\begin{equation}\label{new_H5_L2_6}
\begin{split}
\begin{split}
\udt \mathcal{H}_4(t) + \bar{c}_3\mathcal{H}_4(t)
\leq& \, C\|\Lambda^3 u^l\|_{L^2}^2
+ C\|\nabla^2\sigma^l\|_{L^2}^2\\
&+ C\big(\|u\|_{L^\infty}^{2}+\|\nabla u\|_{L^\infty}^{2}+\|\nabla \tau\|_{L^\infty}^{2}\big)\big(\|\nabla^2\tau\|_{L^2}^2 + \|\nabla^2 u\|_{L^2}^2\big)\\
\leq& \, C\|\Lambda^3 u^l\|_{L^2}^2
+ C\|\nabla^2\sigma^l\|_{L^2}^2 + C(1+t)^{-4},
\end{split}
\end{split}
\end{equation}
for some positive constants $\bar{c}_3$ and $C$.
By virtue of (\ref{Greenfunction_13-1}), (\ref{utauH1}), and (\ref{upper decay2}), we have
\begin{align}\label{new_H5_L2_8}
&\left(\int_{|\xi|\leq R}
|\xi|^{6}|\hat{u}(t)|^2{\rm d}\xi\right)^\frac{1}{2}\notag\\
\le & ~C(1+t)^{-2} + C\int_0^t\Big(\int_{|\xi|\leq R}|\xi|^{6}e^{-2\theta|\xi|^2(t-s)}(|\hat{\mathcal{F}}_1 (\xi,s)|^2 + |\hat{\mathcal{F}}_2 (\xi,s)|^2){\rm d}\xi\Big)^\frac12{\rm d}s\notag\\
\le &~C(1+t)^{-2} +C\int_0^{\frac{t}{2}}(1+t-s)^{-2}(\|\hat{\mathcal{F}}_1 (\cdot,s)\|_{L^\infty} + \|\hat{\mathcal{F}}_2 (\cdot,s)\|_{L^\infty}){\rm d}s \\
&+C\int_{\frac{t}{2}}^{t}\Big(\int_{|\xi|\leq R}|\xi|^{4}\frac{(1+t-s)^{2}}{(1+t-s)^{2}}e^{-2\theta|\xi|^2(1+t-s)}|\xi|^2(|\hat{\mathcal{F}}_1 (\xi,s)|^2 + |\hat{\mathcal{F}}_2 (\xi,s)|^2){\rm d}\xi\Big)^\frac12{\rm d}s\notag\\
\le &~C(1+t)^{-2} +C\int_{\frac{t}{2}}^{t}(1+t-s)^{-1}\Big(\big\||\xi|\hat{\mathcal{F}}_1 (\xi,s)\big\|_{L^2} + \big\||\xi|\hat{\mathcal{F}}_2 (\xi,s)\big\|_{L^2} \Big){\rm d}s,\notag
\end{align} where we have used
\begin{equation*}
\begin{split}
\|\hat{\mathcal{F}}_1(\xi,s) \|_{L^\infty} + \|\hat{\mathcal{F}}_2(\xi,s)\|_{L^\infty}\le& C\|u(s)\|_{L^2}\big(\|\nabla u(s)\|_{L^2}+\|\nabla \tau(s)\|_{L^2}\big)
\\ \le& C(1+s)^{-\frac{3}{2}}.
\end{split}
\end{equation*}
Using \eqref{upper decay2}, \eqref{na2udecay} and the Gagliardo-Nirenberg inequality, we have
\begin{equation}\label{new_H5_L2_9}
\begin{split}
\big\||\xi|\hat{\mathcal{F}}_1 (\xi,s)\big\|_{L^2}&\le C\|\nabla(u\cdot\nabla u)\|_{L^2}\\
&\le C\big(\| u\|_{L^\infty}\|\nabla^2 u\|_{L^2} + \|\nabla u\|_{L^\infty}\|\nabla u\|_{L^2}\big)\\
&\le C\big(\| u\|_{L^2}^{\frac12}\|\nabla^2 u\|_{L^2}^{\frac12}\|\nabla^2 u\|_{L^2} + \|\nabla u\|_{L^2}^{\frac12}\|\nabla^3 u\|_{L^2}^{\frac12}\|\nabla u\|_{L^2}\big)\\
&\le C(1+s)^{-\frac{9}{4}}.
\end{split}
\end{equation}
Similarly, we have
\begin{equation}\label{new_H5_L2_10}
\begin{split}
\||\xi|\hat{\mathcal{F}}_2 (\xi,s)\|_{L^2}&\le C(1+s)^{-\frac{9}{4}}.
\end{split}
\end{equation}
Substituting \eqref{new_H5_L2_9} and \eqref{new_H5_L2_10} into \eqref{new_H5_L2_8}, we deduce that
\begin{equation}\label{new_H5_L2_11}
\begin{split}
\|\Lambda^3 u^l\|_{L^2}^2&\le C(1+t)^{-4}.
\end{split}
\end{equation}
It is worth noticing that $\|\nabla^2\sigma^l\|_{L^2}$ has a structure similar to that of $\|\Lambda^3 u^l\|_{L^2}$ (see the proof of Lemma \ref{lemma_Greenfunction_7}). Thus we have
\begin{equation}\label{new_H5_L2_12}
\begin{split}
\|\nabla^2\sigma^l\|_{L^2}^2&\le C(1+t)^{-4}.
\end{split}
\end{equation}
Substituting \eqref{new_H5_L2_11} and \eqref{new_H5_L2_12} into \eqref{new_H5_L2_6}, we get
\begin{equation}\label{new_H5_L2_13}
\begin{split}
\begin{split}
\udt \mathcal{H}_4(t) + \bar{c}_3\mathcal{H}_4(t)\le C(1+t)^{-4}.
\end{split}
\end{split}
\end{equation}
Then, \eqref{na3udecay} can be obtained by using \eqref{new_H5_L2_13}.
\end{proof}
\begin{corollary}\label{cor2}
Under the assumptions of Theorem \ref{thm_OB_d_decay}, we have
\begin{equation}\label{new_tau_L2_}
\begin{split}
\|\nabla\tau(t)\|_{L^2}^2\le C (1+t)^{-3},\,\,\,\,
\|\nabla^2\tau(t)\|_{L^2}^2\le C (1+t)^{-4}.
\end{split}
\end{equation}for all $t>0.$
\end{corollary}
\begin{proof}
\eqref{new_tau_L2_} can be obtained by using (\ref{upper decay2}), \eqref{new_tau_L2_1}, \eqref{na2udecay}, and \eqref{na3udecay}.
\end{proof}
Notice that \eqref{new_tau_L2_1} cannot be used to obtain a further time-decay rate for the quantity $\|\nabla^3 \tau(t)\|_{L^2}$, since the decay of $\|\nabla^4 u(t)\|_{L^2}$ is unknown. Here we consider a combination of $\|\nabla^3 u^h(t)\|_{L^2}^2$ and $\|\nabla^3 \tau(t)\|_{L^2}^2$.
\begin{lemma}\label{lemma_upper_decay_5}
Under the assumptions of Theorem \ref{thm_OB_d_decay}, we have
\begin{equation}\label{highest_order_decay_0}
\begin{split}
\|\nabla^3 u^h(t)\|_{L^2}^2 + \|\nabla^3 \tau(t)\|_{L^2}^2\le C (1+t)^{-5},
\end{split}
\end{equation}
for all $t>0.$
\end{lemma}
\begin{proof}
Multiplying $\nabla^3$(\ref{Oldroyd_B_d})$_1^h$ and $\nabla^3$ (\ref{Oldroyd_B_d})$_2$ by $\alpha \nabla^3 u^h$ and $K \nabla^3 \tau$, respectively, summing the results up, and using integration by parts, we have
\begin{equation}\label{highest_order_decay_1}
\begin{split}
&\frac12 \udt (\alpha\|\nabla^3 u^h\|_{L^2}^2 + K\|\nabla^3\tau\|_{L^2}^2) + \beta K\|\nabla^3\tau\|_{L^2}^2\\
= & -\, \Big(\langle\alpha \nabla^3(u\cdot\nabla u)^h,\nabla^3u^h \rangle + \langle K \nabla^3(u\cdot\nabla\tau),\nabla^3\tau \rangle\Big)\\
& + \, \Big(\langle \alpha \nabla^3\mathbb{D}(u^h),K\nabla^3\tau^l \rangle + \langle \alpha \nabla^3\mathbb{D}(u^l),K\nabla^3\tau \rangle\Big)\\
=: &\,I_{9} + I_{10},
\end{split}
\end{equation}
where we have used $$\langle K \nabla^3{\rm div}\tau^h,\alpha\nabla^3u^h \rangle = -\,\langle \alpha \nabla^3\mathbb{D}(u^h),K\nabla^3\tau^h \rangle.$$
Using Plancherel's theorem, we obtain
\begin{equation}\label{highest_order_decay_2}
\begin{split}
\langle\alpha \nabla^3(u\cdot\nabla u)^h,\nabla^3u^h \rangle
= &\,\langle\alpha|\xi|^3 \big(1-\varphi_0(\xi)\big)\widehat{u\cdot\nabla u},\,\,\,|\xi|^3\big(1-\varphi_0(\xi)\big)\hat{u} \rangle\\
= &\,\langle\alpha|\xi|^3\widehat{u\cdot\nabla u},\,\,\,|\xi|^3(1-\varphi_0(\xi))^2\hat{u}\rangle\\
= &\,\langle\alpha\nabla^3(u\cdot\nabla u),\,\,\nabla^3u^{\widetilde{h}}\rangle,
\end{split}
\end{equation}
where we define that
\begin{equation*}
u^{\widetilde{h}}(x):=\mathcal{F}^{-1}((1-\varphi_0(\xi))^2\hat{u}(\xi))\,\,\,\ \text{and} \,\,\,\,u^{\widetilde{l}}(x):=u(x)-u^{\widetilde{h}}(x).
\end{equation*}
Next, we split the term $\langle\alpha\nabla^3(u\cdot\nabla u),\nabla^3u^{\widetilde{h}}\rangle$ in (\ref{highest_order_decay_2}) into two parts; using (\ref{upper decay2}), (\ref{na2udecay}) and (\ref{na3udecay}), we have
\begin{equation}\label{highest_order_decay_3}
\begin{split}
&\big|\langle\alpha\nabla^3(u\cdot\nabla u),\nabla^3u^{\widetilde{h}}\rangle\big|\\
= &\,\big|\langle\alpha\nabla^3(u\cdot\nabla u^{\widetilde{h}}),\nabla^3u^{\widetilde{h}}\rangle + \langle\alpha\nabla^3(u\cdot\nabla u^{\widetilde{l}}),\nabla^3u^{\widetilde{h}}\rangle\big|\\
\le &\,C\Big(\|\nabla u\|_{L^\infty}\|\nabla^3 u^{\widetilde{h}}\|_{L^2}^2 + \|\nabla u^{\widetilde{h}}\|_{L^\infty}\|\nabla^3 u\|_{L^2}\|\nabla^3 u^{\widetilde{h}}\|_{L^2}\\&+\|\nabla^2u\|_{H^1}\|\nabla^2u^{\widetilde{h}}\|_{H^1}\|\nabla^3u^{\widetilde{h}}\|_{L^2}
+ \|\nabla u\|_{L^\infty}\|\nabla^3 u^{\widetilde{l}}\|_{L^2}\|\nabla^3 u^{\widetilde{h}}\|_{L^2}\\ & + \|\nabla u^{\widetilde{l}}\|_{L^\infty}\|\nabla^3 u\|_{L^2}\|\nabla^3 u^{\widetilde{h}}\|_{L^2}+ \|\nabla^2 u^{\widetilde{l}}\|_{H^1}\|\nabla^2 u\|_{H^1}\|\nabla^3 u^{\widetilde{h}}\|_{L^2} \\&+\|u\|_{L^\infty}\|\nabla^4 u^{\widetilde{l}}\|_{L^2}\|\nabla^3 u^{\widetilde{h}}\|_{L^2}\Big)\\
\le& C(1+t)^{-5} + C(1+t)^{-\frac{5}{2}}\|\nabla^4 u^{\widetilde{l}}\|_{L^2}.
\end{split}
\end{equation}
Similarly to (\ref{new_H5_L2_8}) and (\ref{new_H5_L2_9}), we have
\begin{equation}\label{highest_order_decay_}
\begin{split}
\|\nabla^4 u^{\widetilde{l}}\|_{L^2}&\le C(1+t)^{-\frac{5}{2}} + C\int_{\frac{t}{2}}^{t}(1+t-s)^{-1}\Big(\||\xi|^2\hat{\mathcal{F}}_1 (\xi,s)\|_{L^2} + \||\xi|^2\hat{\mathcal{F}}_2 (\xi,s)\|_{L^2} \Big){\rm d}s\\
&\le C(1+t)^{-\frac{5}{2}} + C\int_{\frac{t}{2}}^{t}(1+t-s)^{-1}(\|\nabla^2(u\cdot\nabla u)\|_{L^2} + \|\nabla^2(u\cdot\nabla \tau)\|_{L^2}) {\rm d}s\\
&\le C(1+t)^{-\frac{5}{2}}.
\end{split}
\end{equation}
Combining \eqref{highest_order_decay_2}, \eqref{highest_order_decay_3} and \eqref{highest_order_decay_}, we can estimate $I_9$ as
\begin{equation}\label{highest_order_decay_4}
\begin{split}
|I_{9}|&\le C\,(1+t)^{-5} + C (\|\nabla u\|_{L^\infty}\|\nabla^3\tau\|_{L^2}^2 + \|\nabla \tau\|_{L^\infty}\|\nabla^3u\|_{L^2}\|\nabla^3\tau\|_{L^2})\\
&\le C\,(1+t)^{-5}.
\end{split}
\end{equation}
Noticing that
\begin{equation*}\label{highest_order_decay_5}
\begin{split}
\langle \alpha \nabla^3\mathbb{D}(u^h),K\nabla^3\tau^l \rangle = \langle \alpha \nabla^3\mathbb{D}(u^l),K\nabla^3\tau^h \rangle
\le\frac{\beta K}{4}\|\nabla^3 \tau\|_{L^2}^2 + \frac{\alpha^2 K}{\beta}\|\nabla^4 u^{l}\|_{L^2}^2,
\end{split}
\end{equation*}
and
$$\langle \alpha \nabla^3\mathbb{D}(u^l),K\nabla^3\tau \rangle\le\frac{\beta K}{4}\|\nabla^3 \tau\|_{L^2}^2 + \frac{\alpha^2 K}{\beta}\|\nabla^4 u^{l}\|_{L^2}^2,$$ we can estimate $I_{10}$ as follows:
\begin{equation}\label{highest_order_decay_6}
\begin{split}
|I_{10}|\le \frac{\beta K}{2}\|\nabla^3 \tau\|_{L^2}^2 + \frac{2\alpha^2 K}{\beta}\|\nabla^4 u^{l}\|_{L^2}^2.
\end{split}
\end{equation}
Substituting \eqref{highest_order_decay_4} and \eqref{highest_order_decay_6} into \eqref{highest_order_decay_1}, we get
\begin{equation}\label{highest_order_decay_7}
\begin{split}
\frac12 \udt (\alpha\|\nabla^3 u^h\|_{L^2}^2 + K\|\nabla^3\tau\|_{L^2}^2) + \frac{\beta K}{2}\|\nabla^3\tau\|_{L^2}^2\le C\,(1+t)^{-5},
\end{split}
\end{equation} where the estimate of the second term on the right-hand side of (\ref{highest_order_decay_6}) is similar to that of (\ref{highest_order_decay_}).
Multiplying $\Lambda^3$(\ref{u_sigma_1})$_1^h$ and $\Lambda^2(\ref{u_sigma_1})_2^h$ by $\Lambda^2\sigma^h$ and $\Lambda^3 u^h$, respectively, summing the results up, and using integration by parts, we have
\begin{equation}\label{highest_order_decay_8}
\begin{split}
&\partial_t\langle\Lambda^3 u^h,\Lambda^2 \sigma^h\rangle + \frac{\alpha}{2}\|\Lambda^3 u^h\|_{L^2}^2\\
= & \,\Big(K\|\Lambda^3 \sigma^h\|_{L^2}^2 - \langle\beta\Lambda^2\sigma^h,\Lambda^3 u^h \rangle\Big)\\ &- \Big(\langle \Lambda^3\mathbb{P}(u\cdot \nabla u)^h,\Lambda^2\sigma^h\rangle + \langle \Lambda\mathbb{P}\mathrm{div}(u\cdot \nabla \tau)^h,\Lambda^3 u^h\rangle\Big)\\
=: &\,I_{11} - I_{12}.
\end{split}
\end{equation}
Then, $I_{11}$ and $I_{12}$ can be estimated as follows:
\begin{align}
\label{new6} |I_{11}|&\le K\|\Lambda^3 \sigma^h\|_{L^2}^2 + \frac{\alpha}{16}\|\Lambda^3 u^h\|_{L^2}^2 + \frac{4\beta^2}{\alpha}\|\Lambda^2\sigma^h\|_{L^2}^2,\\
\nonumber|I_{12}|&\le \frac12\|\Lambda^3 \sigma^h\|_{L^2}^2
+ C\|\Lambda^2\mathbb{P}(u\cdot \nabla u)^h\|_{L^2}^2 + \frac{\alpha}{16}\|\Lambda^3 u^h\|_{L^2}^2 + C\|\Lambda\mathbb{P}\mathrm{div}(u\cdot \nabla \tau)^h\|_{L^2}^2\\ \label{new7}
&\le \frac12\|\Lambda^3 \sigma^h\|_{L^2}^2
+ C\|\nabla^2(u\cdot \nabla u)\|_{L^2}^2 + \frac{\alpha}{16}\|\Lambda^3 u^h\|_{L^2}^2 + C\|\nabla^2(u\cdot \nabla \tau)\|_{L^2}^2.
\end{align}
Substituting (\ref{new6}) and (\ref{new7}) into (\ref{highest_order_decay_8}), we can easily deduce that
\begin{equation}\label{highest_order_decay_9}
\begin{split}
\partial_t\langle\Lambda^3 u^h,\Lambda^2 \sigma^h\rangle + \frac{\alpha}{4}\|\Lambda^3 u^h\|_{L^2}^2
\le (K+\frac12)\|\Lambda^3 \sigma^h\|_{L^2}^2 + \frac{4\beta^2}{\alpha}\|\Lambda^2\sigma^h\|_{L^2}^2 + C(1+t)^{-5},
\end{split}
\end{equation} for all $t\ge0$.
We define that
\begin{equation*}
\begin{split}
\mathcal{H}_5(t): = \alpha\|\nabla^3 u^h\|_{L^2}^2 + K\|\nabla^3\tau\|_{L^2}^2 + \eta_3\langle\Lambda^3 u^h,\Lambda^2 \sigma^h\rangle.
\end{split}
\end{equation*}
Then
\begin{equation}\label{highest_order_decay_10}
\frac12\alpha\|\nabla^3 u^h\|_{L^2}^2 + \frac12K\|\nabla^3\tau\|_{L^2}^2\le\mathcal{H}_5(t)\le 2\alpha\|\nabla^3 u^h\|_{L^2}^2 + 2K\|\nabla^3\tau\|_{L^2}^2.
\end{equation}
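The two-sided bound \eqref{highest_order_decay_10} indeed holds once $\eta_3$ is suitably small: since $\widehat{\sigma^h}$ vanishes for $|\xi|\le\frac{R}{2}$ and $\|\Lambda^3 \sigma\|_{L^2}\le C\|\Lambda^3\tau\|_{L^2}$,
\begin{equation*}
\eta_3\big|\langle\Lambda^3 u^h,\Lambda^2 \sigma^h\rangle\big|
\le \frac{\eta_3}{2}\|\nabla^3 u^h\|_{L^2}^2+\frac{C\eta_3}{2}\|\nabla^3\tau\|_{L^2}^2
\le \frac{\alpha}{2}\|\nabla^3 u^h\|_{L^2}^2+\frac{K}{2}\|\nabla^3\tau\|_{L^2}^2,
\end{equation*}
where the indicative threshold is $\eta_3\le\min\{\alpha, K/C\}$.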
Summing $2\times$\eqref{highest_order_decay_7} and $\eta_3\times$\eqref{highest_order_decay_9} up,
and using \eqref{highest_order_decay_10} and the smallness of $\eta_3$, we have
\begin{equation}\label{new8}
\udt \mathcal{H}_5(t) + \bar{c}_4\mathcal{H}_5(t)\le C\,(1+t)^{-5},
\end{equation}
for some positive constants $\bar{c}_4$ and $C$, where we have used
\begin{equation*}
\|\Lambda^3 \sigma^h\|_{L^2}^2 +\|\Lambda^2\sigma^h\|_{L^2}^2\le C\|\Lambda^3 \tau\|_{L^2}^2.
\end{equation*}
Then, (\ref{new8}) yields (\ref{highest_order_decay_0}). The proof of Lemma \ref{lemma_upper_decay_5} is complete.
\end{proof}
With Lemmas \ref{lemma_upper_decay+1}, \ref{lemma_upper_decay_2}, \ref{lemma_upper_decay_3}, \ref{lemma_upper_decay_4}, and \ref{lemma_upper_decay_5}, and Corollaries \ref{cor1} and \ref{cor2}, we get (\ref{opti1}) and (\ref{opti2}) in Theorem \ref{thm_OB_d_decay}.
\subsection{Lower time-decay estimates}
To finish the proof of Theorem \ref{thm_OB_d_decay}, we will establish the lower decay estimates for the system \eqref{Oldroyd_B_d}.
\begin{lemma}\label{lower_bound}
Under the assumptions of Theorem \ref{thm_OB_d_decay}, there exists a positive time $t_1$ such that the following estimates
\begin{equation}\label{lower_bound_1}
\|\nabla^k u(t)\|_{L^2} \ge \frac{1}{C} (1 + t)^{-\frac{1}{2}-\frac{k}{2}}, \quad k=0,1,2,3,
\end{equation}
and
\begin{equation}\label{lower_bound_2}
\|\nabla^k \tau (t)\|_{L^2} \ge \frac{1}{C} (1 + t)^{-1-\frac{k}{2}}, \quad k=0, 1, 2, 3,
\end{equation}
hold for all $t\geq t_1$.
\end{lemma}
\begin{proof} Recalling from (\ref{Greenfunction_13-1}) and (\ref{Greenfunction_13-2}), we have
\begin{equation*}
\begin{split}
\hat{u}(t) =& \mathcal{G}_3 \hat{u}_0 + K|\xi|\mathcal{G}_1\hat{\sigma}_0 + \int_0^t\mathcal{G}_3(t-s)\hat{\mathcal{F}}_1 (s) + K|\xi|\mathcal{G}_1(t-s)\hat{\mathcal{F}}_2 (s)
{\rm d}s,\\
\hat{\sigma}(t) =& -\frac{\alpha}{2}|\xi|\mathcal{G}_1 \hat{u}_0 + \mathcal{G}_2\hat{\sigma}_0 + \int_0^t-\frac{\alpha}{2}|\xi|\mathcal{G}_1(t-s)\hat{\mathcal{F}}_1 (s) + \mathcal{G}_2(t-s)\hat{\mathcal{F}}_2 (s){\rm d}s,
\end{split}
\end{equation*}
where
\begin{eqnarray*}
\mathcal{F}_1=-\mathbb{P}\left(u\cdot\nabla u\right),\
\mathcal{F}_2=-\Lambda^{-1}\mathbb{P}{\rm div}\left(u\cdot\nabla\tau\right).
\end{eqnarray*}
From \eqref{lemma_Greenfunction_12} and \eqref{lemma_Greenfunction_13}, we can obtain for all $t\geq t_1$, that
\begin{equation}\label{lower_bound_3}
\begin{split}
\|\nabla^ku(t)\|_{L^2} = &\left\||\xi|^k\hat{u}(t)\right\|_{L^2}\\
\ge\,&\frac{1}{C}(1 + t)^{-\frac12-\frac{k}{2}} -
C\int_0^t \Big(\int_{|\xi|\leq R'}\Big||\xi|^k\mathcal{G}_3(t-s)\hat{\mathcal{F}}_1 (\xi,s)\\& + |\xi|^{k+1}\mathcal{G}_1(t-s)\hat{\mathcal{F}}_2 (\xi,s)\Big|^2{\rm d}\xi\Big)^\frac12{\rm d}s,
\end{split}
\end{equation}
and
\begin{equation}\label{lower_bound_4}
\begin{split}
\|\nabla^k\sigma(t)\|_{L^2}= &\left\||\xi|^k\hat{\sigma}(t)\right\|_{L^2}\\
\ge\, &\frac{1}{C}(1 + t)^{-1-\frac{k}{2}}-
C\int_0^t \Big(\int_{|\xi|\leq R'}\Big||\xi|^{k+1}\mathcal{G}_1(t-s)\hat{\mathcal{F}}_1 (\xi,s)\\ &+ |\xi|^k\mathcal{G}_2(t-s)\hat{\mathcal{F}}_2 (\xi,s)\Big|^2{\rm d}\xi\Big)^\frac12{\rm d}s.
\end{split}
\end{equation}
Now we estimate the nonlinear term in \eqref{lower_bound_3} for $k=0, 1, 2, 3$. In fact, using Lemmas \ref{lemma_Greenfunction_4} and \ref{lemma_upper_decay}, we obtain that for $k=0, 1, 2$,
\begin{equation}\label{lower_bound_5}
\begin{split}
& \int_0^t \Big(\int_{|\xi|\leq R'}\Big||\xi|^k\mathcal{G}_3(t-s)\hat{\mathcal{F}}_1 (\xi,s) + |\xi|^{k+1}\mathcal{G}_1(t-s)\hat{\mathcal{F}}_2 (\xi,s)\Big|^2{\rm d}\xi\Big)^\frac12{\rm d}s\\
\le C &\int_0^{\frac{t}{2}} \left(\|\frac{1}{|\xi|} \hat{\mathcal{F}}_1 (\xi,s)\|_{L^\infty} + \|\hat{\mathcal{F}}_2 (\xi,s)\|_{L^\infty}\right)\left(\int_{|\xi|\leq R'} |\xi|^{2(k+1)}e^{-2\theta|\xi|^2(t-s )} {\rm d} \xi\right)^{\frac{1}{2}}{\rm d}s \\
+ &C\int_{\frac{t}{2}}^t \left(\int_{|\xi|\leq R'} e^{-2\theta|\xi|^2(t-s )}\big(|\xi|^{2k}|\hat{\mathcal{F}}_1(\xi,s)|^2 + |\xi|^{2k}|\hat{\mathcal{F}}_2(\xi,s)|^2\big) {\rm d} \xi\right)^{\frac{1}{2}}{\rm d}s \\
\le C &\int_0^{\frac{t}{2}}(1+s)^{-1}(1+t-s)^{-1-\frac{k}{2}}{\rm d}s + C\int_{\frac{t}{2}}^t(1+s)^{-\frac{3}{2}-\frac{k}{2}}(1+t-s)^{-\frac{1}{2}}{\rm d}s\\
\le C& \,(1+t)^{-\frac{3}{4}-\frac{k}{2}}.
\end{split}
\end{equation}
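For instance, in the last step of \eqref{lower_bound_5} one can use
\begin{equation*}
\int_0^{\frac{t}{2}}(1+s)^{-1}(1+t-s)^{-1-\frac{k}{2}}{\rm d}s\le C(1+t)^{-1-\frac{k}{2}}\log(1+t),\qquad
\int_{\frac{t}{2}}^{t}(1+s)^{-\frac{3}{2}-\frac{k}{2}}(1+t-s)^{-\frac{1}{2}}{\rm d}s\le C(1+t)^{-1-\frac{k}{2}},
\end{equation*}
and both right-hand sides are bounded by $C(1+t)^{-\frac{3}{4}-\frac{k}{2}}$, which decays strictly faster than the linear rate $(1+t)^{-\frac12-\frac{k}{2}}$ in \eqref{lower_bound_3}.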
For $k=3$, we have
\begin{equation}\label{lower_bound_7}
\begin{split}
& \int_0^t \Big(\int_{|\xi|\leq R'}\Big||\xi|^3\mathcal{G}_3(t-s)\hat{\mathcal{F}}_1 (\xi,s) + |\xi|^{4}\mathcal{G}_1(t-s)\hat{\mathcal{F}}_2 (\xi,s)\Big|^2{\rm d}\xi\Big)^\frac12{\rm d}s\\
\le C &\,(1+t)^{-\frac{9}{4}}
+ C\int_{\frac{t}{2}}^t \left(\big\||\xi|^{2}\hat{\mathcal{F}}_1(\cdot,s)\big\|_{L^\infty} + \big\||\xi|^{2}\hat{\mathcal{F}}_2(\cdot,s)\big\|_{L^\infty}\right)(1+t-s)^{-1}{\rm d}s \\
\le C& \,(1+t)^{-\frac{9}{4}} + C\int_{\frac{t}{2}}^t(1+s)^{-\frac{5}{2}}(1+t-s)^{-1}{\rm d}s\\
\le C& \,(1+t)^{-\frac{9}{4}}.
\end{split}
\end{equation}
Next, we turn to estimate the nonlinear term in \eqref{lower_bound_4} for $k=0, 1, 2, 3$. Similar to \eqref{lower_bound_5}, using Lemmas \ref{lemma_Greenfunction_4} and \ref{lemma_upper_decay}, we deduce that for $k=0, 1, 2$,
\begin{equation}\label{lower_bound_11}
\begin{split}
& \int_0^t \underbrace{\Big(\int_{|\xi|\leq R'}\Big||\xi|^{k+1}\mathcal{G}_1(t-s)\hat{\mathcal{F}}_1 (\xi,s) + |\xi|^k\mathcal{G}_2(t-s)\hat{\mathcal{F}}_2 (\xi,s)\Big|^2{\rm d}\xi\Big)^\frac12}_{N(k,s)}{\rm d}s
\\=&\int_0^{\frac{t}{2}} N(k,s){\rm d}s + \int_{\frac{t}{2}}^{t} N(k,s){\rm d}s.
\end{split}
\end{equation}For the first term of \eqref{lower_bound_11}, we have
\begin{align}\label{lower_bound_6}
\int_0^{\frac{t}{2}} N(k,s){\rm d}s \le C &\int_0^{\frac{t}{2}} \left(\|\frac{1}{|\xi|} \hat{\mathcal{F}}_1 (\cdot,s)\|_{L^\infty} +\|\hat{\mathcal{F}}_2 (\cdot,s)\|_{L^\infty}\right)\left(\int_{|\xi|\leq R'} |\xi|^{2k+4}e^{-2\theta|\xi|^2(t-s )}{\rm d} \xi \right)^{\frac{1}{2}}{\rm d}s\notag\\
+ &C\int_{0}^{\frac{t}{2}}\||\xi|^{k}\hat{\mathcal{F}}_2(\cdot,s)\|_{L^\infty}\left(\int_{|\xi|\leq R'} e^{-\beta(t-s)} {\rm d} \xi\right)^{\frac{1}{2}}{\rm d}s\\
\le C &\int_0^{\frac{t}{2}}(1+s)^{-1}(1+t-s)^{-\frac{3}{2}-\frac{k}{2}}{\rm d}s +C\,(1+t)^{-\frac{3}{2}-\frac{k}{2}}\notag\\
\le C&\, (1+t)^{-\frac{5}{4}-\frac{k}{2}}\notag.
\end{align}For the second term of \eqref{lower_bound_11}, we have
\begin{align}
\int_{\frac{t}{2}}^{t} N(k,s){\rm d}s\le &\,C\int_{\frac{t}{2}}^t \left(\||\xi|^{k}\hat{\mathcal{F}}_1(\cdot,s)\|_{L^\infty} + \||\xi|^{k}\hat{\mathcal{F}}_2(\cdot,s)\|_{L^\infty}\right)\left(\int_{|\xi|\leq R'} |\xi|^{2}e^{-2\theta|\xi|^2(t-s )} {\rm d} \xi\right)^{\frac{1}{2}}{\rm d}s \notag\\
&+ C\int_{\frac{t}{2}}^t\||\xi|^{k}\hat{\mathcal{F}}_2(\cdot,s)\|_{L^\infty}\left(\int_{|\xi|\leq R'} e^{-\beta(t-s)} {\rm d} \xi\right)^{\frac{1}{2}}{\rm d}s\\
\le &C\int_{\frac{t}{2}}^t(1+s)^{-\frac{3}{2}-\frac{k}{2}}(1+t-s)^{-1}{\rm d}s +C\,(1+t)^{-\frac{3}{2}-\frac{k}{2}}\notag\\
\le &C\, (1+t)^{-\frac{5}{4}-\frac{k}{2}}\notag.
\end{align}
For $k=3$, we have
\begin{equation}\label{lower_bound_8}
\begin{split}
& \int_0^t \Big(\int_{|\xi|\leq R'}\Big||\xi|^{4}\mathcal{G}_1(t-s)\hat{\mathcal{F}}_1 (\xi,s) + |\xi|^3\mathcal{G}_2(t-s)\hat{\mathcal{F}}_2 (\xi,s)\Big|^2{\rm d}\xi\Big)^\frac12{\rm d}s\\
\le C&\,(1+t)^{-\frac{11}{4}}
+ C\int_{\frac{t}{2}}^t \left(\||\xi|^{2}\hat{\mathcal{F}}_1(\cdot,s)\|_{L^2} + \||\xi|^{2}\hat{\mathcal{F}}_2(\cdot,s)\|_{L^2}\right)(1+t-s)^{-1}{\rm d}s \\
\le C& \,(1+t)^{-\frac{11}{4}} + C\int_{\frac{t}{2}}^t\Big(\|\nabla^{2}(u\cdot\nabla u)(s)\|_{L^2} + \|\nabla^{2}(u\cdot\nabla\tau)(s)\|_{L^2}\Big)(1+t-s)^{-1}{\rm d}s\\
\le C& \,(1+t)^{-\frac{11}{4}},
\end{split}
\end{equation}
where we have used the estimate:
\begin{equation*}\label{lower_bound_9}
\begin{split}
&\|\nabla^{2}(u\cdot\nabla u)(t)\|_{L^2} + \|\nabla^{2}(u\cdot\nabla\tau)(t)\|_{L^2}\\
\le &C\big(\| u\|_{L^2}^{\frac12}\|\nabla^2 u\|_{L^2}^{\frac12}\|\nabla^3 u\|_{L^2} + \|\nabla u\|_{L^2}^{\frac12}\|\nabla^3 u\|_{L^2}^{\frac12}\|\nabla^2 u\|_{L^2}\big)\\
&+C\big(\| u\|_{L^2}^{\frac12}\|\nabla^2 u\|_{L^2}^{\frac12}\|\nabla^3 \tau\|_{L^2} + \|\nabla u\|_{L^2}^{\frac12}\|\nabla^3 u\|_{L^2}^{\frac12}\|\nabla^2 \tau\|_{L^2}+ \|\nabla \tau\|_{L^2}^{\frac12}\|\nabla^3 \tau\|_{L^2}^{\frac12}\|\nabla^2 u\|_{L^2}\big)\\
\le &C (1+t)^{-3},\\
\end{split}
\end{equation*}for all $t\geq 0$, which is similar to \eqref{new_H5_L2_9}.
Using \eqref{lower_bound_3}--\eqref{lower_bound_8} and the fact that $\|\nabla^k\sigma\|_{L^2}\le C \|\nabla^k\tau\|_{L^2}$, we conclude that \eqref{lower_bound_1} and \eqref{lower_bound_2} hold for all $t\ge t_1$ with $t_1$ sufficiently large, i.e., \eqref{opti3} and \eqref{opti4} hold. Thus the proof of Theorem \ref{thm_OB_d_decay} is complete.
\end{proof}
\section*{Acknowledgments}This work was supported by the Guangdong Basic and Applied Basic Research Foundation \#2020B1515310015 and \#2022A1515012112, by the National Natural Science Foundation of China \#12071152, and by the Guangdong Provincial Key Laboratory of Human Digital Twin (\#2022B1212010004).
\end{document} |
\begin{document}
\title{On a nonhomogeneous Kirchhoff type elliptic system with the singular Trudinger-Moser growth} \author {Shengbing Deng\footnote{ E-mail address:\, {\tt [email protected]} (S. Deng), {\tt [email protected]} (X. Tian)} \, and Xingliang Tian\\ \footnotesize School of Mathematics and Statistics, Southwest University, Chongqing, 400715, P.R. China} \date{ } \maketitle
\begin{abstract} {The aim of this paper is to study the multiplicity of solutions for the following Kirchhoff type elliptic systems
\begin{eqnarray*}
\left\{ \arraycolsep=1.5pt
\begin{array}{ll}
-m\left(\sum^k_{j=1}\|u_j\|^2\right)\Delta u_i=\frac{f_i(x,u_1,\ldots,u_k)}{|x|^\beta}+\varepsilon h_i(x),\ \ & \mbox{in}\ \ \Omega, \ \ i=1,\ldots,k ,\\[2mm]
u_1=u_2=\cdots=u_k=0,\ \ & \mbox{on}\ \ \partial\Omega,
\end{array}
\right.
\end{eqnarray*}
where $\Omega$ is a bounded domain in $\mathbb{R}^2$ containing the origin with smooth boundary, $\beta\in [0,2)$, $m$ is a Kirchhoff type function, $\|u_j\|^2=\int_\Omega|\nabla u_j|^2dx$, $f_i$ behaves like $e^{\alpha s^2}$ as $|s|\rightarrow \infty$ for some $\alpha>0$, there is a $C^1$ function $F: \Omega\times\mathbb{R}^k\to \mathbb{R}$ such that $\left(\frac{\partial F}{\partial u_1},\ldots,\frac{\partial F}{\partial u_k}\right)=\left(f_1,\ldots,f_k\right)$, and $h_i\in \left(\big(H^1_0(\Omega)\big)^*,\|\cdot\|_*\right)$. We establish sufficient conditions for the multiplicity of solutions of the above system by using variational methods together with a suitable singular Trudinger-Moser inequality when $\varepsilon>0$ is small.}
\emph{\bf Keywords:} Kirchhoff type elliptic systems; multiple solutions; singular Trudinger-Moser inequality.
\emph{\bf 2020 Mathematics Subject Classification:} 35J50, 35J57. \end{abstract}
\section{{\bfseries Introduction}}
In the last decades, great attention has been devoted to the study of problems involving exponential growth nonlinearities, which are related to the famous Trudinger-Moser inequality. Let $\Omega$ be a bounded domain in $\mathbb{R}^2$, and denote by $H_0^{1}(\Omega)$ the standard first order Sobolev space given by \[
H_0^{1}(\Omega)=\mathrm{cl}\Big\{u\in C^\infty_0(\Omega)\ :\ \int_\Omega|\nabla u|^2{\rm d}x<\infty\Big\},\quad\ \ \|u\| =\left(\int_\Omega|\nabla u|^2{\rm d}x\right)^{\frac{1}{2}}. \]
This space is a limiting case for the Sobolev embedding theorem, which yields $H_0^{1}(\Omega)\hookrightarrow L^p(\Omega)$ for all $1\leq p<\infty$; however, easy examples show that $H_0^{1}(\Omega)\not\subseteq L^\infty(\Omega)$, for instance $u(x)=\log(1-\log|x|)$ in $B_1(0)$. Hence, one is led to look for a function $g:\mathbb{R}\to\mathbb{R}^+$ with maximal growth such that \[
\sup\limits_{u\in H_0^{1}(\Omega),\|u\| \leq 1}\int_\Omega g(u){\rm d}x<\infty. \] It was shown by Trudinger \cite{Trudinger} and Moser \cite{m} that the maximal growth is of exponential type. More precisely, the Trudinger-Moser inequality states that \[ \exp(\alpha u^{2})\in L^1(\Omega),\quad \forall\ u\in H_0^{1}(\Omega),\ \ \forall\ \alpha>0, \] and \begin{align*}
\sup\limits_{u\in H_0^{1}(\Omega),\|u\| \leq 1}\int_\Omega \exp(\alpha u^{2}){\rm d}x< \infty,\quad \mbox{if}\ \alpha\leq 4\pi, \end{align*} where $4\pi$ is the sharp constant in the sense that the supremum on the left is $\infty$ if $\alpha >4\pi$.
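For the example mentioned above, a direct computation confirms that $u(x)=\log(1-\log|x|)$ belongs to $H^1_0(B_1(0))$ but not to $L^\infty(B_1(0))$: in polar coordinates,
\[
\int_{B_1(0)}|\nabla u|^2{\rm d}x=2\pi\int_0^1\frac{{\rm d}r}{r\,(1-\log r)^{2}}=2\pi\Big[\frac{1}{1-\log r}\Big]_{r=0}^{r=1}=2\pi<\infty,
\]
while $u(x)\rightarrow+\infty$ as $|x|\rightarrow 0$.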
In order to treat the system problems, here we give some definitions. For all $1\leq p<\infty$, we define $L^p(\Omega,\mathbb{R}^k)$ as
\[
L^p(\Omega,\mathbb{R}^k):=\underbrace{L^p(\Omega)\times\cdots\times L^p(\Omega)}_{k},
\]
where $L^p(\Omega)$ is the standard $L^p$-space, and since
\begin{equation}\label{eqk}
\frac{1}{k}\left(\sum^k_{i=1}|u_i|^p\right)\leq\left(\sum^k_{i=1}|u_i|^2\right)^{\frac{p}{2}}\leq
k^p\left(\sum^k_{i=1}|u_i|^p\right),
\end{equation}
we see that $L^p(\Omega,\mathbb{R}^k)$ is well defined, and for $U\in L^p(\Omega,\mathbb{R}^k)$ we define $\|U\|_p=\left(\int_\Omega |U|^p dx\right)^{1/p}$, where $|U|=(\sum^k_{i=1}|u_i|^2)^{1/2}$. Moreover, let
\[
H^1_0(\Omega,\mathbb{R}^k):=\underbrace{H_0^{1}(\Omega)\times\cdots\times H_0^{1}(\Omega)}_{k}
\]
be the Sobolev space modeled on $L^2(\Omega,\mathbb{R}^k)$ with the scalar product
\begin{equation*}\
\langle U,V\rangle=\sum^k_{i=1}\int_{\Omega}\nabla u_i\cdot\nabla v_i\,dx,
\end{equation*}
where $U, V\in H^1_0(\Omega,\mathbb{R}^k)$, to which corresponds the norm $\|U\|=\langle U,U\rangle^{1/2}=(\sum^k_{i=1}\|u_i\|^2)^{1/2}$; then $H^1_0(\Omega,\mathbb{R}^k)$ is well defined and is a Hilbert space. For all $1\leq p<\infty$,
by the inequality (\ref{eqk}) and the Sobolev embedding theorem, we deduce that the embedding $H^1_0(\Omega,\mathbb{R}^k)\hookrightarrow L^p(\Omega,\mathbb{R}^k)$ is compact and $H^1_0(\Omega,\mathbb{R}^k)\nsubseteq L^\infty(\Omega,\mathbb{R}^k)$, where $L^\infty(\Omega,\mathbb{R}^k):=L^\infty(\Omega)\times\cdots\times L^\infty(\Omega)$. In Section \ref{sec preliminaries}, we will establish the Trudinger-Moser type inequality for $H^1_0(\Omega,\mathbb{R}^k)$.
Now we state our problem. Let $\Omega$ be a bounded domain in $\mathbb{R}^2$ containing the origin with smooth boundary. We study the multiplicity of solutions for the following Kirchhoff type systems
\begin{eqnarray}\label{P}
\left\{ \arraycolsep=1.5pt
\begin{array}{ll}
-m\left(\sum^k_{j=1}\|u_j\|^2\right)\Delta u_i=\frac{f_i(x,u_1,\ldots,u_k)}{|x|^\beta}+\varepsilon h_i(x),\ \ & \mbox{in}\ \ \Omega, \ \ i=1,\ldots,k ,\\[2mm]
u_1=u_2=\cdots=u_k=0,\ \ & \mbox{on}\ \ \partial\Omega,
\end{array}
\right.
\end{eqnarray}
where $\beta\in [0,2)$, $m$ is a continuous Kirchhoff type function, $h_i\in \big(\big(H^1_0(\Omega)\big)^*,\|\cdot\|_*\big)\backslash\{0\}$ for some $i\in\{1,\ldots,k\}$, $\varepsilon$ is a small positive parameter, and $f_i$ has the maximal growth which allows treating (\ref{P}) variationally in the Sobolev space $H^1_0(\Omega,\mathbb{R}^k)$. We shall consider the variational situation in which
\begin{equation*}
(f_1(x,U),\ldots,f_k(x,U))=\nabla F(x,U)
\end{equation*}
for some function $F:\Omega \times \mathbb{R}^k \rightarrow \mathbb{R}$ of class $C^1$, where $\nabla F$ stands for the gradient of $F$ in the variables $U=(u_1,\ldots,u_k)\in \mathbb{R}^k$.
We then rewrite (\ref{P}) in matrix form as
\begin{equation}\label{Pb1}
-m\left(\|U\|^2\right)\Delta U=\frac{\nabla F(x,U)}{|x|^\beta}+\varepsilon H(x),
\end{equation}
where $\Delta U=(\Delta u_1,\ldots,\Delta u_k)$, $\frac{\nabla F(x,U)}{|x|^\beta}=\Big(\frac{f_1(x,U)}{|x|^\beta},\ldots,\frac{f_k(x,U)}{|x|^\beta}\Big)$ and $H(x)=\big(h_1(x),\ldots,h_k(x)\big)$.
System (\ref{P}) is called nonlocal because of the term $m\left(\sum^k_{j=1}\|u_j\|^2\right)$, which implies that the equation in (\ref{P}) is no longer a pointwise identity. As we will see later, the presence of this term causes some mathematical difficulties, which makes the study of such a class of problems particularly interesting. Moreover, system (\ref{P}) with $k=1$ has a physical appeal, being a generalization of a model introduced in 1883 by Kirchhoff \cite{k}. There are many results about the existence and multiplicity of solutions for Kirchhoff problems; we refer to \cite{acm,chen,cy2,c2,fs,fiscellaValdinoci,hezou,hezou2,nt} and the references therein. When $k=1$, $\beta=0$ and $\varepsilon=0$, system (\ref{P}) becomes the following Kirchhoff type problem \begin{eqnarray}\label{P1}
\left\{ \arraycolsep=1.5pt
\begin{array}{ll}
-m\Big(\int_\Omega|\nabla u|^2dx\Big)\Delta u =f(x,u),\ \ & \mbox{in}\ \ \Omega,\\[2mm]
u=0,\ \ & \mbox{on}\ \ \partial\Omega,
\end{array}
\right. \end{eqnarray} where the Kirchhoff function $m : \mathbb{R}_{+} \rightarrow \mathbb{R}_{+}$ satisfies
$(\overline{M}_1)$ there exists $m_0>0$ such that $m(t)\geq m_0$ for all $t\geq0$ and $M(t+s)\geq M(t)+M(s)$ for all $s,t\geq0$, where $M(t)=\int^t_0 m(\tau)d\tau$ is the primitive of $m$.
$(\overline{M}_2)$ there exist constants $a_1,a_2>0$ and $t_0>0$ such that for some $\sigma\in\mathbb{R}$, $m(t) \leq a_{1}+a_{2} t^{\sigma}, \forall t \geq t_{0}$.
$(\overline{M}_3)$ $\frac{m(t)}{t}$ is nonincreasing for $t>0$.
\noindent Moreover, the nonlinearity $f:\Omega\times\mathbb{R}\to\mathbb{R}$ is continuous and satisfies
$(\overline{F}_1)$ there exist constants $s_0,K_0>0$ such that $F(x, s) \leq K_{0} f(x, s), \ \forall(x, s) \in \Omega \times\left[s_{0},+\infty\right)$.
$(\overline{F}_2)$ for each $x \in \Omega, \frac{f(x, s)}{s^{3}}$ is increasing for $s>0$.
$(\overline{F}_3)$ there exists $\beta_{0}>\frac{2}{\alpha_{0} d^{2}} m\left(4 \pi / \alpha_{0}\right)$ such that $\lim _{s \rightarrow+\infty} \frac{s f(x, s)}{\exp \left(\alpha_{0} s^{2}\right)} \geq \beta_{0}$ uniformly in $x \in \Omega$.\\ Under these assumptions, by using minimax techniques together with the Trudinger-Moser inequality, Figueiredo and Severo \cite{fs} obtained the existence of a ground state solution of (\ref{P1}). We note that hypothesis $(\overline{F}_2)$ is necessary to obtain precise information about the minimax level of the energy functional associated with problem (\ref{P1}), which allows them to show the existence of a least energy solution. Recently, Naimen and Tarsi \cite{nt} studied the existence and multiplicity of solutions for problem (\ref{P1}) with $m(t)=1+\alpha t$ under assumptions weaker than those in \cite{fs}.
On the other hand, we mention that the existence of solutions for elliptic equations involving critical exponential nonlinearities and a small nonhomogeneous term has been considered by many authors; see \cite{adiyang,am,doms,lamlu4,y2012} and the references therein. In the whole Euclidean space $\mathbb{R}^N$, $N$-Laplacian problems were treated in \cite{doms}, and $N$-Laplacian problems with a nonlinear term involving critical Hardy exponential growth and a nonhomogeneous term were studied in \cite{adiyang,y2012}. Moreover, Lam and Lu \cite{lamlu4} established the existence and multiplicity of nontrivial solutions for nonuniformly elliptic equations of $N$-Laplacian type, and de Souza \cite{ds} studied the existence of solutions for a singular class of elliptic systems involving critical exponential growth in a bounded domain of $\mathbb{R}^2$. To the best of our knowledge, there are no results for (\ref{P}) with a Kirchhoff function and an exponential growth nonlinearity.
The main purpose of the present paper is to consider the multiplicity of solutions of system (\ref{P}) and overcome the lack of compactness due to the presence of exponential growth terms as well as the degenerate nature of the Kirchhoff coefficient.
Let us introduce the precise assumptions under which our problem is studied. For this, we define $M(t)=\int^ t_0 m(\tau)d\tau$, the primitive of $m$ so that $M(0)=0$. The hypotheses on Kirchhoff function $m:\mathbb{R}^+ \rightarrow\mathbb{R}^+$ are the following:
($M_1$)
there exists $m_0>0$ such that $m(t)\geq m_0$ for all $t\geq 0$;
($M_2$)
$m(t)$ is nondecreasing for $t\geq 0$;
($M_3$)
there exists \ $\theta>1$ such that $\theta M(t)-m(t)t$ is nondecreasing for $t\geq 0$.
\begin{remark}\label{rem1}\rm
By $(M_1)$, we can get that $M(t)$ is increasing for $t\geq 0$. \end{remark}
\begin{remark}\label{rem3}\rm From $(M_3)$, since $\theta M(t)-m(t)t$ is nondecreasing on $[0,+\infty)$ and vanishes at $t=0$, we have that
\begin{equation}\label{1.4}
\theta M(t)-m(t)t\geq 0,\ \ \forall t\geq 0.
\end{equation} \end{remark}
\begin{remark}\label{rem2}\rm A typical example of a function $m$ satisfying the conditions $(M_1)-(M_3)$ is given by $m(t)=m_0+at^{\theta-1}$ with $\theta>1, m_0>0$ and $a\geq 0$. Another example is $m(t)=1+\ln(1+t)$. \end{remark}
\begin{remark}\label{remcmm}\rm Here, we compare assumptions $(\overline{M}_1)-(\overline{M}_3)$ from \cite{fs}, stated above, with our present assumptions $(M_1)-(M_3)$. From $(\overline{M}_3)$, we can obtain $(M_3)$ with $\theta=2$. Indeed, for any $0<t_1< t_2$,
\begin{equation*}
\begin{split}
2M(t_1)-m(t_1)t_1&=2M(t_2)-2\int^{t_2}_{t_1}m(s)ds-\frac{m(t_1)t^2_1}{t_1} \\
&\leq 2M(t_2)-\frac{m(t_2)(t^2_2-t^2_1)}{t_2}-\frac{m(t_2)t^2_1}{t_2} \\
&=2 M(t_2)-m(t_2)t_2,
\end{split}
\end{equation*}
thus $2M(t)-m(t)t$ is nondecreasing for $t\geq 0$.
From $(M_1)-(M_2)$, we can obtain $(\overline{M}_1)$. Indeed, since $m(t)$ is nondecreasing for $t\geq 0$, we have $\int^{t+s}_t m(\tau)d\tau\geq \int^{s}_0 m(\tau)d\tau$ for all $s,t\geq0$; hence $\int^{t}_0 m(\tau)d\tau+\int^{t+s}_t m(\tau)d\tau\geq \int^{t}_0 m(\tau)d\tau+\int^{s}_0 m(\tau)d\tau$, i.e. $M(t+s)\geq M(t)+M(s)$.
Then from (\ref{1.4}), we can get $M(t)\geq M(1)t^\theta$ for $t\leq 1$, and $M(t)\leq M(1)t^\theta$ for $t\geq 1$, thus $M(t)\leq C_1t^\theta+C_2$ for some $C_1,C_2>0$. \end{remark}
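For completeness, the last claims in Remark \ref{remcmm} follow from the monotonicity of $t\mapsto M(t)/t^{\theta}$: by \eqref{1.4}, for $t>0$,
\[
\frac{{\rm d}}{{\rm d}t}\Big(\frac{M(t)}{t^{\theta}}\Big)
=\frac{m(t)t-\theta M(t)}{t^{\theta+1}}\leq 0,
\]
so $M(t)/t^{\theta}$ is nonincreasing on $(0,+\infty)$, which gives $M(t)\geq M(1)t^{\theta}$ for $0<t\leq 1$, $M(t)\leq M(1)t^{\theta}$ for $t\geq 1$, and hence $M(t)\leq C_1 t^{\theta}+C_2$.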
Motivated by the pioneering works of Adimurthi \cite{ad}, de Figueiredo et al. \cite{dmr1} and J.M. do \'{O} \cite{do}, we treat the so-called subcritical case and also the critical case. We say that a function $f:\Omega\times\mathbb{R}\to\mathbb{R}$ has subcritical growth on $\Omega\subset \mathbb{R}^2$ if \[
\lim _{|u| \rightarrow \infty} \frac{|f(x, u)|}{\exp \left(\alpha u^{2}\right)}=0, \text { uniformly on } \Omega,\ \forall \alpha>0, \] and $f$ has critical growth on $\Omega$ if there exists $\alpha_0>0$ such that \[
\lim _{|u| \rightarrow \infty} \frac{|f(x, u)|}{\exp \left(\alpha u^{2}\right)}=0, \text { uniformly on } \Omega,\ \forall \alpha>\alpha_0, \] and \[
\lim _{|u| \rightarrow \infty} \frac{|f(x, u)|}{\exp \left(\alpha u^{2}\right)}=\infty, \text { uniformly on } \Omega,\ \forall \alpha<\alpha_0. \]
Throughout this paper, we assume the following hypotheses on the function $f_i:\Omega\times\mathbb{R}^k\rightarrow\mathbb{R}$ and $F$:
($F_0$) $f_i$ is continuous and $f_i(x,0,\ldots,0)=0$, $F(x,0,\ldots,0)=0$ uniformly on $x\in \Omega$.
($F_1$)
$\limsup_{|U|\rightarrow 0} \frac {2F(x,U)}{|U|^2}<\lambda_1 m_0$ uniformly on $\Omega$, where
$
\lambda_1=\inf_{U\in H^1_0(\Omega,\mathbb{R}^k)\setminus\{0\}} \frac {\|U\|^2}{\int_{\Omega}|U|^2/|x|^\beta dx}>0;
$
($F_2$)
there exist constants $S_0,M_0>0$ such that
$0<F(x,U)\leq M_0|\nabla F(x,U)|$, \ for all \ $|U|\geq S_0$ uniformly on $\Omega$;
($F_3$)
there exists \ $\mu>2\theta$ such that
$0<\mu F(x,U)\leq U\cdot\nabla F(x,U)$, \ for all \ $(x,U)\in \Omega\times\mathbb{R}^k\setminus\{\mathbf{0}\}$;
We say that $U\in H^1_0(\Omega,\mathbb{R}^k)$ is a weak solution of problem (\ref{P}) if it holds that
\begin{equation*}\
m(\|U\|^2)\int_{\Omega}\nabla U\cdot\nabla \Phi dx=\int_{\Omega}\frac{\Phi\cdot \nabla F(x,U)}{|x|^\beta} dx+\varepsilon\int_{\Omega}\Phi\cdot H dx, \ \ \forall \ \Phi\in H^1_0(\Omega,\mathbb{R}^k).
\end{equation*}
Since $f_i(x,0,\ldots,0)=0$,\ $U\equiv \mathbf{0}$ is the trivial solution of problem (\ref{P}). Thus, our aim is to obtain nontrivial solutions. The main results of this work can now be stated as follows.
\begin{theorem}\label{thm1.2}
Assume $f_i$ has subcritical growth at $\infty$, that is,
\begin{equation}\label{1.2}
\lim_{|U|\to \infty}\frac{|f_i(x,U)|}{e^{\alpha |U|^2}}=0,
\ \ \text{uniformly on }x\in\Omega,\ \ \forall\alpha >0.
\end{equation}
Moreover, assume that $(M_1)$,\ $(M_3)$ and $(F_1)-(F_3)$ hold. Then there exists $\varepsilon_{sc}>0$ such that for each $0<\varepsilon<\varepsilon_{sc}$, problem (\ref{P}) has at least two nontrivial weak solutions, one with positive energy and the other with negative energy.
\end{theorem}
\begin{theorem}\label{thm1.3}
Assume $f_i$ has critical growth at $\infty$, that is, there exists $\alpha_0 >0$ such that
\begin{equation}\label{1.3}
\lim_{|u_i|\to\infty}\frac{|f_i(x,U)|}{e^{\alpha |U|^2}}
= \begin{cases}
0,\ \ &\forall\alpha >\alpha_0,\\[3pt]
+\infty,\ \ &\forall\alpha <\alpha_0,
\end{cases}
\end{equation}
uniformly on $x\in\Omega$ and in $u_j$ for $j\in\{1,\ldots,k\}\backslash\{i\}$. Moreover, suppose that $(M_1)-(M_3)$, $(F_1)-(F_3)$ hold and
$(F_4)$ for some $i\in \{1,\ldots,k\}$ there exists $\eta_0$ such that
$$
\liminf_{|u_i|\rightarrow\infty}\frac {u_i f_i(x,0,\ldots,0,u_i,0,\ldots,0)}{e^{\alpha_0 |u_i|^2}}\geq \eta_0>\frac { (2-\beta)^2m\left(\frac {2\pi(2-\beta)}{\alpha_0}\right)}{\alpha_0 d^{2-\beta} e },
$$
uniformly on $\Omega$,
where $d$ is the radius of the largest open ball contained in $\Omega$ centered at the origin.
Then there exists $\varepsilon_c>0$ such that for each $0<\varepsilon<\varepsilon_c$, problem (\ref{P}) has at least two nontrivial weak solutions, one with positive energy and the other with negative energy.
\end{theorem}
\begin{remark}\rm
When $\beta=\varepsilon=0$ and $k=1$, in the hypothesis of \cite{fs} corresponding to $(F_4)$ the constant $e$ is replaced by $2$; in order to obtain this improvement on the growth of the nonlinearity $f_i$ at $\infty$, it is crucial in our argument to use the sequence constructed in \cite{ddr}.
\end{remark}
\begin{remark}\rm
When $m\equiv 1$, $k=1$, $\beta=\varepsilon=0$, problems with critical growth involving the Laplace operator in bounded domains of $\mathbb{R}^2$ have been investigated in \cite{asy,ay,am,dmr1}, and quasilinear elliptic problems with critical growth for the $N$-Laplacian in bounded domains of $\mathbb{R}^N$ have been studied in \cite{ad,do}. Moreover, for problems with critical growth in bounded domains of $\mathbb{R}^2$, $f$ was assumed to satisfy (see examples in \cite{ad,dmr1,do}) the asymptotic hypothesis
\begin{align}\label{f41}
\liminf_{|u|\rightarrow\infty}\frac {u f(x,u)}{e^{\alpha_0 u^2}}\geq \eta_0'>\frac {2}{\alpha_0 d^2},
\end{align}
and, for the Kirchhoff problem, in \cite{fs} the assumption is
\begin{align}\label{f42}
\liminf_{|u|\rightarrow\infty}\frac {u f(x,u)}{e^{\alpha_0 u^2}}\geq \eta_0''>\frac {2 m\big(\frac {4\pi}{\alpha_0}\big)}{\alpha_0 d^2}.
\end{align}
Furthermore, when $m\equiv 1$, de Souza studied this problem in \cite{ds} under the hypothesis
\begin{align}\label{f43}
\liminf_{|U|\rightarrow\infty}\frac {u_i f_i(x,U)}{e^{2^{k-1}\alpha_0 |U|^2}}\geq \eta_0'''>\frac {(2-\beta)^2}{2^{k-1}\alpha_0 d^{2-\beta} e },
\end{align}
for some $i\in\{1,2,\ldots,k\}$.
Our work is also motivated by \cite{as} and \cite{ra}, where a version of the Trudinger-Moser inequality with a singular weight was proved and the existence of positive weak solutions was studied for the following semilinear homogeneous elliptic problem:
\begin{eqnarray*}
\left\{
\begin{array}{ll}
-\Delta u=\frac{f(x,u)}{|x|^\beta},\ \ & \mbox{in}\ \ \Omega,
\\[2mm]
u=0,\ \ & \mbox{on}\ \ \partial\Omega.
\end{array}
\right.
\end{eqnarray*}
In the present paper, we improve and complement some of the results cited above in the singular and nonhomogeneous case and extend them to systems. Moreover, thanks to the sequence constructed by de Figueiredo, do \'{O} and Ruf in \cite{ddr}, the hypotheses (\ref{f41}) and (\ref{f42}) can be improved to $(F_4)$ in Theorem \ref{thm1.3}. In addition, using the improvement of Young's inequality introduced in Lemma \ref{yi}, (\ref{f43}) can also be improved to $(F_4)$.
\end{remark}
\begin{remark}\rm
On the basis of assumption $(F_0)$, if we further assume that $f_i(x,u_1,\ldots,u_k)\equiv 0$ for all $i\in \{1,\ldots,k\}$, uniformly in $x\in\Omega$, whenever $u_j\leq 0$ for some $j\in \{1,\ldots,k\}$, that $h_i\geq 0$ for all $i\in \{1,\ldots,k\}$, and that $(F_4)$ is replaced by
$$
\liminf_{u_1,\ldots,u_k\rightarrow +\infty}\frac {U\cdot\nabla F(x,U)}{e^{\alpha_0 |U|^2}}\geq \eta_0>\frac { (2-\beta)^2m\left(\frac {2\pi(2-\beta)}{\alpha_0}\right)}{\alpha_0 d^{2-\beta} e },
$$
where $U=(u_1,\ldots,u_k)$, then by using the maximum principle, we can prove that the solutions obtained in Theorem \ref{thm1.3} are componentwise positive, i.e. each component is positive. A typical example is $F(x,U)=|U|^\mu\exp(\alpha_0|U|^2)\prod^{k}_{i=1}{\rm sign}\, u_i$, where ${\rm sign}\, t=0$ if $t\leq 0$ and ${\rm sign}\, t=1$ if $t>0$.
\end{remark}
This paper is organized as follows: Section \ref{sec preliminaries} contains some technical results. In Section \ref{vf}, we present the variational setting in which our problem will be treated. Section \ref{ps} is devoted to showing some properties of the Palais-Smale sequences. Finally, we split Section \ref{main} into two subsections, for the subcritical and critical cases, and complete the proofs of our main results. Hereafter, $C,C_0,C_1,C_2,\ldots$ will denote positive (possibly different) constants.
\section{{\bfseries Some preliminary results}}\label{sec preliminaries}
We first recall some well-known inequalities and, inspired by them, derive some related ones. In this paper, we shall use the following version of the Trudinger-Moser inequality with a singular weight, due to Adimurthi-Sandeep \cite{as}:
\begin{lemma}\label{lemtm1}
Let $\Omega$ be a bounded domain in $\mathbb{R}^2$ containing the origin and $u\in H^1_0(\Omega)$. Then for every $\alpha >0$, and $\beta \in [0,2)$,
\begin{align*}
\int_{\Omega} \frac{e^{\alpha |u|^2}}{|x|^\beta}<\infty.
\end{align*}
Moreover, there exists a constant $C(\Omega)$ depending only on $\Omega$ such that
\begin{align*}
\sup_{||\nabla u||_2\leq 1}\int_{\Omega} \frac{e^{\alpha |u|^2}}{|x|^\beta}\leq C(\Omega),
\end{align*}
if and only if $\frac{\alpha}{4\pi}+\frac{\beta}{2} \leq 1$.
\end{lemma}
Next, we give two useful algebraic inequalities that will be used systematically in the rest of the paper:
\begin{lemma}\label{yi}
({\bfseries Improvement of Young's inequality})
Let $a_1,\ldots,a_k>0$, $p_1,\ldots,p_k>1$ with $\frac{1}{p_1}+\frac{1}{p_2}+\cdots+\frac{1}{p_k}=1$, then
\begin{align}\label{yic}
a_1 a_2 \cdots a_k\leq \frac{a^{p_1}_1}{p_1}+\frac{a^{p_2}_2}{p_2}+\cdots+\frac{a^{p_k}_k}{p_k}.
\end{align}
\end{lemma}
\begin{proof}
We proceed by induction on $k$. When $k=2$, the conclusion is the classical Young inequality. Suppose that the conclusion holds for $k=s-1$; we show that it also holds for $k=s$. Let
\begin{align*}
\frac{1}{q}=\sum^{s-1}_{i=1}\frac{1}{p_i},\ \ \frac{1}{q}+\frac{1}{p_s}=1,\ \ \mbox{then}\ \ \sum^{s-1}_{i=1}\frac{1}{p_i/q}=1.
\end{align*}
Thus,
\begin{align*}
\prod^s_{i=1} a_i=\Big(\prod^{s-1}_{i=1}a_i\Big)a_s\leq \frac{1}{q}\Big(\prod^{s-1}_{i=1}a_i\Big)^q+\frac{1}{p_s}a^{p_s}_s,
\end{align*}
by the induction hypothesis, we get that
\begin{align*}
\frac{1}{q}\Big(\prod^{s-1}_{i=1}a_i\Big)^q=\frac{1}{q}\Big(\prod^{s-1}_{i=1}a^{q}_i\Big)\leq \frac{1}{q}\sum^{s-1}_{i=1}\Big[\frac{1}{p_i/q}\big(a^q_i\big)^{p_i/q}\Big]=\sum^{s-1}_{i=1}\frac{1}{p_i}a^{p_i}_i.
\end{align*}
Therefore
\begin{align*}
\prod^s_{i=1} a_i\leq \sum^{s-1}_{i=1}\frac{1}{p_i}a^{p_i}_i+\frac{1}{p_s}a^{p_s}_s=\sum^{s}_{i=1}\frac{1}{p_i}a^{p_i}_i.
\end{align*}
This proves the lemma. In particular, taking $p_1=p_2=\cdots=p_k=k$, we obtain
\begin{align}\label{yiy}
a_1 a_2 \cdots a_k\leq \frac{1}{k}\sum^{k}_{i=1}a^{k}_i\leq \sum^{k}_{i=1}a^{k}_i.
\end{align}
\end{proof}
\begin{lemma}\label{yib}
Suppose $a_1, a_2, \ldots,a_k\geq0$ with $a_1+a_2+\cdots+a_k<1$, then there exist $p_1,\ldots,p_k>1$ satisfying $\frac{1}{p_1}+\frac{1}{p_2}+\cdots+\frac{1}{p_k}=1$, such that
\begin{align}\label{yibc}
p_ia_i<1, \quad \mbox{for all}\ \ i=1,2,\ldots,k.
\end{align}
Moreover, if $a_1, a_2, \ldots,a_k\geq0$ satisfying $a_1+a_2+\cdots+a_k=1$, then we can take $p_i=\frac{1}{a_i}$ such that $\frac{1}{p_1}+\frac{1}{p_2}+\cdots+\frac{1}{p_k}=1$ and
\begin{align}\label{yibcd}
p_ia_i=1, \quad \mbox{for all}\ \ i=1,2,\ldots,k.
\end{align}
\end{lemma}
\begin{proof}
For the case $a_1, a_2, \ldots,a_k\geq0$ with $a_1+a_2+\cdots+a_k<1$, we again argue by induction. When $k=2$, let $a_1, a_2\geq0$ with $a_1+a_2<1$. If $a_2=0$ (or $a_1=0$), then taking $p_1=\left(\frac{1}{2}+\frac{1}{2a_1}\right)>1$ (or $p_2=\left(\frac{1}{2}+\frac{1}{2a_2}\right)>1$), we obtain $p_1a_1<1,\ p_2a_2<1$, where $p_2=\frac{p_1}{p_1-1}$. If $a_1, a_2>0$, then taking $p_1=\left(\frac{1}{2(1-a_2)}+\frac{1}{2a_1}\right)>1$, we obtain $p_1a_1<1,\ p_2a_2<1$, where $p_2=\frac{p_1}{p_1-1}$. Suppose now that the conclusion holds for $k=s-1$; if we show that it also holds for $k=s$, the lemma follows. Let
\begin{align*}
(a_1+a_2+\cdots+a_{s-1})+a_s<1,
\end{align*}
then there exist $q_1, q_2>1$ satisfying $\frac{1}{q_1}+\frac{1}{q_2}=1$ such that
\begin{align*}
q_1(a_1+a_2+\cdots+a_{s-1})<1, \ \ q_2a_s<1.
\end{align*}
Then, by the induction hypothesis, there exist $q_3, q_4,\ldots, q_{s+1}>1$ satisfying $\frac{1}{q_3}+\frac{1}{q_4}+\cdots+\frac{1}{q_{s+1}}=1$ such that
\begin{align*}
q_3(q_1a_1)<1,\ q_4(q_1a_2)<1, \ldots,\ q_{s+1}(q_1a_{s-1})<1,
\end{align*}
i.e.
\begin{align*}
(q_3q_1)a_1<1,\ (q_4q_1)a_2<1, \ldots,\ (q_{s+1}q_1)a_{s-1}<1.
\end{align*}
Taking $p_1=q_1q_3,\ p_2=q_1q_4,\ldots,\ p_{s-1}=q_1q_{s+1},\ p_s=q_2$, then it holds that
\begin{align*}
\frac{1}{p_1}+\frac{1}{p_2}+\cdots+\frac{1}{p_s}=\frac{1}{q_1}\left(\frac{1}{q_3}+\frac{1}{q_4}+\cdots+\frac{1}{q_{s+1}}\right)+\frac{1}{q_2}
=\frac{1}{q_1}+\frac{1}{q_2}=1,
\end{align*}
and
\begin{align*}
p_1a_1<1,\ p_2a_2<1, \ldots,\ p_sa_s<1.
\end{align*}
For the case $a_1, a_2, \ldots,a_k\geq0$ satisfying $a_1+a_2+\cdots+a_k=1$: if $a_i>0$ for all $i\in\{1,\ldots,k\}$, then we can take $p_i=\frac{1}{a_i}$, so that $\frac{1}{p_1}+\frac{1}{p_2}+\cdots+\frac{1}{p_k}=1$ and (\ref{yibcd}) holds. If $a_i=0$ for some $i\in\{1,\ldots,k\}$, the argument reduces to the case of $k-1$ numbers.
The proof is complete.
\end{proof}
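\begin{remark}\rm
For illustration (this will not be used in the sequel), when $k\geq 2$ and $a_1+a_2+\cdots+a_k<1$ a direct, non-inductive choice is also possible: setting $s=a_1+\cdots+a_k$ and $p_i=\big(a_i+\frac{1-s}{k}\big)^{-1}$ for $i=1,\ldots,k$, one has $\sum^{k}_{i=1}\frac{1}{p_i}=s+(1-s)=1$, $p_i>1$ (since $a_i+\frac{1-s}{k}<a_i+1-s\leq 1$) and $p_ia_i=\frac{a_i}{a_i+(1-s)/k}<1$ for every $i$.
\end{remark}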
Using the above inequalities, we now establish singular Trudinger-Moser type inequalities in $H^1_0(\Omega,\mathbb{R}^k)$:
\begin{lemma}\label{lemtm2}
({\bfseries Improvement of the Trudinger-Moser inequality})
Let $\Omega$ be a bounded domain in $\mathbb{R}^2$ containing the origin and $U=(u_1,\ldots,u_k)\in H^1_0(\Omega,\mathbb{R}^k)$. Then for every $\alpha >0$, and $\beta\in [0,2)$,
\begin{align}\label{tmit}
\int_{\Omega} \frac{e^{\alpha |U|^2}}{|x|^\beta}<\infty.
\end{align}
Moreover, it holds that
\begin{align}\label{tmiht}
\sup_{U\in H^1_0(\Omega,\mathbb{R}^k),\ \|U\|\leq 1}\int_{\Omega} \frac{e^{\alpha |U|^2}}{|x|^\beta}\leq C(\Omega)
\end{align}
if and only if $\frac{\alpha}{4\pi}+\frac{\beta}{2} \leq 1$, where $C(\Omega)$ is given in Lemma \ref{lemtm1}.
\end{lemma}
\begin{proof}
Since $U=(u_1,\ldots,u_k)$, we have $|U|^2=\sum^k_{i=1}|u_i|^2$. Thus, by using (\ref{yiy}) and Lemma \ref{lemtm1}, we have
\begin{align*}
\int_{\Omega} \frac{e^{\alpha |U|^2}}{|x|^\beta}=\int_{\Omega} \frac{e^{\alpha |u_1|^2}\cdots e^{\alpha |u_k|^2}}{|x|^\beta} \leq \sum^k_{i=1}\int_{\Omega} \frac{e^{k\alpha |u_i|^2}}{|x|^{\beta}}<\infty.
\end{align*}
We now prove (\ref{tmiht}). Let $U\in H^1_0(\Omega,\mathbb{R}^k)$ satisfy $\|U\|\leq 1$; then $\|U\|^2=\sum^k_{i=1}\|u_i\|^2\leq 1$, and Lemma \ref{yib} shows that there exist $p_1,\ldots,p_k>1$ satisfying $\frac{1}{p_1}+\frac{1}{p_2}+\cdots+\frac{1}{p_k}=1$ such that
$p_i\|u_i\|^2\leq 1$ for all $i=1,2,\ldots,k$. If $\frac{\alpha}{4\pi}+\frac{\beta}{2} \leq 1$, then it also holds that $\frac{p_i\|u_i\|^2\alpha}{4\pi}+\frac{\beta}{2} \leq 1$ for all $i=1,2,\ldots,k$. Then, using Lemma \ref{yi}, i.e. the improvement of Young's inequality, and Lemma \ref{lemtm1}, we have
\begin{align*}
\begin{split}
\int_{\Omega} \frac{e^{\alpha |U|^2}}{|x|^\beta} =\int_{\Omega} \frac{e^{\alpha |u_1|^2}\cdots e^{\alpha |u_k|^2}}{|x|^\beta} \leq \sum^k_{i=1}\int_{\Omega} \frac{e^{p_i\alpha |u_i|^2}}{p_i|x|^{\beta}} =\sum^k_{i=1}\int_{\Omega} \frac{e^{p_i\alpha\|u_i\|^2 (\frac{u_i}{\|u_i\|})^2}}{p_i|x|^{\beta}}
\leq \sum^k_{i=1}\frac{C(\Omega)}{p_i}=C(\Omega),
\end{split}
\end{align*}
then (\ref{tmiht}) follows.
If $\frac{\alpha}{4\pi}+\frac{\beta}{2} >1$, we take $U=(u,0,\ldots,0)$, then Lemma \ref{lemtm1} shows that the supremum for the integral in (\ref{tmiht}) is infinite. Thus the proof is complete.
\end{proof}
\begin{lemma}\label{lemtm3}
Let $\{U_n\}$ be a sequence of functions in $H^1_0(\Omega,\mathbb{R}^k)$ with $\|U_n\|=1$ such that $U_n\rightharpoonup U\neq0$ weakly in $H^1_0(\Omega,\mathbb{R}^k)$. Then for any $0<p<\frac{2\pi(2-\beta)}{{(1-\|U\|^2)}}$ and $\beta\in [0,2)$, we have
$$
\sup_n \int_{\Omega}\frac{e^{p|U_n|^2}}{|x|^\beta}<\infty.
$$
\end{lemma}
\begin{proof}
Since $U_n\rightharpoonup U\neq0$ and $\|\nabla U_n\|_2=1$, we conclude that
\begin{align*}
\|U_n-U\|^2=1-2\langle U_n,U\rangle+\|U\|^2\rightarrow 1-\|U\|^2<\frac {2\pi(2-\beta)}{p}
\end{align*}
Thus, for large $n$ we have
\begin{align*}
\frac{p\|U_n-U\|^2}{4\pi}+\frac{\beta}{2}<1.
\end{align*}
Now we can choose $q>1$ close to 1 and $\epsilon>0$ such that
\begin{align*}
\frac{qp(1+\epsilon^2)\|U_n-U\|^2}{4\pi}+\frac{q\beta}{2}\leq 1.
\end{align*}
Lemma \ref{lemtm2} shows that
\begin{align*}
\int_{\Omega}\frac{e^{qp(1+\epsilon^2)|U_n-U|^2}}{|x|^{q\beta}}\leq C(\Omega).
\end{align*}
Moreover, since
\begin{align*}
p|U_n|^2 \leq p (1+ \epsilon ^2)|U_n-U|^2 + p(1+1/\epsilon ^2)|U|^2,
\end{align*}
which follows from the elementary inequality $2ab\leq \epsilon^2a^2+\epsilon^{-2}b^2$; hence
\begin{align*}
e^{p|U_n|^2 }\leq e^{p (1+ \epsilon ^2)|U_n-U|^2} e^{ p(1+1/\epsilon ^2)|U|^2}.
\end{align*}
Consequently, by the H\"{o}lder inequality,
\begin{align*}
\begin{split}
\int_{\Omega}\frac{e^{p|U_n|^2}}{|x|^\beta}&\leq \left (\int_{\Omega}\frac{e^{qp(1+\epsilon^2)|U_n-U|^2}}{|x|^{q\beta}}\right)^{1/q}\left(\int_{\Omega}e^{rp(1+1/\epsilon^2)|U|^2}\right)^{1/r}\leq C\left (\int_{\Omega}e^{rp(1+1/\epsilon^2)|U|^2}\right),
\end{split}
\end{align*}
for large $n$, where $r=\frac{q}{q-1}$. By Lemma \ref{lemtm2}, the last integral is finite, and the lemma is proved.
\end{proof}
\begin{remark}\label{remccp1}\rm
Lemma \ref{lemtm3} is in fact a concentration-compactness principle for the singular Trudinger-Moser inequality, which improves (\cite{ds}, Lemma 2.3). However, it is not clear whether the constant $\frac{2\pi(2-\beta)}{1-\|U\|^2}$ is sharp; this remains an open question.
\end{remark}
\begin{lemma}\label{lemtm4}
If $V\in H^1_0(\Omega,\mathbb{R}^k),\ \alpha>0,\ q>0,\ \beta\in [0,2)$ and $\|V\|\leq N$ with $\frac{\alpha N^2}{4\pi}+\frac{\beta}{2}<1$, then there exists $C=C(\alpha,N,q)>0$ such that
\begin{align}\label{asd1}
\int_{\Omega} |V|^q \frac{e^{\alpha |V|^2}}{|x|^\beta}\leq C\|V\|^q.
\end{align}
\end{lemma}
\begin{proof}
We consider $r> 1$ close to 1 such that $\frac{r \alpha N^2}{4\pi}+\frac{r\beta}{2} \leq 1$ and $sq\geq 1$, where $s=\frac {r}{r-1}$. By the H\"{o}lder inequality and Lemma \ref{lemtm2}, we have
\begin{align*}
\begin{split}
\int_{\Omega} |V|^q \frac{e^{\alpha |V|^2}}{|x|^\beta} &\leq \left (\int_{\Omega}\frac{e^{r\alpha |V|^2}}{|x|^{r\beta}}\right)^{1/r}\|V\|^q_{qs}
\leq C(\Omega)\|V\|^q_{qs}.
\end{split}
\end{align*}
Finally, using the continuous embedding $H^1_0(\Omega,\mathbb{R}^k)\hookrightarrow L^{sq}(\Omega,\mathbb{R}^k)$, we conclude that
\begin{align*}
\int_{\Omega} |V|^q \frac{e^{\alpha |V|^2}}{|x|^\beta}\leq C\|V\|^q.
\end{align*}
\end{proof}
\section{{\bfseries The variational framework}}\label{vf}
We now consider the functional $I_\varepsilon:H^1_0(\Omega,\mathbb{R}^k)\rightarrow\mathbb{R}$ given by
\begin{equation*}
I_\varepsilon(U)=\frac {1}{2}M(\|U\|^2)-\int_{\Omega}\frac{F(x,U)}{|x|^\beta}dx-\varepsilon\int_{\Omega}U\cdot H(x)dx.
\end{equation*}
\begin{lemma}\label{ic1} Under our assumptions, the functional $I_\varepsilon$ is well defined and of class $C^1$ on $H^1_0(\Omega,\mathbb{R}^k)$. Moreover, \begin{equation*}
\langle I_\varepsilon'(U),\Phi\rangle_*=m(\|U\|^2)\langle U,\Phi\rangle-\int_{\Omega}\frac{\Phi\cdot\nabla {F(x,U)}}{|x|^\beta} dx-\varepsilon\int_{\Omega}\Phi\cdot H dx,
\end{equation*} where $\Phi\in H^1_0(\Omega,\mathbb{R}^k)$, here $\langle \cdot,\cdot \rangle_*$ simply denotes the dual pairing between $H^1_0(\Omega,\mathbb{R}^k)$ and its dual space $\left(H^1_0(\Omega,\mathbb{R}^k)\right)^*$. \end{lemma}
\begin{proof}
We have that $f_i$ is continuous and has subcritical (or critical) growth at $\infty$, as defined in (\ref{1.2}) (or (\ref{1.3})). Thus, given $\alpha>0$ (or $\alpha>\alpha_0$), there exists $C>0$ such that $|f_i(x,U)|\leq Ce^{\alpha |U|^2}$ for all $(x,U)\in\Omega \times \mathbb{R}^k$. Then,
\begin{equation} \label{3.1}
|\nabla F(x,U)|\leq \sum^k_{i=1}|f_i(x,U)|\leq C_1 e^{\alpha |U|^2},
\ \ \text{for all }(x,U)\in\Omega \times \mathbb{R}^k.\\[3pt]
\end{equation} By $(F_1)$, given $\epsilon>0$ there exists $\delta>0$ such that
\begin{equation}\label{3.2}
|F(x,U)|\leq \frac {\lambda_1 m_0-\epsilon}{2} |U|^2
\ \ \text{whenever }|U|<\delta.\\[3pt]
\end{equation} Thus, using (\ref{3.1}), (\ref{3.2}) and $(F_3)$, we have
\begin{align*}
\int_{\Omega}\frac{|F(x,U)|}{|x|^\beta}dx \leq \frac {\lambda_1 m_0-\epsilon}{2}\int_{\Omega}\frac{|U|^2}{|x|^\beta} dx + C_1\int_{\Omega}\frac{|U|e^{\alpha |U|^2}}{|x|^\beta}dx.
\end{align*}
Considering the continuous embedding $H^1_0(\Omega,\mathbb{R}^k)\hookrightarrow L^s(\Omega,\mathbb{R}^k)$ for $s\geq 1$ and using Lemma \ref{lemtm4}, it follows that $\frac{F(x,U)}{|x|^\beta}\in L^1(\Omega)$, which implies that $I_\varepsilon$ is well defined. Moreover, $I_\varepsilon\in C^1(H^1_0(\Omega,\mathbb{R}^k),\mathbb R)$ with
\begin{equation*}
\langle I_\varepsilon'(U),\Phi\rangle_*=m(||U||^2)\langle U,\Phi\rangle-\int_{\Omega}\frac{\Phi\cdot\nabla {F(x,U)}}{|x|^\beta} dx-\varepsilon\int_{\Omega}\Phi\cdot H dx,
\end{equation*} for all $U, \Phi\in H^1_0(\Omega,\mathbb{R}^k)$. \end{proof}
From Lemma \ref{ic1}, we have that critical points of the functional $I_\varepsilon$ are precisely weak solutions of problem (\ref{P}). In the next three lemmas we check that the functional $I_\varepsilon$ satisfies the geometric conditions of the Mountain-pass theorem.
\begin{lemma}\label{lemgc1}
Suppose that $(M_1)$ and $(F_1),\ (F_3)$ hold and the function $f_i$ has subcritical (or critical) growth at $\infty$. Then for small $\varepsilon$, there exist positive numbers $\rho_\varepsilon$ and $\varsigma$ such that
\begin{align*}
I_\varepsilon(U)\geq \varsigma,\ \ \forall U\in H^1_0(\Omega,\mathbb{R}^k)\ \ \mbox{with}\ \ \|U\|=\rho_\varepsilon.
\end{align*}
Moreover, $\rho_\varepsilon$ can be chosen such that $\rho_\varepsilon\to0$ as $\varepsilon\to0$.
\end{lemma}
\begin{proof}
By $(F_1)$, there exist $\kappa\in(0,\lambda_1 m_0)$ and $\delta>0$ such that
\begin{align*}
|\nabla F(x,U)|\leq (\lambda_1 m_0-\kappa)|U|
\end{align*}
whenever $|U|<\delta$. On the other hand, for $\alpha>0$ (subcritical case) or $\alpha>\alpha_0$ (critical case), there exists $C_1>0$ such that $|f_i (x,U)|\leq C_1|U|^{q-1} e^{\alpha |U|^2}$ for all $|U|\geq \delta$, with $q>2$. Thus,
\begin{align}\label{3.3}
|\nabla F(x,U)|\leq \sum^k_{i=1} |f_i (x,U)|\leq (\lambda_1 m_0-\kappa)|U|+C_2|U|^{q-1} e^{\alpha |U|^2},\ \ \forall (x,U)\in \Omega\times \mathbb{R}^k.
\end{align}
Thus, by $(M_1),\ (F_3)$ and (\ref{3.3}),
\begin{align*}
\begin{split}
I_\varepsilon(U)=&\frac {1}{2}M(\|U\|^2)-\int_{\Omega}\frac{F(x,U)}{|x|^\beta}dx-\varepsilon\int_{\Omega}U\cdot H(x)dx \\
\geq &\frac{1}{2}m_0\|U\|^2-\int_{\Omega}\frac{F(x,U)}{|x|^\beta}dx-\varepsilon\int_{\Omega}U\cdot H(x)dx \\
\geq &\frac{1}{2}m_0\|U\|^2-\frac {1}{\mu}\int_{\Omega}\frac{U\cdot \nabla F(x,U)}{|x|^\beta}dx-\varepsilon\|U\|\|H\|_* \\
\geq &\frac{1}{2}m_0\|U\|^2-\frac {\lambda_1 m_0-\kappa}{\mu}\int_{\Omega}\frac{|U|^2}{|x|^\beta}dx-C_2\int_{\Omega}|U|^q \frac{e^{\alpha |U|^2}}{|x|^\beta}dx-\varepsilon\|U\|\|H\|_* \\
\geq &\Big(\frac {m_0}{2}-\frac {m_0}{\mu}+\frac {\kappa}{\mu \lambda_1}\Big)\|U\|^2-C_2\int_{\Omega}|U|^q \frac{e^{\alpha |U|^2}}{|x|^\beta}dx-\varepsilon\|U\|\|H\|_*,
\end{split}
\end{align*}
Choose $N>0$ such that $\alpha N^2/4\pi+\beta/2<1$. Then, by Lemma \ref{lemtm4}, for $\|U\|\leq N$ there exists $C>0$ such that $\int_{\Omega}|U|^q \frac{e^{\alpha |U|^2}}{|x|^\beta}dx\leq C\|U\|^q$. Therefore,
\begin{align*}
I_\varepsilon(U)\geq \Big(\frac {m_0}{2}-\frac {m_0}{\mu}+\frac {\kappa}{\mu \lambda_1}\Big)\|U\|^2-C_3\|U\|^q-\varepsilon\|U\|\|H\|_*,
\end{align*}
Since $m_0>0,\ \mu>2\theta>2$ and $q>2$, for small $\varepsilon$ there exists $\xi_\varepsilon>0$ such that $\Big(\frac {m_0}{2}-\frac {m_0}{\mu}+\frac {\kappa}{\mu \lambda_1}\Big)\xi_\varepsilon^2-C_3\xi_\varepsilon^q-\varepsilon \xi_\varepsilon\|H\|_*>0$.
Consequently, taking $\rho_\varepsilon=\min\{N,\xi_\varepsilon\}>0$, we get that $I_\varepsilon(U)\geq\varsigma$ whenever $\|U\|=\rho_\varepsilon$, where $\varsigma:=\Big(\frac {m_0}{2}-\frac {m_0}{\mu}+\frac {\kappa}{\mu \lambda_1}\Big)\rho_\varepsilon^2-C_3\rho_\varepsilon^q-\varepsilon \rho_\varepsilon\|H\|_*>0$. It is worth noting that $\rho_\varepsilon$ can be chosen so that $\rho_\varepsilon\rightarrow 0$ as $\varepsilon\rightarrow 0$.
\end{proof}
\begin{lemma}\label{lemgc2}
Assume that $(M_1),\ (M_3),\ (F_3)$ hold and the function $f_i$ has subcritical (or critical) growth at $\infty$. Then there exists $E\in H^1_0(\Omega,\mathbb{R}^k)$ with $\|E\|>\rho_\varepsilon$ such that
\begin{align*}
I_\varepsilon(E)<\inf_{\|U\|=\rho_\varepsilon}I_\varepsilon(U).
\end{align*}
\end{lemma}
\begin{proof}
We shall make use of the polar coordinate representation
\begin{align*}
U=(\nu,\phi)=(\nu,\phi_1,\ldots,\phi_{k-1}),
\end{align*}
where $\nu\geq 1,\ -\pi\leq\phi_1\leq\pi,\ 0\leq\phi_2,\ldots,\phi_{k-1}\leq\pi$ and
\begin{align*}
\begin{split}
u_1&=\nu \sin(\phi_1)\sin(\phi_2)\cdots\sin(\phi_{k-1}),\\
u_2&=\nu \cos(\phi_1)\sin(\phi_2)\cdots\sin(\phi_{k-1}),\\
u_3&=\nu \cos(\phi_2)\cdots\sin(\phi_{k-1}),\\
\vdots \\
u_k&=\nu \cos(\phi_{k-1}).
\end{split}
\end{align*}
Substituting in $(F_3)$, we get $0<\mu F(x,U)\leq \nu F_{\nu}(x,U)$ and hence
\begin{align*}
F(x,U)\geq\left(\min_{|W|=1}F(x,W)\right)|U|^\mu,\ \ \mbox{for all}\ \ x\in \Omega \ \ \mbox{and}\ \ |U|\geq 1.
\end{align*}
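Indeed, this follows by integrating the previous inequality: writing $U=\nu W$ with $\nu=|U|\geq 1$ and $|W|=1$, the bound $0<\mu F\leq \nu F_\nu$ means that $\frac{\partial}{\partial \nu}\log F(x,\nu W)\geq \frac{\mu}{\nu}$, and integrating over $[1,\nu]$ gives $F(x,\nu W)\geq F(x,W)\nu^{\mu}$, which yields the estimate above.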
Hence, for all $U\in H^1_0(\Omega,\mathbb{R}^k)\setminus\{0\}$ with $|U|\geq 1$, we have that
\begin{align*}
F(x,U)\geq C|U|^\mu.
\end{align*}
From (\ref{1.4}), we have that $0<M(t)\leq M(1) t^\theta$ for all $ t\geq 1$. Thus we have
\begin{align*}
I_\varepsilon(tU)\leq C_1t^{2\theta}\|U\|^{2\theta}-C_2 t^\mu \int_{K}\frac{|U|^\mu}{|x|^\beta} dx-t\varepsilon\int_{\Omega}U\cdot H dx,
\end{align*}
for $t$ large enough, where $C_1,\ C_2>0$, $\mu>2\theta>2$ and $K$ is a compact subset of $\Omega$; this yields $I_\varepsilon(tU)\rightarrow -\infty$ as $t\rightarrow+\infty$. Setting $E=tU$ with $t$ large enough, we get $I_\varepsilon(E)<0$ and $\|E\|>\rho_\varepsilon$. Thus, the proof is finished.
\end{proof}
\begin{lemma}\label{lemgc3}
If $f_i$ has subcritical (or critical) growth at $\infty$, there exist $\eta_\varepsilon>0$ and $V\in H^1_0(\Omega,\mathbb{R}^k)\backslash \{\mathbf{0}\}$ such that $I_\varepsilon(tV)<0$ for all $0<t<\eta_\varepsilon$. In particular,
\begin{align*}
\inf_{\|U\|\leq \eta_\varepsilon}I_\varepsilon(U)<0.
\end{align*}
\end{lemma}
\begin{proof}
Since $h_i\in \big(\big(H^1_0(\Omega)\big)^*,\|\cdot\|_*\big)\backslash\{0\}$ for some $i\in\{1,\ldots,k\}$, by the Riesz representation theorem the problem
\begin{align*}
-\Delta v_i=h_i,\ \ x\in\Omega;\ \ v_i=0\ \ \mbox{on}\ \ \partial\Omega,
\end{align*}
has a unique nontrivial weak solution $v_i$ in $H^1_0(\Omega)$. Thus,
\begin{align*}
\int_{\Omega} h_i v_i=\|v_i\|^2>0.
\end{align*}
Since $f_i(x,0,\ldots,0)=0$, by continuity, $(F_3)$ and (\ref{1.4}), $\theta>1$, it follows that there exists $\eta_\varepsilon>0$ such that for all $0<t<\eta_\varepsilon$,
\begin{align*}
\begin{split}
\frac{d}{dt}[I_\varepsilon((0,\ldots,tv_i,\ldots,0))]=&m(t^2\|v_i\|^2)t\|v_i\|^2-\int_{\Omega}\frac{v_if_i(x,(0,\ldots,tv_i,\ldots,0))}{|x|^\beta}
-\varepsilon\int_{\Omega} h_i v_i \\
\leq &C\|v_i\|^{2}t-\varepsilon\|v_i\|^2-\int_{\Omega}\frac{v_if_i(x,(0,\ldots,tv_i,\ldots,0))}{|x|^\beta}
<0.
\end{split}
\end{align*}
Using that $I_\varepsilon(\mathbf{0})=0$, it must hold that $I_\varepsilon(tV)<0$ for all $0<t<\eta_\varepsilon$ where $V=(0,\ldots,v_i,\ldots,0)$.
\end{proof}
\section{{\bfseries On Palais-Smale sequences}}\label{ps}
To prove that a Palais-Smale sequence converges to a weak solution of problem (\ref{P}), we need to establish the following lemma.
\begin{lemma}\label{lem4.1}
Assume that $(M_1),\ (M_3),\ (F_3)$ hold and $f_i$ has subcritical (or critical) growth at $\infty$. Let $\{U_n\}\subset H^1_0(\Omega,\mathbb{R}^k)$ be a Palais-Smale sequence for the functional $I_\varepsilon$ at a finite level. Then there exists $C>0$ such that
\begin{align*}
\|U_n\|\leq C,\ \ \int_{\Omega}\frac{\left|U_n\cdot \nabla F(x,U_n)\right|}{|x|^\beta}dx\leq C\ \ \mbox{and}\ \ \int_{\Omega}\frac{F(x,U_n)}{|x|^\beta}dx\leq C.
\end{align*}
\end{lemma}
\begin{proof}
Let $\{U_n\} \subset H^1_0(\Omega,\mathbb{R}^k)$ be a sequence such that $I_\varepsilon(U_n)\rightarrow c$ and $I_\varepsilon'(U_n)\rightarrow 0$, where $|c|<\infty$; that is,
\begin{equation}\label{4.1}
I_\varepsilon(U_n)=\frac{1}{2}M(\|U_n\|^2)-\int_{\Omega}\frac{F(x,U_n)}{|x|^\beta}dx-\varepsilon\int_{\Omega}U_n\cdot H dx=c+\delta_n,
\end{equation}
where $\delta_n\rightarrow 0$ as $n\rightarrow \infty$, and
\begin{equation}\label{4.2}
\langle I_\varepsilon'(U_n),U_n\rangle_*=m(\|U_n\|^2)\|U_n\|^2-\int_{\Omega}\frac{U_n\cdot \nabla F(x,U_n)}{|x|^\beta}dx-\varepsilon\int_{\Omega}U_n\cdot H dx=o(\|U_n\|).
\end{equation}
Then for $n$ large enough, by $(M_1),\ (F_3)$ and (\ref{1.4}), it holds that
\begin{align*}
\begin{split}
C+\|U_n\|\geq& I_\varepsilon(U_n)-\frac {1}{\mu} \langle I'_\varepsilon(U_n),U_n\rangle_* \\
=&\frac {1}{2}M(\|U_n\|^2)-\frac {1}{\mu}m(\|U_n\|^2)\|U_n\|^2+\frac{1}{\mu} \int_{\Omega}\frac{\left (U_n\cdot \nabla F(x,U_n) -\mu F(x,U_n)\right )}{|x|^\beta}dx \\
&-\frac{\mu-1}{\mu}\varepsilon\int_{\Omega}U_n\cdot H dx \\
\geq& \frac {1}{2\theta}\Big[\theta M(\|U_n\|^2)-m(\|U_n\|^2)\|U_n\|^2\Big]+\Big(\frac {1}{2\theta}-\frac {1}{\mu}\Big)m(\|U_n\|^2)\|U_n\|^2 \\
&-\frac{\mu-1}{\mu}\varepsilon \|U_n\| \|H\|_* \\
\geq& \Big(\frac {1}{2\theta}-\frac {1}{\mu}\Big)m(\|U_n\|^2)\|U_n\|^2-\frac{\mu-1}{\mu}\varepsilon \|U_n\| \|H\|_* \\
\geq& \frac {\mu-2\theta}{2\theta\mu}m_0\|U_n\|^2-\frac{\mu-1}{\mu}\varepsilon \|U_n\| \|H\|_*,
\end{split}
\end{align*}
for some $C>0$. Since $\mu>2\theta>2,\ m_0>0,\ \varepsilon>0$, we obtain that $\{\|U_n\|\}$ is bounded. From (\ref{4.1}) and (\ref{4.2}), it follows directly that there exists $C>0$ such that $\int_{\Omega}\frac{U_n\cdot \nabla F(x,U_n)}{|x|^\beta}dx\leq C$ and $\int_{\Omega}\frac{F(x,U_n)}{|x|^\beta}dx\leq C$. Condition $(F_3)$ implies $U_n\cdot \nabla F(x,U_n)\geq 0$ for all $x\in\Omega$, thus we have $\int_{\Omega}\frac{\left|U_n\cdot \nabla F(x,U_n)\right|}{|x|^\beta}dx\leq C$.
\end{proof}
In order to show that the limit of a sequence in $H^1_0(\Omega,\mathbb{R}^k)$ is a weak solution of problem (\ref{P}), we will use the following convergence result due to de Figueiredo-do \'{O}-Ruf \cite{ddr2} and the domination result due to do \'{O}-Medeiros-Severo \cite{dms}.
\begin{lemma}\label{lemcr}
\cite{ddr2} Let $\Omega \subset \mathbb{R}^2$ be a bounded domain and $f: \Omega\times \mathbb{R} \rightarrow \mathbb{R}$ be a continuous function, $\beta \in [0,2)$. Then for any sequence $\{u_n\}$ in $L^1(\Omega)$ such that
\begin{align*}
u_n\rightarrow u\ \ in\ \ L^1(\Omega),\ \ \frac{f(x,u_n)}{|x|^\beta} \in L^1(\Omega)\ \ \mbox{and}\ \ \int_{\Omega}\frac{|f(x,u_n)u_n|}{|x|^\beta}dx\leq C,
\end{align*}
up to a subsequence, we have
\begin{align*}
\frac{f(x,u_n)}{|x|^\beta}\rightarrow \frac{f(x,u)}{|x|^\beta} \ \ \mbox{in}\ \ L^1(\Omega).
\end{align*}
\end{lemma}
\begin{lemma}\label{lemdr}
\cite{dms} Let $\{u_n\}$ be a strongly convergent sequence in $H^1_0(\Omega)$. Then there exist a subsequence $\{u_{n_k}\}$ of $\{u_n\}$ and $g\in H^1_0(\Omega)$ such that $|u_{n_k}(x)|\leq g(x)$ almost everywhere in $\Omega$.
\end{lemma}
\begin{lemma}\label{lem4.4}
Assume $(M_1),\ (M_3),\ (F_2),\ (F_3)$ hold and $f_i$ has subcritical (or critical) growth at $\infty$. Let $\{U_n\}\subset H^1_0(\Omega,\mathbb{R}^k)$ be a Palais-Smale sequence for the functional $I_\varepsilon$ at a finite level. Then there exists $U\in H^1_0(\Omega,\mathbb{R}^k)$ such that
\begin{equation}\label{dcfi}
\frac{f_i(x,U_n)}{|x|^\beta}\rightarrow \frac{f_i(x,U)}{|x|^\beta}\ \ \mbox{in}\ \ L^1(\Omega), \ \ \mbox{for all}\ \ i=1,\ldots,k.
\end{equation}
and
\begin{equation}\label{dch}
U_n\cdot H\rightarrow U\cdot H\ \ \mbox{in}\ \ L^1(\Omega).
\end{equation}
Moreover,
\begin{equation}\label{4.5}
\frac{F(x,U_n)}{|x|^\beta}\rightarrow \frac{F(x,U)}{|x|^\beta}\ \ \mbox{in}\ \ L^1(\Omega).
\end{equation}
\end{lemma}
\begin{proof}
According to Lemma \ref{lem4.1}, $\{U_n\}$ is bounded in $H^1_0(\Omega,\mathbb{R}^k)$; hence, up to a subsequence, there exists $U \in H^1_0(\Omega,\mathbb{R}^k)$ such that $U_n\rightharpoonup U$ weakly in $H^1_0(\Omega,\mathbb{R}^k)$,\ $U_n\rightarrow U$ in $L^p(\Omega,\mathbb{R}^k)$ for all $p\geq 1$ and $U_n(x)\rightarrow U(x)$ almost everywhere in $\Omega$. Consequently, by Lemmas \ref{lem4.1} and \ref{lemcr}, we have
\begin{equation*}
\frac{f_i(x,U_n)}{|x|^\beta}\rightarrow \frac{f_i(x,U)}{|x|^\beta}\ \ \mbox{in}\ \ L^1(\Omega), \ \ \mbox{for all}\ \ i=1,\ldots,k.
\end{equation*}
Since
\begin{align*}
\int_{\Omega}|U_n\cdot H-U\cdot H|dx\leq \int_{\Omega}|H| |U_n -U|dx\leq \|H\|_2\|U_n-U\|_2 \rightarrow 0,
\end{align*}
we can conclude that
\begin{equation*}
U_n\cdot H\rightarrow U\cdot H\ \ \mbox{in}\ \ L^1(\Omega).
\end{equation*}
Then by Lemma \ref{lemdr}, there exists $g_i\in L^1(\Omega)$ such that $\frac{|f_i(x,U_n)|}{|x|^\beta}\leq g_i$ almost everywhere in $\Omega$. From $(F_2)$ we can conclude that
\begin{align*}
|F(x,U_n)|\leq \sup_{\Omega \times \{|U|\leq S_0\}} |F(x,U)|+M_0|\nabla F(x,U_n)| \ \ \mbox{a.e. in}\ \ \Omega.
\end{align*}
Thus, by the generalized Lebesgue dominated convergence theorem, we get
\begin{equation*}
\frac{F(x,U_n)}{|x|^\beta}\rightarrow \frac{F(x,U)}{|x|^\beta}\ \ \mbox{in}\ \ L^1(\Omega).
\end{equation*}
\end{proof}
\section{{\bfseries Proof of the main results}}\label{main}
In order to obtain a weak solution with positive energy, according to Lemmas \ref{lemgc1} and \ref{lemgc2}, let
\begin{equation}\label{defmpl}
c_{M,\varepsilon}=\inf_{\gamma\in\Upsilon}\max_{t\in [0,1]}I_\varepsilon(\gamma(t))>0,
\end{equation}
be the minimax level of $I_\varepsilon$, where $\Upsilon=\{\gamma\in C\big([0,1],H^1_0(\Omega,\mathbb{R}^k)\big):\gamma(0)=\mathbf{0}, I_\varepsilon(\gamma(1))<0\}$. Therefore, using the Mountain-pass theorem, there exists a sequence $\{U_n\} \subset H^1_0(\Omega,\mathbb{R}^k)$ satisfying
\begin{equation}\label{5.1}
I_\varepsilon(U_n)\rightarrow c_{M,\varepsilon}\ \ \mbox{and}\ \ I'_\varepsilon(U_n)\rightarrow 0.
\end{equation}
And in order to obtain another weak solution with negative energy, by Lemmas \ref{lemgc1} and \ref{lemgc3}, we take $\eta_\varepsilon\leq \rho_\varepsilon$ and so we have that
\begin{equation}\label{defc0}
-\infty<c_{0,\varepsilon}:=\inf_{\|V\|\leq \rho_\varepsilon}I_\varepsilon(V)<0,
\end{equation}
where $\rho_\varepsilon$ is given as in Lemma \ref{lemgc1}. Since $\overline{B}_{\rho_\varepsilon}$ is a complete metric space with the metric given by the norm of $H^1_0(\Omega,\mathbb{R}^k)$, convex and the functional $I_\varepsilon$ is of class $C^1$ and bounded below on $\overline{B}_{\rho_\varepsilon}$, by the Ekeland variational principle, there exists a sequence $\{V_n\}$ in $\overline{B}_{\rho_\varepsilon}$ such that
\begin{equation}\label{5.2}
I_\varepsilon(V_n)\rightarrow c_{0,\varepsilon}\ \ \mbox{and}\ \ I'_\varepsilon(V_n)\rightarrow 0.
\end{equation}
\subsection{Subcritical case: Proof of Theorem \ref{thm1.2}}\label{ssectpfthmsc}
In this subsection, we assume that $f_i$ has subcritical growth at $\infty$ satisfying (\ref{1.2}) and prove Theorem \ref{thm1.2}.
\begin{lemma}\label{lem5.1}
The functional $I_\varepsilon$ satisfies the Palais-Smale condition at any finite level $c$.
\end{lemma}
\begin{proof}
Let $\{U_n\} \subset H^1_0(\Omega,\mathbb{R}^k)$ be a sequence such that $I_\varepsilon(U_n)\rightarrow c$ and $I_\varepsilon'(U_n)\rightarrow 0$. Lemma \ref{lem4.1} shows that $\{U_n\}$ is bounded in $H^1_0(\Omega,\mathbb{R}^k)$; then there exist a subsequence, still labeled by $\{U_n\}$, and $U\in H^1_0(\Omega,\mathbb{R}^k)$ such that
\begin{align*}
U_n\rightharpoonup U\ \ \mbox{in}\ \ H^1_0(\Omega,\mathbb{R}^k);\quad U_n \rightarrow U\ \ \mbox{in}\ \ L^q(\Omega,\mathbb{R}^k) \ \mbox{for all}\ \ q\geq 1.
\end{align*}
Note that
\begin{equation}\label{5.3}
\begin{split}
\langle I_\varepsilon'(U_n),U_n-U\rangle_*=&m(\|U_n\|^2)\langle U_n,U_n-U\rangle-\int_{\Omega}\frac{(U_n-U)\cdot\nabla F(x,U_n)}{|x|^\beta}dx
-\varepsilon\int_{\Omega}(U_n-U)\cdot H dx.
\end{split}
\end{equation}
From $I_\varepsilon'(U_n)\rightarrow 0$ in $\big(H^1_0(\Omega,\mathbb{R}^k)\big)^*$, we have $\langle I'_\varepsilon(U_n),U_n-U\rangle_* \rightarrow 0$.
Meanwhile, Lemma \ref{lem4.1} shows that $\|U_n\|$ is bounded, i.e. $\|U_n\|^2\leq C_0$ for some $C_0>0$; then, by the subcritical growth condition, the H\"{o}lder inequality and Lemma \ref{yi}, it follows that
\begin{align*}
\begin{split}
\left|\int_{\Omega}\frac{(U_n-U)\cdot\nabla F(x,U_n)}{|x|^\beta}dx \right| & \leq \int_{\Omega}\frac{|U_n-U| |\nabla F(x,U_n)|}{|x|^\beta}dx \\
&\leq C_1\int_{\Omega}|U_n-U|\frac{e^{\alpha |U_n|^2}}{|x|^\beta}dx \\
&\leq C_2\|U_n-U\|_{\frac {r}{r-1}}\left(\int_{\Omega}\frac{e^{r\alpha |U_n|^2}}{|x|^{r\beta}}dx\right)^\frac {1}{r} \\
&\leq C_2\|U_n-U\|_{\frac {r}{r-1}}\left(\sum^k_{i=1}\int_{\Omega}\frac{e^{kr\alpha\|U_n\|^2\left(\frac{u^i_n}{\|U_n\|}\right)^2}}{|x|^{r\beta}}dx\right)^\frac {1}{r}
\end{split}
\end{align*}
for some $C_1,C_2>0$, where $\alpha=\frac{4\pi(1-r\beta/2)}{krC_0}$ and $r>1$ is sufficiently close to 1 so that $r\beta<2$. Since $\frac{kr\alpha\|U_n\|^2}{4\pi}+\frac{r\beta}{2}\leq 1$, the last sum is bounded by Lemma \ref{lemtm1}, and since the embedding $H^1_0(\Omega,\mathbb{R}^k)\hookrightarrow L^s(\Omega,\mathbb{R}^k)$ is compact for $s\geq 1$, we have $\|U_n-U\|_{\frac{r}{r-1}}\rightarrow 0$; hence the right-hand side converges to zero.
Thus, using this and (\ref{dch}) in (\ref{5.3}), it must be that
\begin{align*}
m(\|U_n\|^2)\langle U_n,U_n-U\rangle \rightarrow 0\ \ \mbox{as}\ \ n\rightarrow+\infty.
\end{align*}
Because of $(M_1)$, i.e. $m(t)\geq m_0>0$ for $t\geq 0$, and since $U_n\rightharpoonup U$ in $H^1_0(\Omega,\mathbb{R}^k)$, it must be that
\begin{align*}
\langle U_n,U_n-U\rangle\rightarrow 0,
\end{align*}
which means $\|U_n\|^2\rightarrow \|U\|^2$. Together with the weak convergence, this yields $U_n\rightarrow U$ strongly in $H^1_0(\Omega,\mathbb{R}^k)$ (by the Radon-Riesz property). The proof is complete.
\end{proof}
\noindent{\bfseries Proof of Theorem \ref{thm1.2}.}
By (\ref{5.1}), (\ref{5.2}) and Lemma \ref{lem5.1}, there exists $\varepsilon_{sc}>0$ such that for each $0<\varepsilon<\varepsilon_{sc}$, using the minimax principle, there exist critical points $U_{M,\varepsilon}$ of $I_\varepsilon$ at level $c_{M,\varepsilon}$ and $V_{0,\varepsilon}$ of $I_\varepsilon$ at level $c_{0,\varepsilon}$. We claim that $U_{M,\varepsilon}\neq \mathbf{0}$. In fact, suppose by contradiction that $U_{M,\varepsilon}\equiv \mathbf{0}$. Then $0<c_{M,\varepsilon}=\lim_{n\rightarrow\infty}I_\varepsilon(U_n)=I_\varepsilon(U_{M,\varepsilon})=I_\varepsilon(\mathbf{0})=0$, which is absurd. Similarly, $V_{0,\varepsilon}\neq \mathbf{0}$. Finally, we claim that $U_{M,\varepsilon}\neq V_{0,\varepsilon}$. Suppose by contradiction that $U_{M,\varepsilon}\equiv V_{0,\varepsilon}$; then $0>c_{0,\varepsilon}=\lim_{n\rightarrow\infty}I_\varepsilon(V_n)=I_\varepsilon(V_{0,\varepsilon})=I_\varepsilon(U_{M,\varepsilon})=\lim_{n\rightarrow\infty}I_\varepsilon(U_n)=c_{M,\varepsilon}>0$, which is absurd. Thus, the proof of Theorem \ref{thm1.2} is complete.
\qed
\subsection{Critical case: Proof of Theorem \ref{thm1.3}}\label{ssectpfthmc}
In this subsection, we assume that $f_i$ has critical growth at $\infty$ satisfying (\ref{1.3}) and give the proof of Theorem \ref{thm1.3}.
First, we show that the functional $I_\varepsilon$ satisfies the Palais-Smale condition along sequences lying below an appropriate level:
\begin{lemma}\label{ms1}
If $\{V_n\}$ is a Palais-Smale sequence for $I_\varepsilon$ at any finite level with
\begin{align}\label{msVx}
\liminf_{n\rightarrow\infty}\|V_n\|^2<\frac{2\pi(2-\beta)}{\alpha_0},
\end{align}
then $\{V_n\}$ possesses a strongly convergent subsequence.
\end{lemma}
\begin{proof}
Let $\{V_n\}\subset H^1_0(\Omega,\mathbb{R}^k)$ be such that $I_\varepsilon(V_n)\rightarrow c$ and $I'_\varepsilon(V_n)\rightarrow 0$ in $\big(H^1_0(\Omega,\mathbb{R}^k)\big)^*$. By Lemma \ref{lem4.1}, $\|V_n\|\leq C$ for some $C>0$; thus, up to a subsequence, there exists $V\in H^1_0(\Omega,\mathbb{R}^k)$ such that
\begin{align*}
\begin{split}
&V_n\rightharpoonup V\ \ \mbox{in}\ \ H^1_0(\Omega,\mathbb{R}^k).
\end{split}
\end{align*}
Taking $V_n=V+W_n$, it follows that $W_n\rightharpoonup 0$ in $H^1_0(\Omega,\mathbb{R}^k)$ and by the Br\'{e}zis-Lieb Lemma (see \cite{bl2}), we get
\begin{align}\label{ws1}
\begin{split}
\|V_n\|^2=\|V\|^2+\|W_n\|^2+o_n(1).
\end{split}
\end{align}
Since $V_n\rightharpoonup V$, we have $\langle V_n,V \rangle\rightarrow \langle V,V \rangle=\|V\|^2$. Therefore, (\ref{ws1}) can be rewritten as
\begin{align}\label{ws2}
\begin{split}
\|V_n\|^2=\langle V_n,V\rangle+\|W_n\|^2+o_n(1).
\end{split}
\end{align}
By $I_\varepsilon'(V_n)\rightarrow 0$ in $\big(H^1_0(\Omega,\mathbb{R}^k)\big)^*$ and (\ref{dch}), (\ref{ws2}), we can get
\begin{align*}
\begin{split}
\langle I_\varepsilon'(V_n),V_n-V\rangle_* &=m(\|V_n\|^2)\langle V_n,V_n-V \rangle-\int_{\Omega}\frac{(V_n-V)\cdot\nabla F(x,V_n)}{|x|^\beta}+o_n(1) \\
&=m(\|V_n\|^2)\|W_n\|^2-\int_{\Omega}\frac{W_n\cdot\nabla F(x,V_n)}{|x|^\beta}+o_n(1),
\end{split}
\end{align*}
that is,
\begin{align}\label{ws3}
\begin{split}
m(\|V_n\|^2)\|W_n\|^2=\int_{\Omega}\frac{W_n\cdot\nabla F(x,V_n)}{|x|^\beta}+o_n(1).
\end{split}
\end{align}
From (\ref{msVx}), there exists $\zeta>0$ such that $\alpha_0\|V_n\|^2<\zeta<2\pi(2-\beta)$ for $n$ sufficiently large (up to a subsequence), and also there exist $\alpha>\alpha_0$ close to $\alpha_0$ and $q>1$ close to~1~such that $q\alpha \|V_n\|^2<\zeta<2\pi(2-q\beta)$ for $n$ sufficiently large. Then by (\ref{3.1}), we have
\begin{align*}
\begin{split}
\left|\int_{\Omega}\frac{W_n\cdot\nabla F(x,V_n)}{|x|^\beta}\right|\leq C_1\int_{\Omega}|W_n|\frac{e^{\alpha|V_n|^2}}{|x|^\beta},
\end{split}
\end{align*}
and by the H\"{o}lder's inequality and Lemma \ref{lemtm2}, we can get that
\begin{align*}
\begin{split}
\int_{\Omega}|W_n|\frac{e^{\alpha|V_n|^2}}{|x|^\beta}\leq C_1\|W_n\|_s \bigg(\int_{\Omega}\frac{e^{q\alpha|V_n|^2}}{|x|^{q\beta}}\bigg)^{1/q}\leq C_2\|W_n\|_s,
\end{split}
\end{align*}
where $s=\frac{q}{q-1}$. By the compact embedding $H^1_0(\Omega,\mathbb{R}^k)\hookrightarrow L^s(\Omega,\mathbb{R}^k)$ for $s\geq 1$, we conclude that
\begin{align*}
\begin{split}
\int_{\Omega}\frac{W_n\cdot\nabla F(x,V_n)}{|x|^\beta}\rightarrow 0.
\end{split}
\end{align*}
Thus, together with (\ref{ws3}) and $(M_1)$, we get that $\|W_n\|\rightarrow 0$, and the result follows.
\end{proof}
Then, in order to get more precise information about the minimax level $c_{M,\varepsilon}$, let us consider the following sequence, which was introduced in \cite{ddr}: for $n\in \mathbb{N}$ set $\delta_n=\frac{2\log n}{n}$, and let
\begin{eqnarray*}
y_n(t)=\frac {1}{\sqrt{2\pi}}
\left\{ \arraycolsep=1.5pt
\begin{array}{ll}
\frac {t}{n^{1/2}}(1-\delta_n)^{1/2},\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ &{\rm if}\ \ 0\leq t\leq n,\\[2mm]
\frac {1}{\big [n(1-\delta_n)\big ]^{1/2}}\log\frac{A_n+1}{A_n+e^{-(t-n)}}+\big [n(1-\delta_n)\big ]^{1/2},\ \ \ &{\rm if}\ \ \ t\geq n,\\[2mm]
\end{array}
\right.
\end{eqnarray*}
where $A_n$ satisfies $A_n=\frac{1}{en^2}+O(\frac{1}{n^4})$.
The sequence of functions $\{y_n\}$ satisfies the following properties:
\begin{eqnarray*}
\left\{ \arraycolsep=1.5pt
\begin{array}{ll}
\{y_n\}\subset C\big([0,+\infty)\big),\ \mbox{piecewise differentiable, with}\ y_n(0)=0 \ \mbox{and}\ y'_n(t)\geq 0;\\[2mm]
\int^{+\infty}_0 |y'_n(t)|^2=1;\\[2mm]
\lim_{n\rightarrow \infty}\int^{+\infty}_0 e^{y^2_n(t)-t}dt=1+e.
\end{array}
\right.
\end{eqnarray*}
Now, setting $|x|=e^{-t/2}$ and $y_n(t)=2\sqrt{\pi}\widehat{G}_n(e^{-t/2})$, we define a nonnegative, radially symmetric function $\widehat{G}_n(x)=\widehat{G}_n(|x|)$ on $\overline{B_1(0)}$. Moreover, we have
\begin{align*}
\int_{B_1(0)}|\nabla \widehat{G}_n(x)|^2dx=\int^{+\infty}_0|y'_n(t)|^2=1.
\end{align*}
Therefore $\|\widehat{G}_n\|=1$. Let $\tau=\frac{2-\beta}{2}$; then $\widehat{G}_n$ defines another nonnegative, radially symmetric function $\tilde{G}_n$ as follows:
\begin{align*}
\widehat{G}_n(\varrho)=\tau^{1/2}\tilde{G}_n(\varrho^{1/\tau})\ \ \mbox{for}\ \ \varrho\in [0,1].
\end{align*}
Note that
\begin{align*}
\int^1_0|\widehat{G}'_n(\varrho)|^2\varrho d\varrho=\int^1_0|\tilde{G}'_n(\varrho)|^2\varrho d\varrho.
\end{align*}
Therefore, $\|\widehat{G}_n\|=\|\tilde{G}_n\|$. The open ball $B_d(0)$ is contained in $\Omega$, where $d$ was given in $(F_4)$. Considering
\begin{align}\label{defgnd}
\mathscr{G}_{n,d}(x):=\big(G_{n,d}(x),0,\ldots,0\big),\ \ \ \mbox{where}\ \ \ G_{n,d}(x):=\tilde{G}_n\left(\frac{x}{d}\right),
\end{align}
then $\mathscr{G}_{n,d}(x)$ belongs to $H^1_0(\Omega,\mathbb{R}^k)$ with $\|\mathscr{G}_{n,d}\|=1$ (the Dirichlet integral being invariant under dilations in dimension two), and the support of $\mathscr{G}_{n,d}$ is contained in $B_d(0)$.
\begin{remark}\label{rem5.2} \rm
If condition $(F_4)$ holds, we define $\mathscr{G}'_{n,d}(x)$ as the function whose $i$-th component is $G_{n,d}(x)$ and whose remaining components are $0$, i.e., $\mathscr{G}'_{n,d}(x)=(0,\ldots,0,G_{n,d}(x),0,\ldots,0)$; then, given $\delta>0$, there exists $s_\delta>0$ such that
\begin{align*}
\begin{split}
\mathscr{G}'_{n,d}\cdot\nabla F(x,\mathscr{G}'_{n,d})
= G_{n,d} f_i(x,\mathscr{G}'_{n,d})
\geq (\eta_0 -\delta)\exp\left(\alpha_0|\mathscr{G}'_{n,d}|^2\right)
= (\eta_0 -\delta)\exp\left(\alpha_0|G_{n,d}|^2\right),
\end{split}
\end{align*}
for all $x\in \Omega$ with $|\mathscr{G}'_{n,d}|=|G_{n,d}|\geq s_\delta$. This is of the same type as (\ref{5.7}) below;
therefore, without loss of generality, we may assume that $i=1$ in $(F_4)$.
\end{remark}
\begin{lemma}\label{leest}
For any $0<\epsilon<1$, we have that for $x\in B_\frac{d}{\varpi(n)}(0)$ with $\varpi(n)=\exp\left\{\frac{n^{(1+\epsilon)/2}}{2}\right\}$,
\begin{align}\label{psbc}
|\mathscr{G}_{n,d}(x)|\geq\frac{1}{2\sqrt{\pi}} n^{\frac{\epsilon}{2}}\left(1-\frac{2\log n}{n}\right)^{\frac{1}{2}},
\end{align}
where $\mathscr{G}_{n,d}$ is given in (\ref{defgnd}).
\end{lemma}
\begin{proof}
For $x\in B_\frac{d}{\varpi(n)}(0)$,
\begin{align*}
\left|\mathscr{G}_{n,d}(x)\right|=\left|G_{n,d}(x)\right|=\left|\tilde{G}_n\left(\frac{x}{d}\right)\right|=\left|\tilde{G}_n(y)\right|,
\end{align*}
where $y=\frac{x}{d}\in B_\frac{1}{\varpi(n)}(0)$. Moreover,
\begin{align*}
\left|\tilde{G}_n(y)\right|=\left|\tilde{G}_n(|y|)\right|=\frac{1}{2\sqrt{\pi}}y_n(-2\log(|y|))=\frac{1}{2\sqrt{\pi}}y_n(t),
\end{align*}
where $t=-2\log(|y|)\in (n^{\frac{1+\epsilon}{2}},+\infty)$. Notice that $y_n(t)\geq\big[n(1-\delta_n)\big]^{\frac{1}{2}}=n^{\frac{1}{2}}\left(1-\frac{2\log n}{n}\right)^{\frac{1}{2}}$ if $t\geq n$. Moreover, in $(n^{\frac{1+\epsilon}{2}},n)$,
\begin{align*}
\begin{split}
y_n(t)\geq \frac {n^{\frac{1+\epsilon}{2}}}{n^{\frac{1}{2}}}(1-\delta_n)^{\frac{1}{2}}=n^{\frac{\epsilon}{2}}\left(1-\frac{2\log n}{n}\right)^{\frac{1}{2}}.
\end{split}
\end{align*}
The proof is complete.
\end{proof}
\begin{lemma}\label{nl}
If conditions $(M_1),\ (M_3)$ and $(F_3),\ (F_4)$ hold, then
\begin{align*}
\max_{t\geq 0}\left[\frac{1}{2}M(t^2)-\int_{\Omega}\frac{F(x,t\mathscr{G}_{n,d})}{|x|^\beta}\right]
<\frac{1}{2}M\left(\frac{2\pi(2-\beta)}{\alpha_0}\right).
\end{align*}
\end{lemma}
\begin{proof}
Suppose by contradiction, that for all $n\in \mathbb{N}$, we have
\begin{align*}
\max_{t\geq 0}\left[\frac{1}{2}M(t^2)-\int_{\Omega}\frac{F(x,t\mathscr{G}_{n,d})}{|x|^\beta}\right]
\geq \frac{1}{2}M\left(\frac{2\pi(2-\beta)}{\alpha_0}\right).
\end{align*}
By Lemmas \ref{lemgc1} and \ref{lemgc2}, for each $n$ there exists $t_n>0$ such that
\begin{align*}
\frac{1}{2}M(t^2_n)-\int_{\Omega}\frac{F(x,t_n\mathscr{G}_{n,d})}{|x|^\beta}=\max_{t\geq 0}\left[\frac{1}{2}M(t^2)-\int_{\Omega}\frac{F(x,t\mathscr{G}_{n,d})}{|x|^\beta}\right]
\end{align*}
From this, and using $(F_3)$, one has $M(t^2_n)\geq M\left(\frac{2\pi(2-\beta)}{\alpha_0}\right)$. By $(M_1)$, $M:[0,+\infty)\rightarrow[0,+\infty)$ is an increasing bijection, and so
\begin{align}\label{5.5}
t^2_n\geq \frac{2\pi(2-\beta)}{\alpha_0}.
\end{align}
On the other hand,
\begin{align*}
\frac{d}{dt}\left[\frac{1}{2}M(t^2)-\int_{\Omega}\frac{F(x,t\mathscr{G}_{n,d})}{|x|^\beta}\right]\bigg|_{t=t_n}=0,
\end{align*}
from which we obtain
\begin{align}\label{5.6}
m(t^2_n)t^2_n&=\int_{\Omega}\frac{t_n\mathscr{G}_{n,d}\cdot\nabla F(x,t_n\mathscr{G}_{n,d})}{|x|^\beta}dx
= \int_{\Omega}\frac{t_nG_{n,d}f_1(x,t_n\mathscr{G}_{n,d})}{|x|^\beta}dx.
\end{align}
By Remark \ref{rem5.2} and $(F_4)$, given $\delta>0$ there exists $s_\delta>0$ such that
\begin{align}\label{5.7}
u_1 f_1(x,u_1,0,\ldots,0)\geq (\eta_0 -\delta)e^{\alpha_0|u_1|^2},\ \ \ \forall\ x\in \Omega,\ \ |u_1|\geq s_\delta.
\end{align}
Lemma \ref{leest} shows that for any $0<\epsilon<1$, $t_n|\mathscr{G}_{n,d}|\geq s_\delta$ in $B_\frac{d}{\varpi(n)}(0)\subset\Omega$ for $n$ sufficiently large, where $
\varpi(n)=\exp\left\{\frac{n^{(1+\epsilon)/2}}{2}\right\}. $
Thus, by (\ref{5.6}) and (\ref{5.7}), we have
\begin{align*}
\begin{split}
m(t^2_n)t^2_n\geq (\eta_0-\delta)\int_{B_\frac{d}{\varpi(n)}(0)} \frac{e^{\alpha_0|t_n G_{n,d}|^2}}{|x|^\beta}dx
&=(\eta_0-\delta)\Big(\frac{d}{\varpi(n)}\Big)^{2-\beta}\int_{B_1(0)} \frac{e^{\alpha_0|t_n \tilde{G}_n|^2}}{|x|^\beta}dx \\
&=2\pi(\eta_0-\delta)\Big(\frac{d}{\varpi(n)}\Big)^{2-\beta}\int^1_0 e^{\alpha_0|t_n \tilde{G}_n(\sigma)|^2}\sigma^{1-\beta} d\sigma.
\end{split}
\end{align*}
By performing the change of variable $\sigma=\tau^{\frac{2}{2-\beta}}$, we get
\begin{align*}
m(t^2_n)t^2_n\geq \frac{4\pi}{2-\beta}(\eta_0-\delta)\Big(\frac{d}{\varpi(n)}\Big)^{2-\beta}\int^{1}_0 e^{\frac{2\alpha_0|t_n \tilde{G}_n(\tau)|^2}{2-\beta}}\tau d\tau.
\end{align*}
Meanwhile, setting $\tau=e^{-t/2}$, we obtain
\begin{align*}
\begin{split}
m(t^2_n)t^2_n\geq \frac{2\pi}{2-\beta}(\eta_0-\delta)\Big(\frac{d}{\varpi(n)}\Big)^{2-\beta}\int^{+\infty}_0 e^{\frac{\alpha_0|t_n y_n(t)|^2}{2\pi(2-\beta)}}e^{-t}dt.
\end{split}
\end{align*}
Consequently,
\begin{align}\label{5.8}
\begin{split}
m(t^2_n)t^2_n&\geq \frac{2\pi}{2-\beta}(\eta_0-\delta)\Big(\frac{d}{\varpi(n)}\Big)^{2-\beta}\int^{+\infty}_n e^{\frac{\alpha_0t_n^2(n-2\log n)}{2\pi(2-\beta)}}e^{-t}dt \\
&=\frac{2\pi}{2-\beta}(\eta_0-\delta)d^{2-\beta}\exp\left\{\frac{\alpha_0t_n^2(n-2\log n)}{2\pi(2-\beta)}-(2-\beta)\log \varpi(n)-n\right\} \\
&=\frac{2\pi}{2-\beta}(\eta_0-\delta)d^{2-\beta}\exp\left\{\frac{\alpha_0t_n^2(n-2\log n)}{2\pi(2-\beta)}-\frac{(2-\beta)n^{\frac{1+\epsilon}{2}}}{2}-n\right\} \\
&=\frac{2\pi}{2-\beta}(\eta_0-\delta)d^{2-\beta}\exp\left\{\left[\frac{\alpha_0t_n^2}{2\pi(2-\beta)}-1\right]n-\frac{(2-\beta)n^{\frac{1+\epsilon}{2}}}{2}
-\frac{\alpha_0t_n^2}{\pi(2-\beta)}\log n\right\}.
\end{split}
\end{align}
From this
\begin{align}\label{5.9}
\begin{split}
1\geq \frac{2\pi}{2-\beta}(\eta_0-\delta)d^{2-\beta}\exp\left\{t^2_n n\left[\frac{\alpha_0\big(1-\frac{2\log n}{ n}\big)}{2\pi(2-\beta)}-\frac{(2-\beta)n^{\frac{1+\epsilon}{2}}+2n}{2t^2_n n}-\frac{\log{\big (m(t^2_n)t^2_n\big)}}{t^2_n n} \right]\right\},
\end{split}
\end{align}
thus, $\{t_n\}$ is bounded. Otherwise, since (\ref{1.4}) together with the bound $M(t)\leq C_1t^\theta+C_2$ implies $\log\big(m(t^2_n)t^2_n\big)=O(\log t_n)$, we would have that
\begin{align*}
\begin{split}
t^2_n n\left[\frac{\alpha_0\big(1-\frac{2\log n}{n}\big)}{2\pi(2-\beta)}-\frac{(2-\beta)n^{\frac{1+\epsilon}{2}}+2n}{2t^2_n n}-\frac{\log{\big (m(t^2_n)t^2_n\big)}}{t^2_n n} \right]\rightarrow+\infty,
\end{split}
\end{align*}
which contradicts (\ref{5.9}). Therefore $\{t_n\}$ has a convergent subsequence and, by (\ref{5.5}), $t_n\rightarrow t_0$ for some $t_0$ with $t_0^2\geq \frac{2\pi(2-\beta)}{\alpha_0}$. Moreover, using (\ref{5.8}), we must have $\frac{\alpha_0 t^2_0}{2\pi(2-\beta)}-1\leq 0$ and therefore,
\begin{align}\label{5.10}
t^2_n\rightarrow\frac{2\pi(2-\beta)}{\alpha_0}.
\end{align}
At this point, following arguments as in \cite{dmr1}, we are going to estimate (\ref{5.6}) more precisely. For this, in view of (\ref{5.7}), for $0<\delta<\eta_0$ and $n\in \mathbb{N}$ we set
\begin{equation*}
D_{n,\delta}:=\{x\in B_d(0):t_n G_{n,d}\geq s_\delta\}\ \ \mbox{and}\ \ E_{n,\delta}:=B_d(0)\backslash D_{n,\delta}.
\end{equation*}
Thus, by splitting the integral in (\ref{5.6}) over $D_{n,\delta}$ and $E_{n,\delta}$, and using (\ref{5.7}), it follows that
\begin{align}\label{5.11}
\begin{split}
m(t^2_n)t^2_n\geq &(\eta_0-\delta)\int_{B_d(0)}\frac{e^{\alpha_0 t^2_n G^2_{n,d}}}{|x|^\beta}dx-(\eta_0-\delta)\int_{E_{n,\delta}}\frac{e^{\alpha_0 t^2_n G^2_{n,d}}}{|x|^\beta}dx \\
&+\int_{E_{n,\delta}}\frac{t_n G_{n,d}f_1(x,t_n \mathscr{G}_{n,d})}{|x|^\beta}dx.
\end{split}
\end{align}
Since $G_{n,d}(x)\rightarrow 0$ for almost every $x\in B_d(0)$, we have that the characteristic functions $\chi_{E_{n,\delta}}$ satisfy
\begin{equation*}
\chi_{E_{n,\delta}}\rightarrow 1\ \ \mbox{a.e. in}\ \ B_d(0)\ \ \mbox{as}\ \ n\rightarrow+\infty.
\end{equation*}
Moreover, $t_n G_{n,d}<s_\delta$ in $E_{n,\delta}$. Thus, invoking the Lebesgue dominated convergence theorem, we obtain
\begin{equation*}
\int_{E_{n,\delta}}\frac{e^{\alpha_0 t^2_n G^2_{n,d}}}{|x|^\beta}dx\rightarrow\frac{2\pi}{2-\beta} d^{2-\beta} \ \ \mbox{and}\ \ \int_{E_{n,\delta}}\frac{t_n G_{n,d}f_1(x,t_n \mathscr{G}_{n,d})}{|x|^\beta}dx\rightarrow 0,\ \ \mbox{as}\ \ n\rightarrow+\infty.
\end{equation*}
Note that
\begin{equation*}
\begin{split}
\int_{B_d(0)}\frac{e^{\alpha_0 t^2_n G^2_{n,d}}}{|x|^\beta}dx=d^{2-\beta}\int_{B_1(0)}\frac{e^{\alpha_0 t^2_n \tilde{G}^2_n}}{|x|^\beta}dx=2\pi d^{2-\beta}\int^1_0{e^{\alpha_0 t^2_n \tilde{G}^2_n(\sigma)}}\sigma^{1-\beta}d\sigma.
\end{split}
\end{equation*}
By performing the change of variable $\sigma=\tau^{\frac{2}{2-\beta}}$, we get
\begin{align*}
\int_{B_d(0)}\frac{e^{\alpha_0 t^2_n G^2_{n,d}}}{|x|^\beta}dx=\frac{4\pi}{2-\beta}d^{2-\beta}\int^{1}_0 e^{\frac{2\alpha_0|t_n \tilde{G}_n(\tau)|^2}{2-\beta}}\tau d\tau.
\end{align*}
Meanwhile, setting $\tau=e^{-t/2}$ and using (\ref{5.5}), we obtain
\begin{align*}
\begin{split}
\int_{B_d(0)}\frac{e^{\alpha_0 t^2_n G^2_{n,d}}}{|x|^\beta}dx=&\frac{2\pi}{2-\beta}d^{2-\beta}\int^{+\infty}_0 e^{\frac{\alpha_0|t_n y_n(t)|^2}{2\pi(2-\beta)}}e^{-t}dt \\
\geq& \frac{2\pi}{2-\beta}d^{2-\beta}\int^{+\infty}_0 e^{y^2_n(t)-t}dt.
\end{split}
\end{align*}
Passing to the limit in (\ref{5.11}), we obtain that
\begin{align}\label{5.12}
m\left(\frac{2\pi(2-\beta)}{\alpha_0}\right)\frac{2\pi(2-\beta)}{\alpha_0}\geq (\eta_0-\delta)\left[\frac{2\pi}{2-\beta} d^{2-\beta}(1+e)-\frac{2\pi}{2-\beta} d^{2-\beta}\right]=(\eta_0-\delta)\frac{2\pi e}{2-\beta} d^{2-\beta},
\end{align}
and letting $\delta\rightarrow 0^+$, we get $\eta_0\leq \frac { (2-\beta)^2m\big(\frac {2\pi(2-\beta)}{\alpha_0}\big)}{\alpha_0 d^{2-\beta} e }$, which contradicts $(F_4)$. Thus, the lemma is proved.
\end{proof}
Now, we establish an estimate for the minimax level.
\begin{lemma}\label{lem5.3}
If conditions $(M_1),\ (M_3)$ and $(F_3)-(F_4)$ hold, then for small $\varepsilon$, it holds that
\begin{align*}
c_{M,\varepsilon}<\frac{1}{2}M\left(\frac{2\pi(2-\beta)}{\alpha_0}\right),
\end{align*}
where $c_{M,\varepsilon}$ is given as in (\ref{defmpl}).
\end{lemma}
\begin{proof}
Since $\|\mathscr{G}_{n,d}\|=1$, as in the proof of Lemma \ref{lemgc2}, we have that $I_\varepsilon(t\mathscr{G}_{n,d})\rightarrow -\infty$ as $t\rightarrow +\infty$. Consequently, $c_{M,\varepsilon}\leq\max_{t\geq 0}I_\varepsilon(t\mathscr{G}_{n,d})$ for all $n\in \mathbb{N}$. Thus, from Lemma \ref{nl}, taking $\varepsilon$ sufficiently small, we obtain the desired estimate.
\end{proof}
\begin{lemma}\label{lemms}
If $f_i$ has critical growth at $\infty$ and $(M_1)-(M_3)$, $(F_1)-(F_4)$ are satisfied, then for small $\varepsilon$, problem (\ref{P}) has one nontrivial mountain-pass type solution $U_{M,\varepsilon}$ at level $c_{M,\varepsilon}$, where $c_{M,\varepsilon}$ is given as in (\ref{defmpl}).
\end{lemma}
\begin{proof}
From (\ref{5.1}) and Lemma \ref{lem4.1}, there exists a bounded Palais-Smale sequence $\{U_n\}$ for $I_\varepsilon$ at level $c_{M,\varepsilon}$. Up to a subsequence, for some $U\in H^1_0(\Omega,\mathbb{R}^k)$, one has
\begin{align}\label{5.14}
U_n\rightharpoonup U\ \ \mbox{in}\ \ H^1_0(\Omega,\mathbb{R}^k); \qquad
U_n \rightarrow U\ \ \mbox{in}\ \ L^s(\Omega,\mathbb{R}^k) \ \mbox{for all}\ \ s\geq 1.
\end{align}
By Lemma \ref{lem4.1}, $\|U_n\|\leq C$ for some $C>0$, and $\|U\|\leq \liminf_{n\rightarrow\infty} \|U_n\|\leq C$. By $(F_3)$, we have that for small $\varepsilon$ it holds
\begin{align}\label{ms0}
\begin{split}
&\frac {1}{2\theta}\int_{\Omega}\bigg[\frac{U\cdot\nabla F(x,U)-2\theta F(x,U)}{|x|^\beta}-\varepsilon(2\theta-1) U\cdot H\bigg]dx \\
&\geq\frac {1}{2\theta}\int_{\Omega}\frac{U\cdot\nabla F(x,U)-2\theta F(x,U)}{|x|^\beta}dx-\frac {\varepsilon (2\theta-1)\|U\| \|H\|_*}{2\theta}
\geq 0.
\end{split}
\end{align}
Next, we will make some claims as follows.
\noindent{\bfseries Claim 1.} \ \ \ $U\neq \mathbf{0}$.
\begin{proof}
Suppose by contradiction that $U\equiv \mathbf{0}$. Then Lemma \ref{lem4.4} shows that $\int_{\Omega}U_n\cdot H dx\rightarrow 0$ and $\int_{\Omega}\frac{F(x,U_n)}{|x|^\beta}dx\rightarrow 0$, thus
\begin{align*}
\frac{1}{2}M(\|U_n\|^2)\rightarrow c_{M,\varepsilon}<\frac{1}{2}M\left(\frac{2\pi(2-\beta)}{\alpha_0}\right),
\end{align*}
then condition $(M_1)$ implies
\begin{align*}
\liminf_{n\rightarrow\infty}\|U_n\|^2<\frac{2\pi(2-\beta)}{\alpha_0}.
\end{align*}
By Lemma \ref{ms1}, we have $\|U_n\|^2\rightarrow 0$ (along a subsequence) and therefore $I_\varepsilon(U_n)\rightarrow 0$, which is absurd since $c_{M,\varepsilon}>0$. Hence $U\neq \mathbf{0}$.
\end{proof}
\noindent{\bfseries Claim 2.} \ \ \ Let $A:=\lim_{n\rightarrow \infty}\|U_n\|^2$, then $U$ is a weak solution of
\begin{eqnarray*}
\left\{ \arraycolsep=1.5pt
\begin{array}{ll}
-m(A)\Delta U=\frac{\nabla F(x,U)}{|x|^\beta}+\varepsilon H,\ \ &\mbox{in}\ \ \Omega,\\[2mm]
U=0,\ \ &\mbox{on}\ \ \partial\Omega.
\end{array}
\right.
\end{eqnarray*}
\begin{proof}
We define $C^\infty_0(\Omega,\mathbb{R}^k):=C^\infty_0(\Omega)\times \cdots\times C^\infty_0(\Omega)$. By $I_\varepsilon'(U_n)\rightarrow 0$ and Lemma \ref{lem4.4}, we see that
\begin{align*}
\begin{split}
m(A)\int_{\Omega}\nabla U\cdot\nabla \Phi dx-\int_{\Omega}\frac{\Phi\cdot\nabla F(x,U)}{|x|^\beta}dx-\varepsilon\int_{\Omega}\Phi\cdot Hdx=0,\quad \forall\Phi\in C^\infty_0(\Omega,\mathbb{R}^k).
\end{split}
\end{align*}
Since $C^\infty_0(\Omega)$ is dense in $H^1_0(\Omega)$, $C^\infty_0(\Omega,\mathbb{R}^k)$ is also dense in $H^1_0(\Omega,\mathbb{R}^k)$, and we conclude the claim.
\end{proof}
\noindent{\bfseries Claim 3.} \ \ \ $A:=\lim_{n\rightarrow \infty}\|U_n\|^2<\|U\|^2+\frac{2\pi(2-\beta)}{\alpha_0}$.
\begin{proof}
Suppose by contradiction that $A\geq \|U\|^2+\frac{2\pi(2-\beta)}{\alpha_0}\geq \frac{2\pi(2-\beta)}{\alpha_0}$. Therefore, from (\ref{5.1}) and Lemma \ref{lem4.4}, we obtain
\begin{align*}
\begin{split}
c_{M,\varepsilon}=&\lim_{n\rightarrow\infty}\left[I_\varepsilon(U_n)-\frac {1}{2\theta}\langle I'_\varepsilon(U_n),U_n\rangle_*\right] \\
=&\frac {1}{2\theta}\lim_{n\rightarrow\infty}\Big[\theta M(\|U_n\|^2)-m(\|U_n\|^2)\|U_n\|^2\Big] \\
&+\frac {1}{2\theta}\lim_{n\rightarrow\infty}\int_{\Omega}\frac{U_n\cdot\nabla F(x,U_n)-U\cdot\nabla F(x,U)}{|x|^\beta}dx \\
&+\frac {1}{2\theta}\int_{\Omega}\bigg[\frac{U\cdot\nabla F(x,U)-2\theta F(x,U)}{|x|^\beta}-\varepsilon(2\theta-1)U\cdot H\bigg]dx, \\
\end{split}
\end{align*}
thus, by $(M_3)$ and (\ref{ms0})
\begin{align*}
\begin{split}
c_{M,\varepsilon}\geq&\frac {1}{2\theta}\lim_{n\rightarrow\infty}\Big[\theta M(\|U_n\|^2)-m(\|U_n\|^2)\|U_n\|^2\Big] \\
&+\frac {1}{2\theta}\lim_{n\rightarrow\infty}\int_{\Omega}\frac{U_n\cdot\nabla F(x,U_n)-U\cdot\nabla F(x,U)}{|x|^\beta}dx \\
=&\frac {1}{2\theta}\Big[\theta M(A)-m(A)A\Big]+\frac {1}{2\theta}\lim_{n\rightarrow\infty}\int_{\Omega}\frac{U_n\cdot\nabla F(x,U_n)-U\cdot\nabla F(x,U)}{|x|^\beta}dx \\
\geq&\frac {1}{2\theta}\bigg[\theta M\left(\frac{2\pi(2-\beta)}{\alpha_0}\right)-m\left(\frac{2\pi(2-\beta)}{\alpha_0}\right)\frac{2\pi(2-\beta)}{\alpha_0}\bigg] \\
&+\frac {1}{2\theta}\lim_{n\rightarrow\infty}\int_{\Omega}\frac{U_n\cdot\nabla F(x,U_n)-U\cdot\nabla F(x,U)}{|x|^\beta}dx \\
=&\frac {1}{2}M\left(\frac{2\pi(2-\beta)}{\alpha_0}\right) \\
&-\frac {1}{2\theta}\bigg[m\left(\frac{2\pi(2-\beta)}{\alpha_0}\right)\frac{2\pi(2-\beta)}{\alpha_0}-\lim_{n\rightarrow\infty}\int_{\Omega}\frac{U_n\cdot\nabla F(x,U_n)-U\cdot\nabla F(x,U)}{|x|^\beta}dx\bigg]. \\
\end{split}
\end{align*}
Here, we assert
\begin{align*}
0\geq m\left(\frac{2\pi(2-\beta)}{\alpha_0}\right)\frac{2\pi(2-\beta)}{\alpha_0}-\lim_{n\rightarrow\infty}\int_{\Omega}\frac{U_n\cdot\nabla F(x,U_n)-U\cdot\nabla F(x,U)}{|x|^\beta}dx.
\end{align*}
Indeed, $\langle I'_\varepsilon(U_n),U_n\rangle_*\rightarrow 0$, (\ref{dch}) and Claim 2 indicate that
\begin{align*}
\begin{split}
0=&m(A)A-\lim_{n\rightarrow\infty}\int_{\Omega}\frac{U_n\cdot\nabla F(x,U_n)}{|x|^\beta}dx-\varepsilon\int_{\Omega}U\cdot H dx, \\
0=&m(A)\|U\|^2-\int_{\Omega}\frac{U\cdot\nabla F(x,U)}{|x|^\beta}dx-\varepsilon\int_{\Omega}U\cdot H dx.
\end{split}
\end{align*}
Subtracting the second equality from the first one and using $(M_2)$, which implies that $m(t)$ and $m(t)t$ are nondecreasing for $t\geq 0$, we get
\begin{align*}
\begin{split}
0&=m(A)(A-\|U\|^2)-\lim_{n\rightarrow\infty}\int_{\Omega}\frac{U_n\cdot\nabla F(x,U_n)-U\cdot\nabla F(x,U)}{|x|^\beta}dx \\
&\geq m(A-\|U\|^2)(A-\|U\|^2)-\lim_{n\rightarrow\infty}\int_{\Omega}\frac{U_n\cdot\nabla F(x,U_n)-U\cdot\nabla F(x,U)}{|x|^\beta}dx \\
&\geq m\left(\frac{2\pi(2-\beta)}{\alpha_0}\right)\frac{2\pi(2-\beta)}{\alpha_0}-\lim_{n\rightarrow\infty}\int_{\Omega}\frac{U_n\cdot\nabla F(x,U_n)-U\cdot\nabla F(x,U)}{|x|^\beta}dx.
\end{split}
\end{align*}
This proves the assertion. Together with Lemma \ref{lem5.3}, we conclude
\begin{align*}
\frac {1}{2}M\left(\frac{2\pi(2-\beta)}{\alpha_0}\right)\leq c_{M,\varepsilon}<\frac {1}{2}M\left(\frac{2\pi(2-\beta)}{\alpha_0}\right),
\end{align*}
which is absurd. Thus this claim is proved.
\end{proof}
\noindent{\bfseries Claim 4.} \ \ \ $A:=\lim_{n\rightarrow \infty}\|U_n\|^2=\|U\|^2$.
\begin{proof}
By the weak lower semicontinuity of the norm, we have $\|U\|^2\leq A$. We now show that the case $\|U\|^2< A$ cannot occur. Indeed, if $\|U\|^2< A$, then defining $Z_n=\frac{U_n}{\|U_n\|}$ and $Z_0=\frac{U}{A^{1/2}}$, we have $Z_n\rightharpoonup Z_0\ \mbox{in} \ H^1_0(\Omega,\mathbb{R}^k)$ and $\|Z_0\|<1$. Thus, by Lemma \ref{lemtm4},
\begin{align}\label{5.16}
\sup_n \int_{\Omega}\frac{e^{p|Z_n|^2}}{|x|^\beta}dx<\infty,\ \ \forall p<\frac{2\pi(2-\beta)} {1-\|Z_0\|^2}.
\end{align}
Since $A=\frac{A-\|U\|^2}{1-\|Z_0\|^2}$, it follows from Claim 3 that $A<\frac{2\pi(2-\beta)}{\alpha_0(1-\|Z_0\|^2)}$.
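Here we have used that $Z_0=\frac{U}{A^{1/2}}$ gives
\begin{align*}
1-\|Z_0\|^2=\frac{A-\|U\|^2}{A},\qquad\mbox{and hence}\qquad \frac{A-\|U\|^2}{1-\|Z_0\|^2}=A.
\end{align*}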
Thus, there exists $\zeta>0$ such that $\alpha_0\|U_n\|^2<\zeta<\frac{2\pi(2-\beta)}{1-\|Z_0\|^2}$ for $n$ sufficiently large. For $q>1$ close to $1$ and $\alpha>\alpha_0$ close to $\alpha_0$, we still have $q\alpha\|U_n\|^2< \zeta<\frac{2\pi(2-q\beta)}{1-\|Z_0\|^2}$ with $q\beta<2$, and invoking (\ref{5.16}), for some $C>0$ and $n$ large enough, we conclude that
\begin{align*}
\int_{\Omega}\frac{e^{q\alpha|U_n|^2}}{|x|^{q\beta}}dx\leq \int_{\Omega}\frac{e^{\zeta|Z_n|^2}}{|x|^{q\beta}}dx\leq C.
\end{align*}
Hence, using (\ref{3.1}), (\ref{5.14}) and H\"{o}lder's inequality, we get
\begin{align*}
\begin{split}
\left|\int_{\Omega} \frac{(U_n-U)\cdot\nabla F(x,U_n)}{|x|^\beta} dx\right|&\leq C_1\int_{\Omega}|U_n-U|\frac{e^{\alpha|U_n|^2}}{|x|^\beta} dx \\
&\leq C_1\|U_n-U\|_{\frac{q}{q-1}}\left(\int_{\Omega}\frac{e^{q\alpha|U_n|^2}}{|x|^{q\beta}} dx\right)^{1/q} \\
&\leq C_2\|U_n-U\|_{\frac{q}{q-1}}\rightarrow 0,
\end{split}
\end{align*}
as $n\rightarrow\infty.$ Since $\langle I'_\varepsilon(U_n),U_n-U\rangle_*\rightarrow 0$, by (\ref{dch}), it follows that $m(\|U_n\|^2)\langle U_n,U_n-U\rangle\rightarrow 0$. On the other hand,
\begin{align*}
\begin{split}
m(\|U_n\|^2)\langle U_n,U_n-U\rangle&=m(\|U_n\|^2)\|U_n\|^2-m(\|U_n\|^2)\int_{\Omega}\nabla U_n\cdot\nabla Udx \\
&\rightarrow m(A)A-m(A)\|U\|^2,
\end{split}
\end{align*}
which implies that $A=\|U\|^2$, a contradiction. Thus, this claim is proved.
\end{proof}
\noindent{\bfseries Finalizing the proof of Lemma \ref{lemms}}: Since $H^1_0(\Omega,\mathbb{R}^k)$ is a uniformly convex Banach space, (\ref{5.14}) and Claim 4, together with the Radon--Riesz theorem, give $U_n\rightarrow U$ in $H^1_0(\Omega,\mathbb{R}^k)$. Hence, using $I_\varepsilon'(U_n)\rightarrow 0$ from (\ref{5.1}) and Lemma \ref{lem4.4}, we have
\begin{equation*}
m(\|U\|^2)\int_{\Omega}\nabla U \cdot\nabla \Phi=\int_{\Omega}\frac{\Phi\cdot\nabla {F(x,U)}}{|x|^\beta} dx+\varepsilon\int_{\Omega}H \cdot \Phi dx,\ \ \forall \Phi\in C^\infty_0(\Omega,\mathbb{R}^k).
\end{equation*}
Since $C^\infty_0(\Omega,\mathbb{R}^k)$ is dense in $H^1_0(\Omega,\mathbb{R}^k)$, we conclude that $U_{M,\varepsilon}:=U$ is a mountain-pass type solution of problem (\ref{P}) with $I_\varepsilon(U_{M,\varepsilon})=c_{M,\varepsilon}>0$; by Claim 1 it is nontrivial, and the proof is complete.
\end{proof}
Finally, let us find a minimum type solution $V_{0,\varepsilon}$ with $I_\varepsilon(V_{0,\varepsilon})=c_{0,\varepsilon}<0$, where $c_{0,\varepsilon}$ is given as in (\ref{defc0}).
\begin{lemma}\label{lemms2}
For small $\varepsilon$, problem (\ref{P}) has a nontrivial minimum type solution $V_{0,\varepsilon}$ with $I_\varepsilon(V_{0,\varepsilon})=c_{0,\varepsilon}<0$.
\end{lemma}
\begin{proof}
Let $\rho_\varepsilon$ be as in Lemma \ref{lemgc1}. Then we can choose $\varepsilon$ sufficiently small such that
\begin{align*}
\rho_\varepsilon<\left(\frac{2\pi(2-\beta)}{\alpha_0}\right)^{1/2}.
\end{align*}
Since $\overline{B}_{\rho_\varepsilon}$ is a convex, complete metric space with the metric induced by the norm of $H^1_0(\Omega,\mathbb{R}^k)$, and the functional $I_\varepsilon$ is of class $C^1$ and bounded below on $\overline{B}_{\rho_\varepsilon}$, the Ekeland variational principle yields a sequence $\{V_n\}$ in $\overline{B}_{\rho_\varepsilon}$ such that
\begin{equation*}
I_\varepsilon(V_n)\rightarrow c_{0,\varepsilon}=\inf_{\|V\|\leq \rho_\varepsilon}I_\varepsilon(V)<0\ \ \mbox{and}\ \ I'_\varepsilon(V_n)\rightarrow 0.
\end{equation*}
Observing that
\begin{align*}
\|V_n\|^2\leq \rho_\varepsilon^2<\frac{2\pi(2-\beta)}{\alpha_0},
\end{align*}
by Lemma \ref{ms1}, there exists a strongly convergent subsequence; therefore, for some $V_{0,\varepsilon}$, $V_n\rightarrow V_{0,\varepsilon}$ strongly in $H^1_0(\Omega,\mathbb{R}^k)$. Consequently, $V_{0,\varepsilon}$ is a minimum type solution of problem (\ref{P}) with $I_\varepsilon(V_{0,\varepsilon})=c_{0,\varepsilon}<0$. We claim $V_{0,\varepsilon}\neq \mathbf{0}$. Indeed, suppose by contradiction that $V_{0,\varepsilon}= \mathbf{0}$; then $0>c_{0,\varepsilon}=I_\varepsilon(V_{0,\varepsilon})=I_\varepsilon(\mathbf{0})=0$, which is absurd, and the lemma is proved.
\end{proof}
\noindent{\bfseries Proof of Theorem \ref{thm1.3}.}
By Lemmas \ref{lemms} and \ref{lemms2}, there exists $\varepsilon_c>0$ such that for each $0<\varepsilon<\varepsilon_c$, there exist nontrivial critical points $U_{M,\varepsilon}$ of $I_\varepsilon$ at level $c_{M,\varepsilon}$ and $V_{0,\varepsilon}$ of $I_\varepsilon$ at level $c_{0,\varepsilon}$. Finally, we claim that $U_{M,\varepsilon}\neq V_{0,\varepsilon}$. Suppose by contradiction that $ U_{M,\varepsilon}\equiv V_{0,\varepsilon}$; then $0>c_{0,\varepsilon}=\lim_{n\rightarrow\infty}I_\varepsilon(V_n)=I_\varepsilon(V_{0,\varepsilon})
=I_\varepsilon(U_{M,\varepsilon})=\lim_{n\rightarrow\infty}I_\varepsilon(U_n)=c_{M,\varepsilon}>0$, which is absurd. Thus, the proof of Theorem \ref{thm1.3} is complete.
\qed
\noindent{\bfseries Acknowledgements.} The authors have been supported by NSFC 11971392, Natural Science Foundation of Chongqing, China cstc2019jcyjjqX0022 and Fundamental Research Funds for the Central Universities XDJK2019TY001.
\end{document} |
\begin{document}
\draft \preprint{\hbox to \hsize{\hfil\vtop{\hbox{IASSNS-HEP-99/36} \hbox{April, 1999}}}}
\title{State Vector Collapse Probabilities and Separability\\ of Independent Systems in Hughston's\\ Stochastic Extension of the Schr\"odinger Equation \\ } \author{Stephen L. Adler\\} \address{Institute for Advanced Study\\ Princeton, NJ 08540\\ } \author{Lawrence P. Horwitz \footnote{On leave from School of Physics and Astronomy, Raymond and Beverly Sackler Faculty of Exact Sciences, Tel Aviv University, Ramat Aviv, Israel, and Department of Physics, Bar Ilan University, Ramat Gan, Israel.}} \address{Institute for Advanced Study\\ Princeton, NJ 08540\\ }
\maketitle
\leftline{Send correspondence to:}
{\leftline{Stephen L. Adler} \leftline{Institute for Advanced Study} \leftline{Olden Lane, Princeton, NJ 08540} \leftline{Phone 609-734-8051; FAX 609-924-8399; email [email protected]}}
\begin{abstract} We give a general proof that Hughston's stochastic extension of the Schr\"odinger equation leads to state vector collapse to energy eigenstates, with collapse probabilities given by the quantum mechanical probabilities computed from the initial state. We also show that for a system composed of independent subsystems, Hughston's equation separates into similar independent equations for each of the subsystems, correlated only through the common Wiener process that drives the state reduction. \end{abstract}
A substantial body of work [1] has addressed the problem of state vector collapse by proposing that the Schr\"odinger equation be modified to include a stochastic process, presumably arising from physics at a deeper level, that drives the collapse process. Although interesting models have been constructed, there so far has been no demonstration that for a generic Hamiltonian, one can construct a stochastic dynamics that collapses the state vector with the correct quantum mechanical probabilities. Part of the problem has been that most earlier work has used stochastic equations that do not preserve state vector normalization, requiring additional ad hoc assumptions to give a consistent physical interpretation.
Various authors [2] have proposed rewriting the Schr\"odinger equation as an equivalent dynamics on projective Hilbert space, i.e., on the space of rays, a formulation in which the imposition of a state vector normalization condition is not needed. Within this framework, Hughston [3] has proposed a simple stochastic extension of the Schr\"odinger equation, constructed solely from the Hamiltonian function, and has shown that his equation leads to state vector reduction to an energy eigenstate, with energy conservation in the mean throughout the reduction process. In the simplest spin-1/2 case, Hughston exhibits an explicit solution that shows that his equation leads to collapse with the correct quantum mechanical probabilities, but the issue of collapse probabilities in the general case has remained open. In this Letter, we shall give a general proof that Hughston's equation leads to state vector collapse to energy eigenstates with the correct quantum mechanical probabilities, using the martingale or ``gambler's ruin'' argument pioneered by Pearle [4]. We shall also show that Hughston's equation separates into independent equations of similar structure for a wave function constructed as the product of independent subsystem wave functions.
We begin by explaining the basic elements needed to understand Hughston's equation, working in an $n+1$ dimensional Hilbert space. We denote the
general state vector in this space by $| z \rangle$, with $z$ a shorthand for the complex projections $z^1,z^2,...,z^{n+1}$ of the state vector on an arbitrary fixed basis. Letting $F$ be an arbitrary Hermitian operator, and using the summation convention that repeated indices are summed over their range, we define
\begin{mathletters} \label{allequations} \begin{equation}
(F) \equiv { \langle z | F | z \rangle \over \langle z |z \rangle } = { \overline z^{\alpha} F_{\alpha \beta} z^{\beta} \over \overline z^{\gamma} z^{\gamma} }~~~, \label{equationa} \end{equation} so that $(F)$ is the expectation of the operator $F$
in the state $|z\rangle$, independent of the ray representative and normalization chosen for this state. Note that in this notation $(F^2)$ and $(F)^2$ are not the same; their difference is in fact the variance $[\Delta F]^2$, \begin{equation} [\Delta F]^2 = (F^2)-(F)^2~~~. \label{equationb} \end{equation} \end{mathletters}
We shall use two other parameterizations for the state $|z\rangle$ in what follows. Since $(F)$ is homogeneous of degree zero in both $z^{\alpha}$ and $\overline z^{\alpha}$, let us define new complex coordinates $t^j$ by \begin{equation}t^j=z^j/z^0,~~ \overline t^j=\overline z^j / \overline z^0~,~~~j=1,...,n. ~~~ \end{equation} Next, it is convenient to split each of the complex numbers $t^j$ into its real and imaginary part $t^j_R,~t^j_I$, and to introduce a $2n$ component real vector $x^a,~a=1,...,2n$ defined by $x^1=t^1_R,~x^2=t^1_I,~x^3=t^2_R,~ x^4=t^2_I,...,x^{2n-1}=t^n_R,~x^{2n}=t^n_I$. Clearly, specifying the projective coordinates $t^j$ or $x^a$ uniquely determines the
unit ray containing the unnormalized state $|z\rangle$, while leaving
the normalization and ray representative of the state $|z\rangle$ unspecified.
As discussed in Refs. [2], projective Hilbert space is also a Riemannian space with respect to the Fubini-Study metric $g_{\alpha \beta}$, defined by the line element \begin{mathletters} \label{allequations} \begin{equation} ds^2= g_{\alpha \beta} d\overline z^{\alpha} dz^{\beta}
\equiv 4\left( 1- { | \langle z | z+dz \rangle |^2 \over \langle z |z \rangle
\langle z+dz | z+dz \rangle } \right) ~~~. \label{equationa} \end{equation} Abbreviating $\overline z^{\gamma} z^{\gamma} \equiv \overline z \cdot z$, a simple calculation gives \begin{equation} g_{\alpha \beta}=4(\delta_{\alpha \beta} \overline z \cdot z -z^{\alpha} \overline z^{\beta})/(\overline z \cdot z)^2 =4 {\partial \over \partial \overline z^{\alpha} } {\partial \over \partial z^{\beta} } \log \overline z \cdot z~~~. \label{equationb} \end{equation} \end{mathletters} Because of the homogeneity conditions $\overline z^{\alpha} g_{\alpha \beta} =z^{\beta} g_{\alpha \beta}=0$, the metric $g_{\alpha \beta}$ is not invertible, but if we hold the coordinates $\overline z^0,~z^0$ fixed in the variation of Eq.~(3a) and go over to the projective coordinates $t^j$, we can rewrite the line element of Eq.~(3a) as \begin{mathletters} \label{allequations} \begin{equation} ds^2=g_{jk}d\overline t^j dt^k~~~, \label{equationa} \end{equation} with the invertible metric [5] \begin{equation} g_{jk}={4[(1+\overline t^{\ell} t^{\ell}) \delta_{jk} - t^j \overline t^k ] \over (1+\overline t^m t^m)^2 }~~~, \label{equationb} \end{equation} with inverse \begin{equation} g^{jk}={1 \over 4} (1+\overline t^m t^m) (\delta_{jk} + t^j \overline t^k) ~~~. \label{equationc} \end{equation} Reexpressing the complex projective coordinates $t^j$ in terms of the real coordinates $x^a$, the line element can be rewritten as \begin{eqnarray} ds^2=&&g_{ab}dx^adx^b~~~,\nonumber\\ g_{ab}=&&{4[(1+x^dx^d)\delta_{ab}-(x^ax^b+\omega_{ac}x^c\omega_{bd}x^d)] \over (1+x^e x^e)^2}~~~,\nonumber\\ g^{ab}=&&{1\over 4} (1+x^e x^e)(\delta_{ab}+ x^ax^b+\omega_{ac}x^c\omega_{bd}x^d)~~~. \label{equationd} \end{eqnarray} \end{mathletters} Here $\omega_{ab}$ is a numerical tensor whose only nonvanishing elements are $\omega_{a=2j-1 ~b=2j}=1$ and $\omega_{a=2j~b=2j-1}=-1$ for $j=1,...,n$. As discussed by Hughston, one can define a complex structure $J_a^{~b}$ over the entire projective Hilbert space for which $J_a^{~c}J_b^{~d}g_{cd}=g_{ab},$ $J_a^{~b}J_b^{~c}=-\delta_a^c$, such that $\Omega_{ab}=g_{bc} J_a^{~c}$ and $\Omega^{ab}=g^{ac}J_c^{~b}$ are antisymmetric tensors. At $x=0$, the metric and complex structure take the values \begin{eqnarray} g_{ab}=&&4 \delta_{ab}~,~~g^{ab}={1 \over 4} \delta_{ab}~~~,\nonumber\\ J_a^{~b}=&&\omega_{ab}~,~~\Omega_{ab}=4\omega_{ab}~, ~~\Omega^{ab}={1\over 4}\omega_{ab}~~~. \end{eqnarray}
Returning to Eq.~(1a), we shall now derive some identities that are central to what follows. Differentiating Eq.~(1a) with respect to $\overline z^{\alpha}$, with respect to $z^{\beta}$, and with respect to both $\overline z^{\alpha}$ and $z^{\beta}$, we get \begin{mathletters} \label{allequations} \begin{eqnarray}
\langle z | z \rangle {\partial (F) \over \partial \overline z^{\alpha}} =&&F_{\alpha \beta} z^{\beta} - (F) z^{\alpha}~~~,\nonumber\\
\langle z | z \rangle {\partial (F) \over \partial z^{\beta}}=&& \overline z^{\alpha} F_{\alpha \beta} - (F) \overline z^{\beta}~~~,\nonumber\\
\langle z | z \rangle^2 {\partial^2 (F) \over \partial \overline z^{\alpha} \partial z^{\beta} }=&&
\langle z |z \rangle [F_{\alpha \beta}-\delta_{\alpha \beta} (F) ] +2z^{\alpha} \overline z^{\beta} (F) - \overline z^{\gamma} F_{\gamma \beta} z^{\alpha}-\overline z^{\beta} F_{\alpha \gamma} z^{\gamma}~~~. \label{equationa} \end{eqnarray} Writing similar expressions for a second operator expectation $(G)$, contracting in various combinations with the relations of Eq.~(6a), and using the homogeneity conditions \begin{equation} \overline z^{\alpha} {\partial (F) \over \partial \overline z^{\alpha} } =z^{\beta} {\partial (F) \over \partial z^{\beta} } =\overline z^{\alpha} {\partial^2 (F) \over \partial \overline z^{\alpha} \partial z^{\beta}} =z^{\beta} {\partial^2 (F) \over \partial \overline z^{\alpha} \partial z^{\beta} } =0 \label{equationb} \end{equation} \end{mathletters} to eliminate derivatives with respect to $\overline z^0,~z^0$, we get the following identities, \begin{mathletters} \label{allequations} \begin{eqnarray}
-i(FG-GF)&&=-i \langle z| z \rangle \left( {\partial (F) \over \partial z^{\alpha}} {\partial (G) \over \partial \overline z^{\alpha}} - {\partial (G) \over \partial z^{\alpha}} {\partial (F) \over \partial \overline z^{\alpha}} \right) =2\Omega^{ab} \nabla_a (F) \nabla_b (G)~~~,\nonumber\\
(FG+GF)-2(F)(G)&&= \langle z| z \rangle \left( {\partial (F) \over \partial z^{\alpha}} {\partial (G) \over \partial \overline z^{\alpha}} + {\partial (G) \over \partial z^{\alpha}} {\partial (F) \over \partial \overline z^{\alpha}} \right) =2g^{ab} \nabla_a (F) \nabla_b (G)~~~,\nonumber\\ (FGF)-(F^2)(G)&&-(F)(FG+GF)+2(F)^2(G)\nonumber\\
&&=\langle z | z \rangle ^2 {\partial (F) \over \partial z^{\alpha}} {\partial^2 (G) \over \partial \overline z^{\alpha} \partial z^{\beta}} {\partial (F) \over \partial \overline z^{\beta}} =2\nabla^a (F) \nabla^b (F) \nabla_a \nabla_b (G), \label{equationa} \end{eqnarray} with $\nabla_a$ the covariant derivative with respect to the Fubini-Study metric. It is not necessary to use the detailed form of the affine connection to verify the right hand equalities in these identities, because since $(G)$ is a Riemannian scalar, $\nabla_a \nabla_b (G)$$ =\nabla_a \partial_b (G)$, and since projective Hilbert space is a homogeneous manifold, it suffices to verify the identities at the single point $x=0$, where the affine connection vanishes and thus $\nabla_a \nabla_b (G)=\partial_a \partial_b (G)$. Using Eqs.~(7a) and the chain rule we also find \begin{equation} -\nabla^a [(F^2)-(F)^2] \nabla_a (G)= -{1\over 2} (F^2 G+G F^2) +(F^2)(G) + (F) (FG+GF) -2(F)^2(G)~~~, \label{equationb} \end{equation} which when combined with the final identity in Eq.~(7a) gives \begin{equation} \nabla^a(F) \nabla^b(F) \nabla_a \nabla_b (G) -{1\over 2} \nabla^a [(F^2)-(F)^2] \nabla_a (G)= -{1\over 4}([F,[F,G]])~~~, \label{equationc} \end{equation} \end{mathletters} the right hand side of which vanishes when the operators $F$ and $G$ commute [6].
Let us now turn to Hughston's stochastic differential equation, which in our notation is \begin{mathletters} \label{allequations} \begin{equation} dx^a=[2 \Omega^{ab}\nabla_b(H)-{1\over 4}\sigma^2 \nabla^aV]dt +\sigma\nabla^a(H) dW_t~~~, \label{equationa} \end{equation} with $W_t$ a Brownian motion or Wiener process, with $\sigma$ a parameter governing the strength of the stochastic terms, with $H$ the Hamiltonian operator and $(H)$ its expectation, and with $V$ the variance of the Hamiltonian, \begin{equation} V=[\Delta H]^2=(H^2)-(H)^2~~~. \label{equationb} \end{equation} \end{mathletters} When the parameter $\sigma$ is zero, Eq.~(8a) is just the transcription of the Schr\"odinger equation to projective Hilbert space. For the time evolution of a general function $G[x]$, we get by Taylor expanding $G[x+dx]$ and using the It\^o stochastic calculus rules \begin{mathletters} \label{allequations} \begin{equation} [dW_t]^2=dt~,~~[dt]^2=dtdW_t=0~~~, \label{equationa} \end{equation} the corresponding stochastic differential equation \begin{equation} dG[x]=\mu dt + \sigma \nabla_aG[x]\nabla^a(H) dW_t~~~, \label{equationb} \end{equation} with the drift term $\mu$ given by \begin{equation} \mu=2 \Omega^{ab} \nabla_aG[x]\nabla_b(H)-{1\over 4} \sigma^2\nabla^aV\nabla_a G[x]+{1\over 2}\sigma^2 \nabla^a(H)\nabla^b(H)\nabla_a\nabla_bG[x]~~~. \label{equationc} \end{equation} \end{mathletters} Hughston shows that with the $\sigma^2$ part of the drift term chosen as in Eq.~(8a), the drift term $\mu$ in Eq.~(9c) vanishes for the special case $G[x]=(H)$, guaranteeing conservation of the expectation of the energy with respect to the stochastic evolution of Eq.~(8a). But referring to Eq. (7c) and the first identity in Eq.~(7a), we see that in fact a much stronger result is also true, namely that $\mu$ vanishes [and thus the stochastic process of Eq.~(9b) is a martingale] whenever $G[x]=(G)$, with $G$ any operator that commutes with the Hamiltonian $H$.
Let us now make two applications of this fact. First, taking $G[x]=V= (H^2)-(H)^2$, we see that the contribution from $(H^2)$ to $\mu$ vanishes, so the drift term comes entirely from $-(H)^2$. Substituting this into $\mu$ gives $-2(H)$ times the drift term produced by $(H)$, which is again zero, plus an extra term \begin{mathletters} \label{allequations} \begin{equation} -\sigma^2 \nabla^a(H)\nabla ^b(H)\nabla_a(H)\nabla_b(H) =-\sigma^2V^2~~~, \label{equationa} \end{equation} where we have used the relation $V=\nabla_a(H)\nabla^a(H)$ which follows from the $F=G=H$ case of the middle identity of Eq.~(7a). Thus the variance $V$ of the Hamiltonian satisfies the stochastic differential equation, derived by Hughston by a more complicated method, \begin{equation} dV=-\sigma^2 V^2 dt + \sigma \nabla_aV\nabla^a(H) dW_t~~~. \label{equationb} \end{equation} This implies that the expectation $E[V]$ with respect to the stochastic process obeys \begin{equation} E[V_t]=E[V_0]-\sigma^2 \int_0^t ds E[V_s^2]~~~, \label{equationc} \end{equation} which using the inequality $0\leq E[\{V-E[V]\}^2]=E[V^2]-E[V]^2$ gives the inequality \begin{equation} E[V_t] \leq E[V_0] -\sigma^2 \int_0^t ds E[V_s]^2~~~. \label{equationd} \end{equation} \end{mathletters} Since $V$ is necessarily positive, Eq.~(10d) implies that $E[V_{\infty}]=0$, and again using positivity of $V$ this implies that $V_s$ vanishes as $s \to \infty$, apart from a set of outcomes of probability measure zero. Thus, as concluded by Hughston, the stochastic term in his equation drives the system, as $t \to \infty$, to an energy eigenstate.
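To spell out this last step: by Eq.~(10c), $E[V_t]$ is nonincreasing and bounded below by zero, so the limit $L\equiv\lim_{t\to\infty}E[V_t]$ exists; if $L$ were positive, then since $E[V_s]\geq L$ for all $s$, Eq.~(10d) would give
\[
E[V_t]\leq E[V_0]-\sigma^2\int_0^t ds\, E[V_s]^2\leq E[V_0]-\sigma^2 L^2 t~~~,
\]
which becomes negative for large $t$; hence $L=0$.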
As our second application of the vanishing of the drift term $\mu$ for expectations of operators that commute with $H$, let us consider the
projectors $\Pi_e\equiv |e\rangle \langle e| $ on a complete set of
energy eigenstates $|e \rangle$. By definition, these projectors all commute with $H$, and so the drift term $\mu$ vanishes in the stochastic differential equation for $G[x]=(\Pi_e)$, and consequently the expectations $E[(\Pi_e)]$ are time independent; additionally, by completeness of
the states $|e\rangle$, we have $\sum_e (\Pi_e)=1$. But these are just the conditions for Pearle's [4] gambler's ruin argument to apply. At time zero, $E[(\Pi_e)]=(\Pi_e)\equiv p_e$ is the absolute value squared of the quantum mechanical amplitude
to find the initial state in energy eigenstate $|e \rangle$. At $t=\infty$, the system always evolves to an energy eigenstate, with the eigenstate
$|f\rangle $ occurring with some probability $P_f$. The expectation $E[(\Pi_e)]$, evaluated at infinite time, is then \begin{equation} E[(\Pi_e)]=1 \times P_e + \sum_{f \neq e} 0 \times P_f = P_e~~~; \end{equation} hence $p_e=P_e$ for each $e$ and the state collapses into energy eigenstates at $t=\infty$ with probabilities given by the usual quantum mechanical rule applied to the initial wave function [7].
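As a purely illustrative aside (not part of the argument above), the collapse statistics can be checked numerically. Assume a Hilbert-space stochastic equation of the same energy-driven type, $d|\psi\rangle=\big[-iH\,dt+\sigma\big(H-(H)\big)\,dW_t-{1\over 2}\sigma^2\big(H-(H)\big)^2\,dt\big]|\psi\rangle$, whose drift is fixed by requiring norm preservation under the It\^o rules; we use it here only as a convenient stand-in for Eq.~(8a), up to convention-dependent numerical factors. A minimal Euler-Maruyama simulation sketch in Python (with illustrative parameter values) is:
\begin{verbatim}
# Illustrative sketch only: Euler-Maruyama integration of the energy-driven
# stochastic equation quoted above, renormalizing after each step to control
# discretization error.  Collapse frequencies should approach the Born weights.
import numpy as np

rng = np.random.default_rng(1)
E = np.array([0.0, 1.0, 2.5])                  # eigenvalues of a diagonal H
psi0 = np.array([0.6, 0.7, np.sqrt(0.15)], dtype=complex)
sigma, dt, n_steps, n_traj = 1.0, 5e-3, 8000, 400

counts = np.zeros(len(E))
for _ in range(n_traj):
    psi = psi0.copy()
    for _ in range(n_steps):
        h = np.real(np.vdot(psi, E * psi))     # (H) in the current state
        dW = np.sqrt(dt) * rng.standard_normal()
        psi = psi + (-1j * E - 0.5 * sigma**2 * (E - h)**2) * psi * dt \
                  + sigma * (E - h) * psi * dW
        psi /= np.linalg.norm(psi)
    counts[np.argmax(np.abs(psi)**2)] += 1     # eigenstate the trajectory settled on

print("collapse frequencies :", counts / n_traj)
print("Born weights |c_e|^2 :", np.abs(psi0)**2)
\end{verbatim}
Over many runs the empirical frequencies should approach $(0.36,0.49,0.15)=|\langle e|\psi_0\rangle|^2$, in line with $p_e=P_e$; this is only a consistency illustration, not a substitute for the martingale argument.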
Let us now examine the structure of Hughston's equation for a Hilbert space constructed as the direct product of independent subsystem Hilbert spaces, so that \begin{mathletters} \label{allequations} \begin{eqnarray}
|z\rangle =&& \prod_{\ell} |z_{\ell} \rangle~~~,\nonumber\\ H=&&\sum_{\ell} H_{\ell}~~~, \label{equationa} \end{eqnarray}
with $H_{\ell}$ acting as the unit operator on the states $|z_{k}\rangle ~,~~ k \neq \ell$. Then a simple calculation shows that the expectation of the Hamiltonian $(H)$ and its variance $V$ are both additive over the subsystem Hilbert spaces, \begin{eqnarray} (H)=&&\sum_{\ell} (H_{\ell})_{\ell}~~~,\nonumber\\ V=\sum_{\ell} V_{\ell} =&&\sum_{\ell}[ (H_{\ell}^2)_{\ell} -(H_{\ell})_{\ell}^2]~~~, \label{equationb} \end{eqnarray} \end{mathletters} with $(F_{\ell})_{\ell}$ the expectation of the operator $F_{\ell}$ formed according to Eq.~(1a) with respect
to the subsystem wave function $|z_{\ell}\rangle$. In addition, the Fubini-Study line element is also additive over the subsystem Hilbert spaces, since [8] \begin{eqnarray}
1-ds^2/4=&& {| \langle z | z+dz \rangle |^2 \over \langle z |z \rangle
\langle z+dz | z+dz \rangle } =\prod_{\ell}
{ | \langle z_{\ell} | z_{\ell}+dz_{\ell} \rangle |^2 \over
\langle z_{\ell} |z_{\ell} \rangle \langle z_{\ell}+dz_{\ell}
| z_{\ell}+dz_{\ell} \rangle }\nonumber\\ =&&\prod_{\ell}[1-ds_{\ell}^2/4]=1-[\sum_{\ell} ds_{\ell}^2]/4 +{\rm O}(ds^4) ~~~. \end{eqnarray} As a result of Eq.~(13), the metric $g^{ab}$ and complex structure $\Omega^{ab}$ block diagonalize over the independent subsystem subspaces. Equation (12b) then implies that Hughston's stochastic extension of the Schr\"odinger equation given in Eq.~(8a) separates into similar equations for the subsystems, that do not refer to one another's $x^a$ coordinates, but are correlated only through the common Wiener process $dW_t$ that appears in all of them. Under the assumption [9] that $\sigma \sim M_{\rm Planck}^{-1/2}$ in microscopic units with $\hbar =c=1$, these correlations will be very small; it will be important to analyze whether they can have observable physical consequences on laboratory or cosmological scales [10].
To summarize, we have shown that Hughston's stochastic extension of the Schr\"odinger equation has properties that make it a viable physical model for state vector reduction. This opens the challenge of seeing whether it can be derived as a phenomenological approximation to a fundamental pre-quantum dynamics. Specifically, we suggest that since Adler and Millard [11] have argued that quantum mechanics can emerge as the thermodynamics of an underlying non-commutative operator dynamics, it may be possible to show that Hughston's stochastic process is the leading statistical fluctuation correction to this thermodynamics.
\acknowledgments This work was supported in part by the Department of Energy under Grant \#DE--FG02--90ER40542. One of us (S.L.A.) wishes to thank J. Anandan for conversations introducing him to the Fubini-Study metric. The other (L.P.H.) wishes to thank P. Leifer for many discussions on the properties of the complex projective space. \begin{references} \bibitem{[1]} For a representative, but not exhaustive, survey of the earlier literature, see the papers of Di\'osi, Ghirardi et. al., Gisin, Pearle, and Percival cited by Hughston, Ref. [3] below. \bibitem{[2]} T.W.B. Kibble, Commun. Math. Phys. {\bf 65}, 189 (1979); D. A. Page, Phys. Rev. A {\bf 36}, 3479 (1987); Y. Aharanov and J. Anandan, Phys. Rev. Lett. {\bf 58}, 1593 (1987); J. Anandan and Y. Aharanov, Phys. Rev. D {\bf 38}, 1863 (1988) and Phys. Rev. Lett. {\bf 65}, 1697 (1990); G. W. Gibbons, J. Geom. Phys. {\bf 8}, 147 (1992); L. P. Hughston, ``Geometric aspects of quantum mechanics'', in S. A. Huggett, ed., {\it Twistor theory}, Marcel Dekker, New York, 1995; A. Ashtekar and T. A. Schilling, preprint gr-qc/9706069. For related work, see A. Heslot, Phys. Rev. D {\bf 31}, 1341 (1985) and S. Weinberg, Phys. Rev. Lett. {\bf 62}, 485 (1989) and Ann. Phys. (NY) {\bf 194}, 336 (1989).
\break \bibitem{[3]} L. P. Hughston, Proc. Roy. Soc. Lond. A {\bf 452}, 953 (1996). \bibitem{[4]} P. Pearle, Phys. Rev. D {\bf 13}, 857 (1976); Phys. Rev. D {\bf 29}, 235 (1984); Phys. Rev. A {\bf 39}, 2277 (1989).
\break \bibitem{[5]} What we have called $z^0$ could be any $z^{\alpha}\neq 0$. There is therefore a set of holomorphically overlapping patches, so that the metric of Eq.~(4b) is globally defined. See, for example, S. Kobayashi and K. Nomizu, {\it Foundations of Differential Geometry}, Vol. II, p. 159, Wiley Interscience, New York, 1969.
\break \bibitem{[6]} An alternative demonstration of this result uses the fact, noted by Hughston [3], that $\xi^a_F\equiv \Omega^{ab} \nabla_b(F)$ is a Killing vector obeying $\nabla_c\xi_F^a+\nabla^a \xi_{Fc}=0$. First rewrite $\nabla^a(F)\nabla^b(F)\nabla_a\nabla_b(G)$ as $\Omega^{ca} \Omega^{eb} \xi_{Fc}\xi_{Fe} \nabla_a\nabla_b(G)$ $=\xi_{Fc}\xi_{Fe}\Omega^{ca} \nabla_a \xi_G^e$. By the Killing vector property, this becomes $-\xi_{Fc}\xi_{Fe}\Omega^{ca}\nabla^e \xi_{Ga}$, which can be rewritten as $-\nabla^e[\xi_{Fe}\xi_{Fc}\Omega^{ca}\xi_{Ga}]$ $+\nabla^e\xi_{Fe}\, \xi_{Fc}\Omega^{ca}\xi_{Ga}$ $+\xi_{Fe}\nabla^e\xi_{Fc}\,\Omega^{ca}\xi_{Ga}$. When $F$ and $G$ commute, the first two terms vanish by the first identity in Eq.~(7a), while using the Killing vector property for $\xi_F$ in the third term gives $-\nabla_c\xi_F^e\, \xi_{Fe} \Omega^{ca} \xi_{Ga}$ = $-(1/2)\nabla_c[\xi_F^e \xi_{Fe}]\Omega^{ca}J_a^{~b} \nabla_b(G)$, which using $\Delta F=\xi_F^e\xi_{Fe}$ reduces to $(1/2)\nabla_c[\Delta F] \nabla^c (G)$.
\break \bibitem{[7]} This conclusion readily generalizes to the stochastic equation $dx^a=[2 \Omega^{ab}\nabla_b(H)-{1\over 4}\sigma^2 \sum_j \nabla^aV_j]dt +\sigma\sum_j \nabla^a(H_j) dW_t^j~~~,$ with the $H_j$ a set of mutually commuting operators that commute with H, with $V_j =(H_j^2)-(H_j)^2$, and with the $dW_t^j$ independent Wiener processes obeying $dW_t^j dW_t^k=\delta^{jk}dt$~. \bibitem{[8]} An alternative way to see this is to use the identity $\log \overline z \cdot z =\log \prod_{\ell} \overline z_{\ell} \cdot z_{\ell} =$ $ \sum_{\ell} \log \overline z_{\ell}\cdot z_{\ell}$ in Eq.~(3b), along with a change of variable from $z$ to the $z_{\ell}$'s.
\break \bibitem{[9]} See L. P. Hughston, Ref. [3], Sec. 11 and earlier work of Di\'osi, Ghirardi et. al., and Penrose cited there; also D. I. Fivel, preprint quant-ph/9710042.
\break \bibitem{[10]} Atomic physics tests for nonlinearities in quantum mechanics have been surveyed by J. J. Bollinger, D. J. Heinzen, W. M. Itano, S. L. Gilbert, and D. J. Wineland, in J. C. Zorn and R. R. Lewis, eds., Proceedings of the 12th International Conference on Atomic Physics, Amer. Inst. of Phys. Press, New York, 1991, p. 461. In Hughston's equation, the parameter $\epsilon$ characterizing the nonlinearities is of order $\epsilon \sim \sigma^2 [\Delta H]^2$. For a two level system with ``clock'' transition energy $E_c$, one has $[\Delta H]^2 \sim E_c^2$, so for $\sigma^2 \sim M_{\rm Planck}^{-1}$, one estimates $\epsilon \sim E_c^2/M_{\rm Planck}$. For the $^9Be$ transition studied by Bollinger et. al., this gives a predicted
$\epsilon \sim 10^{-46}$ MeV, as compared with the measured bound $|\epsilon| < 2.4 \times 10^{-26}$ MeV. Transitions with smaller $E_c$ values, such as $^{201}Hg$ and $^{21}Ne$, have correspondingly suppressed predictions for $\epsilon$. \bibitem{[11]}
S. L. Adler and A. C. Millard, Nucl. Phys. B {\bf 473}, 199 (1996); see also S. L. Adler and A. Kempf, J. Math. Phys. {\bf 39}, 5083 (1998). \end{references}
\end{document} |
\begin{document}
\preprint{APS/123-QED}
\title{Robust one-sided self-testing of two-qubit states via quantum steering}
\author{Yukun Wang }
\affiliation{Beijing Key Laboratory of Petroleum Data Mining, China University of Petroleum, Beijing 102249, China} \affiliation{State Key Laboratory of Cryptology, P.O. Box 5159, Beijing, 100878, China}
\author{Xinjian Liu}
\affiliation{Beijing Key Laboratory of Petroleum Data Mining, China University of Petroleum, Beijing 102249, China}
\author{Shaoxuan Wang}
\affiliation{Beijing Key Laboratory of Petroleum Data Mining, China University of Petroleum, Beijing 102249, China}
\author{Haoying Zhang}
\affiliation{Beijing Key Laboratory of Petroleum Data Mining, China University of Petroleum, Beijing 102249, China}
\author{Yunguang Han} \email{[email protected]} \affiliation{College of Computer Science and Technology, Nanjing University of Aeronautics and Astronautics, Nanjing 211106, China}
\date{\today}
\begin{abstract} Entangled two-qubit states are the core building blocks for constructing quantum communication networks. Their accurate verification is crucial to the functioning of the networks, especially for untrusted networks. In this work we study the self-testing of two-qubit entangled states via steering inequalities, with robustness analysis against noise. More precisely, steering inequalities are constructed from the tilted Clauser-Horne-Shimony-Holt inequality and its general form to verify general two-qubit entangled states. The study provides a good robustness bound, using both the local extraction map method and a numerical semidefinite-programming method. In particular, optimal local extraction maps are constructed in the analytical method, which yield the theoretically optimal robustness bound. To further improve the robustness of one-sided self-testing, we propose a family of three-measurement-setting steering inequalities. The results show that the three-setting steering inequalities demonstrate an advantage over the two-setting ones for robust self-testing in the presence of noise. Moreover, to construct a practical verification protocol, we clarify the sample efficiency of our protocols in the one-sided device-independent scenario.
\end{abstract}
\maketitle
\section{Introduction}
Quantum entangled states are the key resource of quantum information technologies, such as quantum networks \cite{Kimble2008}, cryptography \cite{Xu2020}, computation \cite{Campbell2017}, and metrology \cite{Giovannetti2011}. As we advance towards the second quantum revolution \cite{Deutsch2020}, the characterization and certification of quantum devices become an extremely important topic in the practical applications of quantum technologies \cite{Eisert2020,Kliesch2021}.
To ensure the proper functioning of a quantum network, it is essential to certify the entangled state deployed in the network accurately and efficiently. Besides the traditional quantum state tomography method, various methods have been proposed to improve the efficiency and apply to different scenarios, such as direct fidelity estimation \cite{Flammia2011}, compressed sensing tomography \cite{Gross2010}, and shadow tomography \cite{Huang2020}. In the last few years, quantum state verification (QSV) has attracted much attention by achieving remarkably high sample efficiency \cite{Pallister2018,Zhu2019}. One drawback of the quantum state verification method is that it requires perfect characterization of the measurements performed by the quantum devices; thus it is device dependent and not applicable to untrusted quantum networks. Self-testing \cite{Supic2020,Mayers2004} is a prominent candidate for quantum state certification in the device-independent (DI) scenario, in which all quantum devices are treated as black boxes. Taking advantage of Bell nonlocality \cite{Nonlocality2014}, many important results on self-testing have been achieved, such as self-testing of various quantum entangled states \cite{McKague2012,Yang2014,Coladangelo2017}, self-testing of entangled quantum measurements \cite{Renou2018,Bancal2018}, and parallel self-testing \cite{Reichardt2013,Wu2016}. Self-testing has wide applications in device-independent quantum information tasks, such as device-independent quantum random number generation \cite{Pironio2010,Liu2018} and quantum key distribution \cite{Acin2007,Vazirani2014}.
Lying between standard QSV and self-testing, there is the semi-device-independent (SDI) scenario \cite{Shrotriya2021}, in which some parties are honest while some others may be dishonest. The certification in this scenario can be called SDI self-testing or SDI state verification. This scenario has wide applications in quantum information processing, such as one-sided device-independent (1SDI) quantum key distribution \cite{Branciard2012}, quantum random number generation \cite{Passaro2015}, verifiable quantum computation \cite{Gheorghiu2017}, and anonymous communication \cite{Unnikrishnan2019,Hahn2020,Wang2022}. Meanwhile, the certification in the SDI scenario is closely related to the foundational studies on quantum steering in untrusted quantum networks \cite{Uola2020,Wiseman2007,Saunders2010,Cavalcanti2015a}. However, not much is known about quantum certification in the SDI scenario despite its significance. In \cite{Gheorghiu2017,Supic2016}, the authors studied the one-sided self-testing of the maximally entangled two-qubit state based on a 2-setting quantum steering inequality. In \cite{Han2021}, the authors proposed various verification protocols for the Bell state based on multiple settings. For nonmaximally entangled two-qubit states, the authors in \cite{Goswami2018} realized the one-sided certification by combining a fine-grained inequality \cite{Pramanik2014} and analog CHSH inequalities \cite{Cavalcanti2015}, which is more complicated compared with traditional self-testing. In \cite{Shrotriya2021}, the authors proposed a tilted steering inequality analogous to the tilted-CHSH inequality \cite{Acin2012} for one-sided self-testing of two-qubit states. They then generalized the one-sided certification to general pure bipartite states by adopting the subspace method of the DI scenario \cite{Coladangelo2017}. {In Ref. \cite{Sarkar2021}, a class of steering inequalities concentrating on nonmaximally entangled bipartite qudit states was constructed, achieving self-testing of such states by performing only two measurements, while in Ref. \cite{Skrzypczyk2018}, steering inequalities with $d+1$ measurement settings are used for self-testing the same states.} However, the robustness analysis in these works, as far as we are aware, follows the norm inequalities method of \cite{McKague2012,Supic2016}, and thus the result is quite weak. For the multipartite case, the studies of SDI certification are mainly focused on Greenberger-Horne-Zeilinger (GHZ) states as the generalization of the Bell state \cite{Pappa2012,McCutcheon2016,Han2021}.
In this paper, we focus on the robust one-sided self-testing of two-qubit entangled states. We construct two types of 2-setting steering inequalities for general two-qubit entangled states based on the tilted-CHSH inequality and its general form. For the first type, an analytical and optimal robustness bound is obtained using the local extraction channel method introduced in \cite{Kaniewski2016}. For the second type, we obtain a nearly linear robustness bound using a numerical method based on the swap trick \cite{Yang2014} and semidefinite programming (SDP). To put our work in perspective, we compare the robustness results in the 1SDI scenario with those in both the DI and the device-dependent scenarios. Our results can be applied to the certification of high-dimensional quantum devices as building blocks.
Furthermore, we construct three-measurement-setting steering inequalities for general two-qubit states, which goes beyond the conventional one-sided self-testing based on two settings. In \cite{Han2021}, the authors studied the optimal verification of the Bell state and GHZ states in the 1SDI scenario using multiple measurement settings. However, their study is limited to the maximally entangled state in the bipartite case. Based on the 3-setting steering inequalities, it is shown that the robustness bound can be further improved. This opens the question of how much the resistance to noise can be improved using multiple measurement settings. Finally, to construct a practical verification protocol, we clarify the sample efficiency of our protocols in the 1SDI scenario. It is shown that approximately optimal sample efficiency can be obtained based on the steering inequalities we constructed.
\section{Preliminary}
\subsection{\label{sec:steeringscenario} Steering scenario and steering inequalities}
Let us start by recalling the steering theory. Two distant parties, Alice and Bob, are considered, and between them are many copies of
state $\rho_{AB}\in H_A\bigotimes H_B$. Bob performs two measurements, labeled by $y$, on his particle and obtains a binary outcome $b$. Meanwhile, Alice receives the corresponding unnormalized conditional states $\rho_{b|y}$, performs measurements chosen randomly and labeled by $x$, and obtains a binary outcome $a$. If Alice cannot explain the assemblage of received states by assuming pre-existing states at her location and some pre-shared random numbers with Bob, she has to conclude that Bob can steer her particle from a distance.
To determine whether Bob can steer her state, Alice asks Bob to run the experiment many times with her. Finally, they obtain the measurement statistics. If the statistics admit the description,
\begin{align}
p(a, b|x, y; \rho_{AB}) = \sum\limits_{\lambda} p(\lambda)p(a|x,\rho_{\lambda})p(b|y,\lambda), \end{align} then Alice knows that Bob cannot steer her state. This non-steerable correlation model is the so-called local hidden variable-local hidden state (LHV-LHS) model \cite{Cavalcanti2015}. The LHV-LHS decomposition is based on the idea that Bob's outcomes are determined by a local hidden variable $\lambda$ and Alice's outcomes are determined by local measurements on a quantum state $\rho_\lambda$.
A combination of the statistics gives a steering inequality, where the LHV-LHS model can be used to establish the local bound; violation of such an inequality implies steering. In Ref. \cite{Saunders2010}, the authors introduced a family of steering inequalities for the Bell state, \begin{align}\label{eq:Bellsteering} S_n\equiv \frac{1}{n}\sum_{k=1}^n \langle\hat{\sigma}_k^A{B_k} \rangle\leq C_n, \end{align}
where $C_n$ is the LHS bound \begin{align}\label{eq:Bellsteeringbound} C_n = \max_{\{A_k\}} \cu{ \lambda_{\rm max} \ro{\frac{1}{n}\sum_{k=1}^n \hat{\sigma}_k^A{B_k}}}, \end{align} with $\lambda_{\rm max}(\hat O)$ denoting the largest eigenvalue of $\hat O$.
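For example, for $n=2$ with the orthogonal Pauli observables $\hat{\sigma}_1^A=\sigma_z$ and $\hat{\sigma}_2^A=\sigma_x$ on the trusted side, the operator $\frac{1}{2}(\pm\sigma_z\pm\sigma_x)$ has largest eigenvalue $1/\sqrt{2}$ for every sign choice, so the LHS bound is $C_2=1/\sqrt{2}$, while the Bell state, with Bob measuring the corresponding Pauli observables, attains the quantum value $S_2=1$.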
One approach to constructing this family of steering inequalities is to transform Bell inequalities. Bell states are shown to maximally violate the analog CHSH inequality \cite{Cavalcanti2015,Supic2016,Gheorghiu2017}. For partially entangled two-qubit states, the authors in Ref. \cite{Shrotriya2021} constructed tilted steering inequalities from tilted-CHSH inequalities \cite{Acin2012}. In this paper, we study the more general construction of tilted steering inequalities from tilted-CHSH inequalities and study the robustness of one-sided self-testing based on these analog steering inequalities. Furthermore, we consider the construction of three-measurement-setting steering inequalities for general two-qubit states.
\subsection{\label{sec:channel} SDI certification and local extraction channel} In this paper, we focus on one-sided self-testing two-qubit entangled state based on the steering inequalities. To this end, we first review the concept of self-testing.
Self-testing was originally known as DI state verification, where some observed statistics $p(a,b|x,y)$ from quantum devices can determine uniquely the underlying quantum state and the measurements, up to a local isometry. As an example, the maximal violation of the CHSH inequality uniquely identifies the maximally entangled two-qubit state \cite{Mayers2004, McKague2012}. Usually, self-testing relies on observed extremal correlations: if the quantum systems that achieve the extremal correlations are unique up to local isometries, we say the extremal correlations $p(a,b|x,y)$ self-test the target system $\{|\Bar{\psi}\rangle,\Bar{M}_{a|x},\Bar{N}_{b|y}\}$. Denoting the local isometry as $\Phi=\Phi_{AA'}\otimes\Phi_{BB'}$, self-testing can be formally defined as \begin{equation}
\begin{aligned}
\Phi|\psi\rangle_{AB}{|00\rangle}_{A'B'}&=|\text{junk}\rangle_{AB}|\Bar{\psi}\rangle_{A'B'} \\
\Phi M_{a|x}\otimes N_{b|y}|\psi\rangle_{AB}{|00\rangle}_{A'B'}&=|\text{junk}\rangle_{AB}\Bar{M}_{a|x}\otimes \Bar{N}_{b|y}|\Bar{\psi}\rangle_{A'B'}
\end{aligned} \end{equation}
Coming to the 1SDI scenario, only the existence of an isometry $\Phi_B$ on Bob's side is required \begin{equation} \label{isometry}
\begin{aligned}
\Phi |\psi\rangle_{AB}{|0\rangle}_{B'}&=|\text{junk}\rangle_B\otimes|\Bar\psi\rangle_{AB'} \\
\Phi M_{b|y}|\psi\rangle_{AB}{|0\rangle}_{B'}&=|\text{junk}\rangle_B\otimes \Bar{M}_{b|y}|\Bar\psi\rangle_{AB'}
\end{aligned} \end{equation}
where $M_{b|y}$ acts on $\mathcal{H}_{B}$; $\Bar{M}_{b|y}$ acts on $\mathcal{H}_{B'}$.
In addition to the above ideal definition of self-testing, it is essential to study the robustness of self-testing in the imperfect case, when the obtained data deviate from the ideal values. There are two frameworks for the robustness analysis of self-testing. The first approach is based on the swap method, introducing an ancilla system. The desired state can be swapped out of the real quantum system, and then one can calculate how far it is from the target state. One way to calculate this closeness is the analytic method involving mathematical inequality techniques first proposed in \cite{McKague2012}. The second one is the numerical method based on semidefinite programming combined with the NPA hierarchy \cite{Miguel2008}. Usually, the numerical method gives much better robustness.
The second approach is based on operator inequalities, first introduced in Ref. \cite{Kaniewski2016}, which is now widely used in the robustness analysis of self-testing. For self-testing the Bell state using the CHSH inequality and the GHZ state using the Mermin inequality, the operator inequalities give nearly optimal bounds. Robustness analysis of self-testing with operator inequalities can resort to the \textit{local extraction map}, which hinges on the idea that local measurements can be used to virtually construct a local extraction channel that extracts the desired state from the real quantum system. The local \textit{extractability} of the target $\psi_{AB}$ from $\rho_{AB}$ is quantified as
\begin{equation}\label{eq:extractability}\Xi(\rho_{AB}\rightarrow\psi_{AB}):=\max\limits_{\Lambda_A,\Lambda_B}F((\Lambda_A\otimes\Lambda_B)(\rho_{AB}),\psi_{AB}),
\end{equation} where the maximum is taken over all possible local channels constructed with local measurements. For the 1SDI scenario, Alice's side is trusted, thus the extraction channel in Alice's side is $\Lambda_A= I_A$. The lower bound of the fidelity between $\rho$ and the target state under the observed steering inequality can be defined as \textit{one-sided extractability}
\begin{equation}\label{eq:fidelity}F(\rho_{AB},\psi_{AB}):=\inf\limits_{\rho_{AB}:S(\rho)\geq S_{obs}}\max\limits_{\Lambda_B}F(\Lambda_B(\rho_{AB}),\psi_{AB}),
\end{equation}
where $S(\cdot)$ is the steering expression and $S_{obs}$ is the observed violation. To derive a linear bound on the fidelity in terms of the observed steering inequality violation, real parameters $s$ and $\tau$ are required such that $F\geq s\cdot S_{obs}+\tau$. This is equivalent to finding $\Lambda_B$ (constructed from Bob's local measurement operators $M_y^b$) such that \begin{equation}\label{eq:steering ineq}K\geq s S+\tau \mathbb{I}\end{equation} where $K:=(I_A\otimes\Lambda^+_B)(\psi_{AB})$ and $\Lambda^+$ refers to the dual channel of the quantum channel $\Lambda$. By taking the trace with the input state $\rho_{AB}$ on both sides of Eq. \eqref{eq:steering ineq}, one gets $F\geq s\cdot S_{obs}+\tau$, in view of $\langle\Lambda_B^+(\psi_{AB}),\rho_{AB}\rangle =\langle \psi_{AB},\Lambda_B(\rho_{AB})\rangle$.
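Spelled out, with $\psi_{AB}$ denoting the projector onto the target state, this trace step reads
\begin{align*}
F(\Lambda_B(\rho_{AB}),\psi_{AB})=\mathrm{Tr}\big[\psi_{AB}\,\Lambda_B(\rho_{AB})\big]=\mathrm{Tr}\big[K\rho_{AB}\big]\geq \mathrm{Tr}\big[(s S+\tau \mathbb{I})\rho_{AB}\big]=s\, S_{obs}+\tau.
\end{align*}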
In the 1SDI scenario, Bob's side is untrusted, thus Eq. \eqref{eq:steering ineq} is required to hold with Alice's system being two-dimensional and Bob's system being of arbitrary dimension. Since the measurements we consider in this paper are dichotomic, it is sufficient to work in a qubit space on Bob's side.
\section{\label{sec:TCHSHsteering}One-sided self-testing based on 2-setting steering inequalities} In the device-independent scenario, the general pure entangled two-qubit state
\ket{\Phi}=\cos \theta \ket{00}+\sin \theta \ket{11}, \end{align} has been proven to be self-testable \cite{Bamps2015,Coopmans2019} via the maximal violation of the tilted-CHSH inequalities \cite{Acin2012}, which can be parametrized as \begin{align}\label{eq:TCHSH}
\Hat{I_\alpha}=\alpha A_0+ A_0B_0+ A_0B_1+A_1B_0-A_1B_1 \leq \alpha + 2, \end{align} where $\sin{2\theta}=\sqrt{\frac{4-\alpha^2}{4+\alpha^2}}$. The maximum quantum value is $\sqrt{8+2\alpha^2}$. The quantum measurements used to achieve the maximal quantum violation are: $\{\sigma_z;\sigma_x\}$ for Alcie, and $\{\cos\mu\sigma_z+\sin\mu\sigma_x\nonumber;\cos\mu\sigma_z-\sin\mu\sigma_x\}$ for Bob, where $\tan \mu=\sin 2\theta$ and $\sigma_{x,z}$ are Pauli $X,Z$ measurements.
When $\alpha=0$, it corresponds to the CHSH inequality and the state can be self-tested as the Bell state. The self-testing criterion based on these tilted-CHSH inequalities is robust against noise. The best robustness bound to date can be found in \cite{Kaniewski2016,Coopmans2019}, in which the authors introduced the local extraction channel method. However, as claimed in \cite{Kaniewski2016}, the theoretically optimal bound is not achievable there. Theoretically, the optimal bound is the one tied to the maximum classical value, at which nontrivial fidelity begins to be certified; the nontrivial fidelity that demonstrates entanglement for the target state is $F>\cos^2 \theta$. The authors guessed that this might be related to the fact that the quantum value of the CHSH inequality does not reach its algebraic limit of 4. Here, in the 1SDI scenario, we will show that the theoretically optimal bound can be achieved.
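In our notation, this theoretically optimal linear bound is the line that takes the trivial value $\cos^2\theta$ at the classical bound $\alpha+2$ and reaches $1$ at the maximal quantum violation $\sqrt{8+2\alpha^2}$, i.e.,
\begin{align*}
F\geq \cos^2\theta+\sin^2\theta\,\frac{S_{obs}-(\alpha+2)}{\sqrt{8+2\alpha^2}-(\alpha+2)}.
\end{align*}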
To achieve 1SDI self-testing criteria, we will construct two types of 2-setting steering inequalities, which are based on the above tilted-CHSH inequality by taking the measurements on Alice's side as trusted.
\subsection{\label{subsec:TCHSHsteering}One-sided self-testing based on standard tilted-CHSH steering inequality} Taking the measurements on Alice's side as trusted, the standard tilted-CHSH inequality in Eq. \eqref{eq:TCHSH} can be transformed to the analog of tilted-CHSH steering inequality \begin{align}\label{eq:TCHSHsteering}
\Hat{S}_\alpha &=\alpha A_0+ A_0B_0+ A_0B_1+A_1B_0-A_1B_1 \nonumber\\
&=\alpha {Z}+{Z(B_0+B_1)}+{X(B_0-B_1)} \nonumber \\
& \leq \alpha + 2, \end{align} which maintains the maximum quantum violation $S_\alpha^{Q}=\sqrt{8+2\alpha^2}$ as in the DI scenario. {We prove that partially entangled two-qubit states can be self-tested using this analog tilted-CHSH steering inequality in a 1SDI manner. The proof is similar to DI self-testing using the tilted-CHSH inequality, except that we can now trust Alice's measurements. The trustworthiness of Alice's side simplifies the proof, which is one advantage. Another advantage is that the theoretically optimal robustness bound can be obtained in the 1SDI scenario with this steering inequality; by contrast, the optimal bound cannot be achieved in DI self-testing with the tilted-CHSH inequality. In the following, we will show both the analytical proof and the robustness analysis.}
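For completeness, the LHS bound $\alpha+2$ in Eq. \eqref{eq:TCHSHsteering} can be verified as follows: in an LHV-LHS model, the value of $\Hat{S}_\alpha$ is a convex combination over $\lambda$ of terms $\alpha\langle Z\rangle_{\lambda}+\langle Z\rangle_{\lambda}(b_0+b_1)+\langle X\rangle_{\lambda}(b_0-b_1)$ with $b_0,b_1\in[-1,1]$, where $\langle Z\rangle_{\lambda}$ and $\langle X\rangle_{\lambda}$ are expectations in Alice's local hidden state $\rho_\lambda$; each term is maximized at deterministic $b_0,b_1\in\{\pm1\}$. Since $(b_0+b_1,b_0-b_1)\in\{(\pm2,0),(0,\pm2)\}$ in that case and $\langle Z\rangle_{\lambda}^2+\langle X\rangle_{\lambda}^2\leq1$ for a qubit state, every term is bounded by $\max\{\alpha+2,\sqrt{\alpha^2+4}\}=\alpha+2$ for $\alpha\geq0$.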
\paragraph{self-testing based on analog tilted-CHSH steering inequality}
{We provide the simple proof here. Although Alice's side is trusted, by definition only the existence of an isometry on Bob's side is needed to determine uniquely the state and the measurements. However, for simplicity, we also introduce an isometry on Alice's side, as is widely done in the DI scenario, shown in Fig. \ref{fig:1sdiSwap}. As shown below, with the sum of squares decomposition of a positive semidefinite matrix \cite{Peyrl2008}, it is easy to find the algebraic relations that are necessarily satisfied by the target quantum state and measurements to complete the proof. }
After the isometry, the systems will be \begin{align}\label{eq:isometry}
\Phi(\ket{\psi}) &=\frac{1}{4}[(I+Z_A)(I+\tilde{Z}_B)\ket{\psi}\ket{00} \nonumber\\
&+X_A(I+Z_A)(I-\tilde{Z}_B)\ket{\psi}\ket{01} \nonumber \\
&+\tilde{X}_B(I-Z_A)(I+\tilde{Z}_B)\ket{\psi}\ket{10}\nonumber \\
&+X_A\tilde{X}_B(I-Z_A)(I-\tilde{Z}_B)\ket{\psi}\ket{11}] \end{align} To show that the underlying state $\ket{\psi}$ is equivalent to the target one, the algebraic relations between the operators acting on the state should be given. { We notice that the analog tilted-CHSH steering inequality $ \Hat{S}_\alpha$ has the maximum quantum value
$ S_\alpha^{Q}$. This implies that the operator $\widehat{\mathcal{S}}_{\alpha}:= S_\alpha^{Q}\mathbb{I}- \Hat{S}_\alpha $ should be positive semidefinite (PSD) for all possible quantum states and measurement operators on Bob's side. This can be proven by providing a set of operators $\{P_i\}$, which are polynomial functions of $A_x$ ($Z_A, X_A$) and $B_y$, such that $ \widehat{\mathcal{S}}_{\alpha}=\sum_i P^\dagger_i P_i $ holds for any set of measurement operators satisfying the algebraic properties $A^2_x=\mathbb{I}$, $B^2_y=\mathbb{I}$. The decomposition $\widehat{\mathcal{S}}_{\alpha}=\sum_i P^\dagger_i P_i$ is called a \textit{sum of squares} (SOS). An SOS decomposition provides a direct certificate that the quantum upper bound of $\Hat{S}_\alpha$ is $S_\alpha^{Q}$, as well as some relations between the operators and the states, which will be used to give the self-testing statement. This method was first introduced in \cite{Bamps2015} for the family of CHSH-like Bell inequalities. Given SOS decompositions, if one observes the maximal quantum violation of the steering inequality (the CHSH-like one) under a state $\ket{\psi}$, then each squared term in the SOS decompositions acting on $\ket{\psi}$ must vanish, i.e., $P_i\ket{\psi}=0$. Then useful relations for the measurement operators acting on the underlying state can be obtained from these zero terms.}
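Explicitly, if the maximal violation $\langle\psi|\Hat{S}_\alpha|\psi\rangle=S_\alpha^{Q}$ is observed, then
\begin{align*}
0=\langle\psi|\widehat{\mathcal{S}}_{\alpha}|\psi\rangle=\sum_i \big\|P_i\ket{\psi}\big\|^2,
\end{align*}
so that indeed $P_i\ket{\psi}=0$ for every $i$.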
{Similar to CHSH inequality scenario, two types of SOS decompositions for analog tilted-CHSH operator in Eq. \eqref{eq:TCHSHsteering} can be given. The first one is \begin{align}\label{SOS1} \widehat{\mathcal{S}}_{\alpha} =&\frac{1}{2\mathcal{S}^Q_{\alpha}}\{ \widehat{\mathcal{S}}_{\alpha}^2+( \alpha X_A-S_0)^2\} \end{align} And the second one is \begin{align}\label{SOS2} \widehat{\mathcal{S}}_{\alpha} =&\frac{1}{2 \mathcal{S}^Q_{\alpha}} \big\{(2Z_A-\mathcal{S}^Q_{\alpha}\frac{B_0+B_1}{2}+\frac{\alpha}{2}S_1)^2\nonumber\\ &+(2X_A-\mathcal{S}^Q_{\alpha}\frac{B_0-B_1}{2}+\frac{\alpha}{2}S_2)^2\big\} \end{align}
where \begin{align} S_0&= Z_A(B_0-B_1)+X_A(B_0+B_1),\nonumber\\ S_1&=Z_A(B_0+B_1)-X_A(B_0-B_1),\\ S_2&=Z_A(B_0-B_1)- X_A(B_0+B_1).\nonumber \end{align} Based on the maximal violation of analog tilted-CHSH inequality, the existence of the SOS decomposition for $\widehat{\mathcal{S}}_{\alpha}$ implies : \begin{align}
Z_A|\psi\rangle-\tilde{Z}_B|\psi\rangle=0,\label{relation1}\\
\sin(\theta)X_A(I+\tilde{Z}_B)|\psi\rangle-\cos(\theta)\tilde{X}_B(I-Z_A)|\psi\rangle=0 \label{relation2} \end{align} where $\tilde{Z}_B:=\frac{B_0+B_1}{2\cos \mu}$, and $\tilde{X}_B:=\frac{B_0-B_1}{2\sin \mu}$. Then with the algebraic relation of (\ref{relation1})-(\ref{relation2}) and the fact that $Z_AX_A=-X_AZ_A$, the equation in Eq. (\ref{eq:isometry}) can be rewritten to \[\Phi(\ket{\psi})=\ket{\text{junk}}[\cos\theta\ket{00}+\sin\theta\ket{11}]\] where $\ket{\text{junk}}=\frac{1}{2\cos\theta}(I+Z_A)\ket{\psi}$. This means the underlying state are unique to the target one up to local isometries, thus completes the self-testing statement.}
\begin{figure}
\caption{
The SWAP isometry applied on Alice and Bob's side, where the operators $Z_A$ and $X_A$ are exactly the Pauli $Z,X$ operators.
}
\label{fig:1sdiSwap}
\end{figure}
\paragraph{self-testing robustness}
{ Here we mainly focus on the self-testing of quantum states. For the self-testing of quantum measurements, the analysis can be related to quantum states according to Ref. \cite{Yang2014}. The procedure is similar, starting with $ \Phi M_B(\ket{\psi})$ instead of $ \Phi(\ket{\psi})$. In this case, the figure of merit should quantify how $ M_B\ket{\psi}$ is close to the ideal measurements acting on the target state.}
As introduced in Sec. \ref{sec:channel}, to obtain a better self-testing robustness bound for the state we need to find the smallest value of $s$ that keeps $K-s\Hat{S}-\tau \mathbb{I}$ PSD. To this end, we first give the spectral decomposition of $\Hat{S}_\alpha$. Without loss of generality, we write Bob's measurements as \begin{align} B_r=\cos \mu \sigma_z +(-1)^r \sin \mu \sigma_x, \end{align} with $r \in \{0,1\}$ and $\mu \in [0,\pi/2]$. Then the spectral decomposition of $\Hat{S}_\alpha$ is \begin{align}\label{eq:CHSHsDecompose}
\Hat{S}_{\alpha}=\sum \lambda_i |\psi_i\rangle \langle\psi_i|,\;\; i=1,2,3,4 \end{align} with $\lambda_1^2+\lambda_2^2=8+2\alpha^2,\lambda_3=-\lambda_2,\lambda_4=-\lambda_1$.
According to different value ranges of $\mu$, the following two cases are discussed.
\textbf{Case 1:} $\cos2\mu\geq\frac{\alpha^2}{4}$ or equivalently $\mu \in [0, \arcsin \sqrt{\frac{4-\alpha^2}{8}}]$.
The eigenvalues of $\Hat{S}_{\alpha}$ have the form, \[ \lambda_{1/2}=\pm\sqrt{\alpha^2+4\sin^2\mu}+2\cos\mu. \] The eigenvectors and the constraints for $\gamma$ and $\mu$ are \[ \left\{
\begin{array}{ll} |\psi_1\rangle=\cos\gamma|00\rangle +\sin\gamma|11\rangle;\\
|\psi_2\rangle=\sin\gamma|00\rangle -\cos\gamma|11\rangle;\\
|\psi_3\rangle=\cos\gamma|01\rangle +\sin\gamma|10\rangle;\\
|\psi_4\rangle=-\sin\gamma|01\rangle +\cos\gamma|10\rangle.\\
\lambda_1 \cos^2\gamma +\lambda_2 \sin^2\gamma=\alpha+2\cos\mu\\
\lambda_2 \cos^2\gamma +\lambda_1 \sin^2\gamma=-\alpha+2\cos\mu\\ (\lambda_1-\lambda_2)\cos\gamma\sin\gamma=2\sin\mu \end{array}
\right. \] with $\sin2\gamma=\frac{2\sin\mu}{\sqrt{\alpha^2+4\sin^2\mu}}$.
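These spectral properties can be checked numerically. The sketch below is our own illustration and assumes (as reconstructed from the eigenvalues above) the explicit two-qubit form $\Hat{S}_\alpha=\alpha Z\otimes I+Z\otimes(B_0+B_1)+X\otimes(B_0-B_1)$ for the steering operator, with Bob's measurements parametrized as above.
\begin{verbatim}
import numpy as np

# Sketch: spectral check of the steering operator in Case 1, assuming the
# two-qubit form S_alpha = alpha*Z(x)I + Z(x)(B0+B1) + X(x)(B0-B1) with
# B_r = cos(mu)*sz + (-1)^r sin(mu)*sx (our reconstruction of the operator).
I2 = np.eye(2)
sz = np.diag([1.0, -1.0])
sx = np.array([[0.0, 1.0], [1.0, 0.0]])

alpha, mu = 0.5, 0.3          # Case 1: cos(2*mu) >= alpha^2/4
B0 = np.cos(mu) * sz + np.sin(mu) * sx
B1 = np.cos(mu) * sz - np.sin(mu) * sx

S = alpha * np.kron(sz, I2) + np.kron(sz, B0 + B1) + np.kron(sx, B0 - B1)
lam = np.sort(np.linalg.eigvalsh(S))[::-1]    # eigenvalues, descending

lam12 = np.array([ np.sqrt(alpha**2 + 4*np.sin(mu)**2) + 2*np.cos(mu),
                  -np.sqrt(alpha**2 + 4*np.sin(mu)**2) + 2*np.cos(mu)])
print(np.allclose(lam[:2], lam12))                        # stated lambda_{1/2}
print(np.isclose(lam[0]**2 + lam[1]**2, 8 + 2*alpha**2))  # lambda1^2+lambda2^2
\end{verbatim}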
To obtain the optimal robustness bound, we consider the following local extraction channel on Bob's side: with the probability of $q_1$, he performs the identity operation $I$ on his qubit; with the probability of $q_2$, he performs $\sigma_z$ on his qubit. By this local extraction channel, the ideal state is transformed into
$K=q_1|\psi\rangle\langle\psi|+q_2\sigma_z|\psi\rangle\langle\psi|\sigma_z$. Denote $K-s\Hat{S}_{\alpha}-\tau \mathbb{I}$ by $G$. The PSD condition on $G$ requires all of its eigenvalues to be non-negative, which implies
\begin{equation}\label{qicondition}
\frac{2\sin\mu s-C}{2\cos\theta\sin\theta}+\frac{1}{2} \leq q_1\leq \frac{2\sin\mu s+C}{2\cos\theta\sin\theta}+\frac{1}{2}, \end{equation}
where
\[\begin{array}{ll}
C=&\sqrt{\cos^2\theta+(\beta_Q-(\alpha+2\cos\mu))s-1}\\
&\cdot \sqrt{\sin^2\theta+(\beta_Q-(-\alpha+2\cos\mu))s-1}\\
\end{array} \] and $\beta_Q=\sqrt{8+2\alpha^2}$.
We can choose $q_1$ within the suitable range to saturate its upper bound, which makes $G$ PSD. Meanwhile, the smallest value of $s$ is obtained as \begin{equation}\label{s-value}
s=\frac{1-\cos^2\theta}{\beta_{Q}-(2+\alpha)}, \end{equation} and the corresponding value of $\tau$ is \begin{equation}\label{tau-value}
\tau=1-\sqrt{8+2\alpha^2}s, \end{equation} which exactly equals the theoretical optimal value. Thus, using the given extraction channel, we obtain the optimal robustness bound of self-testing based on the analog tilted-CHSH steering inequality in the 1SDI scenario: \begin{equation}\label{robuststeering} \begin{split}
F&=({\beta-\sqrt{8+2\alpha^2}})s+1\\ &=({\beta-\sqrt{8+2\alpha^2}})\frac{1-\frac{\sqrt{2}\alpha}{\sqrt{4+\alpha^2}}}{2\sqrt{8+2\alpha^2}-(4+2\alpha)}+1 \end{split} \end{equation}
for observed violation $\beta$.
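For instance, for $\alpha=0$ the target state is the singlet ($\theta=\pi/4$, $\beta_Q=2\sqrt{2}$), and Eq. \eqref{robuststeering} reduces to
\begin{align}
F \geq \frac{\beta-2\sqrt{2}}{4\sqrt{2}-4}+1,
\end{align}
which equals $1$ at the maximal violation $\beta=2\sqrt{2}$ and $1/2$ at the classical bound $\beta=2$, so any violation certifies a nontrivial fidelity.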
\textbf{Case 2:} $0\leq\cos2\mu\leq\frac{\alpha^2}{4}$ or equivalently $\mu \in ( \arcsin \sqrt{\frac{4-\alpha^2}{8}} , \frac{\pi}{4}]$.
The local extraction channel in this case is: Bob performs the identity operation $I$ with probability $q_1$, and performs $\sigma_x$ with probability $q_2$. Then the ideal state is transformed into
$K=q_1|\psi\rangle\langle\psi|+q_2\sigma_x|\psi\rangle\langle\psi|\sigma_x$. The PSD condition $G:=K-s\Hat{S}_{\alpha}-\tau \mathbb{I}\geq 0$ gives $q_1=$ \begin{equation*} \text{max}\Big\{0,\frac{4\sin^2\mu \cdot s^2+(C_1s+\tau)(C_2s-\tau)}{(\beta_Q+2\sin 2\theta \sin\mu+\cos^2\theta C_2-\sin^2\theta C_1)s-1}\Big\} \end{equation*} where $\beta_Q=\sqrt{8+2\alpha^2}$. Meanwhile it gives $s=\frac{1-\cos^2\theta}{\beta_{Q}-(2+\alpha)}$ and $\tau=1-\sqrt{8+2\alpha^2}s$, which turn out to yield the same robustness bound as in Case 1. See Appendix \ref{appendixA} for the details.
In conclusion, the theoretically optimal linear robustness bound can be obtained for self-testing of two-qubit entangled states using the analog tilted-CHSH steering inequality. In contrast to self-testing in the DI scenario, the theoretically optimal robustness bound is attainable here with the local extraction channel method. The reason might be that in the steering scenario an extraction channel is needed only on one side, so no coordination between the parties is required.
\textbf{\textit{Comparison with DI and DD scenario}} To put our work in perspective, we compare the certification in the 1SDI scenario with both DI and device-dependent (DD) scenario.
In the DD scenario, the measurements on both sides are trusted and equal to the ideal measurements. In this case, we have \begin{align}
\Hat{I_\alpha} &=\alpha A_0+ A_0B_0+ A_0B_1+A_1B_0-A_1B_1\\
&=\alpha{Z}+2\cos{\mu}{ZZ}+2\sin{\mu}{XX} \end{align} where $\sin{2\theta}=\sqrt{\frac{4-\alpha^2}{4+\alpha^2}}$ and $\tan{\mu}=\sin{2\theta}$. It could be shown that \begin{align} \dyad{\Psi} \geq \frac{\Hat{I_\alpha}}{\sqrt{8+2\alpha^2}}. \end{align}
Thus in trusted measurement scenario, we have the lower bound of the fidelity \begin{equation}
F_{\text{DD}} \geq \frac{\beta}{\sqrt{8+2\alpha^2}}. \end{equation}
In the DI scenario, the authors in \cite{Coopmans2019} conjectured the lower bound of fidelity \begin{equation}
F_{\text{DI}} \geq s_{\alpha}\beta+\mu_{\alpha}, \end{equation} with \begin{align} s_{\alpha} &= \frac{ \big( \sqrt{ 8 + 2 \alpha^{2} } + 2 + \alpha \big) \big( 3 \sqrt{ 8 + 2 \alpha^{2} } - \sqrt{ 4 - \alpha^{2} } - \alpha \sqrt{ 2 } \big) }{4 ( 2 - \alpha )^2 \sqrt{ 8 + 2 \alpha^{2} }}, \\
\mu_{\alpha} &= 1 - s_{\alpha} \cdot \sqrt{ 8 + 2\alpha^2 }. \end{align}
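As a cross-check of the violation thresholds quoted in the comparison below, the following sketch (our own illustration; the helper variable names are not from any library) evaluates the three bounds against the trivial fidelity $\cos^2\theta$ of the target state.
\begin{verbatim}
import numpy as np

# Cross-check of the violation thresholds at which F_DD, F_1SDI and F_DI
# exceed the trivial fidelity cos^2(theta) (a sketch; names are ours).
for alpha in (0.0, 0.5, 1.0):
    bQ = np.sqrt(8 + 2 * alpha**2)                    # quantum bound
    s2t = np.sqrt((4 - alpha**2) / (4 + alpha**2))    # sin(2*theta)
    c2t = np.sqrt(1 - s2t**2)                         # cos(2*theta)
    cos2, sin2 = (1 + c2t) / 2, (1 - c2t) / 2         # cos^2, sin^2 of theta

    s_1sdi = sin2 / (bQ - (2 + alpha))                # slope of Eq. (robuststeering)
    s_di = ((bQ + 2 + alpha) *
            (3 * bQ - np.sqrt(4 - alpha**2) - alpha * np.sqrt(2))
            / (4 * (2 - alpha)**2 * bQ))              # conjectured DI slope

    thr_dd   = cos2 * bQ                              # F_DD = beta/bQ crosses cos^2
    thr_1sdi = bQ - sin2 / s_1sdi                     # F_1SDI crosses cos^2
    thr_di   = bQ - sin2 / s_di                       # F_DI crosses cos^2
    print(alpha, round(thr_di, 3), round(thr_1sdi, 3), round(thr_dd, 3))
# thresholds (DI, 1SDI, DD) ~ (2.11, 2, 1.41), (2.65, 2.5, 1.96), (3.10, 3, 2.58)
\end{verbatim}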
The comparison with the 1SDI scenario is given in Fig. \ref{comparefig}. The case $\alpha=0$ corresponds to the CHSH inequality, and the target state is the singlet; the other two cases correspond to tilted-CHSH inequalities and partially entangled two-qubit states. In all three cases we have $F_{\text{DD}}>F_{\text{1SDI}}>F_{\text{DI}}$. For $\alpha=0$, the fidelity bound for the singlet is nontrivial above $0.5$: in the DI scenario this requires a quantum value larger than $2.105$, while for the 1SDI and DD scenarios the thresholds are $2$ and $\sqrt{2}$, respectively. For $\alpha=0.5$, the fidelity bound of the target state is nontrivial above $0.672$: the DI scenario requires a quantum value larger than $2.655$, while the 1SDI and DD thresholds are $2.5$ and $1.958$, respectively. For $\alpha=1$, the fidelity bound of the target state is nontrivial above $0.816$: the DI scenario requires a quantum value larger than $3.103$, while the 1SDI and DD thresholds are $3$ and $2.581$, respectively. As $\alpha$ increases, and especially for $\alpha=1$, the 1SDI self-testing bound is much better than the DI one and much closer to the DD one. Thus our method achieves a significant improvement in the 1SDI certification of less entangled two-qubit states, making it comparable to the device-dependent scenario. \begin{figure}
\caption{
The comparison of robustness bounds between the DI (solid yellow line), 1SDI (dotted-dashed red line), and DD (dotted blue line, device-dependent) scenarios for different values of $\alpha$, namely $0, 0.5, 1$ from top to bottom.
}
\label{comparefig}
\end{figure} \subsection{One-sided self-testing based on general tilted-CHSH inequality} In this section, we construct 2-setting steering inequalities from the general tilted-CHSH inequality \cite{Acin2012} \begin{align}\label{eq:TCHSH2} \mathcal{\hat{S}}_{\alpha,\beta}=\alpha A_0+\beta A_0B_0+\beta A_0B_1+A_1B_0-A_1B_1. \end{align} The maximal classical and quantum bounds are $\alpha+2(1+\beta)$ and $\sqrt{(4+\alpha^2)(1+\beta^2)}$, respectively. The quantum bound can be achieved by the pure two-qubit state \eqref{eq:partial} and the corresponding measurement settings $\{\sigma_z;\sigma_x\}$ for Alice and $\{\cos\mu\sigma_z+\sin\mu\sigma_x;\cos\mu\sigma_z-\sin\mu\sigma_x\}$ for Bob, with $\sin2\theta=\sqrt{\frac{4-\alpha^2\beta^2}{4+\alpha^2}}$ and $\tan\mu=\frac{\sin2\theta}{\beta}$.
Taking the measurements on Alice's side as trusted, this Bell inequality can be transformed into
\begin{align}
\Hat{S}_{\alpha,\beta}
&=\alpha {Z}+\beta{Z(B_0+B_1)}+{X(B_0-B_1)} \end{align}
which is a steering inequality. However we can also introduce two other measurements to represent $B_0+B_1$ and $B_0-B_1$, thus rewrite the steering inequality as,
\begin{align}\label{eq:TCHSHsteering2_3}
S_{\alpha,\beta}^{(1)}&=\alpha \expval{Z}+ \beta \expval{Z B_0}+ \expval{X B_1} \leq \sqrt{1+(\alpha+\beta)^2} \end{align} with $\beta>0$. The maximal quantum violation is $\beta+\sqrt{1+\alpha^2}:=S_Q$.
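Both bounds can be checked directly; the following minimal sketch (our own illustration) computes the LHS bound by optimizing over Bob's deterministic responses and verifies that the ideal strategy $B_0=Z$, $B_1=X$ attains the quantum value.
\begin{verbatim}
import numpy as np

# Sketch: numerical check of the LHS bound and of the quantum value attained
# by the ideal strategy B0 = Z, B1 = X for the 2-setting inequality S^(1).
sz = np.diag([1.0, -1.0])
sx = np.array([[0.0, 1.0], [1.0, 0.0]])

alpha, beta = 1.0, np.sqrt(2)

# LHS bound: Bob declares deterministic outcomes b0, b1 = +/-1.
lhs = max(np.linalg.eigvalsh(alpha * sz + beta * b0 * sz + b1 * sx).max()
          for b0 in (1, -1) for b1 in (1, -1))
print(np.isclose(lhs, np.sqrt(1 + (alpha + beta) ** 2)))   # True

# Quantum value of the ideal two-qubit realization.
S1 = alpha * np.kron(sz, np.eye(2)) + beta * np.kron(sz, sz) + np.kron(sx, sx)
print(np.isclose(np.linalg.eigvalsh(S1).max(),
                 beta + np.sqrt(1 + alpha**2)))             # True
\end{verbatim}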
With this form of steering inequality, it allows us to compare the construction with the one proposed in Ref. \cite{Shrotriya2021}, which changes the marginal term to Bob's side,
\begin{align}\label{eq:TCHSHsteering2_2}
S_{\alpha,\beta}^{(2)} &=\alpha \expval{B_0}+ \beta \expval{Z B_0}+ \expval{X B_1} \leq \alpha+ \sqrt{1+\beta^2}, \end{align} with $\beta^2=\alpha^2+1$; it has the same quantum bound as Eq. \eqref{eq:TCHSHsteering2_3}. It should be remarked that the constraint on $\beta$ and $\alpha$ given in \cite{Shrotriya2021} can be relaxed to $\beta^2\geq\alpha^2+1$, which we prove in Appendix \ref{appendixD} using SOS decompositions related to the steering operators.
{Both steering inequalities $S_{\alpha,\beta}^{(1)}$ and $S_{\alpha,\beta}^{(2)}$ can be used to self-test pure partially entangled states with $\sin(2\theta)=\frac{1}{\sqrt{1+\alpha^2}}$. The only difference between our construction and the one in \cite{Shrotriya2021} is that the roles of Alice and Bob are exchanged. The advantage of our construction will be shown later. Before that, we prove that the maximal violation of both $S_{\alpha,\beta}^{(1)}$ and $S_{\alpha,\beta}^{(2)}$ can be used to self-test the pure partially entangled state. Although a self-testing proof based on $S_{\alpha,\beta}^{(2)}$ has already been given in Ref. \cite{Shrotriya2021}, we provide a different proof here, based on the SOS decompositions related to the steering inequality and the isometry given in Fig. \ref{fig:1sdiSwap}. The benefit of this proof is that the constraint $\beta^2=\alpha^2+1$ can be relaxed; the details can be seen in Appendix \ref{appendixD}.}
In the following we study the robustness of the self-testing based on these two steering inequalities.
In Ref. \cite{Shrotriya2021}, the robustness of one-sided self-testing is studied only for maximally entangled states based on operator inequalities. For the case $\alpha=0$, when the violation of the steering inequality is $S=2-\epsilon$, the actual state is $24\sqrt{\epsilon}+\epsilon$ close to the target state, see also Ref. \cite{Supic2020}. More precisely, the relation between the fidelity and the steering inequality value is
\begin{align}\label{eq:TCHSHsteeringFid2021} F \geq 1-24\sqrt{2-S}-(2-S), \end{align} which is quite loose. A nontrivial fidelity bound $F>1/2$ can only be obtained when the violation is larger than $1.99957$, which makes the robustness analysis in one-sided self-testing impractical. Here we improve this bound to \begin{align}
F \geq \frac{S-2}{4-2\sqrt{2}}+1, \end{align} which is the theoretically optimal linear bound. The local extraction channel achieving this bound is constructed in Appendix \ref{appendixB}; it coincides with the extraction channel used in the DI scenario in Ref. \cite{Kaniewski2016}. However, the reason why this channel is used is not explained in Ref. \cite{Kaniewski2016}; here we point out that it is the optimal local channel that a single party can apply.
For the other case of $\alpha$, we give the robustness analysis of one-sided self-testing based on the numerical method. The details are given in Appendix \ref{appendixC}. The method works for general pure two-qubit states and the results show that the robustness bound is nearly linear.
The comparison of the robustness bounds of self-testing based on Eq. (\ref{eq:TCHSHsteering2_3}) and Eq. (\ref{eq:TCHSHsteering2_2}) is given in Fig. \ref{fig:trust-untrust}, where we take $\alpha=1$ and $\beta=\sqrt{2}$ as an example. As shown, the one with the trusted partial information gives a better robustness bound. The reason is that the steering inequality of Eq. \eqref{eq:TCHSHsteering2_3} has a smaller LHS bound than that of Eq. \eqref{eq:TCHSHsteering2_2}, while keeping the same maximal quantum bound. Thus the inequality of Eq. \eqref{eq:TCHSHsteering2_3} demonstrates an advantage for self-testing: it is more robust than using the untrusted party's partial measurement expectation. Moreover, in addition to the advantage in self-testing, the steering inequality constructed with the trusted partial expectation has fewer constraints on the parameters $\alpha$ and $\beta$, and thus admits a larger family of valid steering inequalities; see Appendix \ref{appendixD} for details.
\begin{figure}
\caption{
The comparison of the robustness bound of self-testing based on 2-setting steering inequality $S_{\alpha,\beta}$ of Eq. (\ref{eq:TCHSHsteering2_3}) and Eq. (\ref{eq:TCHSHsteering2_2}), where $\alpha=1$ and $\beta=\sqrt{2}$.
}
\label{fig:trust-untrust}
\end{figure}
\section{One-sided self-testing based on 3-setting steering inequalities} So far, the steering inequalities we have considered all involve two measurement settings. In this section we introduce more measurement settings in the construction of steering inequalities. We will show that adding more measurement settings can help to increase the robustness of one-sided self-testing. We construct a family of three-setting steering inequalities \begin{equation}\label{three-setting-I}
I_{\alpha,\beta}\equiv \alpha\langle Z\rangle+\beta\langle ZB_0\rangle+\langle XB_1\rangle +\langle YB_2\rangle \leq\sqrt{2+(\alpha+\beta)^2} \end{equation} where $\beta\geq0$. These inequalities can be viewed as a generalization of the 2-setting steering inequality in Eq. \eqref{eq:TCHSHsteering2_3}: a third measurement involving the Pauli $Y$ operator is added. Similarly to the two-setting scenario, the partial expectation in the construction can also be taken on the untrusted party Bob's measurement $B_0$, giving $I_{\alpha,\beta}\equiv \alpha\langle B_0\rangle+\beta\langle ZB_0\rangle+\langle XB_1\rangle +\langle YB_2\rangle$. {These two slightly different inequalities have different LHS bounds while keeping the same quantum bound; for a detailed discussion and the proof that they self-test two-qubit partially entangled states, see Appendix \ref{appendixD}}.
Here, for simplicity, we consider only the first inequality in the main text and give its self-testing robustness bound. The LHS bound is the maximal value attainable when Bob holds a pre-existing state known to Alice, rather than half of an entangled state shared with Alice. Bob's system may be derived from a classical system, so we can denote his corresponding declared results by random variables $B_k\in \{-1,1\}$ for $k=0,1,2$. As shown in \cite{Saunders2010}, it is easy to see that \begin{equation}
\begin{array}{ll}
I_{\text{LHS}}=\max_{B_k}\lambda_{\text{max}}( I_{\alpha,\beta}), \end{array} \end{equation} where $\lambda_{\text{max}}(\hat{O})$ denotes the largest eigenvalue of $\hat{O}$. The LHS bound of Eq. \eqref{three-setting-I} is then found to be $\sqrt{2+(\alpha+\beta)^2}$.
The maximum quantum bound is $\beta+\sqrt{4+\alpha^2}:=S_Q$. This can be verified by the fact that $S_Q\mathbb{I}- I_{\alpha,\beta}$ is PSD. More precisely,
\begin{equation}
\begin{array}{ll}S_Q\mathbb{I}- \hat{I}_{\alpha,\beta}&=\frac{\beta}{2}(\mathbb{I}-ZB_0)^2\\ & +\frac{\sqrt{\alpha^2+4}}{4}(\mathbb{I}-\frac{\alpha}{\sqrt{4+\alpha^2}}Z-\frac{2}{\sqrt{4+\alpha^2}}XB_1)^2\\ & +\frac{\sqrt{\alpha^2+4}}{4}(\mathbb{I}-\frac{\alpha}{\sqrt{4+\alpha^2}}Z-\frac{2}{\sqrt{4+\alpha^2}}YB_2)^2\end{array} \end{equation} The quantum strategy achieving the maximal quantum violation is $B_0=Z$, $B_1=X$, $B_2=-Y$ and $\ket{\Phi}=\cos \theta \ket{00}+\sin \theta \ket{11}$ with $\sin2\theta=\frac{2}{\sqrt{4+\alpha^2}}$, which in turn can be self-tested when the maximal violation is attained; see Appendix \ref{appendixD}.
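This SOS identity can be verified numerically for arbitrary dichotomic observables on Bob's side; the following sketch is our own check (the random observables are for illustration only, not the ideal ones).
\begin{verbatim}
import numpy as np

# Sketch: numerical check of the SOS identity certifying
# S_Q = beta + sqrt(4+alpha^2) for the 3-setting operator I_{alpha,beta}.
rng = np.random.default_rng(1)
I2, I4 = np.eye(2), np.eye(4)
sx = np.array([[0, 1], [1, 0]], dtype=complex)
sy = np.array([[0, -1j], [1j, 0]])
sz = np.diag([1.0 + 0j, -1.0])

def rand_obs():
    # Random +/-1-valued qubit observable n.sigma (illustrative choice).
    n = rng.normal(size=3); n /= np.linalg.norm(n)
    return n[0] * sx + n[1] * sy + n[2] * sz

alpha, beta = 1.0, 2.0
c, s = alpha / np.sqrt(4 + alpha**2), 2 / np.sqrt(4 + alpha**2)
B0, B1, B2 = rand_obs(), rand_obs(), rand_obs()

Z, X, Y = (np.kron(p, I2) for p in (sz, sx, sy))
ZB0, XB1, YB2 = np.kron(sz, B0), np.kron(sx, B1), np.kron(sy, B2)

Ihat = alpha * Z + beta * ZB0 + XB1 + YB2
SQ = beta + np.sqrt(4 + alpha**2)

sos = (beta / 2 * (I4 - ZB0) @ (I4 - ZB0)
       + np.sqrt(4 + alpha**2) / 4
       * ((I4 - c * Z - s * XB1) @ (I4 - c * Z - s * XB1)
          + (I4 - c * Z - s * YB2) @ (I4 - c * Z - s * YB2)))
print(np.allclose(SQ * I4 - Ihat, sos))                   # identity holds
print(np.linalg.eigvalsh(SQ * I4 - Ihat).min() >= -1e-9)  # hence PSD
\end{verbatim}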
Here, for simplicity, we consider only the case $\alpha=0$, $\beta=1$. Since Bob's measurements are untrusted, without loss of generality they can be written as $B_{0,1}=\cos{\mu}\sigma_z\pm \sin{\mu}\sigma_x$ and $B_2=\cos\mu_1\cos\mu_2\sigma_z+\cos\mu_1\sin\mu_2\sigma_x+\sin\mu_1\sigma_y$. Due to the asymmetry of $I_{\alpha,\beta}$ introduced by the form of $B_2$, its spectral decomposition is not straightforward, which makes it difficult to construct a local extraction channel rendering $G$ PSD. We therefore divide $G$ into two parts; if each part is PSD, then the whole matrix $G$ is PSD. \begin{align} \label{eq:operator} G:=&K-( s (ZB_0+XB_1+YB_2)+\tau \mathbb{I})\nonumber\\ =&K_1- s (ZB_0+XB_1)-\tau_1 \mathbb{I}\\ &+K_2-sYB_2-\tau_2 \mathbb{I}\nonumber \end{align} where $K_1+K_2=K$ denotes the splitting of $K$ into two parts.
We consider a local extraction channel that renders both $G_1:=K_1- s (ZB_0+XB_1)-\tau_1 \mathbb{I}$ and $G_2:=K_2-sYB_2-\tau_2 \mathbb{I}$ PSD simultaneously; see Appendix \ref{appendixF} for the details of the channel construction. The following robustness bound for self-testing in the 3-setting steering scenario is obtained: \begin{align}\label{CHSHsFidelity}
F \geq s{S_{obs}}+\tau \geq \frac{3}{12-4\sqrt{2}}{(S_{obs}-3)}+1.
\end{align} It should be noticed that we did not obtain the expected robustness bound $F \geq \frac{(S_{obs}-3)}{2(3-\sqrt{3})}+1$. This may be because the local extraction channel strategy considered here is not optimal, and it may be possible to find a better extraction strategy that attains that bound. However, even though the bound we give may not be optimal, it is still better than the one obtained in the 2-setting analog-CHSH steering scenario.
For a straightforward comparison between different inequalities, we transform the steering inequalities into games characterized by a guessing probability, which lies in the interval $[1/2,1]$. In the case of $\alpha=0$, we have $P=\frac{1}{2}\sum_{i=0,1} p(a=b|A_iB_i)=\frac{1}{2}+\frac{S}{2S_Q}$, which is the success probability of the nonlocal game in which one party guesses the other party's outcomes. For the other cases, we can also find a nonlocal game whose guessing score is related to the inequalities in Eqs. \eqref{eq:TCHSHsteering2_3} and \eqref{three-setting-I}, respectively; see Appendix \ref{appendixE} for details. We define the guessing probability as the probability for the untrusted party to successfully guess the trusted party's outcomes, which is also important for the sample-efficiency analysis in the next section. Based on the guessing probability, we can compare the robustness bounds for one-sided self-testing of the singlet based on the 3-setting and 2-setting steering inequalities. The result is shown in Fig. \ref{fig:3better2}, where the 3-setting steering inequality we constructed gives a better robustness bound. It is worth studying whether steering inequalities with more measurement settings can be constructed to further improve the robustness of one-sided self-testing.
\begin{figure}
\caption{
Comparison of robustness bounds for one-sided self-testing of singlet based on 3-setting and 2-setting steering inequalities.
}
\label{fig:3better2}
\end{figure}
\section{\label{sec:sampleefficiency} Sample efficiency} To construct a practical quantum verification protocol, it is crucial to study the sample efficiency \cite{Pallister2018,Zhu2019,Han2021,Dimic2022}. Sample efficiency characterizes the performance of self-testing criteria in the finite-copy regime, in which a fraction of the state copies is measured in order to guarantee that the remaining copies are close to the target state.
Consider a quantum device that produces the states $\rho_1,\rho_2,\dots,\rho_N$ in $N$ runs. Our task is to verify whether these states are, on average, sufficiently close to the target state $\ket{\Phi}\in \mathcal{H}$. Here the one-sided extractability is a natural choice for quantifying closeness in the one-sided self-testing scenario.
For the extraction channel method, we obtain linear relation between the extractability and the observed value of the steering inequalities \begin{align}
F\geq s\cdot S_{obs}+\tau. \end{align} Since $\tau=1-s\cdot S_Q$, we have \begin{align}\label{eq:violation-fidelity}
s\cdot (S_Q - S_{obs}) \geq 1-F. \end{align}
The first step in constructing the verification protocol is to view the steering inequalities as testing games; the details of this transformation are given in Appendix \ref{appendixE}. Based on this, guarantees on the unmeasured copies can be derived from the measured copies. Define $p$ as the guessing probability of the game for a single state. For the steering inequalities in Eqs. \eqref{eq:TCHSHsteering2_3} and \eqref{three-setting-I}, when $\alpha=0$, which corresponds to the singlet state, the testing game is straightforward and is based on the outcomes of identical Pauli measurements. When $\alpha>0$, which corresponds to non-maximally entangled states, virtual testing games are constructed from the steering inequalities in Appendix \ref{appendixE}. For these testing games, we have \begin{align}\label{eq:guessingprob}
p=\frac{1}{2}+\frac{S}{2S_Q}, \end{align} where $p$ is the (suitably weighted) average probability that the untrusted party correctly guesses the trusted party's outcome in the corresponding game; see Appendix \ref{appendixE}.
This relation between the guessing probability and the violation of the steering inequalities is essential for the study of sample efficiency. For the CHSH-analog steering inequality in Eq. \eqref{eq:TCHSHsteering}, we have $p=\frac{1}{4}\sum_{i,j} p(a\oplus b=i\cdot j|A_iB_j)=\frac{1}{2}+\frac{S}{4}$, which is the probability for Alice and Bob to win the game with winning condition $a\oplus b=i\cdot j$. For the steering inequality in Eq. \eqref{eq:TCHSHsteering} with $\alpha\neq0$ and the one in Eq. \eqref{eq:TCHSHsteering2_2}, we have not found corresponding testing games; one may resort to other tools, such as Ref. \cite{Bancal2021}, to study their performance in the finite regime.
Define $\epsilon=1-F$ as the infidelity, combining Eq. \eqref{eq:violation-fidelity} and Eq. \eqref{eq:guessingprob}, we have \begin{align}
p \leq 1-\frac{\epsilon}{2s \cdot S_Q}. \end{align} Define $c=\frac{1}{2s \cdot S_Q}$, in general we have \begin{align}
p \leq 1-c \epsilon. \end{align}
Now, for those inequalities that correspond to a testing game, we are ready to estimate the number of copies sufficient to certify a given bound on the average one-sided extractability. Suppose the states in the test are independently distributed; the goal is to guarantee that the average one-sided extractability of the states $\rho_1,\rho_2,\dots,\rho_N$ is larger than $1-\epsilon$ with significance level $\delta$ (confidence level $1-\delta$). According to Ref. \cite{Dimic2022}, the scaling of the sample efficiency depends on whether the quantum bound and the algebraic bound of the game coincide. When they coincide, the number of copies satisfies \begin{align}
N \geq \frac{\ln \delta^{-1}}{\ln (1-c\epsilon)^{-1}}\approx \frac{\ln \delta^{-1}}{c\epsilon}. \end{align}
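As an illustration of the resulting sample sizes (our own sketch; the values of $\epsilon$ and $\delta$ below are arbitrary), consider the 2-setting inequality with $\alpha=0$, for which $s=1/(4-2\sqrt{2})$ and $S_Q=2$.
\begin{verbatim}
import numpy as np

# Sketch: sample-size estimate for the alpha = 0, 2-setting case, where
# s = 1/(4 - 2*sqrt(2)) and S_Q = 2 (epsilon and delta below are arbitrary).
s, S_Q = 1 / (4 - 2 * np.sqrt(2)), 2.0
c = 1 / (2 * s * S_Q)                   # c = (4 - 2*sqrt(2))/4 ~ 0.29

epsilon, delta = 0.01, 1e-3             # target infidelity, significance level
N = np.log(1 / delta) / np.log(1 / (1 - c * epsilon))
print(int(np.ceil(N)))                  # ~ 2355 copies suffice
\end{verbatim}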
Among the steering inequalities considered in this paper, the 2-setting inequality in Eq. \eqref{eq:TCHSHsteering2_3} and the 3-setting inequality in Eq. \eqref{three-setting-I} satisfy this condition: the maximal guessing probability $1$ can be attained in the corresponding testing games according to the strategy given in the Appendix. Thus we obtain approximately optimal sample efficiency for one-sided self-testing of general two-qubit states in both the 2-setting and 3-setting cases, comparable to the number of copies needed in quantum state verification.
For the CHSH-analog steering inequality in Eq. \eqref{eq:TCHSHsteering}, the quantum bound and algebraic bound are different. The number of copies needed satisfies \begin{align}
N=O(\frac{\ln{\delta^{-1}}}{c^2\epsilon^2}), \end{align} according to Ref. \cite{Dimic2022}.
In this section, we studied the sample efficiency for one-sided self-testing of two-qubit entangled states. Based on the steering inequalities we constructed, approximately optimal sample efficiency can be obtained in the SDI scenario, which is comparable to the device-dependent scenario. For the general DI scenario, the scaling of testing number is usually in quadratic form. Thus our strategies demonstrate a significant advantage over DI self-testing in sample efficiency.
\section{\label{sec:Conclustion} Conclusion}
In this paper, we studied the one-sided self-testing of general pure two-qubit states in an untrusted quantum network in which one party is not honest. The self-testing strategies are based on the violation of quantum steering inequalities. To achieve this goal, we first studied the two-setting scenario, where the steering inequalities can be constructed from the standard tilted-CHSH inequality and its general form. Based on these steering inequalities, we studied the robustness of one-sided self-testing using both the local extraction map method and a numerical semidefinite-programming method. In particular, the local extraction map method provides the analytical, theoretically optimal linear bound. Our result also demonstrates an explicit approach to constructing the local extraction channel. The comparison with the device-independent and device-dependent scenarios clearly shows that the robustness of SDI certification lies in between. The numerical method, involving SDP and the swap trick, gives nearly linear robustness bounds for general pure two-qubit states. To construct a practical certification protocol, we also clarified the sample efficiency of our 1SDI self-testing protocols. The results show that approximately optimal sample efficiency can be obtained based on the steering inequalities we constructed.
Furthermore, we construct three-measurement-setting steering inequalities for general two-qubit states, which had not been studied for partially entangled states before. It is shown that the robustness bound can be further improved by introducing the third measurement setting. It is worth studying whether steering inequalities with more measurement settings can be constructed to further improve the robustness of one-sided self-testing. This question is also of close interest to foundational studies on quantum steering. The improved robustness bounds in our work can be applied to the certification of high-dimensional quantum devices as building blocks. In the future, it would be promising to generalize our results to generic bipartite pure states, multipartite GHZ states, and other quantum states. \\
\begin{acknowledgments}
This research is supported by the National Natural Science Foundation of China (Grants No. 62101600, No. 61901218, and No. 62201252), the China University of Petroleum, Beijing (Grant No. ZX20210019), the State Key Laboratory of Cryptography Science and Technology (Grant No. MMKFKT202109), and the Natural Science Foundation of Jiangsu Province, China (Grant No. BK20190407).\\
\end{acknowledgments} \appendix
\section{Local extraction channel method for self-testing based on analog tilted-CHSH inequality} \label{appendixA}
This section provides the robust bound of the self-testing based on analog tilted-CHSH inequality in Case 2.
\textbf{Case 2:} $0\leq\cos2\mu\leq\frac{\alpha^2}{4}$ or equivalently $\mu \in ( \arcsin \sqrt{\frac{4-\alpha^2}{8}} , \frac{\pi}{4}]$.
In this case, the eigenvalues of the decomposition $ \Hat{S}_{\alpha}=\sum \lambda_i |\psi_i\rangle \langle\psi_i|$ are $\lambda_{1,2}=\sqrt{\alpha^2+4\sin^2\mu}\pm 2\cos\mu$. The constraints between $\gamma$ and $\mu$ are \[ \left\{
\begin{array}{ll}
\lambda_1 \cos^2\gamma -\lambda_2 \sin^2\gamma=\alpha+2\cos\mu;\\
\lambda_2 \cos^2\gamma -\lambda_1 \sin^2\gamma=\alpha-2\cos\mu;\\
(\lambda_1+\lambda_2)\cos\gamma\sin\gamma=2\sin\mu
\end{array}
\right. \] Still $\sin2\gamma=\frac{2\sin\mu}{\sqrt{\alpha^2+4\sin^2\mu}}$.
The local extraction channel in this case is: Bob applies the identity operation $I$ with probability $q_1$, and applies $\sigma_x$ with probability $q_2$. Then the ideal state is transformed into
$K=q_1|\psi\rangle\langle\psi|+q_2\sigma_x|\psi\rangle\langle\psi|\sigma_x$. The PSD requirement of $G:=K-s\Hat{I_\alpha}-\tau \mathbb{I}\geq 0$ gives \\ \begin{widetext} \begin{eqnarray} \begin{pmatrix} q_1\cos^2(\theta)-C_1s-\tau & 0 & 0&q_1\frac{\sin2\theta}{2}-2\sin\mu s \\ 0 & q_2\cos^2\theta-C_2s-\tau & q_2\frac{\sin2\theta}{2}-2\sin\mu s& 0\\ 0 &q_2\frac{\sin2\theta}{2}-2\sin\mu s & q_2\sin^2\theta+C_1s-\tau& 0\\ q_1\frac{\sin2\theta}{2}-2\sin\mu s & 0 & 0&q_1\sin^2\theta+C_2s-\tau \\ \end{pmatrix} \geq 0 \end{eqnarray} \end{widetext} where $C_1=\alpha+2\cos\mu$, and $C_2=\alpha-2\cos\mu$. The eigenvalues OF $G$ are, \begin{align}
&\lambda_{1,2}=\frac{G_{11}+G_{44}\pm\sqrt{(G_{11}-G_{44})^2+4G_{14}^2}}{2},\\
&\lambda_{3,4}=\frac{G_{22}+G_{33}\pm\sqrt{(G_{22}-G_{33})^2+4G_{23}^2}}{2}. \end{align} These should be non-negative to make $G$ PSD, which requires \begin{equation*} \left\{ \begin{split} &q_1\geq\frac{4\sin^2\mu \cdot s^2+(C_1s+\tau)(C_2s-\tau)}{(\beta_Q+2\sin 2\theta \sin\mu+\cos^2\theta C_2-\sin^2\theta C_1)s-1}\\ &q_2 \geq \frac{4\sin^2\mu \cdot s^2+(C_2s+\tau)(C_1s-\tau)}{(\beta_Q+2\sin2\theta\sin\mu+\cos^2\theta C_1-\sin^2\theta C_2)s-1}\\ \end{split} \right. \end{equation*} where $\beta_Q=\sqrt{8+2\alpha^2}$.
We can again set $s=\frac{1-\cos^2\theta}{\beta_{Q}-(2+\alpha)}$ and $\tau=1-\sqrt{8+2\alpha^2}s$, which keeps $q_1$ in the above range and gives the same bound as in Case 1. To this end, we take $q_1$ to be the maximum of $0$ and the value that saturates the corresponding inequality in the brace above.
\section{Local extraction channel method for self-testing based on reverse CHSH inequality} \label{appendixB}
For the analog CHSH steering operator $\Hat{S}= ZB_0+ XB_1$, it has the following spectral decomposition \begin{align}\label{eq:CHSHsDecompose}
\Hat{S}=\sum \lambda_i |\psi_i\rangle \langle\psi_i| , \end{align} with $\lambda_1^2+\lambda_2^2=4,\lambda_3=-\lambda_2,\lambda_4=-\lambda_1$. Precisely, \begin{align} &\lambda_1=\sqrt{2}(\cos \mu+\sin\mu), \lambda_2=\sqrt{2}(\cos\mu-\sin\mu), \end{align} where Bob's measurements are written as $B_r=\cos \mu \sigma_z +(-1)^r\sin\mu \sigma_x$, with $r=0,1$.
In the case of $\mu \in (0,\pi/4]$ , there has $\lambda_1,\lambda_2\geq 0$, and \begin{equation}\label{CHSHdecom} \left\{
\begin{array}{ll}
|\psi_1\rangle=\frac{|00_B\rangle + |11_B\rangle}{\sqrt{2}};
|\psi_2\rangle=\frac{|00_B'\rangle + |11_B'\rangle}{\sqrt{2}}, \\
|\psi_3\rangle=\frac{|01_B'\rangle - |10_B'\rangle}{\sqrt{2}};
|\psi_4\rangle=\frac{|01_B\rangle - |10_B\rangle}{\sqrt{2}}, \; \text{where,}\\
0_B=\cos\frac{\pi}{8}|0\rangle+\sin\frac{\pi}{8}|1\rangle;
1_B=\sin\frac{\pi}{8}|0\rangle-\cos\frac{\pi}{8}|1\rangle\\
0_B'=\cos\frac{\pi}{8}|0\rangle-\sin\frac{\pi}{8}|1\rangle;
1_B'=\sin\frac{\pi}{8}|0\rangle+\cos\frac{\pi}{8}|1\rangle. \end{array} \right. \end{equation}
We consider the following local extraction channel: Bob applies $R_{1}=I$ to his qubit with probability $q_1$ and $R_{2}=\sigma_z$ with probability $q_2$. The ideal state is thereby transformed into a mixture of eigenvectors of the steering operator,
$K:=q_1\ketbra{\psi_1}{\psi_1}+q_2\ketbra{\psi_2}{\psi_2}$. In this case, $G:=K-s\Hat{S}-\tau \mathbb{I}$ is diagonal, and the PSD requirement gives \[ \left\{
\begin{array}{ll}
q_1-s\lambda_1-\tau\geq 0 ;\\
q_2-s\lambda_2-\tau\geq 0 ;\\
\Tr(\rho)=p_1+p_2=1;\\
\Tr(\rho \hat{S})=\lambda_1p_1+\lambda_2p_2=S;
\end{array}
\right. \] where we set $\tau=1-2s$.
By simplifying, we have
$s\lambda_1-2s+1 \leq q_2\leq -s\lambda_2+2s$, which gives $ s\geq\frac{1}{4-(\lambda_1+\lambda_2)} \geq\frac{1}{4-2\sqrt{2}}$. This yields the following robustness bound for self-testing via the steering inequality: \begin{align}\label{CHSHsFidelity}
F \geq s{S}+\tau \geq \frac{S-2}{4-2\sqrt{2}}+1. \end{align}
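For example, at the LHS bound $S=\sqrt{2}$ this evaluates to $F\geq\frac{\sqrt{2}-2}{4-2\sqrt{2}}+1=\frac{1}{2}$, so any violation of the steering inequality already yields a nontrivial fidelity bound.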
Besides, we get the constaints on the rotation probability
\begin{align}\label{CHSHsp1}
\frac{(1+\sqrt{2})(\cos \mu+\sin \mu)-\sqrt{2}}{2} \leq q_1 \leq 1. \end{align}
For the case of $\mu \in (\frac{\pi}{4},\frac{\pi}{2})$, the local extraction channel are considered as: Bob takes rotation $R_{1}=I$ with the probability of $q_1$, and takes $R_{2}=\sigma_x$ with the probability of $q_2$. It gives the same robustness bound.
Above, we obtained the optimal linear bound, and a nontrivial fidelity is certified as long as the steering inequality is violated. By contrast, as shown in Ref. \cite{Kaniewski2016}, in the DI scenario a nontrivial fidelity bound cannot be obtained at inequality violation $2$ with this local extraction channel. The reason might be that defining the appropriate extraction channel requires the two local sites to coordinate: in the DI scenario both sides are untrusted, and the decomposition of the Bell operator depends on both Alice's and Bob's local measurement directions.
If Alice and Bob could inform each other which measurement directions they choose (i.e., communicate classically), they could define appropriate local rotation channels that rotate the ideal state onto the eigenvectors of the Bell operator with positive eigenvalues. This would make $G:=K-s\Hat{I}-\tau \mathbb{I}$ PSD, and the optimal $s$ and $\tau$ would then be easy to find. However, allowing such communication is not compatible with the usual notion of device independence. Thus, in the DI scenario, where such coordination would be needed, the nontrivial fidelity bound cannot be reached in this way.
\section{Numerical results utilising the SWAP isometry} \label{appendixC}
In this section, we consider a numerical method based on SDP, which has been widely used in DI frameworks \cite{Yang2014,Wang2016}, to show the robustness of self-testing via steering inequalities. A detailed robustness analysis is given for the 3-setting steering inequalities; for the 2-setting scenario, one only needs to remove the third measurement in the code.
\begin{figure}
\caption{
The one-side SWAP isometry applied on Bob's side.
}
\label{fig:onesideSwap}
\end{figure}
The target sate is $\ket{\psi}=\cos\theta\ket{00}+\sin\theta\ket{11}$. And Bob's measurements can be written as,
$B_0=2E_{0|0}-I$, $B_1=2E_{0|1}-I$ and $B_2=2E_{0|2}-I$, where $B_0^2=B_1^2=B_2^2=\mathbb{I}$. After applying the isometry given in Fig. \ref{fig:onesideSwap} to the physical state $\ket{\psi}$, we obtain the state \begin{align}
\ket{\psi'}=E_{0|0}\ket{\psi}\ket{0}_{A'}+XE_{1|0}\ket{\psi}\ket{1}_{A'} \end{align} We then trace out the original system, \begin{align}
\rho_{\text{swap}}=\tr_A(\ket{\psi'}\bra{\psi'}). \end{align}
Utilising the SWAP isometry on Bob's side, the fidelity can be bounded as: \begin{align} f=&\bra{\psi}\rho_{\text{swap}}\ket{\psi} \nonumber\\
=&\cos^2\theta\bra{0} \tr_A(E_{0|0}\rho_{AB})\ket{0}
+\sin^2\theta \bra{1}\tr_A (E_{1|0}\rho_{AB})\ket{1}\nonumber \\
+&\frac{\sin2\theta}{2}[ \bra{0} \tr_A(E_{1|0}X E_{0|0}\rho_{AB})\ket{1}
+\bra{1}\tr_A(E_{0|0}X E_{1|0}\rho_{AB})\ket{0}]\nonumber \\
=&\cos^2\theta\bra{0} \tr_A(E_{0|0}\rho_{AB})\ket{0}+\sin^2\theta\bra{1}(\rho_B-\tr_A E_{0|0})\ket{1} \nonumber \\
&+\sin2\theta[\bra{0}\tr_A(E_{0|1}E_{0|0}-E_{0|0}E_{0|1}E_{0|0})\ket{1}\nonumber \\
&+\bra{1} \tr_A(E_{0|0}E_{0|1}-E_{0|0}E_{0|1}E_{0|0}\rho_{AB})\ket{0}]\nonumber \\
=&\cos^2\theta \bra{0} \sigma_{0|0}\ket{0}\nonumber+\sin^2\theta \bra{1}(\rho_B-\sigma_{0|0})\ket{1}\nonumber \\
&+\sin2\theta [\bra{0} (\sigma_{0|1,0|0}-\sigma_{0|0,0|1,0|0})\ket{1}\nonumber\\
&+\bra{1}\sigma_{0|0,0|1}-\sigma_{0|0,0|1,0|0}\ket{0}] \end{align}
The goal is now to give a lower bound on $f$. The numerical method of minimizing the fidelity for a given steering-inequality value is the following SDP: \begin{align} \label{eq:sdp} \textrm{minimize }&f:=\Tr(M\Gamma)\\ \nonumber \textrm{subject to: }&\Gamma\geq 0,\\ \nonumber &I_{\alpha,\beta} =Q, \end{align} where $M$ is a $14\times14$ matrix whose entries are all zero except $M_{2,2} = \sin^2\theta$; $M_{2,9} =M_{9,2} = \sin2\theta$; $M_{3,3} = \cos^2\theta$; $M_{4,4} = -\sin^2\theta$ and $M_{9,10} =M_{10,9} = -\sin2\theta$.
\begin{widetext} \begin{align} \Gamma&=\left( \begin{array}{ccccccc}
\rho_{C} & \sigma_{0|0} & \sigma_{0|1} &\sigma_{0|2} & \sigma_{0|1,0|0}&\sigma_{0|2,0|0}&\sigma_{0|2,0|1}\\
\sigma_{0|0} & \sigma_{0|0} & \sigma_{0|0,0|1} &\sigma_{0|0,0|2} & \sigma_{0|0,0|1,0|0}& \sigma_{0|0,0|2,0|0}& \sigma_{0|0,0|2,0|1}\\
\sigma_{0|1} & \sigma_{0|1,0|0} & \sigma_{0|1} & \sigma_{0|1,0|2}& \sigma_{0|1,0|0}& \sigma_{0|1,0|2,0|0}& \sigma_{0|1,0|2,0|1}\\
\sigma_{0|2} & \sigma_{0|2,0|0} & \sigma_{0|2,0|1} & \sigma_{0|2}& \sigma_{0|2,0|1,0|0}& \sigma_{0|2,0|0}& \sigma_{0|2,0|1}\\
\sigma_{0|0,0|1} & \sigma_{0|0,0|1,0|0} & \sigma_{0|0,0|1} & \sigma_{0|0,0|1,0|2}& \sigma_{0|0,0|1,0|0}& \sigma_{0|0,0|1,0|2,0|0}& \sigma_{0|0,0|1,0|2,0|1}\\
\sigma_{0|0,0|2} & \sigma_{0|0,0|2,0|0} & \sigma_{0|0,0|2,0|1} & \sigma_{0|0,0|2}& \sigma_{0|0,0|2,0|1,0|0}& \sigma_{0|0,0|2,0|0}& \sigma_{0|0,0|2,0|1}\\
\sigma_{0|1,0|2} & \sigma_{0|1,0|2,0|0} & \sigma_{0|1,0|2,0|1} & \sigma_{0|1,0|2}& \sigma_{0|1,0|2,0|1,0|0}& \sigma_{0|1,0|2,0|0}& \sigma_{0|1,0|2,0|1}\\ \end{array} \right) \end{align}
$I_{\alpha,\beta}=\alpha\langle Z\rangle+\beta\langle ZB_0\rangle+\langle XB_1\rangle +\langle YB_2\rangle=\Tr[(\alpha-\beta)Z\rho_C-(X+Y)\rho_C+2\beta Z\sigma_{0|0}+2X\sigma_{0|1}+2Y\sigma_{0|2}]$, \; or
$I_{\alpha,\beta}=\alpha\langle B_0\rangle+\beta\langle ZB_0\rangle+\langle XB_1\rangle +\langle YB_2\rangle=\Tr[-(\alpha+\beta Z+X+Y)\rho_C+(2\alpha I+2\beta Z)\sigma_{0|0}+2X\sigma_{0|1}+2Y\sigma_{0|2}]$. \end{widetext}
We constrain $\Gamma$ in the optimization to be positive semidefinite and note that each sub-matrix of $\Gamma$ corresponding to an element of an assemblage is a valid quantum object. It actually turns out that all assemblages satisfying no-signalling can be realized in quantum theory \cite{Hughston}. A discussion of this point is beyond the scope of this paper: since all we wish to do is give a lower bound on $f$, simply imposing $\Gamma\geq 0$ suffices. Based on the SDP of Eq. (\ref{eq:sdp}), we show in Fig. \ref{fig:threesdp} several robustness bounds of self-testing based on the 3-setting steering inequality for $\alpha=1,2$ and $\beta=1,2,10$. \begin{figure}
\caption{
Robustness bound of self-testing based on 3-setting steering inequality for $\alpha=1,2$ and $\beta=1,2,10$.
}
\label{fig:threesdp}
\end{figure}
\section{Analysis of different types of 2-setting and 3-setting steering inequalities}\label{appendixD} {Here we study the maximal quantum violation of the steering inequalities
involved in the main text and show that their maximal violation can be used for self-testing.}
For 2-setting steering inequality
\begin{align}\label{eq:TCHSHsteering2_2appendix}
S_{\alpha,\beta}^{(2)} &=\alpha \expval{B_0}+ \beta \expval{Z B_0}+ \expval{X B_1} \leq \alpha+ \sqrt{1+\beta^2}, \end{align} {the maximal quantum value is $\beta+\sqrt{1+\alpha^2}:=S_Q$. This can be confirmed by showing that $S_Q\mathbb{I}- \hat{S}_{\alpha,\beta}^{(2)}\geq0$ holds for all possible underlying states and measurements. To do so, we provide the following SOS decompositions of $S_Q\mathbb{I}- \hat{S}_{\alpha,\beta}^{(2)}$ to establish its positive semidefiniteness}.
{The first SOS decomposition is, \begin{align} \label{sos1} &S_Q\mathbb{I}- \hat{ S}_{\alpha,\beta}^{(2)}\nonumber\\ &=\alpha_1^2(\mathbb{I}-cB_0-sX_AB_1)^2+\alpha_2^2(Z_A-B_0)^2\nonumber\\
&+\alpha_3^2(-cB_1+sX_AB_0+Z_AB_1)^2\nonumber\\ &+\alpha_4^2(S_Q\mathbb{I}-\hat{S}_{\alpha,\beta}^{(2)})^2 \end{align} where $c=\frac{\alpha}{\sqrt{1+\alpha^2}}$, $s=\frac{1}{\sqrt{1+\alpha^2}}$, $\alpha_4^2=\frac{1}{4\beta}$, $\alpha_3^2=\beta\sqrt{1+\alpha^2}\,\alpha_4^2=\frac{\sqrt{1+\alpha^2}}{4}$, $\alpha_1^2=\big(\beta\sqrt{1+\alpha^2}-(1+\alpha^2)\big)\alpha_4^2$, and $\alpha_2^2=\frac{\beta-\sqrt{1+\alpha^2}}{4}$.
And the second one is, \begin{align} \label{sos2} &S_Q\mathbb{I}- \hat{ S}_{\alpha,\beta}^{(2)}\nonumber\\ &=\alpha_1^2(\mathbb{I}-cB_0-sX_AB_1)^2+\alpha_2^2(Z_A-B_0)^2\nonumber\\ &+\alpha_3^2((\Delta+s^2)B_0-(\Delta+1)Z_A+cZ_AB_0-csX_AB_1)^2\nonumber\\
&+\alpha_4^2(-(\Delta+s^2)B_1+s(\Delta+1)X_A+\Delta cZ_AB_1-csX_AB_0)^2 \end{align} where $\alpha_1$ and $\alpha_2$ are the same as in the first SOS decomposition, and $\alpha_3^2=\Delta \alpha_4^2$, $\alpha_4^2=\frac{S_Q}{4s\beta (\Delta^2+s^2)(\Delta^2+1)}$, $\Delta=\frac{\beta}{\sqrt{1+\alpha^2}}$.}
{It is easy to verify that the left-hand sides of Eqs. (\ref{sos1})-(\ref{sos2}) are equal to the SOS forms on the right. In addition, for the SOS decompositions to be positive semidefinite we need $\alpha_i\geq 0$, and thus $\beta\geq\sqrt{1+\alpha^2}$. Hence $S_Q$ is an upper bound on the steering expression $S_{\alpha,\beta}^{(2)}$ under this constraint, although this alone does not tell us whether quantum systems can reach the bound. Since $B_0=Z$, $B_1=X$ and $\ket{\Phi}=\cos \theta \ket{00}+\sin \theta \ket{11}$ with $\sin2\theta=\frac{1}{\sqrt{1+\alpha^2}}$ make $S_{\alpha,\beta}^{(2)}$ achieve $S_Q$, we conclude that $S_Q$ is the maximal quantum violation.}
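The last step can be checked numerically; the sketch below (our own illustration) verifies that the stated state and measurements attain $S_Q$.
\begin{verbatim}
import numpy as np

# Sketch: check that B0 = Z, B1 = X and |Phi> = cos(t)|00> + sin(t)|11> with
# sin(2t) = 1/sqrt(1+alpha^2) attain S_Q = beta + sqrt(1+alpha^2) for S^(2).
sz = np.diag([1.0, -1.0])
sx = np.array([[0.0, 1.0], [1.0, 0.0]])
I2 = np.eye(2)

alpha, beta = 0.8, 1.5                      # any beta with beta^2 >= alpha^2+1
t = 0.5 * np.arcsin(1 / np.sqrt(1 + alpha**2))
phi = np.zeros(4)
phi[0], phi[3] = np.cos(t), np.sin(t)

S2 = alpha * np.kron(I2, sz) + beta * np.kron(sz, sz) + np.kron(sx, sx)
value = phi @ S2 @ phi
print(np.isclose(value, beta + np.sqrt(1 + alpha**2)))      # True
\end{verbatim}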
{Next, we show that the maximal violation of this steering inequality self-tests the partially entangled state. The local isometry used to establish the equivalence of the states is the same as in the main text, but with $\tilde{Z}_B=B_0$ and $\tilde{X}_B=B_1$. As shown in the main text, the relations required for this isometry to work are \begin{align}
Z_A|\psi\rangle-B_0|\psi\rangle=0,\label{AP-relation1}\\
\sin\theta X_A(I+B_0)|\psi\rangle-\cos\theta B_1(I-Z_A)|\psi\rangle=0 \label{AP-relation2} \end{align} To obtain these relations, we let both sides of Eqs. (\ref{sos1})-(\ref{sos2}) act on $\ket{\psi}$, the state that attains the maximal violation of the steering inequality. This yields seven conditions of the form $P_i\ket{\psi}=0$; among them, the second squared term in Eq. (\ref{sos1}) gives Eq. (\ref{AP-relation1}), while a linear combination of the third squared term in Eq. (\ref{sos1}) and the fourth squared term in Eq. (\ref{sos2}) leads to Eq. (\ref{AP-relation2}). Then, in analogy with the proof for the analog tilted-CHSH steering inequality given in the main text, using the isometry of Fig. \ref{fig:1sdiSwap} we complete the self-testing statement via the 2-setting steering inequality $S_{\alpha,\beta}^{(2)}$.
}
For the 2-setting steering inequality \begin{align}
S_{\alpha,\beta}^{(1)} &=\alpha \expval{Z}+ \beta \expval{Z B_0}+ \expval{X B_1} \leq \sqrt{1+(\alpha+\beta)^2}, \end{align} the maximal quantum violation is the same as for Eq. \eqref{eq:TCHSHsteering2_2appendix}. {For this steering inequality, three different types of SOS decompositions related to $S_Q\mathbb{I}- \hat{S}_{\alpha,\beta}^{(1)}$ can be given. The first one is \begin{equation}\label{sos_z1} \frac{\beta}{2}(\mathbb{I}-Z_AB_0)^2+\frac{\sqrt{\alpha^2+1}}{2}(\mathbb{I}-cZ_A-sX_AB_1)^2 \end{equation} the second one is \begin{equation}\label{sos_z2} \frac{1}{2S_Q}(-cX_A+sZ_AB_1+X_AB_0)^2+\frac{\beta\sqrt{\alpha^2+1}}{2S_Q}(S_Q\mathbb{I}- \hat{S}_{\alpha,\beta}^{(1)})^2 \end{equation} and the third one is \begin{align}\label{sos_z3} &\alpha_1^2((\Delta+s^2)Z_A-(\Delta+1)B_0+cZ_AB_0-csX_AB_1)^2\nonumber\\
&+\alpha_2^2(-(\Delta+s^2)X_A+s(\Delta+1)B_1+\Delta cX_AB_0-csZ_AB_1)^2 \end{align} where $\alpha_1^2=\Delta \alpha_2^2$, $\alpha_2^2=\frac{(1+\alpha^2)^2}{2(\beta^2\sqrt{1+\alpha^2})+\beta(1+\alpha^2)+S_Q}$, and $\Delta=\frac{\beta}{\sqrt{1+\alpha^2}}$. Here positive semidefiniteness only requires $\beta>0$. Setting each squared term in Eqs. (\ref{sos_z1})-(\ref{sos_z3}) acting on $\ket{\psi}$ to zero again leads to the relations on which our self-testing proofs rely, namely Eqs. (\ref{AP-relation1})-(\ref{AP-relation2}) (the first term in Eq. (\ref{sos_z1}) leads to Eq. (\ref{AP-relation1}), while the first term in Eq. (\ref{sos_z1}) together with the second term in Eq. (\ref{sos_z3}) leads to Eq. (\ref{AP-relation2})). Then we can complete the proof of self-testing based on $S_{\alpha,\beta}^{(1)}$.}
For the 3-settings scenario, the partial part expectation can be changed into the untrusted part's measurement. Thus there are two 3-setting steering inequality, the one in the main text, \begin{equation}\label{rthree-setting-I}
I^{(1)}_{\alpha,\beta}\equiv \alpha\langle Z\rangle+\beta\langle ZB_0\rangle+\langle XB_1\rangle +\langle YB_2\rangle \leq\sqrt{2+(\alpha+\beta)^2} \end{equation} and the one, \begin{equation}
I^{(2)}_{\alpha,\beta}\equiv \alpha\langle B_0\rangle+\beta\langle ZB_0\rangle+\langle XB_1\rangle +\langle YB_2\rangle \leq \alpha+\sqrt{2+\beta^2}, \end{equation}
Denoting Bob's corresponding declared results by random variables $B_k\in \{-1,1\}$ for $k=0,1,2$, it is easy to obtain the LHS bound $\alpha+\sqrt{2+\beta^2}$ for $I^{(2)}_{\alpha,\beta}$. The two constructions thus have different LHS bounds while the quantum bound is maintained; the inequality $I^{(1)}_{\alpha,\beta}$, which uses Alice's trusted $Z$ measurement, has the lower LHS bound, which extends the gap between the LHS bound and the quantum bound and is beneficial for practical experiments.
{The quantum bound of both 3-setting steering inequalities is the same, $\beta+\sqrt{4+\alpha^2}$. However, an extra condition, $\beta\geq \sqrt{4+\alpha^2}$, has to be satisfied for $I^{(2)}_{\alpha,\beta}$, whereas for $I^{(1)}_{\alpha,\beta}$ only $\beta\geq0$ is required. This can be obtained from the following SOS decompositions; the first one is \begin{align}\label{3-settingSOS1} &(\beta+\sqrt{4+\alpha^2})\mathbb{I}- \hat{I}^{(2)}_{\alpha,\beta}\nonumber\\ &=\alpha_1^2(\mathbb{I}-cB_0-sX_AB_1)^2+\alpha_2^2(Z_A-B_0)^2\nonumber\\
&+\alpha_3^2(\mathbb{I}-cB_0-sY_AB_2)^2\nonumber\\
&+\alpha_4^2(-cB_1+sX_AB_0+Z_AB_1)^2\nonumber\\
&+\alpha_5^2(-cB_2+sY_AB_0+Z_AB_2)^2\nonumber\\ &+\alpha_6^2((\beta+\sqrt{4+\alpha^2})\mathbb{I}-I_{\alpha,\beta})^2\nonumber\\
&+\alpha_7^2(X_AB_1-Y_AB_2)^2 \end{align} where $c=\frac{\alpha}{\sqrt{4+\alpha^2}}$,$s=\frac{2}{\sqrt{4+\alpha^2}}$,$\alpha_6^2=\alpha_7^2=\frac{1}{4\beta}$,$\alpha_4^2=\alpha_5^2=\frac{\beta\sqrt{4+\alpha^2}}{2}\alpha_6^2=\frac{\sqrt{4+\alpha^2}}{8}$,$\alpha_1^2=\alpha_3^2=(\frac{\beta\sqrt{4+\alpha^2}}{2}-\frac{4+\alpha^2}{2})\alpha_6^2$, and $\alpha_2^2=\frac{\beta-\sqrt{4+\alpha^2}}{4}$. And the second one is, \begin{align}\label{3-settingSOS2} &(\beta+\sqrt{4+\alpha^2})\mathbb{I}- \hat{I}^{(2)}_{\alpha,\beta}\nonumber\\ &=\alpha_1^2(\mathbb{I}-cB_0-sX_AB_1)^2+\alpha_2^2(Z_A-B_0)^2\nonumber\\
&+\alpha_3^2(\mathbb{I}-cB_0-sY_AB_2)^2\nonumber\\ &+\alpha_4^2((\Delta+s^2)B_0-(\Delta+1)Z_A+cZ_AB_0-csX_AB_1)^2\nonumber\\ &+\alpha_5^2((\Delta+s^2)B_0-(\Delta+1)Z_A+cZ_AB_0-csY_AB_2)^2\nonumber\\
&+\alpha_6^2(-(\Delta+s^2)B_1+s(\Delta+1)X_A+\Delta cZ_AB_1-csX_AB_0)^2\nonumber\\
&+\alpha_7^2(-(\Delta+s^2)B_2+s(\Delta+1)Y_A+\Delta cZ_AB_2-csY_AB_0)^2 \end{align} where $c=\frac{\alpha}{\sqrt{4+\alpha^2}}$, $s=\frac{2}{\sqrt{4+\alpha^2}}$, $\alpha_6^2=\alpha_7^2=\frac{1}{4s\Delta(\Delta^2+s)}$, $\alpha_4^2=\alpha_5^2=\Delta\alpha_6^2=\frac{1}{4s(\Delta^2+s)}$, $\alpha_1^2=\alpha_3^2=\frac{1}{2S}-(\Delta+1)(\Delta+s^2)\alpha_6^2$, $\alpha_2^2=\frac{\beta}{2}-\frac{\Delta^2+1}{s(\Delta+1)}$, and $\Delta=1$. }
{For the SOS decompositions to be positive semidefinite, each $\alpha_i\geq 0$ is required, and thus $\beta\geq\sqrt{4+\alpha^2}$. Setting the relevant squared terms in (\ref{3-settingSOS1})-(\ref{3-settingSOS2}) acting on $\ket{\psi}$ to zero again leads to the relations (\ref{AP-relation1})-(\ref{AP-relation2}). Thus, with the isometry given in the main text, we can complete the proof of self-testing based on $I^{(2)}_{\alpha,\beta}$. }
{For the first 3-setting steering inequality, three types of SOS decompositions can be given. The first one is \begin{align}\label{3z-settingSOS1} &(\beta+\sqrt{4+\alpha^2})\mathbb{I}- \hat{I}^{(1)}_{\alpha,\beta}\nonumber\\ &=\frac{\beta}{2}(\mathbb{I}-Z_AB_0)^2\nonumber\\ &+\frac{\sqrt{\alpha^2+4}}{4}(\mathbb{I}-cZ_A-sX_AB_1)^2\nonumber\\&+\frac{\sqrt{\alpha^2+4}}{4}(\mathbb{I}-cZ_A-sY_AB_2)^2 \end{align} the second one is \begin{align}\label{3z-settingSOS2} &(\beta+\sqrt{4+\alpha^2})\mathbb{I}- \hat{I}^{(1)}_{\alpha,\beta}\nonumber\\ &=\alpha_1^2(-cX_A+sZ_AB_1+X_AB_0)^2\nonumber\\
&+\alpha_2^2(-cY_A+sZ_AB_2+Y_AB_0)^2\nonumber\\ &+\alpha_3^2(S_Q\mathbb{I}-\hat{I}^{(1)}_{\alpha,\beta})^2 \end{align} where $\alpha_1^2=\alpha_2^2=\frac{\alpha^2+\beta^2+\beta\sqrt{4+\alpha^2}+3}{4S_Q}$, $\alpha_3^2=\frac{1}{2S_Q}$, and the third one is \begin{align}\label{3z-settingSOS3} &(\beta+\sqrt{4+\alpha^2})\mathbb{I}- \hat{I}^{(1)}_{\alpha,\beta}\nonumber\\ &=\alpha_1^2((\Delta+s^2)Z_A-(\Delta+1)B_0+cZ_AB_0-csX_AB_1)^2\nonumber\\
&+\alpha_2^2(-(\Delta+s^2)X_A+s(\Delta+1)B_1+\Delta cX_AB_0-csZ_AB_1)^2\nonumber\\
&+\alpha_3^2((\Delta+s^2)Z_A-(\Delta+1)B_0+cY_AB_0-csZ_AB_2)^2\nonumber\\
&+\alpha_4^2(-(\Delta+s^2)Y_A+s(\Delta+1)B_2+\Delta cY_AB_0-csZ_AB_2)^2 \end{align} where $\alpha_1^2=\alpha_3^2=\frac{\beta}{4(\Delta+s^2)(\Delta+1)}$, $\alpha_2^2=\alpha_4^2=\frac{1}{2s(\Delta+s^2)(\Delta+1)}$, and $\Delta=\frac{\beta}{\sqrt{1+\alpha^2}}$.}
{The PSD condition here requires only $\beta\geq0$. Setting the first squared term in (\ref{3z-settingSOS1}) acting on $\ket{\psi}$ to zero (where $\ket{\psi}$ is the state that maximally violates the steering inequality) yields relation (\ref{AP-relation1}), while a linear combination of the second squared term in (\ref{3z-settingSOS2}) and the first squared term in (\ref{3z-settingSOS3}) gives relation (\ref{AP-relation2}). Thus, with the isometry given in the main text, we can complete the proof of self-testing based on $I^{(1)}_{\alpha,\beta}$. }
\textbf{\textit{Self-testing of the measurements}} {Above, we mainly focused on self-testing of the states. The self-testing of the corresponding measurements (whose analysis can be related to the states following \cite{Yang2014}) proceeds similarly, starting with $\Phi(M_B\ket{\psi})$ instead of $\Phi(\ket{\psi})$. We illustrate it for one of the three measurements in the 3-setting steering-inequality case.}
{After the isometry, the systems will be \begin{align}\label{eq:isometry2}
\Phi(\underline{\tilde{Z}_B}\ket{\psi}) &=\frac{1}{4}[(I+Z_A)(I+\tilde{Z}_B)\underline{\tilde{Z}_B}\ket{\psi}\ket{00} \nonumber\\
&+X_A(I+Z_A)(I-\tilde{Z}_B)\underline{\tilde{Z}_B}\ket{\psi}\ket{01} \nonumber \\
&+\tilde{X}_B(I-Z_A)(I+\tilde{Z}_B)\underline{\tilde{Z}_B}\ket{\psi}\ket{10}\nonumber \\
&+X_A\tilde{X}_B(I-Z_A)(I-\tilde{Z}_B)\underline{\tilde{Z}_B}\ket{\psi}\ket{11}] \end{align}}
{With the relations (\ref{AP-relation1})-(\ref{AP-relation2}) and the fact that $Z_AX_A=-X_AZ_A$, we find $\tilde{Z}_B\tilde{X}_B\ket{\psi}=-\tilde{X}_B\tilde{Z}_B\ket{\psi}$. Using this anti-commutation relation between Bob's two measurements, one moves $\tilde{Z}_B$ to the left in each of the four lines, which changes the sign of the fourth line. The analysis is then the same as for the state self-testing, and the result is \begin{align}\Phi(\underline{\tilde{Z}_B}\ket{\psi})&=\ket{\text{junk}}[\cos\theta\ket{00}-\sin\theta\ket{11}] \nonumber\\ &=\ket{\text{junk}}\,\underline{(I\otimes \sigma_z)}[\cos\theta\ket{00}+\sin\theta\ket{11}] \end{align}
Besides, from the SOS decomposition we can also find the relation $\sin\theta Y_A(I+B_0)|\psi\rangle-\cos\theta B_2(I-Z_A)|\psi\rangle=0$. Thus we have $\tilde{Z}_B\tilde{Y}_B\ket{\psi}=-\tilde{Y}_B\tilde{Z}_B\ket{\psi}$. Following the above idea, we can finally conclude the measurement in Bob's side are $B_0=Z,B_1=X,B_2=-Y$.}
\section{The transformation of a steering inequality into a game}\label{appendixE} In this section, we relate the constructed steering inequality to a game played by the two parties and establish the relation between the quantum violation and the success probability of this game. This is helpful for a direct comparison between different steering inequalities and is necessary for the analysis of sample efficiency. For simplicity, here we only consider the 3-setting steering inequality.
In principle to obtain the maximum violation of the three setting steering inequality in main text Eq. (\ref{three-setting-I}), the state between Alice and Bob should be $\cos\theta|00\rangle+\sin\theta|11\rangle$, which can be further written as
$\frac{1}{\sqrt{2}}(\ket{\psi_0}\ket{+}+\ket{\psi_1}\ket{-})$, where we denote $|\psi_0\rangle=\cos({\theta})|0\rangle+\sin({\theta})|1\rangle$ and $|\psi_1\rangle=\cos({\theta})|0\rangle-\sin({\theta})|1\rangle$.
We define two measurements on Alice's side, $\{ |\psi_0\rangle,|\psi_0^{\perp}\rangle\}$ and $\{|\psi_1\rangle,|\psi_1^{\perp}\rangle\}$, which are the new measurements introduced to substitute, in a real experiment, the measurements chosen in the main text. In Pauli form these measurements read $\{{A}_0={\cos({2\theta})\sigma_z+\sin({2\theta})\sigma_x};\;\;{A}_1={\cos({2\theta})\sigma_z-\sin({2\theta})\sigma_x}\}$.
We notice that if Bob obtains $\ket{+}$ and Alice measures ${A}_0$, Bob can conclude that Alice's qubit is projected onto $\ket{\psi_0}$; likewise, if Bob obtains $\ket{-}$ and Alice measures ${A}_1$, Bob can conclude that Alice's qubit is projected onto $\ket{\psi_1}$. In the steering scenario Bob may send information, such as his measurement result, to Alice. This allows us to define the success probability of Bob guessing Alice's measurement result as \begin{equation}
P_\text{virtual}^x=
p(A_0^0,B_1^0)+
P(A_1^0,B_1^1), \end{equation}
which is related to the operators in the three-setting steering inequality of Eq. \eqref{three-setting-I}. More precisely, $\frac{\alpha}{2} Z+ XB_1=(\frac{\alpha}{2} Z+X)B_1^0+(\frac{\alpha}{2} Z-X)B_1^1=\frac{\sqrt{4+\alpha^2}}{2}(A_0B_1^0+A_1B_1^1)=\frac{\sqrt{4+\alpha^2}}{2}(2A_0^0B_1^0+2A_1^0B_1^1-I_B)$
for $\sin(2\theta)=\frac{2}{\sqrt{4+\alpha^2}}$. Thus $P_\text{virtual}^x$ is related to $\frac{\alpha}{2}\langle Z\rangle+\langle XB_1\rangle$. Similarly, we can define $P_\text{virtual}^y$ for the $\sigma_y$-measurement scenario, which is related to $\frac{\alpha}{2}\langle Z\rangle+\langle YB_2\rangle$.
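The operator identity above can be verified numerically for an arbitrary dichotomic observable $B_1$; the following sketch is our own check.
\begin{verbatim}
import numpy as np

# Sketch: check of the operator identity relating (alpha/2)<Z> + <X B_1> to
# the virtual guessing game; B_1 is an arbitrary +/-1-valued qubit observable.
sz = np.diag([1.0, -1.0])
sx = np.array([[0.0, 1.0], [1.0, 0.0]])
I2, I4 = np.eye(2), np.eye(4)

alpha = 1.2
c2t, s2t = alpha / np.sqrt(4 + alpha**2), 2 / np.sqrt(4 + alpha**2)
A0, A1 = c2t * sz + s2t * sx, c2t * sz - s2t * sx      # Alice's rotated observables
A0p, A1p = (I2 + A0) / 2, (I2 + A1) / 2                # projectors onto outcome +1

mu = 0.9                                               # arbitrary direction for B_1
B1 = np.cos(mu) * sz + np.sin(mu) * sx
B1p, B1m = (I2 + B1) / 2, (I2 - B1) / 2                # Bob's outcome projectors

lhs = alpha / 2 * np.kron(sz, I2) + np.kron(sx, B1)
rhs = (np.sqrt(4 + alpha**2) / 2
       * (2 * np.kron(A0p, B1p) + 2 * np.kron(A1p, B1m) - I4))
print(np.allclose(lhs, rhs))                           # True
\end{verbatim}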
Together with the guessing probability for $(Z_A,B_0)$, we define the total average passing probability as,
\begin{equation}
P_\text{virtual}=\frac{\sqrt{4+\alpha^2}(\frac{{P_\text{virtual}^x+P_\text{virtual}^y}}{2})+\beta p(a=b|Z_A,B_0)}{\sqrt{4+\alpha^2}+\beta} \end{equation} Thus we have,
\begin{equation}
\begin{split}
P_\text{virtual}=\frac{\sqrt{4+\alpha^2}+\beta+S}{2(\sqrt{4+\alpha^2}+\beta)}=\frac{1}{2}+\frac{S}{2S_Q}
\end{split}. \end{equation} This relation between the guessing probability and the violation holds for the steering inequalities in Eqs. \eqref{eq:TCHSHsteering2_3} and \eqref{three-setting-I} of the main text. Thus the steering inequalities are transformed into testing games.
\section{Robust self-testing of 3-setting inequality}\label{appendixF}
In this section, we provide an analytical robustness bound for self-testing via 3-setting steering inequality.
We first consider the part $G_1:=K_1- s (ZB_0+XB_1)-\tau_1 \mathbb{I}$ for $\mu \in (0,\pi/4]$; the relevant spectral decomposition is already given in Eq. (\ref{CHSHdecom}). To make $G_1\geq 0$, we consider the following local extraction channel: Bob applies $R_{1}=I$ with probability $q_1$ and $R_{2}=\sigma_z$ with probability $q_2$, while with the remaining probability $1-q_1-q_2:=q_3$ Bob applies some other local extraction operation adapted to the choice of $B_2$. Then we have
\begin{align}
&q_1-s\lambda_1-\tau_1\geq 0 ;\nonumber \\
&q_2-s\lambda_2-\tau_1\geq 0 ;\nonumber \\
&s\lambda_{(1/2)}-\tau_1\geq 0 ;\nonumber \\
&\Tr(\rho)=q_1+q_2+q_3=1;\nonumber \\
&\Tr(\rho \hat{B})=\lambda_1q_1+\lambda_2q_2+q_3\Tr(\rho YB_2)=S;\nonumber
\end{align}
where $\tau_1=1-\gamma s$ with $\gamma\in[2,3]$. And $\tau_1$ should be less than zero. We obtain $ s\geq\frac{1+q_3}{2\gamma-(\lambda_1+\lambda_2)} \geq\frac{1+q_3}{2\gamma-2\sqrt{2}}$.
Next, we determine the value of $q_3$ that makes $K_2$ PSD. We notice that $s\lambda_1-\tau_1$ and $s\lambda_2-\tau_1$, the coefficients of $\ket{\psi_3}$ and $\ket{\psi_4}$, are already greater than zero. That is, once the coefficients of $\ket{\psi_1}$ and $\ket{\psi_2}$ are also greater than zero, the $K_1$ part will be PSD. Therefore, we can move the $\ket{\psi_3}$ and $\ket{\psi_4}$ terms into the $K_2$ part to make it PSD. Now the $K_2$ part becomes, \begin{equation} \begin{array}{ll}
G_2:=&q_3\Lambda^+_B(\psi_{1})+(s\lambda_1-\tau_1)\ketbra{\psi_3}{\psi_3}\\
&+(s\lambda_2-\tau_1)\ketbra{\psi_4}{\psi_4}-sYB_2-(\gamma-3)s \mathbb{I}
\end{array} \end{equation}
which is equivalent to
\begin{equation} \begin{array}{ll}
G_2 :=&q_3\Lambda^+_B(\psi_{1})+(s\lambda_1-\tau_1)\ketbra{\psi_3}{\psi_3}\\
&+(s\lambda_2-\tau_1)\ketbra{\psi_4}{\psi_4}\\
&-s(\gamma-2)(U\ketbra{\phi_1}{\phi_1}U^{T}+U\ketbra{\phi_2}{\phi_2}U^{T})\\
&+s(4-\gamma)(U\ketbra{\phi_3}{\phi_3}U^{T}+U\ketbra{\phi_4}{\phi_4}U^{T})
\end{array} \end{equation}
where
$ U=\left[\begin{smallmatrix}
V & 0 \\
0 & V \end{smallmatrix} \right] \;\text{and}\;
U^{T}=\left[\begin{smallmatrix}
V^{*} & 0 \\
0 & V^{*}
\end{smallmatrix}\right] $ \begin{align}
V=\left[\begin{matrix}
\frac{ -i\sin\mu_1 - \cos\mu_1\sin\mu_2}{\sqrt{2-2\cos\mu_1\cos\mu_2}}& \frac{-i\sin\mu_1 - \cos\mu_1\sin\mu_2}{\sqrt{2+2\cos\mu_1\cos\mu_2}} \\
\frac{\cos\mu_1\cos\mu_2 - 1}{\sqrt{2-2\cos\mu_1\cos\mu_2}}& \frac{ \cos\mu_1\cos\mu_2 + 1 }{\sqrt{2+2\cos\mu_1\cos\mu_2}} \end{matrix} \right] \end{align} with $\phi_1=[\frac{-i}{\sqrt{2}}, 0, \frac{1}{\sqrt{2}}, 0]$, $\phi_2=[ 0, \frac{i}{\sqrt{2}}, 0, \frac{1}{\sqrt{2}}]$, $\phi_3=[ \frac{i}{\sqrt{2}}, 0, \frac{1}{\sqrt{2}}, 0]$, and $\phi_4=[ 0, \frac{-i}{\sqrt{2}}, 0, \frac{1}{\sqrt{2}}]$. The requirement $G_2\geq 0$ gives
\begin{align}
\frac{q_3(1+c)}{2}+(s\lambda_2-\tau_1)\text{overlap}^2({\psi_3},U^{-1}\phi_1)\nonumber\\+(s\lambda_1-\tau_1)\text{overlap}^2({\psi_4},U^{-1}\phi_1)-s(\gamma-2)\geq 0 ;\\ \frac{q_3(1-c)}{2}+(s\lambda_2-\tau_1)\text{overlap}^2({\psi_3},U^{-1}\phi_2)\nonumber\\+(s\lambda_1-\tau_1)\text{overlap}^2({\psi_4},U^{-1}\phi_2)-s(\gamma-2)\geq 0 ; \end{align} That is, \[ \left\{ \begin{array}{ll}
\frac{q_3(1-c)}{2}+C_2\frac{\cos^2(\frac{\pi}{8})(\sin\mu_1-1)^2+\cos^2\mu_1\sin^2(\frac{\pi}{8}+\mu_2)}{4}\\+C_1\frac{\cos^2(\frac{\pi}{8})(\sin\mu_1+1)^2+\cos^2\mu_1\sin^2(\frac{\pi}{8}-\mu_2)}{4}-s(\gamma-2)\geq 0 ;\\
\frac{q_3(1+c)}{2}+C_2\frac{\sin^2(\frac{\pi}{8})(\sin\mu_1-1)^2+\cos^2\mu_1\cos^2(\frac{\pi}{8}+\mu_2)}{4}\\+C_1\frac{\sin^2(\frac{\pi}{8})(\sin\mu_1+1)^2+\cos^2\mu_1\cos^2(\frac{\pi}{8}-\mu_2)}{4}-s(\gamma-2)\geq 0 ;\\
\end{array}
\right. \] where $C_1=s\lambda_1-\tau_1$ and $C_2=s\lambda_2-\tau_1$. With this channel, we have $$\frac{q_3(1+c)}{2}+\frac{2-\sqrt{2}}{8}(s+\gamma s-1)-s(\gamma-2)\geq0$$ and $$\frac{q_3(1-c)}{2}+\frac{2+\sqrt{2}}{8}(s+\gamma s-1)-s(\gamma-2)\geq0.$$
This gives $q_3c=\frac{\sqrt{2}}{4}(s+\gamma s-1)$ for $\gamma>2$ and $ q_3\geq \frac{-5\gamma + 2\sqrt{2} + 9}{-\gamma + 4 \sqrt{2} - 9}$. We can choose $\gamma=3$, which gives $q_3=\frac{1}{2}$, $s=\frac{3}{12-4\sqrt{2}}\approx0.4730$, and $\tau=1-3s$. Thus we obtain the following robustness bound for one-sided self-testing based on the three-setting steering inequality, \begin{align}\label{CHSHsFidelity}
F \geq s{S_{obs}}+\tau = \frac{3}{12-4\sqrt{2}}{(S_{obs}-3)}+1.
\end{align}
Although this does not reach the theoretical bound $s=\frac{1}{2(3-\sqrt{3})}$, the result is better than that of the 2-setting inequality. This shows that adding more measurement settings can help increase the robustness of one-sided self-testing.
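As a consistency check, if the three-setting inequality is normalized so that its quantum bound is $S_Q=3$ and its unsteerable bound is $\sqrt{3}$, as for three mutually unbiased measurements, then Eq.~(\ref{CHSHsFidelity}) gives $F\geq 1$ at the maximal violation $S_{obs}=3$, while the theoretical slope $s=\frac{1}{2(3-\sqrt{3})}$ is exactly that of the line through $(S_{obs},F)=(\sqrt{3},\frac{1}{2})$ and $(3,1)$, i.e., trivial fidelity $\frac{1}{2}$ at the unsteerable bound.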
\nocite{*}
\twocolumngrid
\end{document}
\begin{document}
\title[Generalized skew Pieri rules]{Quasisymmetric and noncommutative skew Pieri rules}
\author{V. Tewari} \address{Department of Mathematics, University of Washington, Seattle, WA 98105, USA} \email{\href{mailto:[email protected]}{[email protected]}} \author{S. van Willigenburg} \address{Department of Mathematics, University of British Columbia, Vancouver, BC V6T 1Z2, Canada} \email{\href{mailto:[email protected]}{[email protected]}}
\thanks{ The authors were supported in part by the National Sciences and Engineering Research Council of Canada.} \subjclass[2010]{Primary 05E05, 16T05, 16W55; Secondary 05A05, 05E10} \keywords{composition, composition poset, composition tableau, noncommutative symmetric function, quasisymmetric function, skew Pieri rule}
\begin{abstract} In this note we derive skew Pieri rules in the spirit of Assaf-McNamara for skew quasisymmetric Schur functions using the Hopf algebraic techniques of Lam-Lauve-Sottile, and recover the original rules of Assaf-McNamara as a special case. We then apply these techniques a second time to obtain skew Pieri rules for skew noncommutative Schur functions. \end{abstract}
\maketitle
\section{Introduction}\label{sec:intro} The Hopf algebra of quasisymmetric functions, $\ensuremath{\operatorname{QSym}}$, was first defined explicitly in \cite{gessel}. It is a nonsymmetric generalization of the Hopf algebra of symmetric functions, and arises in many areas such as the representation theory of the 0-Hecke algebra \cite{BBSSZ, DKLT, konig, 0-Hecke}, probability \cite{hersh-hsiao, stanley-riffle}, and is the terminal object in the category of combinatorial Hopf algebras \cite{aguiar-bergeron-sottile}. Recently a basis of $\ensuremath{\operatorname{QSym}}$, known as the basis of quasisymmetric Schur functions, was discovered \cite{QS}, which is a nonsymmetric generalization of the symmetric function basis of Schur functions. These quasisymmetric Schur functions arose from the combinatorics of Macdonald polynomials \cite{HHL}, have been used to resolve the conjecture that $\ensuremath{\operatorname{QSym}}$ over the symmetric functions has a stable basis \cite{lauve-mason}, and have initiated the dynamic research area of discovering other quasisymmetric Schur-like bases such as row-strict quasisymmetric Schur functions \cite{ferreira, mason-remmel}, Young quasisymmetric Schur functions \cite{LMvW}, dual immaculate quasisymmetric functions \cite{BBSSZ}, type $B$ quasisymmetric Schur functions \cite{jingli, oguz}, quasi-key polynomials \cite{assafsearles, searles} and quasisymmetric Grothendieck polynomials \cite{monical}. Their name was aptly chosen since these functions not only naturally refine Schur functions, but also generalize many classical Schur function properties, such as the Littlewood-Richardson rule from the classical \cite{littlewood-richardson} to the generalized \cite[Theorem 3.5]{BLvW}, the Pieri rules from the classical \cite{pieri} to the generalized \cite[Theorem 6.3]{QS} and the RSK algorithm from the classical \cite{knuth, robinson, schensted} to the generalized \cite[Procedure 3.3]{mason}.
Dual to $\ensuremath{\operatorname{QSym}}$ is the Hopf algebra of noncommutative symmetric functions, $\ensuremath{\operatorname{NSym}}$ \cite{GKLLRT}, whose basis dual to that of quasisymmetric Schur functions is the basis of noncommutative Schur functions \cite{BLvW}. By duality this basis again has a Littlewood-Richardson rule and RSK algorithm, and, due to noncommutativity, two sets of Pieri rules, one arising from multiplication on the right \cite[Theorem 9.3]{tewari} and one arising from multiplication on the left \cite[Corollary 3.8]{BLvW}. Therefore in both $\ensuremath{\operatorname{QSym}}$ and $\ensuremath{\operatorname{NSym}}$ a key question in this realm remains: Are there \emph{skew} Pieri rules for quasisymmetric and noncommutative Schur functions? In this note we give such rules that are analogous to that of their namesake Schur functions.
More precisely, the note is structured as follows. In Section~\ref{sec:comps} we review necessary notions on compositions and define operators on them. In Section~\ref{sec:QSYMNSYM} we recall $\ensuremath{\operatorname{QSym}}$ and $\ensuremath{\operatorname{NSym}}$, the bases of quasisymmetric Schur functions and noncommutative Schur functions, and their respective Pieri rules. In Section~\ref{sec:skew} we give skew Pieri rules for quasisymmetric Schur functions in Theorem~\ref{the:QSskewPieri} and recover the Pieri rules for skew shapes of Assaf and McNamara in Corollary~\ref{cor:AM}. We close with skew Pieri rules for noncommutative Schur functions in Theorem~\ref{the:NCskewPieri}.
\section{Compositions and diagrams}\label{sec:comps} A finite list of integers $\alpha = (\alpha _1, \ldots , \alpha _\ell)$ is called a \emph{weak composition} if $\alpha _1, \ldots , \alpha _\ell$ are nonnegative, is called a \emph{composition} if $\alpha _1, \ldots , \alpha _\ell$ are positive, and is called a \emph{partition} if $\alpha _1\geq \cdots \geq\alpha _\ell >0$. Note that every weak composition has an underlying composition, obtained by removing every zero, and in turn every composition has an underlying partition, obtained by reordering the list of integers into weakly decreasing order. Given $\alpha = (\alpha _1, \ldots , \alpha _\ell)$ we call the $\alpha _i$ the \emph{parts} of $\alpha$, also $\ell$ the \emph{length} of $\alpha$ denoted by $\ell(\alpha)$, and the sum of the parts of $\alpha$ the \emph{size} of $\alpha$ denoted by $|\alpha |$. The empty composition of length and size zero is denoted by $\emptyset$. If there exists $\alpha _{k+1} = \cdots = \alpha _{k+j} = i$ then we often abbreviate this to $i^j$. Also, given weak compositions $\alpha= (\alpha_1,\ldots ,\alpha_{\ell})$ and $\beta=(\beta_1,\ldots,\beta_m)$, we define the \emph{concatenation} of $\alpha$ and $\beta$, denoted by $\alpha \beta$, to be the weak composition $(\alpha_1,\ldots,\alpha_\ell,\beta_1,\ldots,\beta_m)$. We define the \emph{near-concatenation} of $\alpha$ and $\beta$, denoted by $\alpha \odot \beta$, to be the weak composition $(\alpha_1,\ldots,\alpha_\ell + \beta_1,\ldots,\beta_m)$. For example, if $\alpha=(2,1,0,3)$ and $\beta=(1,4,1)$, then $\alpha\beta=(2,1,0,3,1,4,1)$ and $\alpha\odot \beta=(2,1,0,4,4,1)$.
The \emph{composition diagram} of a weak composition $\alpha$, also denoted by $\alpha$, is the array of left-justified boxes with $\alpha _i$ boxes in row $i$ from the \emph{top}, that is, following English notation for Young diagrams of partitions. We will often think of $\alpha$ as both a weak composition and as a composition diagram simultaneously, and hence future computations such as adding/subtracting 1 from the rightmost/leftmost part equalling $i$ (as a weak composition) are synonymous with adding/removing a box from the bottommost/topmost row of length $i$ (as a composition diagram).
\begin{example}\label{ex:comps} The composition diagram of the weak composition of length 5, $\alpha=(2,0,4,3,6)$, is shown below.
$$\tableau{\ &\ \\ \\ \ &\ &\ &\ \\ \ &\ &\ \\\ &\ &\ &\ &\ &\ }$$
The composition of length 4 underlying $\alpha$ is $(2,4,3,6)$, and the partition of length 4 underlying it is $(6,4,3,2)$. They all have size 15. \end{example}
\subsection{Operators on compositions}\label{sec:ops} In this subsection we will recall four families of operators, each of which are dependent on a positive integer parameter. These families have already contributed to the theory of quasisymmetric and noncommutative Schur functions, and will continue to cement their central role as we shall see later. Although originally defined on compositions, we will define them in the natural way on weak compositions to facilitate easier proofs. The first of these operators is the box removing operator $\mathfrak{d}$, which first appeared in the Pieri rules for quasisymmetric Schur functions \cite{QS}. The second of these is the appending operator $a$. These combine to define our third operator, the jeu de taquin or jdt operator $\mathfrak{u}$. This operator is pivotal in describing jeu de taquin slides on tableaux known as semistandard reverse composition tableaux and in describing the right Pieri rules for noncommutative Schur functions \cite{tewari}. Our fourth and final operator is the box adding operator $\mathfrak{t}$ \cite{BLvW, MNtewari}, which plays the same role in the left Pieri rules for noncommutative Schur functions \cite{BLvW} as $\mathfrak{u}$ does in the aforementioned right Pieri rules. Each of these operators is defined on weak compositions for every integer $i\geq 0$. We note that $$\mathfrak{d} _0 = a_0 = \mathfrak{u} _0 = \mathfrak{t} _ 0 = {Id}$$namely the identity map, which fixes the weak composition it is acting on. With this in mind we now define the remaining operators for $i\geq 1$.
The first \emph{box removing operator} on weak compositions, $\mathfrak{d} _i$ for $i\geq 1$, is defined as follows. Let $\alpha$ be a weak composition. Then $$\mathfrak{d} _i (\alpha) = \alpha '$$where $\alpha '$ is the weak composition obtained by subtracting 1 from the rightmost part equalling $i$ in $\alpha$. If there is no such part then we define $\mathfrak{d} _i(\alpha) = 0$.
\begin{example}\label{ex:down} Let $\alpha=(2,1,2)$. Then $\mathfrak{d}_1(\alpha)=(2,0,2)$ and $\mathfrak{d}_2(\alpha)=(2,1,1)$. \end{example}
Now we will discuss two notions that will help us state our theorems in a concise way later, as well as connect our results to those in the classical theory of symmetric functions. Let $i_1 < \cdots < i_k$ be a sequence of positive integers, and let $\alpha$ be a weak composition. Consider the operator $\mathfrak{d}_{i_1}\cdots \mathfrak{d}_{i_k}$ acting on the weak composition $\alpha$, and assume that the result is a valid weak composition. Then the boxes that are removed from $\alpha$ are said to form a \emph{$k$-horizontal strip}, and we can think of the operator $\mathfrak{d}_{i_1}\cdots \mathfrak{d}_{i_k}$ as removing a $k$-horizontal strip. Similarly, given a sequence of positive integers $i_1\geq \cdots \geq i_k$, consider the operator $\mathfrak{d}_{i_1}\cdots \mathfrak{d}_{i_k}$ acting on $\alpha$ and suppose that the result is a valid weak composition. Then the boxes that are removed from $\alpha$ are said to form a \emph{$k$-vertical strip}. As before, we can think of the operator $\mathfrak{d}_{i_1}\cdots \mathfrak{d}_{i_k}$ as removing a $k$-vertical strip.
\begin{example}\label{ex:horizontal and vertical strip} Consider $\alpha=(2,5,1,3,1)$. When we compute $\mathfrak{d}_{1}\mathfrak{d}_2\mathfrak{d}_4\mathfrak{d}_5(\alpha)$, the operator $\mathfrak{d}_{1}\mathfrak{d}_2\mathfrak{d}_4\mathfrak{d}_5$ removes the $4$-horizontal strip shaded in red from $\alpha$.
$$ \ytableausetup{smalltableaux,boxsize=0.5em} \begin{ytableau} *(white) & *(red!80)\\ *(white) &*(white) &*(white) &*(red!80) &*(red!80) \\ *(white) \\ *(white) &*(white) &*(white) \\ *(red!80)\\ \end{ytableau} $$
When we compute $\mathfrak{d}_3\mathfrak{d}_2\mathfrak{d}_1\mathfrak{d}_1(\alpha)$, the operator $\mathfrak{d}_3\mathfrak{d}_2\mathfrak{d}_1\mathfrak{d}_1$ removes the $4$-vertical strip shaded in red from $\alpha$.
$$ \ytableausetup{smalltableaux,boxsize=0.5em} \begin{ytableau} *(white) & *(red!80)\\ *(white) &*(white) &*(white) &*(white) &*(white) \\ *(red!80) \\ *(white) &*(white) &*(red!80) \\ *(red!80)\\ \end{ytableau} $$
\end{example}
\begin{remark}\label{rem:horizontal and vertical strip} If we consider partitions as Young diagrams in English notation, then the above notions of horizontal and vertical strips coincide with their classical counterparts. For example, consider the operator $\mathfrak{d}_1\mathfrak{d}_2\mathfrak{d}_4\mathfrak{d}_5$ acting on the partition $(5,3,2,1,1)$, in contrast to acting on the composition $(2,5,1,3,1)$ as in Example \ref{ex:horizontal and vertical strip}. Then the $4$-horizontal strip shaded in red is removed.
$$ \ytableausetup{smalltableaux,boxsize=0.5em} \begin{ytableau} *(white) &*(white) &*(white) &*(red!80) &*(red!80) \\ *(white) &*(white) &*(white) \\ *(white) & *(red!80)\\ *(white) \\ *(red!80) \end{ytableau} $$
\end{remark}
We now define the second \emph{appending operator} on weak compositions, $a_i$ for $i\geq 1$, as follows. Let $\alpha = (\alpha _1, \ldots , \alpha _{\ell(\alpha)})$ be a weak composition. Then $$a _i (\alpha) = (\alpha _1, \ldots , \alpha _{\ell(\alpha)}, i)$$namely, the weak composition obtained by appending a part $i$ to the end of $\alpha$.
\begin{example}\label{ex:append} Let $\alpha = (2,1,3)$. Then $a_2 ((2,1,3))= (2,1,3,2)$. Meanwhile, $a_j \mathfrak{d} _2 ((3,5,1)) = 0$ for all $j\geq 0$ since $\mathfrak{d} _2 ((3,5,1)) = 0$. \end{example}
With the definitions of $a_i$ and $\mathfrak{d} _i$ we define the third \emph{jeu de taquin} or \emph{jdt operator} on weak compositions, $\mathfrak{u} _i$ for $i\geq 1$, as $$\mathfrak{u} _i = a_i \mathfrak{d}_1\mathfrak{d}_2\mathfrak{d}_3 \cdots \mathfrak{d} _{i-1}.$$
\begin{example}\label{ex:jdt} We will compute $\mathfrak{u}_4(\alpha)$ where $\alpha = (3,5,2,4,1,2)$. This corresponds to computing $a_4\mathfrak{d}_1\mathfrak{d}_2\mathfrak{d}_3(\alpha)$. Now \begin{eqnarray*} \mathfrak{d}_1\mathfrak{d}_2\mathfrak{d}_3(\alpha)&=&\mathfrak{d}_1\mathfrak{d}_2\mathfrak{d}_3((3,5,2,4,1,2))\\&=&\mathfrak{d}_1\mathfrak{d}_2((2,5,2,4,1,2))\\&=& \mathfrak{d}_1((2,5,2,4,1,1))\\&=& (2,5,2,4,1,0). \end{eqnarray*} Hence $\mathfrak{u}_4(\alpha)=(2,5,2,4,1,0,4)$. \end{example}
Let $i_1 < \cdots < i_k$ be a sequence of positive integers, and let $\alpha$ be a weak composition. Consider the operator $\mathfrak{u}_{i_k}\cdots \mathfrak{u}_{i_1}$ acting on the weak composition $\alpha$, and assume that the result is a valid weak composition. Then the boxes that are added to $\alpha$ are said to form a \emph{$k$-right horizontal strip}, and we can think of the operator $\mathfrak{u}_{i_k}\cdots \mathfrak{u}_{i_1}$ as adding a $k$-right horizontal strip. Similarly, given a sequence of positive integers $i_1\geq \cdots \geq i_k$, consider the operator $\mathfrak{u}_{i_k}\cdots \mathfrak{u}_{i_1}$ acting on $\alpha$ and suppose that the result is a valid weak composition. Then the boxes that are added to $\alpha$ are said to form a \emph{$k$-right vertical strip}. As before, we can think of the operator $\mathfrak{u}_{i_k}\cdots \mathfrak{u}_{i_1}$ as adding a $k$-right vertical strip.
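To illustrate these notions, the following small example is computed directly from the definition of the jdt operators.
\begin{example}\label{ex:rightstrips} Let $\alpha=(2,1,3)$. Since $1<3$, the operator $\mathfrak{u}_3\mathfrak{u}_1$ adds a $2$-right horizontal strip to $\alpha$. Indeed, $\mathfrak{u}_1((2,1,3))=a_1((2,1,3))=(2,1,3,1)$ and \begin{eqnarray*} \mathfrak{u}_3((2,1,3,1))&=&a_3\mathfrak{d}_1\mathfrak{d}_2((2,1,3,1))\\&=&a_3\mathfrak{d}_1((1,1,3,1))\\&=&a_3((1,1,3,0))\\&=&(1,1,3,0,3), \end{eqnarray*} so $\mathfrak{u}_3\mathfrak{u}_1((2,1,3))=(1,1,3,0,3)$. \end{example}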
Lastly, we define the fourth \emph{box adding operator} on weak compositions, $\mathfrak{t} _i$ for $i\geq 1$, as follows. Let $\alpha = (\alpha _1, \ldots , \alpha _{\ell(\alpha)})$ be a weak composition. Then $$\mathfrak{t} _1 (\alpha) = (1, \alpha _1, \ldots , \alpha _{\ell(\alpha)})$$and for $i\geq 2$ $$\mathfrak{t} _i (\alpha) = (\alpha _1, \ldots , \alpha _j + 1, \ldots ,\alpha _{\ell(\alpha)})$$where $\alpha _j$ is the leftmost part equalling $i-1$ in $\alpha$. If there is no such part, then we define $\mathfrak{t} _i (\alpha) = 0$.
\begin{example}\label{ex:boxadd} Consider the composition $\alpha=(3,2,3,1,2)$. Then $\mathfrak{t}_1(\alpha)=(1,3,2,3,1,2)$, $\mathfrak{t}_2(\alpha)=(3,2,3,2,2)$, $\mathfrak{t}_3(\alpha)=(3,3,3,1,2)$, $\mathfrak{t}_4(\alpha)=(4,2,3,1,2)$ and $\mathfrak{t}_i(\alpha)=0$ for all $i\geq 5$. \end{example}
As with the jdt operators let $i_1 < \cdots < i_k$ be a sequence of positive integers, and let $\alpha$ be a weak composition. Consider the operator $\mathfrak{t}_{i_k}\cdots \mathfrak{t}_{i_1}$ acting on the weak composition $\alpha$, and assume that the result is a valid weak composition. Then the boxes that are added to $\alpha$ are said to form a \emph{$k$-left horizontal strip}, and we can think of the operator $\mathfrak{t}_{i_k}\cdots \mathfrak{t}_{i_1}$ as adding a $k$-left horizontal strip. Likewise, given a sequence of positive integers $i_1\geq \cdots \geq i_k$, consider the operator $\mathfrak{t}_{i_k}\cdots \mathfrak{t}_{i_1}$ acting on $\alpha$ and suppose that the result is a valid weak composition. Then the boxes that are added to $\alpha$ are said to form a \emph{$k$-left vertical strip}, and we can think of the operator $\mathfrak{t}_{i_k}\cdots \mathfrak{t}_{i_1}$ as adding a $k$-left vertical strip.
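For instance, continuing Example~\ref{ex:boxadd} with $\alpha=(3,2,3,1,2)$, we have $\mathfrak{t}_4\mathfrak{t}_2(\alpha)=\mathfrak{t}_4((3,2,3,2,2))=(4,2,3,2,2)$, so $\mathfrak{t}_4\mathfrak{t}_2$ adds a $2$-left horizontal strip since $2<4$, while $\mathfrak{t}_2\mathfrak{t}_3(\alpha)=\mathfrak{t}_2((3,3,3,1,2))=(3,3,3,2,2)$, so $\mathfrak{t}_2\mathfrak{t}_3$ adds a $2$-left vertical strip since $3\geq 2$.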
The box adding operator is also needed to define the composition poset \cite[Definition 2.3]{BLvW}, which in turn will be needed to define skew quasisymmetric Schur functions in the next section.
\begin{definition}\label{def:RcLc} The \emph{composition poset}, denoted by $\mathcal{L}_{c}$, is the poset consisting of the set of all compositions equipped with cover relation $\lessdot _{c}$ such that for compositions $\alpha, \beta$ $$\beta \lessdot _{c} \alpha \mbox{ if and only if } \alpha = \mathfrak{t} _i (\beta)$$for some $i\geq1$. \end{definition}
The order relation $< _{c}$ in $\mathcal{L}_{c}$ is obtained by taking the transitive closure of the cover relation $\lessdot _{c}$.
\begin{example}\label{ex:boxaddLc} We have that $(3,2,3,1,2) \lessdot _{c} (4,2,3,1,2)$ by Example~\ref{ex:boxadd}. \end{example}
\section{Quasisymmetric and noncommutative symmetric functions}\label{sec:QSYMNSYM} We now recall the basics of graded Hopf algebras before focussing on the graded Hopf algebra of quasisymmetric functions \cite{gessel} and its dual, the graded Hopf algebra of noncommutative symmetric functions \cite{GKLLRT}. We say that $\mathcal{H}$ and $\mathcal{H}^*$ form a pair of dual graded Hopf algebras each over a field $K$ if there exists a duality pairing $\langle \ ,\ \rangle : \mathcal{H}\otimes \mathcal{H}^{*} \longrightarrow K$, for which the structure of $\mathcal{H}^*$ is dual to $\mathcal{H}$ that respects the grading, and vice versa. More precisely, the duality pairing pairs the elements of any basis $\{B_i\}_{i\in I}$ of the graded piece $\mathcal{H}^N$ for some index set $I$, and the elements of its dual basis $\{D_i\}_{i\in I}$ of the graded piece $(\mathcal{H}^N)^*$, given by $\langle B_i, D_j\rangle = \delta_{ij}$, where the \emph{Kronecker delta}\index{Kronecker delta} $\delta_{ij} = 1$ if $i=j$ and 0 otherwise. Duality is exhibited in that the product coefficients of one basis are the coproduct coefficients of its dual basis and vice versa, that is, \begin{eqnarray*} B_i \cdot B_j = \sum_k b^k_{i,j} B_k &\qquad \Longleftrightarrow\qquad & \Delta D_k = \sum_{i,j} b^k_{i,j} D_i \otimes D_j \\ D_i \cdot D_j = \sum_k d^k_{i,j} D_k &\qquad \Longleftrightarrow\qquad & \Delta B_k = \sum_{i,j} d^k_{i,j} B_i \otimes B_j \end{eqnarray*}where $\cdot$ denotes \emph{product} and $\Delta$ denotes \emph{coproduct}. Graded Hopf algebras also have an \emph{antipode} $S: \mathcal{H}\longrightarrow\mathcal{H}$, whose general definition we will not need. Instead we will state the specific antipodes, as needed, later. Lastly, before we define our specific graded Hopf algebras, we recall one Hopf algebraic lemma, which will play a key role later. For $h\in \mathcal{H}$ and $a\in \mathcal{H}^{*}$, let the following be the respective coproducts in Sweedler notation. \begin{eqnarray}\label{eq:coproductH} \Delta (h)&=& \displaystyle\sum_{h} h_{(1)}\otimes h_{(2)} \end{eqnarray} \begin{eqnarray}\label{eq:coproductHdual} \Delta (a)&=&\displaystyle\sum_{a} a_{(1)}\otimes a_{(2)} \end{eqnarray} Now define left actions of $\mathcal{H}^{*}$ on $\mathcal{H}$ and $\mathcal{H}$ on $\mathcal{H}^{*}$, both denoted by $\rightharpoonup$, as \begin{eqnarray}\label{eq:HdualactingonH} a \rightharpoonup h &=& \displaystyle\sum_{h}\langle h_{(2)},a\rangle h_{(1)}, \end{eqnarray} \begin{eqnarray}\label{eq:HactingonHdual} h\rightharpoonup a &=& \displaystyle\sum_{a} \langle h,a_{(2)}\rangle a_{(1)}, \end{eqnarray} where $a\in \mathcal{H}^{*}$, $h\in \mathcal{H}$. Then we have the following.
\begin{lemma}\cite{lam-lauve-sottile}\label{lem:magiclemma} For all $g,h \in \mathcal{H}$ and $a\in \mathcal{H}^{*}$, we have that \begin{eqnarray*} (a\rightharpoonup g)\cdot h &= & \displaystyle\sum_{h} \left( S(h_{(2)})\rightharpoonup a \right)\rightharpoonup \left(g\cdot h_{(1)}\right) \end{eqnarray*} where $S:\mathcal{H}\longrightarrow \mathcal{H}$ is the antipode. \end{lemma}
The graded Hopf algebra of quasisymmetric functions, $\ensuremath{\operatorname{QSym}}$ \cite{gessel}, is a subalgebra of $\mathbb{C} [[x_1, x_2, \ldots]]$ with a basis given by the following functions, which in turn are reliant on the natural bijection between compositions and sets, for which we first need to recall that $[i]$ for $i\geq 1$ denotes the set $\{1,2,\ldots , i\}$. Now we can state the bijection. Given a composition $\alpha = ( \alpha _1 , \ldots , \alpha _{\ell(\alpha)})$, there is a natural subset of $[|\alpha|-1]$ corresponding to it, namely,
$$\mathrm{set} (\alpha) = \{ \alpha _1 , \alpha _1 + \alpha _2, \ldots , \alpha _1+\alpha _2 + \cdots + \alpha _{\ell(\alpha)-1}\} \mbox{ and } \mathrm{set}((|\alpha|))=\emptyset.$$Conversely, given a subset $S = \{ s_1< \cdots < s_{|S|}\}\subseteq [N-1]$, there is a natural composition of size $N$ corresponding to it, namely, $$\mathrm{comp} (S) = (s_1, s_2 - s_1, \ldots , N-s_{|S|}) \mbox{ and } \mathrm{comp}(\emptyset)=(N).$$
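For instance, for $N=5$ we have $\mathrm{set}((1,2,2))=\{1,3\}\subseteq[4]$ and, conversely, $\mathrm{comp}(\{1,3\})=(1,2,2)$.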
\begin{definition}\label{def:Fbasis} Let $\alpha = (\alpha _1, \ldots , \alpha _{\ell(\alpha)})$ be a composition. Then the \emph{fundamental quasisymmetric function} $F_\alpha$ is defined to be $F_\emptyset = 1$ and
$$F_\alpha = \sum x_{i_1} \cdots x_{i_{|\alpha|}}$$where the sum is over all $|\alpha|$-tuples $(i_1, \ldots , i_{|\alpha|})$ of indices satisfying
$$i_1\leq \cdots \leq i_{|\alpha|} \mbox{ and } i_j<i_{j+1} \mbox{ if } j \in \mathrm{set}(\alpha).$$ \end{definition}
\begin{example}\label{ex:Fbasis} $F_{(1,2)} = x_1x_2^2 + x_1x_3^2 + \cdots + x_1x_2x_3 + x_1x_2x_4 + \cdots.$ \end{example}
Then $\ensuremath{\operatorname{QSym}}$ is a graded Hopf algebra $$\ensuremath{\operatorname{QSym}} = \bigoplus _{N\geq 0} \ensuremath{\operatorname{QSym}} ^N$$where
$$\ensuremath{\operatorname{QSym}} ^N = \operatorname{span} \{ F_\alpha \;|\; |\alpha| = N \}.$$The product for this basis is inherited from the product of monomials and Definition~\ref{def:Fbasis}. The coproduct \cite{gessel} is given by $\Delta(1)=1\otimes1$ and \begin{equation}\label{eq:Fcoproduct} \Delta(F_\alpha)= \sum _{\beta\gamma = \alpha \atop \mbox{ or }\beta\odot\gamma = \alpha}F_\beta \otimes F_\gamma \end{equation}and the antipode, which was discovered independently in \cite{ehrenborg-1, malvenuto-reutenauer}, is given by $S(1)=1$ and \begin{equation}\label{eq:antipode}
S(F_\alpha)= (-1)^{|\alpha|}F_{\mathrm{comp}(\mathrm{set}(\alpha)^c)}
\end{equation}where $\mathrm{set}(\alpha)^c$ is the complement of $\mathrm{set}(\alpha)$ in the set $[|\alpha| -1]$.
\begin{example}\label{ex:Fcoprodantipode} $$\Delta (F_{(1,2)})=F_{(1,2)}\otimes 1 + F_{(1,1)}\otimes F_{(1)} + F_{(1)}\otimes F_{(2)} + 1\otimes F_{(1,2)}$$and $S(F_{(1,2)})=(-1)^3 F_{(2,1)}$. \end{example}
However, this is not the only basis of $\ensuremath{\operatorname{QSym}}$ that will be useful to us. For the second basis we will need to define skew composition diagrams and then standard skew composition tableaux.
For the first of these, let $\alpha, \beta$ be two compositions such that $\beta < _{c} \alpha$. Then we define the \emph{skew composition diagram} $\alpha {/\!\!/} \beta $ to be the array of boxes that are contained in $\alpha$ but not in $\beta$. That is, the boxes that arise in the saturated chain $\beta \lessdot _{c} \cdots \lessdot _{c} \alpha$. We say the \emph{size} of $\alpha {/\!\!/} \beta$ is $|\alpha {/\!\!/} \beta | = |\alpha| - |\beta|$. Note that if $\beta=\emptyset$, then we recover the composition diagram $\alpha$.
\begin{example}\label{ex:skewshape} The skew composition diagram $(2,1,3){/\!\!/} (1)$ is drawn below with $\beta$ denoted by $\bullet$. $$\tableau{\ &\ \\ \ \\ \bullet&\ &\ \\}$$ \end{example}
We can now define standard skew composition tableaux. Given a saturated chain, $C$, in $\mathcal{L}_{c}$
$$\beta = \alpha ^0 \lessdot _{c} \alpha ^1 \lessdot _{c} \cdots \lessdot _{c} \alpha ^{|\alpha {/\!\!/} \beta|} = \alpha$$we define the \emph{standard skew composition tableau} ${\tau} _C$ of \emph{shape} $\alpha{/\!\!/} \beta$ to be the skew composition diagram $\alpha {/\!\!/} \beta$ whose boxes are filled with integers such that the number $|\alpha {/\!\!/} \beta| -i +1$ appears in the box in ${\tau} _C$ that exists in $\alpha ^i$ but not $\alpha ^{i-1}$ for $1\leq i\leq |\alpha {/\!\!/} \beta|$. If $\beta = \emptyset$, then we say that we have a \emph{standard composition tableau}. Given a standard skew composition tableau, ${\tau}$, whose shape has size $N$ we say that the \emph{descent set} of ${\tau}$ is $$\mathrm{Des} ({\tau}) = \{ i \;|\; i+1 \mbox{ appears weakly right of } i \} \subseteq [N-1]$$and the corresponding \emph{descent composition} of ${\tau}$ is $\mathrm{comp}({\tau})= \mathrm{comp}(\mathrm{Des}({\tau}))$.
\begin{example}\label{ex:skewCT} The saturated chain $$(1)\lessdot _{c} (2) \lessdot _{c} (1,2) \lessdot _{c} (1,1,2) \lessdot _{c} (1,1,3) \lessdot _{c} (2,1,3)$$gives rise to the standard skew composition tableau ${\tau}$ of shape $(2,1,3){/\!\!/} (1)$ below. $$\tableau{3 &1\\ 4 \\ \bullet&5 &2 \\}$$Note that $\mathrm{Des}({\tau}) = \{1,3, 4\}$ and hence $\mathrm{comp}({\tau}) = (1,2,1,1)$. \end{example}
With this is mind we can now define skew quasisymmetric Schur functions \cite[Proposition 3.1]{BLvW}.
\begin{definition}\label{def:QSbasis} Let $\alpha {/\!\!/} \beta$ be a skew composition diagram. Then the \emph{skew quasisymmetric Schur function} ${\mathcal{S}} _{\alpha{/\!\!/} \beta}$ is defined to be $${\mathcal{S}} _{\alpha {/\!\!/} \beta} = \sum F_{\mathrm{comp} ({\tau})}$$where the sum is over all standard skew composition tableaux ${\tau}$ of shape $\alpha{/\!\!/} \beta$. When $\beta = \emptyset$ we call ${\mathcal{S}} _\alpha$ a \emph{quasisymmetric Schur function}. \end{definition}
\begin{example}\label{ex:QSbasis} We can see that ${\mathcal{S}} _{(n)} = F_{(n)}$ and ${\mathcal{S}} _{(1^n)} = F_{(1^n)}$ and $${\mathcal{S}} _{(2,1,3){/\!\!/} (1)} = F_{(2,1,2)}+ F_{(2,2,1)} + F_{(1,2,1,1)}$$from the standard skew composition tableaux below. $$\tableau{2 &1\\ 3 \\ \bullet&5 &4 \\}\qquad \tableau{2 &1\\ 4 \\ \bullet&5 &3 \\}\qquad \tableau{3 &1\\ 4 \\ \bullet&5 &2 \\}$$ \end{example}
Moreover, the set of all quasisymmetric Schur functions forms another basis for $\ensuremath{\operatorname{QSym}}$ such that
$$\ensuremath{\operatorname{QSym}} ^N = \operatorname{span} \{ {\mathcal{S}} _\alpha \;|\; |\alpha| = N\}$$and while explicit formulas for their product and antipode are still unknown, their coproduct \cite[Definition 2.19]{BLvW} is given by \begin{eqnarray}\label{eq:coproductquasischur} \Delta({\mathcal{S}}_{\alpha})=\displaystyle\sum_{\gamma} {\mathcal{S}}_{\alpha{/\!\!/}\gamma}\otimes {\mathcal{S}}_{\gamma} \end{eqnarray}where the sum is over all compositions $\gamma$. As discussed in the introduction, quasisymmetric Schur functions have many interesting algebraic and combinatorial properties, one of the first of which to be discovered was the exhibition of Pieri rules that utilise our box removing operators \cite[Theorem 6.3]{QS}.
\begin{theorem}\emph{(Pieri rules for quasisymmetric Schur functions)}\label{the:QSPieri} Let $\alpha $ be a composition and $n$ be a positive integer. Then \begin{align*} {\mathcal{S}}_{\alpha}\cdot {\mathcal{S}}_{(n)}=\sum {\mathcal{S}}_{\alpha^+} \end{align*} where $\alpha^+$ is a composition such that $\alpha$ can be obtained by removing an $n$-horizontal strip from it.
Similarly, \begin{align*} {\mathcal{S}}_{\alpha}\cdot {\mathcal{S}}_{(1^n)}=\sum {\mathcal{S}}_{\alpha^+ } \end{align*} where $\alpha^+$ is a composition such that $\alpha$ can be obtained by removing an $n$-vertical strip from it. \end{theorem}
Dual to $\ensuremath{\operatorname{QSym}}$ is the graded Hopf algebra of noncommutative symmetric functions, $\ensuremath{\operatorname{NSym}}$, itself a subalgebra of $\mathbb{C} << x_1, x_2, \ldots >>$ with many interesting bases \cite{GKLLRT}. The one of particular interest to us is the following \cite[Section 2]{BLvW}.
\begin{definition}\label{def:NCbasis} Let $\alpha$ be a composition. Then the \emph{noncommutative Schur function} ${\mathbf{s}} _\alpha$ is the function under the duality pairing $\langle \ ,\ \rangle :\ensuremath{\operatorname{QSym}} \otimes \ensuremath{\operatorname{NSym}} \rightarrow \mathbb{C}$ that satisfies $$\langle {\mathcal{S}} _\alpha , {\mathbf{s}} _\beta \rangle = \delta _{\alpha\beta}$$where $\delta _{\alpha\beta} = 1$ if $\alpha = \beta$ and $0$ otherwise.\end{definition}
Noncommutative Schur functions also have rich and varied algebraic and combinatorial properties, including Pieri rules, although due to the noncommutative nature of $\ensuremath{\operatorname{NSym}}$ there are now Pieri rules arising both from multiplication on the right \cite[Theorem 9.3]{tewari}, and from multiplication on the left \cite[Corollary 3.8]{BLvW}. We include them both here for completeness, and for use later.
\begin{theorem}\emph{(Right Pieri rules for noncommutative Schur functions)}\label{the:RightPieri} Let $\alpha $ be a composition and $n$ be a positive integer. Then \begin{align*} {\mathbf{s}}_{\alpha }\cdot {\mathbf{s}}_{(n)}=\sum {\mathbf{s}}_{\alpha^+} \end{align*} where $\alpha^+$ is a composition such that it can be obtained by adding an $n$-right horizontal strip to $\alpha$.
Similarly, \begin{align*} {\mathbf{s}}_{\alpha }\cdot {\mathbf{s}}_{(1^n)}=\sum {\mathbf{s}}_{\alpha^+} \end{align*} where $\alpha^+$ is a composition such that it can be obtained by adding an $n$-right vertical strip to $\alpha$. \end{theorem}
\begin{theorem}\emph{(Left Pieri rules for noncommutative Schur functions)}\label{the:LeftPieri} Let $\alpha $ be a composition and $n$ be a positive integer. Then \begin{align*} {\mathbf{s}}_{(n)} \cdot {\mathbf{s}}_{\alpha } =\sum {\mathbf{s}}_{\alpha^+} \end{align*} where $\alpha^+$ is a composition such that it can be obtained by adding an $n$-left horizontal strip to $\alpha$.
Similarly, \begin{align*} {\mathbf{s}}_{(1^n)} \cdot {\mathbf{s}}_{\alpha } =\sum {\mathbf{s}}_{\alpha^+ } \end{align*} where $\alpha^+$ is a composition such that it can be obtained by adding an $n$-left vertical strip to $\alpha$.\end{theorem}
Note that since quasisymmetric and noncommutative Schur functions are indexed by compositions, \emph{if any parts of size 0 arise during computation, then they are ignored}.
\section{Generalized skew Pieri rules}\label{sec:skew} \subsection{Quasisymmetric skew Pieri rules}\label{subsec:QSymskewPieri} We now turn our attention to proving skew Pieri rules for skew quasisymmetric Schur functions. The statement of the rules is in the spirit of the Pieri rules for skew shapes of Assaf and McNamara \cite{assaf-mcnamara}, and this is no coincidence as we recover their rules as a special case in Corollary~\ref{cor:AM}. However first we prove a crucial proposition.
\begin{proposition}\label{ob:skewingisharpooning} Let $\alpha, \beta$ be compositions. Then $\mathbf{s}_{\beta} \rightharpoonup {\mathcal{S}}_{\alpha} = {\mathcal{S}}_{\alpha{/\!\!/}\beta}$. \end{proposition}
\begin{proof} Recall Equation~\eqref{eq:coproductquasischur} states that $$ \Delta({\mathcal{S}}_{\alpha})=\displaystyle\sum_{\gamma} {\mathcal{S}}_{\alpha{/\!\!/}\gamma}\otimes {\mathcal{S}}_{\gamma} $$ where the sum is over all compositions $\gamma$. Thus using Equations \eqref{eq:HdualactingonH} and \eqref{eq:coproductquasischur} we obtain \begin{eqnarray}\label{eq:harpoons} \mathbf{s}_{\beta} \rightharpoonup {\mathcal{S}}_{\alpha}= \displaystyle\sum_{\gamma} \langle {\mathcal{S}}_{\gamma},\mathbf{s}_{\beta}\rangle {\mathcal{S}}_{\alpha{/\!\!/}\gamma} \end{eqnarray} where the sum is over all compositions $\gamma$. Since by Definition~\ref{def:NCbasis}, $\langle {\mathcal{S}}_{\gamma},\mathbf{s}_{\beta}\rangle$ equals $1$ if $\beta= \gamma$ and $0$ otherwise, the claim follows. \end{proof}
\begin{remark}\label{rem:terms that equal 0 for poset reasons} The proposition above does not tell us when $\mathbf{s}_{\beta} \rightharpoonup {\mathcal{S}}_{\alpha} = {\mathcal{S}}_{\alpha{/\!\!/}\beta}$ equals $0$. However, by the definition of $\alpha {/\!\!/} \beta$ this is precisely when $\alpha$ and $\beta$ satisfy $\beta \not< _{c} \alpha$. Consequently in the theorem below the \emph{nonzero} contribution will only be from those $\alpha^+$ and $\beta^-$ that satisfy $\beta^- < _{c} \alpha^+$. As always if any parts of size 0 arise during computation, then they are ignored. \end{remark}
\begin{theorem}\label{the:QSskewPieri} Let $\alpha, \beta$ be compositions and $n$ be a positive integer. Then \begin{align*} {\mathcal{S}}_{\alpha{/\!\!/}\beta}\cdot {\mathcal{S}}_{(n)}=\sum_{i+j=n}(-1)^j{\mathcal{S}}_{\alpha^+{/\!\!/}\beta^-} \end{align*} where $\alpha^+$ is a composition such that $\alpha$ can be obtained by removing an $i$-horizontal strip from it, and $\beta^{-}$ is a composition such that it can be obtained by removing a $j$-vertical strip from $\beta$.
Similarly, \begin{align*} {\mathcal{S}}_{\alpha{/\!\!/}\beta}\cdot {\mathcal{S}}_{(1^n)}=\sum_{i+j=n}(-1)^j{\mathcal{S}}_{\alpha^+{/\!\!/}\beta^-} \end{align*} where $\alpha^+$ is a composition such that $\alpha$ can be obtained by removing an $i$-vertical strip from it, and $\beta^{-}$ is a composition such that it can be obtained by removing a $j$-horizontal strip from $\beta$. \end{theorem}
\begin{proof}
For the first part of the theorem, our aim is to calculate ${\mathcal{S}}_{\alpha{/\!\!/}\beta}\cdot {\mathcal{S}}_{(n)}$, which in light of Proposition \ref{ob:skewingisharpooning}, is the same as calculating $(\mathbf{s}_{\beta} \rightharpoonup {\mathcal{S}}_{\alpha})\cdot {\mathcal{S}}_{(n)}$.
Taking $a=\mathbf{s}_{\beta}$, $g={\mathcal{S}}_{\alpha}$ and $h={\mathcal{S}}_{(n)}$ in Lemma \ref{lem:magiclemma} gives the LHS as $(\mathbf{s}_{\beta} \rightharpoonup {\mathcal{S}}_{\alpha})\cdot {\mathcal{S}}_{(n)}$. For the RHS observe that, by Definition~\ref{def:QSbasis}, ${\mathcal{S}} _{(n)} = {F} _{(n)}$ and by Equation~\eqref{eq:Fcoproduct} we have that \begin{eqnarray}\label{eq:coproductF} \Delta({F}_{(n)})&=& \sum_{i+j=n}{F}_{(i)}\otimes {F}_{(j)}. \end{eqnarray}
Substituting these in yields \begin{eqnarray}\label{eq:firststeprhs} \displaystyle\sum_{i+j=n}(S({F}_{(j)})\rightharpoonup \mathbf{s}_{\beta})\rightharpoonup({\mathcal{S}}_{\alpha}\cdot {F}_{(i)}). \end{eqnarray}
Now, by Equation~\eqref{eq:antipode}, we have that $S({F}_{(j)})=(-1)^j{F}_{(1^j)}$. This reduces \eqref{eq:firststeprhs} to \begin{eqnarray}\label{eq:secondsteprhs} \displaystyle\sum_{i+j=n}((-1)^j{F}_{(1^j)}\rightharpoonup \mathbf{s}_{\beta})\rightharpoonup({\mathcal{S}}_{\alpha}\cdot {F}_{(i)}). \end{eqnarray}
We will first deal with the task of evaluating ${F}_{(1^j)}\rightharpoonup \mathbf{s}_{\beta}$. We need to invoke Equation \eqref{eq:HactingonHdual} and thus we need $\Delta(\mathbf{s}_{\beta})$. Assume that \begin{eqnarray} \Delta(\mathbf{s}_{\beta})=\sum_{\gamma,\delta}b_{\gamma,\delta}^{\beta}{\mathbf{s}}_{\gamma}\otimes {\mathbf{s}}_{\delta} \end{eqnarray} where the sum is over all compositions $\gamma,\delta$. Thus Equation~\eqref{eq:HactingonHdual} yields \begin{eqnarray} {F}_{(1^j)}\rightharpoonup \mathbf{s}_{\beta}&=& \displaystyle\sum_{\gamma,\delta} b_{\gamma,\delta}^{\beta}\langle {F}_{(1^j)},{\mathbf{s}}_{\delta}\rangle{\mathbf{s}}_{\gamma}. \end{eqnarray} Observing that, by Definition~\ref{def:QSbasis}, ${F}_{(1^j)}={\mathcal{S}}_{(1^j)}$ and that, by Definition~\ref{def:NCbasis}, $\langle {\mathcal{S}}_{(1^j)},{\mathbf{s}}_{\delta}\rangle$ equals $1$ if $\delta = (1^j)$ and equals $0$ otherwise, we obtain \begin{eqnarray}\label{eq:eharpooningncsstep1} {F}_{(1^j)}\rightharpoonup \mathbf{s}_{\beta} &=& \displaystyle\sum_{\gamma} b_{\gamma,(1^j)}^{\beta}{\mathbf{s}}_{\gamma}. \end{eqnarray} Since by Definition~\ref{def:NCbasis} and the duality pairing we have that $\langle {\mathcal{S}}_{\gamma}\otimes {\mathcal{S}}_{\delta},\Delta(\mathbf{s}_{\beta})\rangle=\langle {\mathcal{S}}_{\gamma}\cdot{\mathcal{S}}_{\delta},\mathbf{s}_{\beta}\rangle=b_{\gamma,\delta}^{\beta}$, we get that \begin{eqnarray} \langle {\mathcal{S}}_{\gamma}\cdot{\mathcal{S}}_{(1^j)},\mathbf{s}_{\beta}\rangle&=& b_{\gamma,(1^j)}^{\beta}. \end{eqnarray}
The Pieri rules for quasisymmetric Schur functions in Theorem~\ref{the:QSPieri} state that $b_{\gamma,(1^j)}^{\beta}$ is $1$ if there exists a weakly decreasing sequence $\ell_1\geq \ell_2\geq\cdots \geq \ell_j$ such that $\mathfrak{d}_{\ell_1}\cdots \mathfrak{d}_{\ell_j}(\beta)=\gamma$, and is $0$ otherwise. Thus this reduces Equation~\eqref{eq:eharpooningncsstep1} to \begin{eqnarray}\label{eq:eharpooningncsstep2} {F}_{(1^j)}\rightharpoonup \mathbf{s}_{\beta} &=& \displaystyle\sum_{\substack{\mathfrak{d}_{\ell_1}\cdots \mathfrak{d}_{\ell_j}(\beta)=\gamma\\\ell_1\geq\cdots \geq \ell_j}}{\mathbf{s}}_{\gamma}. \end{eqnarray}
Since ${\mathcal{S}}_{(i)}={F}_{(i)}$, by Definition~\ref{def:QSbasis}, the Pieri rules in Theorem~\ref{the:QSPieri} also imply that \begin{eqnarray}\label{eq:rowpieriruleqschur} {\mathcal{S}}_{\alpha}\cdot {F}_{(i)}&=& \displaystyle \sum_{\substack{\mathfrak{d}_{r_1}\cdots \mathfrak{d}_{r_i}(\varepsilon)=\alpha\\r_1<\cdots < r_i}}{\mathcal{S}}_{\varepsilon}. \end{eqnarray}
Using Equations~\eqref{eq:eharpooningncsstep2} and \eqref{eq:rowpieriruleqschur} in \eqref{eq:secondsteprhs}, we get \begin{eqnarray} \displaystyle\sum_{i+j=n}((-1)^j{F}_{(1^j)}\rightharpoonup \mathbf{s}_{\beta})\rightharpoonup({\mathcal{S}}_{\alpha}\cdot {F}_{(i)})&=& \displaystyle\sum_{i+j=n}\left((-1)^j\displaystyle\sum_{\substack{\mathfrak{d}_{\ell_1}\cdots \mathfrak{d}_{\ell_j}(\beta)=\gamma\\\ell_1\geq\cdots \geq \ell_j}}{\mathbf{s}}_{\gamma}\right)\rightharpoonup \left( \sum_{\substack{\mathfrak{d}_{r_1}\cdots \mathfrak{d}_{r_i}(\varepsilon)=\alpha\\r_1<\cdots < r_i}}{\mathcal{S}}_{\varepsilon} \right). \nonumber\\ \end{eqnarray} Using Proposition \ref{ob:skewingisharpooning}, we obtain that \begin{eqnarray}\label{eq:penultimateskewpieri} \displaystyle\sum_{i+j=n}((-1)^j{F}_{(1^j)}\rightharpoonup \mathbf{s}_{\beta})\rightharpoonup({\mathcal{S}}_{\alpha}\cdot {F}_{(i)})&=&\displaystyle\sum_{i+j=n}\left(\sum_{\substack{\mathfrak{d}_{\ell_1}\cdots \mathfrak{d}_{\ell_j}(\beta)=\gamma\\\ell_1\geq\cdots \geq \ell_j\\\mathfrak{d}_{r_1}\cdots \mathfrak{d}_{r_i}(\varepsilon)=\alpha\\r_1<\cdots < r_i}}(-1)^j{\mathcal{S}}_{\varepsilon{/\!\!/}\gamma}\right). \end{eqnarray}
Thus \begin{eqnarray}\label{eq:skewpierirule-row} {\mathcal{S}}_{\alpha{/\!\!/}\beta}\cdot {\mathcal{S}}_{(n)}&=&\displaystyle\sum_{i+j=n}\left(\sum_{\substack{\mathfrak{d}_{\ell_1}\cdots \mathfrak{d}_{\ell_j}(\beta)=\gamma\\\ell_1\geq\cdots \geq \ell_j\\\mathfrak{d}_{r_1}\cdots \mathfrak{d}_{r_i}(\varepsilon)=\alpha\\r_1<\cdots < r_i}}(-1)^j{\mathcal{S}}_{\varepsilon{/\!\!/}\gamma}\right). \end{eqnarray} The first part of the theorem now follows from the definitions of $i$-horizontal strip and $j$-vertical strip.
For the second part of the theorem we use the same method as the first part, but this time calculate $$(\mathbf{s}_{\beta} \rightharpoonup {\mathcal{S}}_{\alpha})\cdot {\mathcal{S}}_{(1^n)}.$$ \end{proof}
\begin{remark}\label{rem:noomega} Notice that as opposed to the classical case where one can apply the $\omega$ involution to obtain the corresponding Pieri rule, we can not do this here. This is because the image of the skew quasisymmetric Schur functions under the $\omega$ involution is not yet known explicitly. Notice that the $\omega$ map applied to quasisymmetric Schur functions results in the row-strict quasisymmetric Schur functions of Mason and Remmel \cite{mason-remmel}. \end{remark}
\begin{example}\label{ex:skewQSPierirow} Let us compute ${\mathcal{S}}_{(1,3,2){/\!\!/} (2,1)}\cdot {\mathcal{S}}_{(2)}$. We first need to compute all compositions $\gamma$ that can be obtained by removing a vertical strip of size at most 2 from $\beta=(2,1)$. These compositions correspond to the white boxes in the diagrams below, while the boxes in the darker shade of red correspond to the vertical strips that are removed from $\beta$.
$$ \ytableausetup{smalltableaux,boxsize=0.5em} \begin{ytableau} *(white) & *(white)\\ *(white) \end{ytableau} \hspace{5mm} \begin{ytableau} *(white) & *(white)\\ *(red!80) \end{ytableau} \hspace{5mm} \begin{ytableau} *(white) & *(red!80)\\ *(white) \end{ytableau} \hspace{5mm} \begin{ytableau} *(white) & *(red!80)\\ *(red!80) \end{ytableau} $$
Next we need to compute all compositions $\varepsilon$ such that a horizontal strip of size at most $2$ can be removed from it so as to obtain $\alpha$. We list these $\varepsilon$s below with the boxes in the lighter shade of green corresponding to horizontal strips that need to be removed to obtain $\alpha$.
$$ \ytableausetup{smalltableaux,boxsize=0.5em} \begin{ytableau} *(white)\\ *(white) & *(white) & *(white)\\ *(white) & *(white) \end{ytableau} \hspace{5mm} \begin{ytableau} *(white)\\ *(white) & *(white) & *(white)\\ *(white) & *(white)\\ *(green!70)\\ \end{ytableau} \hspace{5mm} \begin{ytableau} *(white)\\ *(white) & *(white) & *(white)\\ *(green!70)\\ *(white) & *(white) \end{ytableau} \hspace{5mm} \begin{ytableau} *(white)\\ *(green!70)\\ *(white) & *(white) & *(white)\\ *(white) & *(white) \end{ytableau} \hspace{5mm} \begin{ytableau} *(white)\\ *(white) & *(white) & *(white)\\ *(white) & *(white) & *(green!70)\\ \end{ytableau} \hspace{5mm} \begin{ytableau} *(white)\\ *(white) & *(white) & *(white) &*(green!70)\\ *(white) & *(white) \end{ytableau} \hspace{5mm} \begin{ytableau} *(white)\\ *(white) & *(white) & *(white) \\ *(green!70)\\ *(white) & *(white) & *(green!70) \end{ytableau} \hspace{5mm} \begin{ytableau} *(white)\\ *(white) & *(white) & *(white) & *(green!70)\\ *(green!70)\\ *(white) & *(white) \end{ytableau} \hspace{5mm} \begin{ytableau} *(white)\\ *(green!70)\\ *(white) & *(white) & *(white)\\ *(white) & *(white) & *(green!70)\\ \end{ytableau} \hspace{5mm} \begin{ytableau} *(white)\\ *(green!70)\\ *(white) & *(white) & *(white) & *(green!70)\\ *(white) & *(white) \end{ytableau} \hspace{5mm} \begin{ytableau} *(white)\\ *(white) & *(white) & *(white) \\ *(white) & *(white) \\ *(green!70) & *(green!70)\\ \end{ytableau} \hspace{5mm} \begin{ytableau} *(white)\\ *(white) & *(white) & *(white) \\ *(white) & *(white) & *(green!70)\\ *(green!70) \\ \end{ytableau} \hspace{5mm} \begin{ytableau} *(white)\\ *(white) & *(white) & *(white) & *(green!70) \\ *(white) & *(white) \\ *(green!70) \\ \end{ytableau} \hspace{5mm} $$ $$ \ytableausetup{smalltableaux,boxsize=0.5em} \begin{ytableau} *(white)\\ *(white) & *(white) & *(white) \\ *(white) & *(white) & *(green!70) & *(green!70)\\ \end{ytableau} \hspace{5mm} \begin{ytableau} *(white)\\ *(white) & *(white) & *(white) & *(green!70) \\ *(white) & *(white) & *(green!70)\\ \end{ytableau} \hspace{5mm} \begin{ytableau} *(white)\\ *(white) & *(white) & *(white) & *(green!70) & *(green!70)\\ *(white) & *(white) \\ \end{ytableau} $$
Now to compute ${\mathcal{S}}_{(1,3,2){/\!\!/} (2,1)}\cdot {\mathcal{S}}_{(2)}$, our result tells us that for every pair of compositions in the above lists $(\varepsilon,\gamma)$ such that (the number of green boxes in $\varepsilon$)+(the number of red boxes in $\gamma$)=2, and $\gamma < _{c} \varepsilon$ we have a term ${\mathcal{S}}_{\varepsilon {/\!\!/} \gamma}$ with a sign $(-1)^{\text{number of red boxes}}$. Hence we have the following expansion, suppressing commas and parentheses in compositions for ease of comprehension. \begin{align*} {\mathcal{S}}_{132{/\!\!/} 21}\cdot {\mathcal{S}}_{2}=&{\mathcal{S}}_{132{/\!\!/} 1}-{\mathcal{S}}_{1321{/\!\!/} 11}-{\mathcal{S}}_{1312{/\!\!/} 2}-{\mathcal{S}}_{1132\cskew2}-{\mathcal{S}}_{1132{/\!\!/} 11}-{\mathcal{S}}_{133{/\!\!/} 2}-{\mathcal{S}}_{133{/\!\!/} 11}\\&-{\mathcal{S}}_{142{/\!\!/} 2}-{\mathcal{S}}_{142{/\!\!/} 11}+{\mathcal{S}}_{1133{/\!\!/} 21}+{\mathcal{S}}_{1142{/\!\!/} 21}+{\mathcal{S}}_{1322{/\!\!/} 21}+{\mathcal{S}}_{1331{/\!\!/} 21}\\&+{\mathcal{S}}_{1421{/\!\!/} 21}+{\mathcal{S}}_{143{/\!\!/} 21}+{\mathcal{S}}_{152{/\!\!/} 21} \end{align*} \end{example}
\begin{example}\label{ex:skewQSPiericol} Let us compute ${\mathcal{S}}_{(1,3,2){/\!\!/} (2,1)}\cdot {\mathcal{S}}_{(1,1)}$. We first need to compute all compositions $\gamma$ that can be obtained by removing a horizontal strip of size at most 2 from $\beta=(2,1)$. These compositions correspond to the white boxes in the diagrams below, while the boxes in the darker shade of red correspond to the horizontal strips that are removed from $\beta$.
$$ \ytableausetup{smalltableaux,boxsize=0.5em} \begin{ytableau} *(white) & *(white)\\ *(white) \end{ytableau} \hspace{5mm} \begin{ytableau} *(white) & *(white)\\ *(red!80) \end{ytableau} \hspace{5mm} \begin{ytableau} *(white) & *(red!80)\\ *(white) \end{ytableau} \hspace{5mm} \begin{ytableau} *(white) & *(red!80)\\ *(red!80) \end{ytableau} $$
Next we need to compute all compositions $\varepsilon$ such that a vertical strip of size at most $2$ can be removed from it so as to obtain $\alpha$. We list these $\varepsilon$s below with the boxes in the lighter shade of green corresponding to vertical strips that need to be removed to obtain $\alpha$.
$$ \ytableausetup{smalltableaux,boxsize=0.5em} \begin{ytableau} *(white)\\ *(white) & *(white) & *(white)\\ *(white) & *(white) \end{ytableau} \hspace{5mm} \begin{ytableau} *(white)\\ *(white) & *(white) & *(white)\\ *(white) & *(white)\\ *(green!70)\\ \end{ytableau} \hspace{5mm} \begin{ytableau} *(white)\\ *(white) & *(white) & *(white)\\ *(green!70)\\ *(white) & *(white) \end{ytableau} \hspace{5mm} \begin{ytableau} *(white)\\ *(green!70)\\ *(white) & *(white) & *(white)\\ *(white) & *(white) \end{ytableau} \hspace{5mm} \begin{ytableau} *(white)\\ *(white) & *(white) & *(white)\\ *(white) & *(white) & *(green!70)\\ \end{ytableau} \hspace{5mm} \begin{ytableau} *(white)\\ *(white) & *(white) & *(white) &*(green!70)\\ *(white) & *(white) \end{ytableau} \hspace{5mm} \begin{ytableau} *(white)\\ *(white) & *(white) & *(white)\\ *(white) & *(white) \\ *(green!70)\\ *(green!70)\\ \end{ytableau} \hspace{5mm} \begin{ytableau} *(white)\\ *(white) & *(white) & *(white)\\ *(green!70)\\ *(white) & *(white) \\ *(green!70)\\ \end{ytableau} \hspace{5mm} \begin{ytableau} *(white)\\ *(green!70)\\ *(white) & *(white) & *(white)\\ *(white) & *(white) \\ *(green!70)\\ \end{ytableau} \hspace{5mm} \begin{ytableau} *(white)\\ *(white) & *(white) & *(white)\\ *(green!70)\\ *(green!70)\\ *(white) & *(white) \\ \end{ytableau} \hspace{5mm} \begin{ytableau} *(white)\\ *(green!70)\\ *(white) & *(white) & *(white)\\ *(green!70)\\ *(white) & *(white) \\ \end{ytableau} \hspace{5mm} \begin{ytableau} *(white)\\ *(green!70)\\ *(green!70)\\ *(white) & *(white) & *(white)\\ *(white) & *(white) \\ \end{ytableau} \hspace{5mm} \begin{ytableau} *(white)\\ *(white) & *(white) & *(white) \\ *(white) & *(white) & *(green!70) \\ *(green!70) \\ \end{ytableau} \hspace{5mm} $$ $$ \ytableausetup{smalltableaux,boxsize=0.5em} \begin{ytableau} *(white)\\ *(white) & *(white) & *(white) \\ *(green!70)\\ *(white) & *(white) & *(green!70)\\ \end{ytableau} \hspace{5mm} \begin{ytableau} *(white)\\ *(green!70)\\ *(white) & *(white) & *(white) \\ *(white) & *(white) & *(green!70) \\ \end{ytableau} \hspace{5mm} \begin{ytableau} *(white) & *(green!70)\\ *(white) & *(white) & *(white) \\ *(white) & *(white) & *(green!70)\\ \end{ytableau} \hspace{5mm} \begin{ytableau} *(white) \\ *(white) & *(white) & *(white) & *(green!70)\\ *(white) & *(white) \\ *(green!70)\\ \end{ytableau} \hspace{5mm} \begin{ytableau} *(white) \\ *(white) & *(white) & *(white) & *(green!70)\\ *(green!70)\\ *(white) & *(white) \\ \end{ytableau} \hspace{5mm} \begin{ytableau} *(white) \\ *(green!70)\\ *(white) & *(white) & *(white) & *(green!70)\\ *(white) & *(white) \\ \end{ytableau} \hspace{5mm} \begin{ytableau} *(white) \\ *(white) & *(white) & *(white) & *(green!70) \\ *(white) & *(white) & *(green!70)\\ \end{ytableau} $$
Now to compute ${\mathcal{S}}_{(1,3,2){/\!\!/} (2,1)}\cdot {\mathcal{S}}_{(1,1)}$, our result tells us that for every pair of compositions in the above lists $(\varepsilon,\gamma)$ such that (the number of green boxes in $\varepsilon$)+(the number of red boxes in $\gamma$)=2 and $\gamma < _{c} \varepsilon$, we have a term ${\mathcal{S}}_{\varepsilon{/\!\!/} \gamma}$ with a sign $(-1)^{\text{number of red boxes}}$. Hence we have the following expansion, suppressing commas and parentheses in compositions for ease of comprehension. \begin{align*} {\mathcal{S}}_{132{/\!\!/} 21}\cdot {\mathcal{S}}_{11}=&{\mathcal{S}}_{132{/\!\!/} 1}-{\mathcal{S}}_{1321{/\!\!/} 11}-{\mathcal{S}}_{1312{/\!\!/} 2}-{\mathcal{S}}_{1132\cskew2}-{\mathcal{S}}_{1132{/\!\!/} 11}-{\mathcal{S}}_{133{/\!\!/} 2}-{\mathcal{S}}_{133{/\!\!/} 11}\\&-{\mathcal{S}}_{142{/\!\!/} 2}-{\mathcal{S}}_{142{/\!\!/} 11}+{\mathcal{S}}_{13121{/\!\!/} 21}+{\mathcal{S}}_{11321{/\!\!/} 21}+{\mathcal{S}}_{11132{/\!\!/} 21}+{\mathcal{S}}_{1331{/\!\!/} 21}\\&+{\mathcal{S}}_{1133{/\!\!/} 21}+{\mathcal{S}}_{233{/\!\!/} 21}+{\mathcal{S}}_{1421{/\!\!/} 21}+{\mathcal{S}}_{1142{/\!\!/} 21}+{\mathcal{S}}_{143{/\!\!/} 21} \end{align*} \end{example}
We now turn our attention to skew Schur functions, which we will define in the next paragraph, after we first discuss some motivation for our attention. Skew Schur functions can be written as a sum of skew quasisymmetric Schur functions \cite[Lemma 2.23]{SSQSS}, so one might ask whether we can recover the Pieri rules for skew shapes of Assaf and McNamara by expanding a skew Schur function as a sum of skew quasisymmetric Schur functions, applying our quasisymmetric skew Pieri rules and then collecting suitable terms. However, a much simpler proof exists.
A skew Schur function $s _{\lambda/\mu}$ for partitions $\lambda, \mu$ where $\ell(\lambda)\geq\ell(\mu)$, can be defined, given an $M>\ell(\lambda)$, by \cite[Section 5.1]{BLvW} \begin{equation}\label{eq:skewSchur}s _{\lambda/\mu}={\mathcal{S}} _{\lambda + 1^{M} {/\!\!/} \mu + 1^{M}}\end{equation}where $\lambda + 1^{M} = (\lambda _1 + 1, \ldots, \lambda_{\ell(\lambda)} +1, 1^{M-\ell(\lambda)})$, and $\mu + 1^{M} = (\mu _1 + 1, \ldots, \mu_{\ell(\mu)} +1, 1^{M -\ell(\mu)})$. It follows immediately that $s_{(n)}=s_{(n)/\emptyset}={\mathcal{S}} _{(n)}$ and $s_{(1^n)}=s_{(1^n)/\emptyset}={\mathcal{S}} _{(1^n)}$ by Equation~\eqref{eq:skewSchur}. Then as a corollary of our skew Pieri rules we recover the skew Pieri rules of Assaf and McNamara as follows.
\begin{corollary}\cite[Theorem 3.2]{assaf-mcnamara}\label{cor:AM} Let $\lambda, \mu$ be partitions where $\ell(\lambda)\geq\ell(\mu)$ and $n$ be a positive integer. Then $$s_{\lambda/\mu}\cdot s_{(n)} = \sum _{i+j = n} (-1) ^j s _{\lambda^+/\mu ^-}$$ where $\lambda^+$ is a partition such that the boxes of $\lambda ^+$ not in $\lambda$ are $i$ boxes such that no two lie in the same column, and $\mu ^-$ is a partition such that the boxes of $\mu$ not in $\mu ^-$ are $j$ boxes such that no two lie in the same row.
Similarly, $$s_{\lambda/\mu}\cdot s_{(1^n)} = \sum _{i+j = n} (-1) ^j s _{\lambda^+/\mu ^-}$$ where $\lambda^+$ is a partition such that the boxes of $\lambda ^+$ not in $\lambda$ are $i$ boxes such that no two lie in the same row, and $\mu ^-$ is a partition such that the boxes of $\mu$ not in $\mu ^-$ are $j$ boxes such that no two lie in the same column. \end{corollary}
\begin{proof} Let $N>\ell(\lambda)+n+1$. Then consider the product ${\mathcal{S}} _{\lambda + 1^N {/\!\!/} \mu + 1^N} \cdot {\mathcal{S}} _{(n)}$ (respectively, ${\mathcal{S}} _{\lambda + 1^N {/\!\!/} \mu + 1^N} \cdot {\mathcal{S}} _{(1^n)}$) where $\lambda, \mu$ are partitions and $\ell(\lambda)\geq\ell(\mu)$. By the paragraph preceding the corollary, this is equivalent to what we are trying to compute.
To begin, we claim that if $$\mathfrak{d} _1\mathfrak{d} _{r_2} \cdots \mathfrak{d} _{r_i} (\alpha ')= \lambda + 1^N$$where $1<r_2<\cdots<r_i$ (respectively, $\mathfrak{d} _{r _1} \cdots \mathfrak{d} _{r _{i-q}} \mathfrak{d} _1^q (\alpha ')= \lambda + 1^N$ where $q\geq 0$ and $r _1 \geq \cdots \geq r _{i-q} >1$) and $$\mathfrak{d} _{\ell _1} \cdots \mathfrak{d} _{\ell _{j-p}} \mathfrak{d} _1^p (\mu+ 1^N) = \beta '$$ where $p\geq 0$ and $\ell _1 \geq \cdots \geq \ell _{j-p} >1$ (respectively, $\mathfrak{d} _1\mathfrak{d} _{\ell_2} \cdots \mathfrak{d} _{\ell_j} (\mu+ 1^N) = \beta '$ where $1<\ell_2<\cdots< \ell_j$) and $$\mathfrak{d} _{r_2} \cdots \mathfrak{d} _{r_i} (\alpha '')= \lambda + 1^N$$(respectively, $\mathfrak{d} _{r _1} \cdots \mathfrak{d} _{r _{i-q}} \mathfrak{d} _1^{q+1}(\alpha '')= \lambda + 1^N$) and $$\mathfrak{d} _{\ell _1} \cdots \mathfrak{d} _{\ell _{j-p}} \mathfrak{d} _1^{p+1} (\mu+ 1^N) = \beta ''$$(respectively, $\mathfrak{d} _{\ell_2} \cdots \mathfrak{d} _{\ell_j}(\mu+ 1^N) = \beta ''$) then $\beta ' < _{c} \alpha ' $ if and only if $\beta '' < _{c} \alpha ''$. This follows from three facts. Firstly $\ell(\lambda) \geq \ell(\mu)$, secondly $\alpha ' $ and $\alpha ''$ only differ by $\mathfrak{d} _1$, and thirdly $\beta '$ and $\beta ''$ only differ by $\mathfrak{d} _1$. Moreover, ${\alpha ' {/\!\!/} \beta '}= {\alpha '' {/\!\!/} \beta ''}$. Furthermore, by our skew Pieri rules in Theorem~\ref{the:QSskewPieri}, the summands ${\mathcal{S}} _{\alpha ' {/\!\!/} \beta '}$ and ${\mathcal{S}}_{\alpha '' {/\!\!/} \beta ''}$ will be of opposite sign, and thus will cancel since ${\alpha ' {/\!\!/} \beta '}= {\alpha '' {/\!\!/} \beta ''}$. Consequently, any nonzero summand appearing in the product ${\mathcal{S}} _{\lambda + 1^N {/\!\!/} \mu + 1^N} \cdot {\mathcal{S}} _{(n)}$ (respectively, ${\mathcal{S}} _{\lambda + 1^N {/\!\!/} \mu + 1^N} \cdot {\mathcal{S}} _{(1^n)}$) is such that no box can be removed from the first column of $(\lambda + 1^N)^+$ to obtain $\lambda + 1^N$, nor from the first column of $\mu+ 1^N$ to obtain $(\mu+ 1^N)^-$.
Next observe that we can obtain $\lambda + 1^N$ by removing an $i$-horizontal (respectively, $i$-vertical) strip not containing a box in the first column from $(\lambda + 1^N)^+$ if and only if $(\lambda + 1^N)^+ = \lambda ^+ + 1^N$ where $\lambda ^+$ is a partition such that the boxes of $\lambda ^+$ not in $\lambda$ are $i$ boxes such that no two lie in the same column (respectively, row).
Similarly, we can obtain $(\mu+ 1^N)^-$ by removing a $j$-vertical (respectively, $j$-horizontal) strip not containing a box in the first column from $\mu+ 1^N$ if and only if $(\mu+ 1^N)^- = \mu^-+ 1^N$ where $\mu ^-$ is a partition such that the boxes of $\mu$ not in $\mu ^-$ are $j$ boxes such that no two lie in the same row (respectively, column). \end{proof}
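As a small illustration of Corollary~\ref{cor:AM} (an example of ours, not part of the original statement), take $\lambda = (2,1)$, $\mu = (1)$ and $n=1$. The summands with $i=1$, $j=0$ add a single box to $\lambda$, while the summand with $i=0$, $j=1$ removes the unique box of $\mu$, so $$s_{(2,1)/(1)}\cdot s_{(1)} = s_{(3,1)/(1)} + s_{(2,2)/(1)} + s_{(2,1,1)/(1)} - s_{(2,1)},$$ and expanding each skew Schur function into Schur functions confirms that both sides equal $s_{(3)} + 2s_{(2,1)} + s_{(1,1,1)}$.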
\subsection{Noncommutative skew Pieri rules}\label{subsec:NSymskewPieri} It is also natural to ask whether skew Pieri rules exist for the dual counterparts to skew quasisymmetric Schur functions and whether our methods are applicable in order to prove them. To answer this we first need to define these dual counterparts, namely skew noncommutative Schur functions.
\begin{definition}\label{def:ncsskew} Given compositions $\alpha, \beta$, the \emph{skew noncommutative Schur function} ${\mathbf{s}}_{\alpha/\beta}$ is defined implicitly via the equation \begin{eqnarray*} \Delta(\mathbf{s}_{\alpha})&=&\displaystyle\sum_{\beta}{\mathbf{s}}_{\alpha/\beta}\otimes \mathbf{s}_{\beta} \end{eqnarray*} where the sum ranges over all compositions $\beta$. \end{definition}
With this definition and using Equation~\eqref{eq:HactingonHdual} we can deduce that $${\mathcal{S}}_{\beta} \rightharpoonup \mathbf{s}_{\alpha}= {\mathbf{s}}_{\alpha/\beta}$$via a proof almost identical to that of Proposition~\ref{ob:skewingisharpooning}. We know from Definition~\ref{def:QSbasis} that ${\mathcal{S}} _{(n)} = F_{(n)}$ and ${\mathcal{S}} _{(1^n)} = F_{(1^n)}$. Combined with the product for fundamental quasisymmetric functions using Definition~\ref{def:Fbasis}, Definition~\ref{def:NCbasis}, and the duality pairing, it is straightforward to deduce that for $n\geq 1$ the coproduct on ${\mathbf{s}} _{(n)}$ and ${\mathbf{s}} _{(1^n)}$ is given by $$\Delta({\mathbf{s}} _{(n)}) = \sum _{i+j=n} {\mathbf{s}} _{(i)}\otimes {\mathbf{s}} _{(j)} \qquad\Delta({\mathbf{s}} _{(1^n)}) = \sum _{i+j=n} {\mathbf{s}} _{(1^i)}\otimes {\mathbf{s}} _{(1^j)}.$$Also the action of the antipode $S$ on ${\mathbf{s}} _{(n)}$ and ${\mathbf{s}} _{(1^n)}$ is given by $$S({\mathbf{s}} _{(j)})= (-1)^j {\mathbf{s}} _{(1^j)} \qquad S({\mathbf{s}} _{(1^j)})= (-1)^j {\mathbf{s}} _{(j)}.$$Using all the above in conjunction with the right Pieri rules for noncommutative Schur functions in Theorem~\ref{the:RightPieri} yields our concluding theorem, whose proof is analogous to the proof of Theorem~\ref{the:QSskewPieri}, and hence is omitted. As always if any parts of size 0 arise during computation, then they are ignored.
\begin{theorem}\label{the:NCskewPieri} Let $\alpha, \beta$ be compositions and $n$ be a positive integer. Then \begin{align*} {\mathbf{s}}_{\alpha/\beta}\cdot {\mathbf{s}}_{(n)}=\sum_{i+j=n}(-1)^j{\mathbf{s}}_{\alpha^+/\beta^-} \end{align*} where $\alpha^+$ is a composition such that it can be obtained by adding an $i$-right horizontal strip to $\alpha$, and $\beta^{-}$ is a composition such that $\beta$ can be obtained by adding a $j$-right vertical strip to it.
Similarly, \begin{align*} {\mathbf{s}}_{\alpha/\beta}\cdot {\mathbf{s}}_{(1^n)}=\sum_{i+j=n}(-1)^j{\mathbf{s}}_{\alpha^+/\beta^-} \end{align*} where $\alpha^+$ is a composition such that it can be obtained by adding an $i$-right vertical strip to $\alpha$, and $\beta^{-}$ is a composition such that $\beta$ can be obtained by adding a $j$-right horizontal strip to it. \end{theorem}
\end{document} |
\begin{document}
\null \nointerlineskip
\let\snewpage\newpage
\let\newpage\relax
\begin{center}
\textbf{\huge Methods for Finding Analytic Solutions for Time Dependent Two-Level Quantum Systems and Their Generalizations}
\Large Rajath Krishna R, SRFP:381
\Large Jawaharlal Nehru Centre for Advanced Scientific Research (JNCASR) \\ Bangalore, India.\\
\Large Guide: Prof. N.S. Vidhyadhiraja, \\ Theoretical Sciences Unit, JNCASR. \end{center}
\let\newpage\snewpage
\break
\begin{center} \textbf{\LARGE Acknowledgments}
\end{center}
\large This work would not have been possible without the guidance of Prof. N.S. Vidhyadhiraja, and I thank him for the same. I also thank Prof. Robert J Joynt, University of Wisconsin-Madison for his valuable suggestions and comments on my work. I thank the members of the lab in which I was working during the period of my internship for their constant help and support and also the administration department of JNCASR for the facilities and hospitality provided during that time.
\break
\begin{center} \textbf{\LARGE Abstract} \end{center}
\large Two-level systems are among the most important quantum systems and they form the basis of quantum computers. We briefly look at the traditional approach to two-level systems with an external driving field as well as to those subjected to noise. This project is aimed at studying two specific methods for obtaining analytic solutions for two-level systems. One of the methods enables us to obtain analytic solutions for driven time-dependent two-level systems, while the other attempts to give an exact solution of qubit decoherence using a transfer matrix method. A thorough study of both papers is done and their results are reproduced. The latter method is generalized to a qutrit system as well as to a two-qubit system subjected to noise. A general method is formally derived for an N-dimensional quantum system and the difficulties in applying the method to real-life systems are discussed.
\break
\section{Introduction} \large Two-level systems are the basic constituents of quantum computers, and though they are the simplest quantum systems, studying them and their interaction with the environment gives great insight into how quantum mechanical systems behave. Except for some famous special cases like the Landau-Zener model \cite{lz}, the Rabi problem \cite{r} and the Jaynes-Cummings model \cite{jc}, obtaining analytic solutions for the evolution of two-level systems is extremely difficult. Analytic solutions of such systems are important for qubit control operations, self-induced transparency and for studying decoherence.
No real system is isolated and hence it is important to consider the interaction of a quantum system with its environment. Two-level systems coupled to an environment were largely studied using the Caldeira-Leggett model for two-state systems \cite{cl}, also called the spin-boson model. Firstly, we look at a rather new method introduced by Edwin Barnes and S. Das Sarma which provides a recipe for constructing Hamiltonians that are guaranteed to admit analytic solutions. Secondly, we study a transfer matrix approach towards finding exact solutions for two-state systems coupled to an environment under certain approximations. We formally generalize the method to an N-dimensional quantum system, with special emphasis on qutrit and two-qubit systems. Also, we discuss the limitations of this method when applied to a real-life system.
\section{Driven time-dependent two level systems} \large Two-state systems which are driven by an external time-dependent field are very difficult to solve. Some of the special cases for which an analytic solution has been possible include the Landau-Zener model, the Rabi problem and the Jaynes-Cummings model. We will take a brief survey of these models. \subsection{Landau-Zener Model} This model provides the probability of transition between the two quantum states coupled by an external field of constant amplitude and time dependent frequency.
\[H=\begin{bmatrix}
\Delta & \omega_R\\
\omega_R & -\Delta
\end{bmatrix}
\]
$\Delta$ is the detuning parameter and $\omega_R$ is the Rabi frequency.
We have $\omega=\omega_o+\Delta$ and we sweep over various values of $\omega$ at a rate $\dot \omega$.
\subsection{The Rabi Problem} This is the response of a two-level atom to an applied harmonic electric field, e.g.\ $\vec E(t)=\vec E_0\cos(\omega t)$.
$$H=H_0+H^{'}$$ where $H^{'}=-\vec d\cdot \vec E$ and $\vec d$ is the dipole operator.
\subsection{Jaynes-Cummings Model} This model describes a two-state system interacting with a bosonic field.
$$H=H_{field}+H_{atom}+H_{int}$$ where $$H_{field}=\hbar \omega_c \hat a^+\hat a $$
$$H_{atom}=\hbar \omega_a \sigma_z/2$$
$$H_{int}=\frac{\hbar \Omega}{2} \hat E \hat S$$
where $\hat{a}^+$ and $\hat{a}$ are bosonic creation and annihilation operators respectively. $\hat{E}=\hat{a}+\hat{a}^+$ is the field operator and $\uvec{S}=\hat{\sigma}_++\hat{\sigma}_-$ is the polarization operator.
\subsection{A recipe for finding Hamiltonians with guaranteed analytic solutions} In 2012, Edwin Barnes and S. Das Sarma \cite{as} developed a completely new theoretical approach towards obtaining analytic solutions for driven two-state systems. This new method gives an unbounded set of analytically solvable two-level systems driven by a single-axis field. It is shown that the driving field and the evolution operator of such a system can be obtained from a single real function satisfying some conditions.
The Hamiltonian in consideration has the following form: $$H=\frac{J(t)}{2}\sigma_z + \frac{h}{2}\sigma_x$$ which describes any two level system driven along a single-axis.
Transforming to the rotating x-basis we have, $$\ket{+}(t)=e^{-iht}\ket{+} \quad \text{and} \quad \ket{-}(t)=e^{iht}\ket{-}$$
Also, we have the state vector in the rotating basis, $$\ket{\Psi(t)}=d_+(t)\ket{+}(t)+d_-(t)\ket{-}(t)$$
Plugging into the time-dependent Schr\"odinger equation and simplifying we get, $$\dot d_{\pm}(t)=-i\frac{J(t)}{2}e^{\pm iht}d_{\mp}(t)$$
We have the unitary operator in which the elements satisfy:
\[U=\begin{bmatrix}
u_{11} & -u_{21}^*\\
u_{21} & u_{11}^*
\end{bmatrix} \]
We have the unitary evolution (in the z-basis) of the wavefunction:
$$\ket{\Psi(t)}=U(t)\ket{\Psi(0)} =c_1(0)U(t)\ket{\uparrow}+c_2(0)U(t)\ket{\downarrow} \quad$$
Transforming to the x-basis and then transforming to the rotating x-basis we get, $$\ket{\Psi(t)}=[D_+(t)c_1(0)+D_-^*(t)c_2(0)]\ket{+}(t) +[D_-(t)c_1(0)-D_+^*(t)c_2(0)]\ket{-}(t)$$
where $$D_\pm=\frac{1}{\sqrt{2}}e^{\pm iht/2}(u_{11}\pm u_{21}).$$
Comparing the two equations obtained for $\ket{\Psi(t)}$ we get,
$$\dot D_\pm=-i\frac{J(t)}{2}e^{\pm iht}D_\mp$$
The two coupled first-order differential equations can be combined to give one second order differential equation.
$$\ddot D_+ + (-ih-\dot J/J)\dot D_++(J^2/4)D_+=0$$ \\
A reverse engineering approach is adopted and the differential equation is solved for $J(t)$ instead of $D_+$ which gives: $$J(t)=\pm \frac{\dot D_+e^{-iht}}{\sqrt{c-\frac{1}{4}D_+^2e^{-2iht}-\frac{ih}{2}\int_{0}^{t} dt e^{-2iht}D_+^2(t)}}$$and back substitution gives: $$D_-=\pm 2i \sqrt{c-\frac{1}{4}D_+^2e^{-2iht}-\frac{ih}{2}\int_{0}^{t}e^{-2iht}D_+^2(t) dt}$$
We take the following ansatz, which preserves unitarity: $$D_+=e^{i(F-K+ht)}\cos(\Phi)$$
$$D_-=e^{-iK}\sin(\Phi)$$ and the following relations are obtained:
$$\sin(2\Phi)=\sec(F)\,e^{h\int_{0}^{t} \tan(F)\,dt'}$$ and, $$J(t)=2\dot K\sec(F)\tan(\Phi)$$
We will take the initial condition to be $D_+(0)=D_-(0)=\frac{1}{\sqrt{2}}$ which in turn gives $\Phi(0)=\frac{\pi}{4}$, $F(0)=K(0)=0.$
\\ Taking F to be of the form $F=\arctan(\frac{\dot q}{hq})$ gives $$\sin(2\Phi)=\sqrt{q^2+\frac{\dot q^2}{h^2}}$$ and
$$J=\frac{\ddot q+ h^2q}{\sqrt{h^2(1-q^2)-\dot q^2}}$$
The initial conditions on $F$, $K$ and $\Phi$ translate to $$q(0)=1, \quad \dot q(0)=0 \quad \ddot q(0)=-h^2$$
Also, we have the constraint on the function q given by, $$\dot q^2 \leq h^2(1-q^2)$$
Any function satisfying the above initial conditions and constraint is guaranteed to produce an analytical solution for the evolution of the system.
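As a quick sanity check (our own illustrative sketch, not taken from \cite{as}), the candidate $q(t)=1/\sqrt{1+h^{2}t^{2}}$ satisfies $q(0)=1$, $\dot q(0)=0$, $\ddot q(0)=-h^{2}$ and $\dot q^{2}\leq h^{2}(1-q^{2})$, and the resulting driving field $J(t)$ can be evaluated numerically (assuming NumPy; all parameter values below are arbitrary choices):
\begin{verbatim}
import numpy as np

# Illustrative sketch (ours, not from the cited paper): the candidate
# q(t) = 1/sqrt(1 + (h t)^2) obeys q(0)=1, q'(0)=0, q''(0)=-h^2 and
# q'^2 <= h^2 (1 - q^2), so it yields an analytically solvable driving J(t).
h = 1.0
t = np.linspace(0.05, 10.0, 2000)   # small offset avoids the 0/0 limit at t=0

u = (h * t) ** 2
q = 1.0 / np.sqrt(1.0 + u)
qd = -h**2 * t / (1.0 + u) ** 1.5                                       # q'(t)
qdd = -h**2 / (1.0 + u) ** 1.5 + 3.0 * h**4 * t**2 / (1.0 + u) ** 2.5   # q''(t)

print("constraint holds:", np.all(qd**2 <= h**2 * (1.0 - q**2) + 1e-12))

J = (qdd + h**2 * q) / np.sqrt(h**2 * (1.0 - q**2) - qd**2)
print("J near t=0 (analytic limit 2*sqrt(2)*h):", J[0])
print("J at late times (decaying):", J[-1])
\end{verbatim}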
\section{Two-level system with noise} Decoherence problems are usually treated by coupling the system of interest to an environment. Then, a master equation for the reduced density matrix is formed, which can also take care of the effect of the system on the environment. A successful model which implements the above method is called the spin-boson model.
\subsection{Spin-Boson Model} It describes a quantum particle in one dimension coupled to a bath of infinitely many harmonic oscillators. The Hamiltonian for this model has the following form \cite{w}: $$H=H_0+H_B+H_{int}$$ where $H_0$ is the free Hamiltonian of the spin-1/2 system, $$H_B=\sum_{k} \frac{p_k^2}{2m_k}+\frac{1}{2}m_k\omega_k^2x_k^2$$ and $$H_{int}=\sigma_z \sum_{k} \lambda_k x_k.$$ Here, $p_k$, $m_k$, $\omega_k$ and $x_k$ are the momentum, mass, frequency and position respectively of the $k^{th}$ harmonic oscillator.
$\lambda_k$ is the strength of coupling between oscillator and spin. The system in consideration eventually loses coherence due to this coupling.
Now, a master equation can be formulated for the reduced density matrix. This method can also take into account the ``back-action'' of the system on the environment.
\subsection{A transfer matrix approach} In certain cases this back-action is not important and we can model the environment as a source of noise. A transfer matrix method \cite{tm} can then be used to obtain exact solutions for such systems. \\
We will consider the following form of the Hamiltonian:
$$H=-B_0 \sigma_z-\vec b(t)\cdot \vec \sigma$$
where $\vec b(t)$ is a random function.
We will work in the Heisenberg picture and thus we will be concerned with the evolution of the operators $\sigma_x$, $\sigma_y$ and $\sigma_z$, which together with the identity form a complete operator basis for a two-state quantum system.
We will assume that the function $\vec b(t)$ is piecewise constant with discontinuous jumps at regular intervals of length $\tau$. $$\vec B(t)=B_0\uvec{z}+\vec b_1=\vec B_1 \quad \quad 0< t < \tau$$
$$\vec B(t)=B_0\uvec{z}+\vec b_2=\vec B_2 \quad \quad \tau< t < 2\tau \quad etc.$$
\\
Define $H_i=-\vec B_i \cdot \vec \sigma=-B_0\sigma_z-\vec b_i \cdot \vec \sigma $
and $$U_i=e^{-iH_i\tau}$$
We will also assume that there is no correlation between the different $\vec b_i$ and that each $\vec b_i$ has the same probability distribution $P(\vec b)$.
The expectation value of the operators at time $\tau$ is given by:
$$\overline{\vec \sigma(\tau)}=\overline{U_1^+\vec \sigma(0)U_1}$$
Simplifying the above expression:
\begin{eqnarray*}
\overline{\vec \sigma(\tau)} &=& \overline{\left[ E \cos(B_1\tau) - i {\hat B_1}\cdot{\vec \sigma}\,\sin(B_1\tau)\right]{\vec \sigma}(0) \left[ E \cos(B_1\tau) + i {\hat B_1}\cdot{\vec \sigma}\,\sin(B_1\tau)\right]} \\
&=& \overline{\cos^2(B_1\tau)} \;{\vec \sigma}(0) + i\, {\vec \sigma}(0) \left[\overline{\cos(B_1\tau)\sin(B_1\tau){\hat B_1} } \cdot{\vec \sigma}\right] - i \left[ \overline{\cos(B_1\tau)\sin(B_1\tau){\hat B_1} } \cdot{\vec \sigma} \right] {\vec \sigma}(0) + \overline{\sin^2(B_1\tau)\left[{\hat B_1} \cdot{\vec \sigma}\right] {\vec \sigma}(0) \left[{\hat B_1} \cdot{\vec \sigma} \right] }\\
&=& \overline{\cos^2(B_1\tau)} \;{\vec \sigma}(0) + i \sum_i{\left(\overline{\cos(B_1\tau)\sin(B_1\tau)B_{1,i} } \right) \left[ {\vec \sigma}(0)\sigma_i - \sigma_i {\vec \sigma}(0)\right]} + \sum_{i,j}{\left( \overline{\sin^2(B_1\tau)B_{1,i} B_{1,j} } \right) \sigma_i \;{\vec \sigma}(0)\; \sigma_j}
\end{eqnarray*}
Which we can write as:
$$\vec \sigma(\tau)=I_0\vec \sigma(0)+ \sum_{i} I_i[\vec \sigma(0)\sigma_i-\sigma_i\vec \sigma(0)] +\sum_{ij} I_{ij}[\sigma_i\vec \sigma(0)\sigma_j] $$ where,
$$I_0=\int P(\vec b)\cos^2(B\tau)\,d^3b $$
$$I_i=\int P(\vec b)\,\uvec{B}_i \sin(B\tau)\cos(B\tau)\,d^3b$$
$$I_{ij}=\int P(\vec b)\, \uvec{B}_i \uvec{B}_j \sin^2(B\tau)\,d^3b$$
Expanding the above summation and writing the coefficients of the Pauli matrices as a matrix gives:
$$\left[ \begin{array}{c} \overline{ \sigma_x(\tau)} \\ \overline{ \sigma_y(\tau)} \\ \overline{ \sigma_z(\tau)} \end{array} \right] = \begin{bmatrix} T_{xx} & T_{xy} & T_{xz} \\ T_{yx} & T_{yy} & T_{yz} \\ T_{zx} & T_{zy} & T_{zz} \end{bmatrix} \times \left[ \begin{array}{c} \overline{ \sigma_x(0)} \\ \overline{ \sigma_y(0)} \\ \overline{ \sigma_z(0)} \end{array} \right]$$
or
$$\overline{ \vec \sigma(\tau)}=T\vec \sigma(0)$$
where $$ T_{xx}=I_0+I_{xx}-I_{yy}-I_{zz} \quad T_{yy}=I_0-I_{xx}+I_{yy}-I_{zz}$$ $$T_{zz}=I_0-I_{xx}-I_{yy}+I_{zz} $$
$$T_{xy}=2I_{xy}+2I_z ,\quad T_{yx}=2I_{xy}-2I_z$$
$$ T_{xz}=2I_{xz}-2I_y ,\quad T_{zx}=2I_{xz}+2I_y $$
$$T_{yz}=2I_{yz}+2I_x ,\quad T_{zy}=2I_{yz}-2I_x $$
After m time steps we have:
$$\overline{ \vec \sigma(m\tau)}=T^m\vec \sigma(0)$$
The transfer matrix can be diagonalized and hence, $T^m$ can be easily calculated giving exact solutions for the expectation value of the operators.
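This procedure is easy to emulate numerically. The following sketch (ours; it assumes NumPy, sets $\hbar=1$, and uses an isotropic Gaussian for $P(\vec b)$ with illustrative values of $B_0$, $\tau$ and the noise strength) estimates the transfer matrix by averaging over realizations of $\vec b$, diagonalizes it and applies it $m$ times:
\begin{verbatim}
import numpy as np

# Sketch of the transfer-matrix method for one qubit in a piecewise-constant
# random field (hbar = 1; all numerical values are illustrative assumptions).
sx = np.array([[0, 1], [1, 0]], dtype=complex)
sy = np.array([[0, -1j], [1j, 0]], dtype=complex)
sz = np.array([[1, 0], [0, -1]], dtype=complex)
paulis = [sx, sy, sz]

B0, tau, sigma_b, samples = 1.0, 0.2, 0.5, 20000
rng = np.random.default_rng(0)

def step_unitary(b):
    """U = exp(-i H tau) with H = -B0 sigma_z - b . sigma."""
    H = -B0 * sz - sum(b[k] * paulis[k] for k in range(3))
    w, V = np.linalg.eigh(H)
    return V @ np.diag(np.exp(-1j * w * tau)) @ V.conj().T

# T_{ab} = (1/2) Tr[ sigma_b * average(U^+ sigma_a U) ], averaged over b.
T = np.zeros((3, 3))
for _ in range(samples):
    U = step_unitary(rng.normal(0.0, sigma_b, size=3))
    for a in range(3):
        heis = U.conj().T @ paulis[a] @ U      # Heisenberg-evolved sigma_a
        for c in range(3):
            T[a, c] += 0.5 * np.trace(paulis[c] @ heis).real
T /= samples

# After m steps:  <sigma(m tau)> = T^m <sigma(0)>, via diagonalization of T.
m = 50
evals, R = np.linalg.eig(T)
Tm = (R @ np.diag(evals ** m) @ np.linalg.inv(R)).real
print(Tm @ np.array([0.0, 0.0, 1.0]))          # spin initially along +z
\end{verbatim}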
\section{An attempt to extend the transfer matrix method to other systems}
\subsection{Three State(Qutrit) System with Noise}
We will choose the 8 Gell-Mann matrices as the operator basis. Consider the following Hamiltonian for a three-state system: $$H=-\vec B\cdot \vec \lambda,$$ where $\vec B=B_0 \uvec{i} +\vec b$ and $\vec b$ is the random vector whose components are the three random functions which get coupled to the three Gell-Mann matrices $\vec\lambda=(\lambda_1,\lambda_2,\lambda_3)$.
Assume that the random vector is piecewise constant with discontinuous jumps at regular time intervals of length $\tau$. Therefore, we have $$\vec B_i=B_0 \uvec{i} +\vec b_i$$ for the $i^{th}$ time slot. We will also assume that there is no correlation between the different $\vec b_i$ and that each $\vec b_i$ has the same probability distribution $P(\vec b)$.
Also, we have the unitary operator given by $U_i=e^{\frac{-iH_i\tau}{\hbar}}$ where $H_i=-\vec B_i\cdot \vec \lambda$.
Since the Gell-Mann matrices $\lambda_1$, $\lambda_2$ and $\lambda_3$ span an $su(2)$ subalgebra of $su(3)$, they anti-commute pairwise. Define $\beta=\tau|\vec B|/\hbar$.
$$U_1=\sum_{n=0}^{\infty} \frac{(i)^n\beta^n(\hat{B}\cdot \vec \lambda)^n}{n!}$$ $$=\sum_{n=0}^{\infty} \frac{(-1)^n\beta^{2n}(\hat{B}\cdot \vec \lambda)^{2n}}{(2n)!}+i\sum_{n=0}^{\infty} \frac{(-1)^n\beta^{2n+1}(\hat{B}\cdot \vec \lambda)^{2n+1}}{(2n+1)!}$$
Now, $$(\hat{B}\cdot \vec \lambda)^2=\Big(\sum_{i=1}^{3} \hat B_i\lambda_i\Big)^2=\sum_{i,j=1}^{3} \hat B_i\hat B_j\lambda_i\lambda_j$$
Since $\lambda_1,\lambda_2,\lambda_3$ anti-commute with each other, the above expression reduces to \[(\hat{B}\cdot \vec \lambda)^2=\sum_{i=1}^{3}\hat B_i^2A=A \]
Substituting and simplifying we get,
\begin{equation}
U_1=I+i\beta (\hat{B}\cdot \vec \lambda)+A[\cos(\beta)-1]+iA(\hat B\cdot \vec \lambda)[\sin(\beta)-\beta] \end{equation}
where $\beta=\frac{\tau |\vec B|}{\hbar}$ and \[ A=
\begin{bmatrix}
1 & 0 & 0\\
0 & 1 & 0\\
0 & 0 & 0
\end{bmatrix} \] \\ \\
Proceeding in a way analogous to that given in the previous section to obtain the transfer matrix results in a $9\times 9$ transfer matrix instead of an $8\times 8$ one. This is because, unlike the Pauli matrices, the Gell-Mann matrices are not closed under multiplication (apart from a multiplicative constant). Thus, while deriving the equations for the expectation values of the operators we have to invoke the identity matrix, making the number of operators considered 9 instead of the 8 Gell-Mann matrices alone.
A more transparent method which also removes the difficulty in dealing with identity is obtained by considering the evolution of the density matrix itself and its decomposition in terms of the operator basis. The most general form of the density matrix after a time $t$ can be written as \cite{bq}: $$\rho (t)=\frac{1}{3}I+\sum_{a=1}^{8} \rho_{a}(t)\lambda_{a}$$
The $\rho_{a}$ are real numbers which completely characterize the state of the system.
Also, the density matrix evolves unitarily as $\rho(\tau)=\overline{U_1\rho(0)U_1^+}$, so we can write: $$\rho (\tau)=\overline{U_1\Big [\frac{1}{3}I+\sum_{b=1}^{8} \rho_{b}(0)\lambda_{b}\Big ]U_1^+} =\frac{1}{3}I+\overline{U_1 \Big [\sum_{b=1}^{8} \rho_{b}(0)\lambda_{b}\Big ]U_1^+}$$ Then we have, $$\sum_{a=1}^{8} \rho_{a}(\tau)\lambda_{a}=\overline{U_1 \Big [\sum_{b=1}^{8} \rho_{b}(0)\lambda_{b}\Big ]U_1^+}$$
We know that for the Gell-Mann matrices $Tr(\lambda_a\lambda_b)=2\delta_{ab}$. Thus, we can simplify the above expression by multiplying it with $\lambda_c$ and taking the trace. Then we get, $$\rho_{c}(\tau)=\frac{1}{2}Tr\Big (\lambda_c \overline{U_1 \Big [\sum_{b=1}^{8} \rho_{b}(0)\lambda_{b}\Big ]U_1^+} \Big )$$ which we can write as, $$\rho_{c}(\tau)=\sum_{b=1}^{8} T_{cb}\rho_b(0)$$ where
\begin{eqnarray*}
T_{cb}&=&\frac{1}{2}Tr ( \lambda_c \overline{U_1\lambda_b U_1^+})\\
&=&\frac{1}{2}Tr\Big(\,\overline{\big[I+i\beta (\hat{B}\cdot \vec \lambda)+A[\cos(\beta)-1]+iA(\hat B\cdot \vec \lambda)[\sin(\beta)-\beta]\big]\,\lambda_b\,\big[I-i\beta (\hat{B}\cdot \vec \lambda)+A[\cos(\beta)-1]-iA(\hat B\cdot \vec \lambda)[\sin(\beta)-\beta]\big]}\;\lambda_c \Big )
\end{eqnarray*}
\subsection{Two-Qubit System with Noise} The observables in the Hilbert space corresponding to a two-qubit system are spanned by the following set of 16 $4\times 4$ matrices. $$\{\sigma_i \otimes \sigma_j: 0 \leq i,j \leq 3 \}$$
where $\sigma_1,\sigma_2,\sigma_3$ are the Pauli matrices and $\sigma_0=I$. Here $\otimes$ denotes the Kronecker product. Note that the subsets $$\{\sigma_i \otimes \sigma_0: 0 \leq i \leq 3 \}, \quad \{\sigma_0 \otimes \sigma_j: 0 \leq j \leq 3 \}$$ act as Pauli matrices.
Hence, we can write down a Hamiltonian in which the coupling to the noise function is through any of the six matrices in the above subsets. This will enable us to apply the same ideas as in section 3.2. \\ Consider the Hamiltonian $$H=-\vec B\cdot \vec e,$$ where $\vec B=B_0 \uvec{i} +\vec b$ and $\vec b$ is the stochastic vector whose components are the three stochastic functions which get coupled to the matrices $\vec e=(e_4,e_8,e_{12})$. We will assume that the random functions and the probability distribution have the properties already assumed in the previous two sections.
Since the matrices $e_4,e_8,e_{12}$ have the properties of Pauli matrices we can derive the following identity for the exponentiation of the Hamiltonian $U_i=e^{\frac{-iH_i\tau}{\hbar}}$:
$$U_1=I\cos(\beta)+i(\hat{B}\cdot \vec e)\sin(\beta)$$
where $\beta=\frac{\tau |\vec B|}{\hbar}$.
We can consider the general form of a $4\times 4$ density matrix and obtain the transfer matrix through the method followed in the previous section.
For the two-qubit system we obtain the following equation for the coefficients of the density matrix: $$\rho_{c}(\tau)=\sum_{b=1}^{15} T_{cb}\rho_b(0)$$ where
$$T_{cb}=\frac{1}{4}Tr ( e_c \overline{U_1e_b U_1^+})$$ $$=\frac{1}{4}Tr\Big(\hspace{0.1cm}\overline{[I\cos(\beta)+i(\hat B\cdot \vec e)\sin(\beta)]\,e_b\,[I\cos(\beta)-i(\hat B\cdot \vec e)\sin(\beta)]}\,e_c \Big )$$
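The identity $U_1=I\cos(\beta)+i(\hat{B}\cdot \vec e)\sin(\beta)$ used here can be checked directly. In the sketch below (ours, assuming NumPy/SciPy) we take $e_4,e_8,e_{12}$ to be $\sigma_1\otimes\sigma_0$, $\sigma_2\otimes\sigma_0$ and $\sigma_3\otimes\sigma_0$; this identification of the indices is our assumption about the ordering of the basis, and $\beta$ and $\hat B$ are illustrative values:
\begin{verbatim}
import numpy as np
from scipy.linalg import expm

# With e_4, e_8, e_12 assumed to be sigma_x (x) I, sigma_y (x) I, sigma_z (x) I,
# they obey Pauli-type relations and exp(i beta Bhat.e) collapses as claimed.
sx = np.array([[0, 1], [1, 0]], dtype=complex)
sy = np.array([[0, -1j], [1j, 0]], dtype=complex)
sz = np.array([[1, 0], [0, -1]], dtype=complex)
I2 = np.eye(2, dtype=complex)
e = [np.kron(s, I2) for s in (sx, sy, sz)]

rng = np.random.default_rng(2)
Bhat = rng.normal(size=3)
Bhat /= np.linalg.norm(Bhat)
beta = 1.3

Bdote = sum(Bhat[k] * e[k] for k in range(3))
U_exact = expm(1j * beta * Bdote)
U_closed = np.eye(4) * np.cos(beta) + 1j * Bdote * np.sin(beta)
print(np.max(np.abs(U_exact - U_closed)))   # ~ machine precision
\end{verbatim}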
\subsection{N-dimensional Quantum System with Noise}
The transformations of an N-dimensional quantum system belong to SU(N) and the algebra is an $N^2-1$ dimensional space. The transfer matrix will be $(N^2-1)\times(N^2-1)$. A suitable orthogonal basis is the set of generalized Gell-Mann matrices \cite{bq} $\lambda_i$ where $i=1,2,\ldots,N^2-1$. Expanding a general $N\times N$ density matrix in terms of the $\lambda_i$s we get: $$\rho_{c}(\tau)=\frac{1}{2}Tr\Big (\lambda_c \overline{U_1 \Big [\sum_{b=1}^{N^2-1} \rho_{b}(0)\lambda_{b}\Big ]U_1^+} \Big )$$ which we can write as, $$\rho_{c}(\tau)=\sum_{b=1}^{N^2-1} T_{cb}\rho_b(0)$$ where $$T_{cb}=\frac{1}{2}Tr ( \lambda_c \overline{U_1\lambda_b U_1^+})$$
Now, if \( \lambda_{k_1},\ldots,\lambda_{k_n}\) form some sub-algebra of $SU(N)$ such that the $\lambda_{k_i}$ anti-commute with each other and each squares to a common matrix $A$ which itself squares to $A$, we can use these matrices to model the Hamiltonian for the system. Then, as before we can derive the identity:
$$U_1=I+i\beta (\hat{B}\cdot \vec \lambda)+A[\cos(\beta)-1]+iA(\hat B\cdot \vec \lambda)[\sin(\beta)-\beta]$$ where $\vec \lambda=(\lambda_{k_{1}},\ldots,\lambda_{k_{n}})$ and $\beta=\frac{\tau |\vec B|}{\hbar}$.
Using the above identity we can write the transfer matrix as: $$T_{cb}=\frac{1}{2}i\sum_i \Big[Tr(\overline{\beta(cos(\beta)-1)\hat{B_i}}[\lambda_c\lambda_i\lambda_bA-\lambda_cA\lambda_b\lambda_i])+$$ $$Tr(\overline{(sin(\beta)-\beta)(cos(\beta)-1)\hat{B_i}}[\lambda_cA\lambda_i\lambda_bA-\lambda_cA\lambda_bA\lambda_i])+$$ $$Tr(\overline{(sin(\beta)-\beta)\hat{B_i}}[\lambda_cA\lambda_i\lambda_b])-Tr(\overline{\beta\hat{B_i}}[\lambda_c\lambda_b\lambda_i-\lambda_c\lambda_i\lambda_b])\Big]+$$ $$\frac{1}{2}\sum_{ij} \Big[Tr(\overline{\beta(sin(\beta)-\beta)\hat{B_i}\hat{B_j}}[\lambda_c\lambda_i\lambda_bA\lambda_j-\lambda_cA\lambda_i\lambda_b\lambda_i])+ Tr(\overline{(sin(\beta)-\beta)^2\hat{B_i}\hat{B_j}}[\lambda_cA\lambda_iA\lambda_j])+$$ $$ Tr(\overline{\hat{B_i}\hat{B_j}\beta^2}[\lambda_c\lambda_i\lambda_b\lambda_j])\Big]+\frac{1}{2}\overline{(cos(\beta)-1)^2}[\lambda_cA\lambda_bA]+\frac{1}{2}\overline{cos(\beta)-1}[\lambda_cA\lambda_b+\lambda_c\lambda_bA]+\frac{1}{2}\lambda_c\lambda_b$$
If the system in consideration is a N-qubit system then, we can take the operator basis to be: $$\{\sigma_{i_{1}} \otimes ... \otimes \sigma_{i_{N}}: 0 \leq i_1,i_2,...i_N \leq 3 \}$$
where $\sigma_{1_{j}},\sigma_{2_{j}},\sigma_{3_{j}}$ are the Pauli matrices and $\sigma_{0_{j}}=I$ for $j=1,\ldots,N$. Block-diagonal elements within this basis form different subsets such that the elements in each of the subsets have the same properties as the Pauli matrices. Any one such subset can be used to model the Hamiltonian. Then, as before, we can derive the identity:
$$U_1=Icos(\beta)+i(\hat{B}\cdot \vec e)sin(\beta)$$
where $\beta=\frac{\tau |\vec B|}{\hbar}$. \\ Using the above identity we can write the transfer matrix as: $$T_{cb}=\frac{1}{c}\Big[ Tr(\overline{\cos^2(\beta)}\,e_ce_b)+i\sum_i Tr( \overline{\sin(\beta) \cos(\beta) \hat{B_i}}[e_ce_ie_b-e_ce_be_i])+$$ $$\hspace{7cm}\sum_{ij} Tr(\overline{\sin^2(\beta) \hat{B_i}\hat{B_j}}[e_ce_ie_be_j])\Big]$$ where c is the constant in the orthogonality condition.
In general, simplifying the transfer matrix to obtain exact solutions is not trivial. But if the transfer matrix can be diagonalized we can form the diagonal matrix $D=diag(d_1,\ldots,d_{N^2-1})$ where $$D=RTR^{-1}.$$ Then, at time $t_f=m\tau$, i.e.\ after $m$ steps, the coefficients in the expansion of the density matrix are given by: $$\rho_i(t_f)=R^{-1}_{ij}(d_j)^{t_f/\tau}R_{jk}\rho_k(0)$$ where $i=1,\ldots,N^2-1$ and summation over repeated indices is implied.
\section{Conclusion} The method introduced by Barnes and Das Sarma is a highly efficient one. There have been extensions of this method \cite{ash} \cite{ta} to other systems with different forms of coupling, but the idea remains the same. It is clear from the expression for $J(t)$ in terms of $q(t)$ that the function $q(t)$ should be a smooth one with at least first and second derivatives. So it is obvious that the noise functions discussed in section 3 do not come under this category, as the simplest of them have discontinuous jumps.
The transfer matrix method enables us to find analytic solutions for two-state systems in which the interaction with the environment can be assumed to be of a special kind, i.e.\ where we can assume it to be a noise function which is piecewise constant on equal intervals. The method cannot be applied as such to higher dimensional systems. A significant property of SU(2) which enabled the derivation of a transfer matrix is that, apart from the operator basis (the three Pauli matrices plus the identity) being closed under multiplication, the product of any two distinct Pauli matrices is again proportional to a Pauli matrix. This is a crucial property which neither the 8 Gell-Mann matrices nor the 15 matrices (excluding the identity) of the operator basis of the two-qubit space have. Though we can formally write an extended transfer matrix in both cases, it is not easy to see how to simplify them.
Rather, we could derive the transfer matrix for an N-dimensional quantum system by looking at the density matrix evolution. Whether this transfer matrix can be simplified to obtain exact solutions depends on the specific system under consideration. There may be situations in which the matrix is diagonalizable in which case the exact solution is easily obtained.
\end{document} |
\begin{document}
\numberwithin{equation}{section} \title{Roots of crosscap slides and crosscap transpositions}
\author{Anna Parlak \hspace{1.5em} Micha\l $\ $Stukow}
\address[]{
Institute of Mathematics, Faculty of Mathematics, Physics and Informatics, University of Gda\'nsk, 80-308 Gda\'nsk, Poland }
\thanks{The second author is supported by grant 2015/17/B/ST1/03235 of National Science Centre, Poland.}
\email{[email protected], [email protected]}
\keywords{Mapping class group, nonorientable surface, punctured sphere, elementary braid, \mbox{$Y$--homeomorphism}, crosscap slide, crosscap transposition, roots} \subjclass[2000]{Primary 57N05; Secondary 20F38, 57M99}
\begin{abstract} Let $N_{g}$ denote a closed nonorientable surface of genus $g$. For $g \geq 2$ the mapping class group $\mathcal{M}(N_{g})$ is generated by Dehn twists and one crosscap slide ($Y$--homeomorphism) or by Dehn twists and a crosscap transposition.
Margalit and Schleimer observed that Dehn twists have nontrivial roots.
We give necessary and sufficient conditions for the existence of roots of crosscap slides and crosscap transpositions. \end{abstract}
\maketitle
\section{Introduction}
Let $N_{g, s}^{n}$ be a connected, nonorientable surface of genus $g$ with $s$ boundary components and $n$ punctures, that is a surface obtained from a connected sum of $g$ projective planes $N_{g}$ by removing $s$ open disks and specifying the set $\Sigma = \lbrace p_{1}, \ldots, p_{n} \rbrace $ of $n$
distinguished points in the interior of $N_{g}$.
If $s$ or/and $n$ equals zero, we omit it from notation. The \emph{mapping class group} $\mathcal{M}(N_{g, s}^{n})$ consists of isotopy classes of self--homeomorphisms $h: N_{g,s}^{n} \rightarrow N_{g,s}^{n}$ fixing boundary components pointwise and such that $h(\Sigma) = \Sigma$. The mapping class group $\mathcal{M}(S_{g,s}^{n})$ of an orientable surface is defined analogously, but we consider only orientation--preserving maps. If we allow orientation--reversing maps, we obtain the \emph{extended mapping class group} $\mathcal{M}^{\pm}(S_{g,s}^{n})$. By abuse of notation, we identify a homeomorphism with its isotopy class.
In the orientable case, the mapping class group $\mathcal{M}(S_{g})$ is generated by Dehn twists \cite{Lick1}. As for nonorientable surfaces, Lickorish proved that Dehn twists alone do not generate $\mathcal{M}(N_{g})$, $g \geq 2$. This group is generated by Dehn twists and one crosscap slide ($Y$--homeomorphism)~\cite{Lick3}.
A presentation for $\mathcal{M}(N_{g})$ using these generators was obtained by Stukow \cite{StukowSimpSzepPar}. This presentation was derived from the presentation given by Paris and Szepietowski \cite{SzepParis}, which used as generators Dehn twists and yet other homeomorphisms of nonorientable surfaces, the so--called crosscap transpositions.
Margalit and Schleimer discovered a surprising property of Dehn twists: in the mapping class group of a closed, connected, orientable surface $S_{g}$ of genus $g \geq 2$, every Dehn twist has a nontrivial root \cite{MargSchleim}. It is natural to ask if crosscap slides and crosscap transpositions also have a similar property. The main goal of this paper is to prove the following:
\begin{Mtw} In $\mathcal{M}(N_{g})$ a nontrivial root of a crosscap transposition [or a crosscap slide] exists if and only if $g \geq 5$ or $g = 4$ and the complement of the support of this crosscap transposition [or crosscap slide] is orientable. \label{main} \end{Mtw}
\section{Preliminaries}
\subsection*{Crosscap transpositions and crosscap slides.}
Let $N = N_{g}$ be a nonorientable surface of genus $g \geq 2$. Let $\alpha$ and $\mu$ be two simple closed curves in $N$ intersecting in one point, such that $\alpha$ is two--sided and $\mu$ is one--sided. A regular neighborhood of $\mu \cup \alpha$ is homeomorphic to the Klein bottle with a hole denoted by $K$. A convenient model of $K$ is a disk with 2 crosscaps, see Figure \ref{UY}. In this figure shaded disks represent crosscaps, thus the boundary points of these disks are identified by the antipodal map.
\begin{figure}
\caption{Crosscap transposition and crosscap slide.}
\label{UY}
\end{figure}
A \emph{crosscap transposition} $U_{\mu,\alpha}$ specified by $\mu$ and $\alpha$ is
a homeomorphism of $K$ which interchanges two crosscaps keeping the boundary of $K$ fixed \cite{SzepParis}. It may be extended by the identity to a homeomorphism of $N$. If $t_{\alpha}$ is the Dehn twist about $\alpha$ (with the direction of the twist indicated by small arrows in Figure \ref{UY}), then $Y_{\mu,\alpha} = t_{\alpha}U_{\mu,\alpha}$ is a \emph{crosscap slide} of $\mu$ along $\alpha$, that is the effect of pushing $\mu$ once along $\alpha$ keeping the boundary of $K$ fixed. Note that $U_{\mu,\alpha}^{2}=Y_{\mu,\alpha}^{2}=t_{\partial K}$.
If $g$ is even, then the complement of $K$ in $N_g$ can be either a nonorientable surface $N_{g-2,1}$ or an orientable surface $S_{\frac{g-2}{2},1}$, therefore on surfaces of even genus two conjugacy classes of crosscap slides and crosscap transpositions exist.
\subsection*{Notation}
Represent $N_g$ as a connected sum of $g$ projective planes and let $\mu_1,\ldots,\mu_{g}$ be
one--sided circles that correspond to crosscaps as indicated in Figure \ref{fig_surface}.
By abuse of notation, we identify $\mu_i$ with the corresponding crosscap.
\begin{figure}
\caption{Nonorientable surface $N_g$.}
\label{fig_surface}
\end{figure}
If $\alpha_1,\ldots,\alpha_{g-1}$ are two--sided circles indicated in the same figure, then for each $i=1,\ldots,g-1$ by $t_{\alpha_i},u_i,y_i$ we denote the Dehn twist about $\alpha_i$, the crosscap transposition $U_{\mu_{i+1},\alpha_i}$, and the crosscap slide $Y_{\mu_{i+1},\alpha_i}$, respectively.
\subsection*{Relations in the mapping class group of a nonorientable surface} A full presentation for $\mathcal{M}(N_{g})$ is given in \cite{SzepParis, StukowSimpSzepPar}. Among others, the following relations hold in $\mathcal{M}(N_{g})$: \begin{itemize}
\item[(R1)] $u_{i}u_{j} = u_{j}u_{i}$ for $i,j = 1, \ldots, g-1, \ |i-j|>1$,
\item[(R2)] $u_{i}u_{i+1}u_{i} = u_{i+1}u_{i}u_{i+1}$ for $i=1,\ldots, g-2$,
\item[(R3)] $(u_{1} \ldots u_{g-1})^{g} = 1$,
\item[(R4)] $t_{\alpha_{i}}u_{j} = u_{j}t_{\alpha_{i}}$ and hence $y_{i}u_{j} = u_{j}y_{i}$ for $i,j=1, \ldots, g-1, \ |i-j|>1$
\end{itemize} It is straightforward to check that relations (R1)--(R3) imply \begin{itemize}
\item[(R5)] $(u_{1}^{2}u_{2}\ldots u_{g-1})^{g-1} = 1$ \end{itemize}
Geometrically $u_{1}\ldots u_{g-1}$ is a cyclic rotation of $\mu_1,\ldots,\mu_{g}$ and $u_{1}^{2}u_{2}\ldots u_{g-1}$ is a cyclic rotation of $\mu_2,\ldots,\mu_{g}$ around $\mu_1$. In particular, \begin{itemize}
\item[(R6)] $(u_{1} \ldots u_{g-1})^{g} = (u_{1}^{2}u_{2} \ldots u_{g-1})^{g-1} = t_{\partial N_{g,1}}$ in $\mathcal{M}(N_{g,1})$. \end{itemize}
We also have the following chain relation between Dehn twists (Proposition 4.12 of \cite{MargaliFarb}): if $k \geq 2$ is even and $c_{1}, \ldots, c_{k}$ is a chain of simple closed curves in a surface $S$, such that the boundary of a closed regular neighborhood of their union is isotopic to $d$, then \begin{itemize} \item[(R7)] $(t_{c_{1}}\ldots t_{c_{k}})^{2k+2} = t_{d}$.
\end{itemize}
\section{Proof of the Main Theorem}
\begin{uw}\label{rem:odd}
Automorphisms of $H_{1}(N_{g}; \mathbb{R})$ induced by crosscap transpositions and crosscap slides have determinants equal to $-1$, so if a root of a crosscap slide or a crosscap transposition exists, it must be of odd degree. \end{uw}
Let $K$ be a subsurface of $N_g$ that is a Klein bottle with one boundary component $\delta$ and which contains $\mu_1$ and $\mu_2$ (Figure \ref{fig_surface}). In particular $u_1^2=y_1^2=t_{\delta}$.
\subsection*{The case of ${g\geq 5}$ odd.} Let $p,q \in \mathbb{Z}$ be such that $2p + q(g-2) = 1$. By relations (R6) and (R1),
\[\begin{aligned}
u_{1}^{2}&=t_\delta=(u_{3} \ldots u_{g-1})^{g-2}\\
u_{1}^{2p}&=(u_{3} \ldots u_{g-1})^{p(g-2)}\\
u_{1}&=\left((u_{3}\ldots u_{g-1})^{p}u_{1}^{q}\right)^{g-2}
\end{aligned} \]
Analogously, by relations (R6), (R1) and (R4), $y_{1} = \left((u_{3}\ldots u_{g-1})^{p}y_{1}^{q}\right)^{g-2}$.
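For instance, for $g=5$ one may take $(p,q)=(-1,1)$, which exhibits $(u_{3}u_{4})^{-1}u_{1}$ as a cube root of $u_{1}$: since $u_{1}$ commutes with $u_{3}$ and $u_{4}$ by (R1), $\left((u_{3}u_{4})^{-1}u_{1}\right)^{3}=(u_{3}u_{4})^{-3}u_{1}^{3}=t_{\delta}^{-1}u_{1}^{3}=u_{1}$.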
\subsection*{The case of ${g \geq 6}$ even and ${N_{g} \backslash K}$ nonorientable.} Let $p,q \in \mathbb{Z}$ be such that $2p + q(g-3) = 1$. By relations (R6) and (R1),
\[\begin{aligned} u_{1}^{2}&=t_\delta=(u_{3}^{2}u_{4} \ldots u_{g-1})^{g-3}\\ u_{1}^{2p}&=(u_{3}^{2}u_{4} \ldots u_{g-1})^{p(g-3)}\\ u_{1}&=((u_{3}^{2}u_{4} \ldots u_{g-1})^{p}u_{1}^{q})^{g-3}. \end{aligned}\]
Analogously, by relations (R6), (R1) and (R4), $y_{1} = ((u_{3}^{2}u_{4} \ldots u_{g-1})^{p}y_{1}^{q})^{g-3}$.
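For instance, for $g=6$ one may again take $(p,q)=(-1,1)$, which exhibits $(u_{3}^{2}u_{4}u_{5})^{-1}u_{1}$ as a cube root of $u_{1}$.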
\subsection*{The case of ${g \geq 4}$ even and ${N_{g} \backslash K}$ orientable.} Suppose now that crosscap transposition $u$ and crosscap slide $y$ are supported in a Klein bottle with a hole $K$ such that $N_g\setminus K$ is orientable.
If $c_{1}, \ldots, c_{g-2}$ is a chain of two--sided circles in $N_g\setminus K$, then by relation (R7),
\[\begin{aligned} u_{1}^{2}&=t_{\partial K}=(t_{c_{1}}\ldots t_{c_{g-2}})^{2g-2}\\ \left(u_{1}^{2}\right)^{\frac{g}{2}}&=\left((t_{c_{1}}\ldots t_{c_{g-2}})^{2g-2}\right)^{\frac{g}{2}}\\ u_{1}&=((t_{c_{1}}\ldots t_{c_{g-2}})^{g}u_{1}^{-1})^{g-1}. \end{aligned}\] Analogously, $y_{1} = ((t_{c_{1}}\ldots t_{c_{g-2}})^{g}y_{1}^{-1})^{g-1}$. \subsection*{The case of $g=2$.}
Crosscap slides and crosscap transpositions are primitive in $\mathcal{M}(N_{2})$ because \cite{Lick3}
\[\begin{aligned}
\mathcal{M}(N_{2})&\cong \langle t_{\alpha_{1}}, y_{1} \;|\; t_{\alpha_{1}}^{2} = y_{1}^{2} = (t_{\alpha_{1}}y_{1})^{2} = 1 \rangle\\
&\cong \langle t_{\alpha_{1}}, u_{1} | \ t_{\alpha_{1}}^{2} = u_{1}^{2} = (t_{\alpha_{1}}u_{1})^{2} = 1 \rangle \cong \mathbb{Z}_{2} \oplus \mathbb{Z}_{2}.
\end{aligned}\] \subsection*{The case of $g=3$.} \begin{uw}\label{rem:order6} It is known that the mapping class group $\mathcal{M}(N_{3})$ is hyperelliptic \cite{Stukow_HiperOsaka} and has the central element $\varrho$ such that $\mathcal{M}(N_{3})/\gen{\varrho}$ is the extended mapping class group $\mathcal{M}^{\pm}(S_{0}^{3,1})$ of a sphere with 4 punctures. Two upper subscripts mean that we have four punctures on the sphere, but one of them must be fixed. This implies \cite{Buskirk} that the maximal finite order of an element in $\mathcal{M}^{\pm}(S_{0}^{3,1})$ is 3, and hence the maximal finite order of an element in $\mathcal{M}(N_{3})$ is 6. Moreover, each two rotations of order 3 in $\mathcal{M}^{\pm}(S_{0}^{3,1})$ are conjugate, which easily implies that each two elements of order 6 in $\mathcal{M}(N_{3})$ are conjugate. The details of the proof of the last statement are completely analogous to that used in \cite{MaxHyp}, hence we skip them.
The same conclusion can be obtained also purely algebraically: it is known \cite{Scharlemann} that $\mathcal{M}(N_{3})\cong\mathrm{GL}(2,\mathbb{Z})$ and the maximal finite order of an element in $\mathrm{GL}(2,\mathbb{Z})$ is 6. Moreover, there is only one conjugacy class of such elements in $\mathrm{GL}(2,\mathbb{Z})$ --- for details see for example Theorem 2 of \cite{Meskin}. \end{uw}
We will show that crosscap transpositions do not have nontrivial roots in $\mathcal{M}(N_{3})$.
Suppose that $x \in \mathcal{M}(N_{3})$ exists such that $x^{2k+1} = u_{1}$, where $k\geq 1$ (see Remark \ref{rem:odd}). Then \[x^{4k+2}=u_1^2=t_\delta=1.\] By Remark \ref{rem:order6}, $k=1$. Moreover, by relation (R7), \[(t_{\alpha_1}t_{\alpha_2})^6=t_\delta=1,\] hence $x$ is conjugate to $t_{\alpha_1}t_{\alpha_2}$. This contradicts Remark \ref{rem:odd}, because Dehn twists induce automorphisms of $H_1(N_3;\field{R})$ with determinant equal to 1 and $x^3=u_{1}$.
In the case of a crosscap slide the argument is completely analogous, hence we skip the details.
\subsection*{The case of $g=4$ and ${N_{4} \backslash K}$ nonorientable.}
If $N_{4} \backslash K$ is nonorientable, then $\delta$ cuts $N_4$ into two Klein bottles with one boundary component: $K$ and $K_1$. Moreover, as was shown in Appendix A of \cite{Stukow_twist}, \[\begin{aligned}
\mathcal{M}(K)&=\langle t_{\alpha_{1}}, u_{1} \ | \ u_{1}t_{\alpha_{1}} = t_{\alpha_{1}}^{-1}u_{1}\rangle\\
\mathcal{M}(K_1)&=\langle t_{\alpha_{3}}, u_{3} \ | \ u_{3}t_{\alpha_{3}} = t_{\alpha_{3}}^{-1}u_{3}\rangle.
\end{aligned} \]
If $x \in \mathcal{M}(N_{4})$ exists such that $x^{2k+1} = u_{1}$ and $k\geq 1$ (see Remark \ref{rem:odd}), then \[x^{4k+2}=u_1^2=t_\delta.\] In particular, $x$ commutes with $t_\delta$ and \[t_\delta=xt_{\delta}x^{-1}=t_{x(\delta)}^{\pm}.\] By Proposition 4.6 of \cite{Stukow_twist}, up to isotopy of $N_4$, $x(\delta)=\delta$. Because $u_1$ does not interchange two sides of $\delta$ and does not reverse the orientation of $\delta$, $x$ has exactly the same properties. Therefore, we can assume that $x$ is composed of maps of $K$ and $K_1$. Moreover $u_1=x^{2k+1}$ interchanges $\mu_1$ and $\mu_2$ and does not interchange $\mu_3$ and $\mu_4$, hence \[\begin{aligned}
x&=t_{\alpha_{1}}^{k_{1}}u_{1}^{2m_{1}+1}t_{\alpha_{3}}^{k_{2}}u_{3}^{2m_{2}}= t_{\alpha_{1}}^{k_{1}}u_{1}t_{\alpha_{3}}^{k_{2}}t_{\delta}^{m_1+m_2}\\ x^2&=t_{\alpha_{3}}^{2k_{2}}t_{\delta}^{2m_1+2m_2+1} \end{aligned}\] But then \[t_\delta=(x^2)^{2k+1}=t_{\alpha_{3}}^{2k_{2}(2k+1)}t_{\delta}^{(2m_1+2m_{2}+1)(2k+1)}\] which is a contradiction, because Dehn twists about disjoint circles generate a free abelian group (Proposition 4.4 of \cite{Stukow_twist}).
In the case of a crosscap slide the argument is completely analogous, hence we skip the details.
\section{Roots of elementary braids in the mapping class group of $n$-punctured sphere.}
Margalit and Schleimer observed in \cite{MargSchleim} that if $g\geq 5$, then roots of elementary braids in $\mathcal{M}(S_{0}^g)$ exist. The Main Theorem implies a slightly stronger version of that result.
\begin{wn}
An elementary braid in the mapping class group $\mathcal{M}(S_{0}^n)$ or in the extended mapping class group $\mathcal{M}^\pm(S_{0}^n)$
of the \mbox{$n$-punctured} sphere has a nontrivial root if and only if $n \geq 5$. \end{wn} \begin{proof}
By Proposition 2.4 of \cite{SzepParis}, there is a monomorphism \[\map{\varphi}{\mathcal{M}^{\pm}(S_{0}^g)}{\mathcal{M}(N_g)}\] which is induced by blowing up each puncture to a crosscap. In particular, this monomorphism sends elementary braids to crosscap transpositions. Moreover, all roots of crosscap transpositions constructed in the proof of the Main Theorem are elements of $\varphi(\mathcal{M}(S_{0}^g))$. \end{proof}
\end{document} |
\begin{document}
\title{On the observability of Bell's inequality violation in the optical Stern-Gerlach model}
\author{M. Tumminello \dag, A. Vaglica \ddag, G. Vetri \ddag} \affiliation{ \dag Istituto Nazionale di Fisica della Materia and Dipartimento di Fisica e Tecnologie Relative, Universit\`{a} degli Studi di Palermo, Viale delle Scienze, Palermo, I-90128, Italy\\ \ddag Istituto Nazionale di Fisica della Materia and Dipartimento di Scienze Fisiche ed Astronomiche,Universit\`{a} degli Studi di Palermo, via Archirafi 36, 90123 Palermo, Italy }
\date{\today}
\begin{abstract} Using the optical Stern-Gerlach model, we have recently shown that the non-local correlations between the internal variables of two atoms that successively interact with the field of an ideal cavity in proximity of a nodal region are affected by the atomic translational dynamics. As a consequence, there can be some difficulties in observing violation of the Bell's inequality for the atomic internal variables. These difficulties persist even if the atoms travel through an antinodal \textit{region}, except when the spatial wave packets are exactly centered in an antinodal \textit{point}.\\ \end{abstract}
\pacs{03.65.Ud, 32.80.Lg, 42.50.Xa}
\maketitle
Peculiar concepts of quantum mechanics (QM), such as the Bohr's principle of complementarity \cite{Scu, Zei, Durr, Bert}, have their origin in the vectorial nature of the state space, which involves a superposition principle. Complementarity (or duality) \cite{Engl} establishes a sort of ``orthogonality" between the which-way information and the possibility of observing interference pattern. In other words, these two behaviors are mutually exclusive. The visibility $V$ of the interference pattern and the distinguishability $D$ of the quantum paths can in some extent coexist, and as shown by Englert in its quantitative analysis of complementarity \cite{Engl,See}, they satisfy the inequality $D^{2}+V^{2}\leq1$. According to this analysis and in the ambit of the optical Stern-Gerlach (SG) model, we have recently shown \cite{Tum1} that the visibility of the Rabi oscillations and the distinguishability of the two atomic translational paths satisfy the equality relation $D^{2}+V^{2}=1$ when pure initial states are considered.
When applied to a composite system, the superposition principle leads to quantum correlations (entanglement), which may hide the individuality of the subsystems. Differently from the classical case, and in idealized configurations, two quantum systems that have interacted for a time generally do not recover their individuality, even if the subsystems become spatially separated. This inseparability, which has been at the origin of the famous debate between Einstein \cite{Ein} and Bohr \cite{Boh} on the completeness of QM, implies a non-local character of the correlations (EPR correlation \cite{Wer}) between the two subsystems. This non-locality can be revealed by the violation of some Bell's inequality \cite{Bel}. It is worth noting that, differently from the pure case, a mixed state may be EPR correlated and, at the same time, satisfy the Bell's inequality \cite{Wer}.
Recently, attention has been paid to teleportation, non-local correlations, separability and related issues for massive particles \cite{Har, Bar, Rie, Phoe, Kni, Fre, Zub, Ray, Tum2}. As suggested by Phoenix and Barnett \cite{Phoe} (see also \cite{Har} and \cite{Kni}), a simple model which can realize an EPR state for massive particles consists of two atoms which interact successively with the field of an optical cavity, in the ambit of the standard Jaynes-Cummings (JC) model. The entanglement developed during the interaction between the first atom and the field may induce quantum correlations between the two atoms as the second one interacts with the field of the same cavity. An experimental effort to observe a Bell's inequality violation for this system has been made by Haroche and co-workers \cite{Har}, who ascribe the reduction of purity of the entanglement that prevents the Bell's inequality violation to several experimental imperfections.
In a recent paper \cite{Tum2} we have suggested that a careful analysis of the interatomic correlations may require the quantization of the translational dynamics of the two atoms along the cavity axis. In that paper we have considered two-level atoms entering the cavity in a nodal region, where the field gradient is different from zero. The entanglement between the internal and external atomic variables affects the non local features of the interatomic correlations making more difficult the observation of Bell's inequality violation with respect to the JC model. However, in most cases (as in ref.: \cite{Har}) the experiments are performed in such a way that the atoms interact with the cavity field in an antinodal region. Consequently, it seems suitable to extend our previous analysis to this case. Our present analysis confirms qualitatively the results of the previous one, except when the spatial atomic wave packets are exactly centered in an antinodal \textit{point}.
In our model two two-level atoms interact successively with a single mode of the e.m. field of an ideal cavity. The first atom, say $A_{1}$, enters the cavity at time $t=0$ and interacts with the field for a time $t_{1}$. It moves mainly along the \emph{z}-direction, orthogonal to the \emph{x}-cavity axis, and we assume that the velocity along this direction is large enough to treat this component of the motion classically. The second atom, say $A_{2}$, enters the cavity at time $t_{2}>t_{1}$, interacts with the e.m. field as modified by the first atom and leaves the cavity at time $t_{3}$. Finally, both the atoms evolve freely for $t>t_{3}$. The atoms enter the cavity in proximity of an antinodal region of the resonant \emph{k}-mode, and the width of their wave packets is sufficiently small with respect to the wavelength of this mode. The Hamiltonian of the system at all times can consequently be written as
\begin{widetext} \begin{equation}\label{ham}
\hat{H}=\frac{\hat{p}_{1}^{2}}{2\, m}+\hbar \omega (\hat{a}^{\dag}\hat{a}+\hat{S}_{z,1}+
\frac{1}{2})+(\frac{\hat{p}_{2}^{2}}{2\,m}+\hbar \omega \hat{S}_{z,2})\theta_{t}(t_{2})+
\hbar\varepsilon (\frac{k^{2}\,\hat{x}_{1}^{2}}{2}-1)\mu_{t}(0,t_{1})\hat{u}_{1}
+\hbar\varepsilon (\frac{k^{2}\,\hat{x}_{2}^{2}}{2}-1)\mu_{t}(t_{2},t_{3})\hat{u}_{2}, \end{equation} \end{widetext}
where $\hat{x}_{i}$ is the position of atom $A_{i}$ with respect to the antinodal point and $\hat{p}_{i}$ its conjugate momentum. The atom-field interaction is described by $\hat{u}_{i}=\hat{a}^{\dag}\hat{S}_{-,i}+\hat{a}\hat{S}_{+,i}$ where $\hat{a}$ and $\hat{a}^{\dag}$ are the usual annihilation and creation field-operators, while $\hat{S}_{\pm,i}$ are the spin-$1/2$ operators. The atoms have the same mass \emph{m} and the same atom-field coupling constant $\varepsilon$. The linear combination of step-functions $\mu_{t}(x,y)=\theta_{t}(x)-\theta_{t}(y)$, with different points ($x$ and $y$) of discontinuity, distinguishes the different time ranges concerning the successive interactions.
As in Ref. \cite{Phoe} where the standard JC model is adopted and as in our previous paper \cite{Tum2}, we consider the simple case of only one atom-field system excitation. In particular, we start considering both the atoms initially in the ground state and just one photon in the cavity, so at time $t=0$ the state is
$\ket{\psi(0)}=\ket{g_{1}}\ket{1}\ket{\varphi_{1}(0)}$,
where $\ket{\varphi_{1}(0)}$ is a translational state of the atom $A_{1}$.
Using the evolution operator related to eq.(\ref{ham}), the state of the system for $t_{1}\leq t\leq{t_{2}}$ is (up to an irrelevant global phase factor)
\begin{eqnarray}\label{psit} \ket{\psi(t)}=exp[-\frac{i}{\hbar}\frac{\hat{p}_{1}^{2}}{2 m} (t-t_{1})]\cdot\quad\quad\quad\quad\quad\quad\quad\nonumber\\ \quad\cdot[\ket{S_{1}^{-}(t_{1})}\ket{e_{1}}\ket{0}\ +\ket{S_{1}^{+}(t_{1})}\ket{g_{1}}\ket{1}], \end{eqnarray}
where $\ket{e_{i}}$ indicates the excited state of $A_{i}$ and
\begin{eqnarray} \ket{S_{1}^{\pm}(t_{1})}=\frac{1}{2}[e^{{i}\varepsilon t_{1}}\ket{\phi_{1}^{+}(t_{1})}\pm e^{{-i}\varepsilon t_{1}}\ket{\phi_{1}^{-}(t_{1})}] \label{Spm1}\\
\ket{\phi_{1}^{\pm}(t_{1})}= \exp\{-\frac{i}{\hbar}[\frac{\hat{p}_{1}^{2}}{2 m}\pm\hbar \varepsilon k^{2}\frac{\hat{x}_{1}^{2}}{2}]t_{1}\}\ket{\varphi_{1}(0)}. \label{fipm1t1} \end{eqnarray}
At time $t=t_{2}$ the second atom, in its ground internal state, enters the cavity and starts to interact with the field modified by the interaction with the first atom. Let $\ket{\varphi_{2}(t_{2})}$ be the translational state of atom $A_{2}$ at the beginning of its interaction with the cavity field. The state of the entire system at this time is
$\ket{\Psi(t_{2})}=\ket{\psi(t_{2})}\ket{g_{2}}\ket{\varphi_{2}(t_{2})}$.
Applying the same procedure as above, we derive the state at time $t>t_{3}$, when both atoms have left the cavity and evolve freely
\begin{eqnarray}\label{Psit}
\ket{\Psi(t)}=\ket{S_{1}^{+}(t)}\ket{S_{2}^{+}(t)}\ket{g_{1}}\ket{g_{2}}\ket{1}+\quad\quad\quad \quad\quad\quad\nonumber\\ +\{\ket{S_{1}^{-}(t)}\ket{\varphi_{2}(t)}\ket{e_{1}}\ket{g_{2}}+\quad\quad\quad\quad \quad\quad\quad\nonumber\\ +\ket{S_{1}^{+}(t)}\ket{S_{2}^{-}(t)}\ket{g_{1}}\ket{e_{2}}\}\ket{0}\quad\quad \end{eqnarray}
where
\begin{eqnarray} \ket{S_{i}^{\pm}(t)}=exp[-\frac{i}{\hbar}\frac{\hat{p}_{i}^{2}}{2 m}(t-t_{3})]\ket{S_{i}^{\pm}(t_{3})}\quad\label{St}\\
\ket{S_{i}^{\pm}(t_{3})}=\frac{1}{2}[e^{{i}\varepsilon T_{i}}\ket{\phi_{i}^{+}(t_{3})}\pm e^{{-i}\varepsilon T_{i}} \ket{\phi_{i}^{-}(t_{3})}]\quad\label{St3}\\
\ket{\phi_{1}^{\pm}(t_{3})}=exp[-\frac{i}{\hbar}\frac{\hat{p}_{1}^{2}}{2 m}(t_{3}-t_{1})]\ket{\phi_{1}^{\pm}(t_{1})}\quad \label{fipm1t3}\\
\ket{\phi_{2}^{\pm}(t_{3})}=\exp\{-\frac{i}{\hbar}[\frac{\hat{p}_{2}^{2}}{2 m}\pm\hbar \varepsilon k^{2}\frac{\hat{x}_{2}^{2}}{2}]T_{2}\}\ket{\varphi_{2}(t_{2})} \label{fipm2t3}\\
\ket{\varphi_{2}(t_{3})}=exp[-\frac{i}{\hbar}\frac{\hat{p}_{2}^{2}}{2 m}T_{2}]\ket{\varphi_{2}(t_{2})},\label{fi2t3}\quad \end{eqnarray}
and we have introduced the interaction times $T_{1}=t_{1}$ and $T_{2}=t_{3}-t_{2}$ for atoms $A_{1}$ and $A_{2}$, respectively. Tracing over the field and the atomic translational variables, the following reduced density operator is obtained
\begin{widetext} \begin{eqnarray}\label{rhorid}
\rho=Tr_{f,s_{1},s_{2}}(\ket{\Psi(t_{3})}\bra{\Psi(t_{3})})= \frac{1}{4}(1+c_{R}^{(1)})(1+c_{R}^{(2)})\ket{g_{1}}\ket{g_{2}}\bra{g_{1}} \bra{g_{2}}+\frac{1}{2}(1-c_{R}^{(1)})\ket{e_{1}}\ket{g_{2}}\bra{e_{1}} \bra{g_{2}}+ \nonumber \\ +\frac{1}{4}(1+c_{R}^{(1)})(1-c_{R}^{(2)})\ket{g_{1}}\ket{e_{2}}\bra{g_{1}} \bra{e_{2}}+ \frac{i}{4} c_{I}^{(1)}[(c_{-}-c_{+})\ket{e_{1}}\ket{g_{2}}\bra{g_{1}} \bra{e_{2}}-h.c.],
\end{eqnarray} \end{widetext}
where we have put
\begin{eqnarray}\label{param} e^{{-2i}\varepsilon T_{1}}\braket{\phi_{1}^{+}(t_{1})}{\phi_{1}^{-}(t_{1})}=c_{R}^{(1)}+ic_{I}^{(1)}\nonumber\\ e^{{-2i}\varepsilon T_{2}}\braket{\phi_{2}^{+}(t_{3})}{\phi_{2}^{-}(t_{3})}=c_{R}^{(2)}+ic_{I}^{(2)}\nonumber\\ e^{\mp{i}\varepsilon T_{1}}\braket{\phi_{2}^{\pm}(t_{3})}{\varphi_{2}(t_{3})}=c_{\pm}.\quad\quad\quad \end{eqnarray}
As is easy to see, eq. (\ref{rhorid}) is formally very similar to the corresponding equation of Ref.\cite{Phoe}. The difference between eq. (\ref{rhorid}) and the corresponding one in Ref.\cite{Phoe} is the fact that the coefficients (\ref{param}) are now affected by the translational dynamics. As in Ref.\cite{Tum2}, the scalar products which appear in this equation are generally subject to a non dissipative decay and this behavior may affect the non-local character of the correlations between the internal atomic variables.
An evaluation of the quantities (\ref{param}) is not a trivial operation because the evolution operator in Eqs. (\ref{fipm1t1}) and (\ref{fipm2t3}) describes a harmonic-like evolution (sign $+$) or a squeezing-like evolution in the other case (sign $-$) \cite{Vag}. In fact one can write
\begin{equation}\label{sq} e^{-\frac{i}{\hbar}\left(\frac{\hat{p}_{i}^{2}}{2\,m}- \frac{\hbar \varepsilon k^{2}}{2}\,\hat{x}_{i}^{2}\right)\,T_{i}}= e^{i\,\frac{\omega_{0}}{2}\,T_{i}\left(\hat{b}_{i}^{\dag\,2}+\hat{b}_{i}^{2}\right)},\ \end{equation}
where $T_{i}$ is the interaction time of atom $A_{i}$,
\begin{equation}\label{bosons}
\hat{b}_{j}=\frac{1}{\sqrt{2}}\left(\sqrt{\frac{m\,\omega_{0}}{\hbar}}\,\hat{x}_{j}+ i\,\frac{1}{\sqrt{m\,\omega_{0}\hbar}}\,\hat{p}_{j}\right) \end{equation}
are boson operators and $\omega_{0}^2 =\frac{\hbar k^{2}}{m} \varepsilon$. To calculate the scalar products (\ref{param}), it is convenient to put the squeezing operators (\ref{sq}) in factored forms \cite{cav}, for instance
\begin{eqnarray}\label{fact} e^{i\,\frac{\alpha}{2}\left(\hat{b}_{i}^{\dag\,2}+\hat{b}_{i}^{2}\right)}= \exp\{-\ln[\cosh(\alpha)](\hat{b}_{i}^\dag\ \hat{b}_{i}+\frac{1}{2})\}\cdot\nonumber\\ \cdot\exp\{\frac{i}{2}\tanh(\alpha)e^{2\ln[\cosh(\alpha)]} \hat{b}_{i}^{\dag\,2}\} \cdot\exp\{\frac{i}{2}\tanh(\alpha)\hat{b}_{i}^{2}\}, \end{eqnarray}
and similar expressions. Moreover, for the sake of mathematical simplicity, we assume that the initial translational states for both the atoms are given by coherent states of the boson-like operators $b_{j}$, with the same width. In other words, we suppose that at the beginning of the interaction with the cavity field the translational states of both the atoms are coherent states with respect to the boson operators $\hat{b}_{j}$: $\ket{\varphi_{j}(initial)}=\ket{\alpha_{j}}$, with $\hat{b}_{j}\ket{\alpha_{j}}=\alpha_{j}\ket{\alpha_{j}}$, and
\begin{eqnarray}\label{psinit} \ket{\alpha_{j}}=\exp\left[\frac{i}{\hbar}\left(p_{0}^{(j)} \hat{x}_{j}-x_{0}^{(j)}\hat{p}_{j}\right)\right]\ket{0_{j}}\\ \alpha_{j}=x_{0}^{(j)}\sqrt{\frac{m\omega_{0}}{2\hbar}}+ip_{0}^{(j)}\frac{1}{\sqrt{2m\hbar\omega_{0}}} \equiv a_{j}+ib_{j} \end{eqnarray}
where
\begin{equation}\label{psivac} \braket{x_{j}}{0_{j}}=\left(\frac{1}{\Delta x_{0}\sqrt{2\,\pi}}\right)^{\frac{1}{2}}\,\exp[-\frac{x_{j}^{2}}{4\,\Delta x_{0}^{2}}]\\ \end{equation}
is the wave function of the ground state of the harmonic ``oscillator'' associated with $\hat{b}_{j}$, and $\Delta x_{0}^2=\frac{\hbar}{2\,m\,\omega_{0}}$ is the same for both atoms. This choice is not too restrictive, because the only constraint introduced with respect to a minimum uncertainty Gaussian packet with arbitrary initial momentum $p_{0}$ and position $x_{0}$ is its width. Furthermore, note that a general packet can always be expressed as a superposition of coherent states. Using eqs. (\ref{fact}) and (\ref{psinit}), the scalar products appearing in eq. (\ref{param}) take the following form,
\begin{widetext}
\begin{eqnarray}
\braket{\phi_{1}^{+}(T_{1})}{\phi_{1}^{-}(T_{1})}=\bra{\alpha_{1}}e^{\{i\,\omega_{0}\,T_{1}-\ln[\cosh(\omega_{0} T_{1})]\}\left(\hat{b}_{1}^{\dag}\,\hat{b}_{1}+\frac{1}{2}\right)}\,e^{\frac{i}{4}\sinh(2\,\omega_{0}\,T_{1})\,\hat{b}_{1}^{\dag\,2}}\, e^{\frac{i}{2}\tanh(\omega_{0}\,T_{1})\,\hat{b}_{1}^{2}}\ket{\alpha_{1}}\label{phiupphidownboson}\\
\braket{\phi_{2}^{+}(T_{2})}{\varphi_{2}(T_{2})}=\bra{\alpha_{2}}e^{i\,\omega_{0}\,T_{2}\, \left(\hat{b}_{2}^{\dag}\,\hat{b}_{2}+\frac{1}{2}\right)}\,e^{-i\,\frac{\hat{p}_{2}^{2}}{2\,m\,\hbar}\,T_{2}} \ket{\alpha_{2}}\label{phiupphi0boson}\\
\braket{\phi_{2}^{-}(T_{2})}{\varphi_{2}(T_{2})}=\bra{\alpha_{2}} e^{-\frac{i}{2}\tanh(\omega_{0}\,T_{2})\,\hat{b}_{2}^{\dag\,2}}\, e^{-\frac{i}{4}\sinh(2\,\omega_{0}\,T_{2})\,\hat{b}_{2}^{2}}\, e^{\{-\ln[\cosh(\omega_{0}T_{2})]\}\left(\hat{b}_{2}^{\dag}\,\hat{b}_{2}+\frac{1}{2}\right)}\, e^{-i\,\frac{\hat{p}_{2}^{2}}{2\,m\,\hbar}\,T_{2}}\, \ket{\alpha_{2}}\label{phidownphi0boson}
\end{eqnarray}
\end{widetext}
A straightforward calculation now leads to the evaluation of these scalar products, where the expansion of the state $\exp[-i\,\frac{\hat{p}_{2}^{2}}{2\,m\,\hbar}\,t]\, \ket{\alpha_{2}}$ in terms of coherent states of the second atom's boson-like operators is required.\\
For $x_{0} \neq 0$ and/or $p_{0} \neq 0$ these terms are characterized by a non dissipative damping. For example, the scalar product (\ref{phiupphidownboson}) for $t\leq{T_{1}}$ behaves as
\begin{widetext} \begin{eqnarray}
\braket{\phi_{1}^{+}(t)}{\phi_{1}^{-}(t)}=e^{i\frac{\omega_{0}}{2}t}e^{-i|\alpha_{1}|^2\frac{\sin(\omega_{0}t)}{\cosh(\omega_{0}t)}} \exp\{\frac{i}{2}\tanh(\omega_{0}t)[(a_{1}^2-b_{1}^2)(1+\cos(2\omega_{0}t))+2a_{1}b_{1}\sin(2\omega_{0}t)]\}\cdot\nonumber\\
\cdot\frac{1}{\sqrt{\cosh(\omega_{0}t)}}e^{-|\alpha_{1}|^2(1-\frac{\cos(\omega_{0}t)}{\cosh(\omega_{0}t)})} \exp\{-\tanh(\omega_{0}t)[a_{1}b_{1}(1-\cos(2\omega_{0}t))+\frac{1}{2}(a_{1}^2-b_{1}^2)\sin(2\omega_{0}t)]\}\label{phiupdown1}\\ \propto{[1-\frac{(\omega_{0}t)^2}{2}]}\cdot\exp{\{-2a_{1}^2(\omega_{0}t)^2\}}\label{phiupdown1red}\quad\quad\quad\quad\quad\quad\quad (\omega_{0}t<1)\quad\quad \quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad
\end{eqnarray} \end{widetext}
The damping factor shown by this last approximate expression, which is at the origin of the non dissipative damping of the Rabi oscillations \cite{Vag,Cus,Tum1}, is due to the increasing distance in phase space \cite{Chian} of the two deflected components of the translational wave packet \cite{Aha}. Similar behaviors hold for the other coefficients of eq. (\ref{param}). The condition $\omega_{0}t<1$ is not very restrictive for the parameters used in this paper, and at the same time it is in agreement with the quadratic approximation of the cavity mode function. After a few periods of Rabi oscillations, the damping factors involved in the scalar products (\ref{param}) when $x_0 \neq 0$ produce a decoherence of the system described by the density matrix (\ref{rhorid}): the last term in eq. (\ref{rhorid}), representing the non-diagonal terms, goes to zero. The system tends to become separable \cite{Wer}. Such a behavior cannot be observed in the JC model context.\\
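As a rough, order-of-magnitude sketch (not part of the original derivation), the following Python snippet evaluates $\omega_{0}$ and the damping factor of eq. (\ref{phiupdown1red}) after a few Rabi periods, using the parameter values quoted in the caption of Fig. \ref{fig1} ($m=10^{-26}$ kg, $\varepsilon=10^{8}\,\mathrm{s}^{-1}$, $\lambda=10^{-5}$ m, $x_{0}=\lambda/10$, $p_{0}=0$); the Rabi period is taken here to be of order $2\pi/\varepsilon$.
\begin{verbatim}
import numpy as np

# Rough order-of-magnitude sketch; parameter values are those quoted in Fig. 1.
hbar = 1.0545718e-34            # J s
m, eps, lam = 1e-26, 1e8, 1e-5  # kg, s^-1, m
k = 2 * np.pi / lam             # wave number of the cavity mode
omega0 = np.sqrt(hbar * k**2 * eps / m)     # omega_0^2 = hbar k^2 eps / m

x0 = lam / 10                               # initial position, off the antinode
a1 = x0 * np.sqrt(m * omega0 / (2 * hbar))  # real part of alpha_1 (p_0 = 0)

t = 5 * 2 * np.pi / eps                     # a few Rabi periods, of order 2 pi / eps
print("omega_0 t =", omega0 * t)                           # < 1, quadratic regime
print("damping   =", np.exp(-2 * a1**2 * (omega0 * t)**2)) # exp{-2 a_1^2 (omega_0 t)^2}
\end{verbatim}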
Note that when both atoms interact with the field exactly at the antinode (i.e. $x_{0}=0$), the scalar products (\ref{phiupphidownboson}-\ref{phidownphi0boson}) produce only a slow damping of the correlation functions and the density matrix remains essentially non-separable.\\
\begin{figure}
\caption{Graphical solution of Bell's inequality in terms of the function $M(\rho)=\max\{\lambda_{1}+\lambda_{2},2\lambda_{2}\}$, for two two-level atoms interacting in succession with the field of the same cavity. Panel (i) shows the periodicity of $\lambda_{1}+\lambda_{2}$ (continuous line) and $2 \lambda_{2}$ (dashed line) when the JC model is adopted \cite{Phoe} or when the SG model is considered with $x_{0}=0$ for both atoms. Panel (ii) illustrates the non dissipative damping of the correlation between the two atoms due to the entanglement of the field and the internal atomic variables with the translational atomic degrees of freedom when $x_{0} \neq 0$. For short interaction times an enhancement of the non-locality of the entanglement is observed. Concerning the translational dynamics, in this graph we assume for both atoms an initial wave packet of minimum uncertainty, with zero mean value of $\hat{p}_{1}$ and $\hat{p}_{2}$, centered at $x_{1}=x_{2}=\lambda/10$ and with a width imposed by the condition of dealing with coherent initial states. Here $\lambda = 2 \pi /k$ is the wavelength of the resonant $k$-mode of the undamped cavity. The values of the other parameters are $m=10^{-26}$ kg, $\varepsilon=10^{8}\,\mathrm{s}^{-1}$ and $\lambda=10^{-5}$ m.}
\label{fig1}
\end{figure}
Because, as said above, non-separability does not imply a violation of Bell's inequality, which would guarantee non-locality, it is also useful to investigate the nature of the interatomic correlations in terms of Bell's inequality. To this end we consider the Horodecki family formulation \cite{Hor1}: \emph{A density matrix $\rho$ describing a system composed of two spin $1/2$ subsystems violates some Bell's inequality in the CHSH formulation} \cite{Cla} \emph{if and only if the relation $M(\rho)>1$ is satisfied}. The quantity $M(\rho)$ is defined as follows. Consider the $3\times3$ matrix $T_{\rho}$ with coefficients $t_{n,m}=tr(\rho\,\sigma_{n}\otimes \sigma_{m})$, where $\sigma_{n}$ are the standard Pauli matrices. Diagonalizing the symmetric matrix $U_{\rho}=T_{\rho}^{T}\cdot T_{\rho}$ ($T_{\rho}^{T}$ is the transpose of $T_{\rho}$) and denoting the three eigenvalues of $U_{\rho}$ by $\lambda_{1}$, $\lambda_{2}$ and $\lambda_{3}$, one has $M(\rho)= \max\{\lambda_{1}+\lambda_{2},\lambda_{1}+\lambda_{3},\lambda_{2}+\lambda_{3}\}$. In our case $\lambda_{2}=\lambda_{3}$ and then $M(\rho)= \max\{\lambda_{1}+\lambda_{2},2 \lambda_{2}\}$. Fig. \ref{fig1} compares the behaviors of $\lambda_{1}+\lambda_{2}$ (continuous line) and $2 \lambda_{2}$ (dashed line) as a function of the interaction time for the two models. For simplicity, in both panels (i) and (ii) we have assumed $T_{1}=T_{2}=T$. The response of the Bell's inequality test highlights the great difference between the interatomic correlations predicted by the two models when $x_{0} \neq 0$. When $x_{0}=0$ the JC and SG models lead to an almost indistinguishable behavior of the system with respect to non-locality (see Fig. \ref{fig1} (i)). This is because eq. (\ref{phiupdown1}) reduces to $1/ \sqrt{\cosh(\omega_0 t)}$ when $x_0 = p_0 = 0$, and this term decays slowly for our parameter values, also in comparison with the decay in eq. (\ref{phiupdown1red}).\\
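As a small, self-contained illustration of the Horodecki criterion (a generic numerical check, not tied to the specific states considered in this paper), the following Python snippet computes $M(\rho)$ for an arbitrary two-qubit density matrix and verifies that the singlet state gives $M(\rho)=2>1$.
\begin{verbatim}
import numpy as np

# Pauli matrices
sx = np.array([[0, 1], [1, 0]], dtype=complex)
sy = np.array([[0, -1j], [1j, 0]], dtype=complex)
sz = np.array([[1, 0], [0, -1]], dtype=complex)
paulis = [sx, sy, sz]

def horodecki_M(rho):
    """M(rho) = sum of the two largest eigenvalues of U = T^T T,
    where t_nm = Tr(rho sigma_n x sigma_m)."""
    T = np.array([[np.trace(rho @ np.kron(sn, sm)).real
                   for sm in paulis] for sn in paulis])
    U = T.T @ T
    lam = np.sort(np.linalg.eigvalsh(U))[::-1]
    return lam[0] + lam[1]

# Sanity check: the singlet state violates the CHSH bound maximally, M = 2
psi = np.array([0, 1, -1, 0], dtype=complex) / np.sqrt(2)
rho_singlet = np.outer(psi, psi.conj())
print(horodecki_M(rho_singlet))   # ~2.0 > 1, hence a Bell violation
\end{verbatim}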
It is furthermore possible to extend the discussion to another simple case, in which the single excitation initially belongs to atom $A_{1}$. For this initial state, the quantity $M(\rho)$ reduces simply to the dashed line of panels (i) and (ii).\\
In conclusion, the internal variables of two atoms that successively cross an optical cavity may become strongly entangled through the interaction with the field of the same cavity. This entanglement may lead to a violation of Bell's inequality. As is known, the transfer of information from the system of interest to other degrees of freedom (to a bath, in the extreme case) produces a degradation of quantum correlations. For the system considered here (the optical SG model), the correlation with the atomic translational degrees of freedom can actually be avoided by letting the atoms cross the cavity as accurately as possible in the region where the gradient of the mode function vanishes.\\
\end{document}
\begin{document}
\title{Latent Variable Model for Multivariate Data with Measure-specific Sample Weights and Its Application in Hospital Compare} \begin{abstract}
We develop a single factor model with measure-specific sample weights for multivariate data with multiple observed indicators clustered within a higher level subject. The factor is therefore a latent variable shared by the multiple indicators within the same subject, and the sample weights differ across indicators and across subjects. Even after integrating out the latent variable, the likelihood of the data cannot be written as the sum of weighted likelihoods of the subjects, because a subject has different sample weights for its multiple indicators. In addition, the number of available indicators varies across subjects. We derive a pseudo likelihood for the latent variable model with measure-specific weights. We investigate various statistical properties of the latent variable model with measure-specific sample weights and its connection to traditional factor analysis. We find that the latent variable model provides consistent estimates of its variances when the measure-specific sample weights are properly re-scaled. Two estimation procedures are developed: an EM algorithm for the pseudo likelihood, and marginalization of the pseudo likelihood by directly integrating out the latent variable to obtain the parameter estimates. This approach is illustrated by the analysis of publicly reported hospital indicators with sample weights. Numerical studies are conducted to investigate the influence of the weights and their sample distribution. \end{abstract}
\keywords{pseudo likelihood, latent variable model, factor analysis, measure-specific sample weights}
\section{Introduction} This work builds on factor analysis, a widely used statistical tool in many fields such as psychology, educational testing, social behavior and biomedical sciences \cite{henderson1975best,thompson2007factor}. Factor analysis is popular since it provides a convenient modeling tool for multiple observed indicators within a subject \cite{muthen2013new}. In this paper, we propose a latent variable model with a single factor for multivariate data with measure-specific weights that vary across indicators and across subjects. A pseudo likelihood approach is developed for our model. \\
Over the years, the Centers for Medicare and Medicaid Services (CMS) Hospital Compare website has published hospital performance scores, which are called hospital indicators in this paper. CMS hopes that these indicators will help people choose their hospitals. In 2016, CMS started to report the star ratings of more than four thousand hospitals across the whole country \cite{venkateshoverall}. The goal of the CMS overall hospital quality star rating is to estimate one summary score using a total of fifty-seven hospital indicators collected from the Hospital Compare database. The fifty-seven indicators are divided into seven different groups according to the quality aspect they represent. Each indicator within a hospital has a sample weight representing the volume of patients that contribute to that indicator. A group-specific factor score is derived for the indicators within the group. The goal of this paper is to estimate the factor model within a group, incorporating sample weights for each indicator within each hospital.\\
The factor is an unobserved latent variable that represents the underlying hospital performance. In addition to the presence of measure-specific weights that vary at both the indicator and hospital level, there is a missing data issue, as only a few hospitals report the complete set of all hospital indicators. Traditional factor analysis based on the correlation matrix cannot deal with such a situation, and therefore we propose a pseudo likelihood method to estimate the model.\\
Existing literature has dealt with subject-specific weights. \cite{shwartz2008estimating} studied volume-related weights, which are subject-specific, via hierarchical logistic models. \cite{veiga2014use} applied subject-specific weights in multivariate multilevel models for longitudinal data. \cite{landrum2000analytic} gave a likelihood-based approach to generalize the overall score. And \cite{Agostinelli2013} proposed a weighted latent likelihood method based on subject-specific weights. To the best of our knowledge, there are no studies of the latent variable model with measure-specific weights, nor of its asymptotic behavior. Therefore, we fill this gap by proposing a version of the weighted pseudo likelihood that accommodates measure-specific weights, with two algorithms for obtaining parameter estimates: the Expectation-Maximization (EM) method and the marginalization of the pseudo likelihood. We apply this model to the CMS Hospital Compare dataset. \\
The sum of the weights for each indicator across the hospitals is set to be the sample size of that indicator, so that a hospital with a smaller volume for that indicator has a smaller sample weight than a hospital with a larger volume. However, we show in Section 3 that the sum of such intuitive sample weights across hospitals needs to be bounded below the sample size in order for the estimates of the variance of the latent variable to be consistent. We impose this bound by multiplying each sample weight by 0.99, which in practice preserves the interpretation.\\
The rest of the paper is organized as follows. In Section 2 we present our model and specify the pseudo likelihood. The statistical properties of the latent variable model, including the bound on the weights, are given in Section 3. In Section 4 we describe the two algorithms, the EM approach and the marginal likelihood approach. In Section 5 we conduct the numerical studies. In Section 6 we analyze three datasets from US Hospital Compare, and in Section 7 we conclude with a discussion.
\section{Model Specification and Pseudo Likelihood}
We start the model with the following set-up:
Suppose there are a total of $m$ indicators with $H$ hospitals (subjects) to be evaluated for each indicator. Let $Y_{jh}$ denote the $j$th indicator in hospital $h$, with $j=1, \dots, m$ and $h=1, \dots, H$. Let $w_{jh}$ denote the measure-specific weight of hospital $h$ and indicator $j$. For each $h$, we fit a single confirmatory factor model as: \begin{align} \nonumber
& \quad \quad \quad \quad \quad Y_{jh}|\alpha_h \sim N(\mu_j+\gamma_j\alpha_h, \sigma_j^2) \\ \nonumber &\text{ with measure-specific weight } w_{jh}, \quad j=1, \dots, m, \end{align}
where $\alpha_h \sim N(0,1)$ is the underlying factor, or latent variable, representing hospital $h$'s performance based on all of its $m$ indicators. The higher the value of $Y_{jh}$, the better the performance of hospital $h$ in indicator $j$. The $\mu_j, \gamma_j$ and $\sigma_j^2$ are unknown parameters that we need to estimate.
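For concreteness, the following Python sketch simulates data from this single-factor model; the sample sizes and parameter values below are hypothetical and only illustrate the data-generating mechanism.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)

H, m = 1000, 3                      # hospitals and indicators (hypothetical sizes)
mu = np.array([0.0, 0.1, -0.1])     # hypothetical intercepts mu_j
gamma = np.array([0.7, 0.7, 0.7])   # hypothetical loadings gamma_j
sigma = np.array([0.7, 0.7, 0.7])   # hypothetical residual sds sigma_j

alpha = rng.standard_normal(H)      # latent variable alpha_h ~ N(0, 1)
Y = mu + gamma * alpha[:, None] + sigma * rng.standard_normal((H, m))
# Y[h, j] is the j-th indicator of hospital h:
# Y_jh | alpha_h ~ N(mu_j + gamma_j alpha_h, sigma_j^2)
\end{verbatim}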
\subsection{The Pseudo Joint Likelihood of Data and Latent Variable} Given $\alpha_h$, $Y_{1h}, \dots, Y_{mh}$ are conditionally independent. The joint density of the latent variable and $Y_{1h},\dots, Y_{mh}$ satisfies
\begin{align}\nonumber P(Y_{1h},\dots, Y_{mh},\alpha_h)&=P(\alpha_h)P(Y_{1h},\dots, Y_{mh}|\alpha_h)
\\ \nonumber&=P(\alpha_h)\prod_{j=1}^mP(Y_{jh}|\alpha_h). \end{align} We define the joint pseudo likelihood for hospital $h$ with sample weights as \begin{align}\label{yi}
P^*(Y_{1h},\dots, Y_{mh},\alpha_h)&=P(\alpha_h)\prod_{j=1}^m[P(Y_{jh}|\alpha_h)]^{w_{jh}}, \end{align} where $w_{jh}$ is a bounded, differentiable, non-negative (sample weight) function \cite{Agostinelli2013}, independent of $Y_{jh}$.\\
The logarithm of the term within the product is given as : \begin{align}\nonumber
\log P^*(Y_{jh}|\alpha_h)&=w_{jh}\log P(Y_{jh}|\alpha_h) \\ \nonumber &=-w_{jh} \log \sigma_j -\frac{w_{jh}}{2\sigma_j^2}(Y_{jh}-\mu_j-\gamma_j\alpha_h)^2-\frac{w_{jh}}{2}\log 2\pi. \end{align} Thus, the conditional log-density of expression \eqref{yi} is given as: \begin{align}\nonumber
& \quad \quad \log P^*(Y_{1h},\dots, Y_{mh}|\alpha_h)=\sum \limits_{j=1}^mw_{jh}\log P(Y_{jh}|\alpha_h)\\ \label{czhang}&=-\sum \limits_{j=1}^m w_{jh} \log \sigma_j -\sum \limits_{j=1}^m \frac{w_{jh}}{2\sigma_j^2}(Y_{jh}-\mu_j-\gamma_j\alpha_h)^2-\frac{1}{2}\sum \limits_{j=1}^m w_{jh} \log 2\pi \\ \nonumber &= - \big[\sum \limits_{j=1}^m \frac{w_{jh}\gamma_j^2}{2\sigma_j^2}\alpha_h^2-\sum \limits_{j=1}^m\frac{2w_{jh}(Y_{jh}-\mu_j)\gamma_j}{2\sigma_j^2}\alpha_h+ \sum \limits_{j=1}^m \frac{w_{jh}(Y_{jh}-\mu_j)^2}{2\sigma_j^2}\big]\\ \nonumber &\quad -\sum \limits_{j=1}^m w_{jh} \log \sigma_j -\frac{1}{2}\sum \limits_{j=1}^m w_{jh} \log 2\pi. \end{align}
Therefore, the negative logarithm of the joint density of all $m$ indicators for hospital $h$ is \begin{align}\nonumber
&-\log P^*(Y_{1h},\dots, Y_{mh},\alpha_h)=-\log P^*(Y_{1h},\dots, Y_{mh}|\alpha_h)-\log P(\alpha_h)\\ \nonumber &=(\frac{1}{2}+\sum \limits_{j=1}^m \frac{w_{jh}\gamma_j^2}{2\sigma_j^2})\alpha_h^2-\sum \limits_{j=1}^m\frac{2w_{jh}(Y_{jh}-\mu_j)\gamma_j}{2\sigma_j^2}\alpha_h+\sum \limits_{j=1}^m w_{jh} \log \sigma_j \\ \nonumber &\quad +\sum \limits_{j=1}^m \frac{w_{jh}(Y_{jh}-\mu_j)^2}{2\sigma_j^2}+\frac{1}{2}(\sum \limits_{j=1}^m w_{jh}+1) \log 2\pi\\ \nonumber &=(\frac{1}{2}+\sum \limits_{j=1}^m \frac{w_{jh}\gamma_j^2}{2\sigma_j^2}) \left \{\alpha_h-\frac{\sum \limits_{j=1}^m\frac{w_{jh}(Y_{jh}-\mu_j)\gamma_j}{\sigma_j^2}}{1+\sum \limits_{j=1}^m \frac{w_{jh}\gamma_j^2}{\sigma_j^2}}\right \}^2+\frac{1}{2}(\sum \limits_{j=1}^m w_{jh}+1) \log 2\pi\\ & \label{cdu} \quad \quad \quad +\sum \limits_{j=1}^m w_{jh} \log \sigma_j+\sum \limits_{j=1}^m \frac{w_{jh}(Y_{jh}-\mu_j)^2}{2\sigma_j^2}-\frac{(\sum \limits_{j=1}^m\frac{w_{jh}(Y_{jh}-\mu_j)\gamma_j}{\sigma_j^2})^2}{2+2\sum \limits_{j=1}^m \frac{w_{jh}\gamma_j^2}{\sigma_j^2}}. \end{align}
The log joint density of all $m$ indicators for all $H$ hospitals is the summation of $\log P^*(Y_{1h},\dots, Y_{mh},\alpha_h)$ over $h$ from $1$ to $H$.\\
Note that we can also accommodate missing values of the $Y_{jh}$'s in \eqref{cdu} by setting the corresponding $w_{jh}=0$; therefore, the joint pseudo log-density of the latent variable model is also compatible with missing data in $Y$.
\subsection{The Marginal Pseudo Likelihood} Note that part of \eqref{cdu} can be rewritten as the log-density of a normal distribution: \begin{align}\nonumber \eqref{cdu}&=(\frac{1}{2}+\sum \limits_{j=1}^m \frac{w_{jh}\gamma_j^2}{2\sigma_j^2}) \left \{\alpha_h-\frac{\sum \limits_{j=1}^m\frac{w_{jh}(Y_{jh}-\mu_j)\gamma_j}{\sigma_j^2}}{1+\sum \limits_{j=1}^m \frac{w_{jh}\gamma_j^2}{\sigma_j^2}}\right \}^2+\frac{1}{2}\log 2\pi+\frac{1}{2} \log ({1}+\sum \limits_{j=1}^m \frac{w_{jh}\gamma_j^2}{\sigma_j^2})^{-1}\\ \nonumber & -\frac{1}{2} \log ({1}+\sum \limits_{j=1}^m \frac{w_{jh}\gamma_j^2}{\sigma_j^2})^{-1}+\sum \limits_{j=1}^m w_{jh} \log \sigma_j+\sum \limits_{j=1}^m \frac{w_{jh}(Y_{jh}-\mu_j)^2}{2\sigma_j^2}-\frac{(\sum \limits_{j=1}^m\frac{w_{jh}(Y_{jh}-\mu_j)\gamma_j}{\sigma_j^2})^2}{2+2\sum \limits_{j=1}^m \frac{w_{jh}\gamma_j^2}{\sigma_j^2}}\\ \label{liao} &+\frac{1}{2}\sum \limits_{j=1}^m w_{jh} \log 2\pi. \end{align}
The first line of \eqref{liao} is exactly the log-density of a normal distribution. Denoting $Y_{.h}=[Y_{1h},\dots,Y_{mh}]'$ as the indicator vector for hospital $h$ and integrating with respect to $\alpha_h$, we obtain that the marginal pseudo log-likelihood (denoted by $\mathcal{L}^*$) of all the parameters for hospital $h$ satisfies \begin{align}\nonumber
&-2\log \mathcal{L}^*(\mu_1 \dots \mu_m, \gamma_1 \dots \gamma_m, \sigma_1 \dots \sigma_m|Y_{.h}) =\sum \limits_{j=1}^m w_{jh} \log 2\pi \\ \label{lin}
& +\log ({1}+\sum \limits_{j=1}^m \frac{w_{jh}\gamma_j^2}{\sigma_j^2})+\sum \limits_{j=1}^m w_{jh} \log \sigma_j^2+\sum \limits_{j=1}^m \frac{w_{jh}(Y_{jh}-\mu_j)^2}{\sigma_j^2}-\frac{(\sum \limits_{j=1}^m\frac{w_{jh}(Y_{jh}-\mu_j)\gamma_j}{\sigma_j^2})^2}{1+\sum \limits_{j=1}^m \frac{w_{jh}\gamma_j^2}{\sigma_j^2}}. \end{align}
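As a minimal sketch, the marginal pseudo log-likelihood contribution \eqref{lin} of a single hospital can be coded directly; the Python function below is our own illustrative implementation (not the CMS software) and returns $-2\log\mathcal{L}^*$ for given parameter values.
\begin{verbatim}
import numpy as np

def neg2_marginal_pseudo_loglik(mu, gamma, sigma2, y_h, w_h):
    """-2 log L* of eq. (lin) for a single hospital h.
    y_h, w_h: length-m arrays of indicator values and measure-specific weights."""
    r = y_h - mu                                # residuals Y_jh - mu_j
    d = 1.0 + np.sum(w_h * gamma**2 / sigma2)   # 1 + sum_j w_jh gamma_j^2 / sigma_j^2
    s = np.sum(w_h * r * gamma / sigma2)        # sum_j w_jh (Y_jh - mu_j) gamma_j / sigma_j^2
    return (np.sum(w_h) * np.log(2 * np.pi) + np.log(d)
            + np.sum(w_h * np.log(sigma2)) + np.sum(w_h * r**2 / sigma2) - s**2 / d)
\end{verbatim}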
\section{Statistical Properties of the Model} \subsection{Main Theorems} In this subsection, we describe the asymptotic behavior of the latent variable model under the uniform weight case (all $w_{jh}=1$) and the varying weight case.\\
We start with the simple uniform weight case where all the weights equal one. Without missing values, the negative marginal log-likelihood (denoted by $\mathcal{L}$) for all the parameters for hospital $h$ satisfies \begin{align}\nonumber
&-2\log \mathcal{L}(\mu_1 \dots \mu_m, \gamma_1 \dots \gamma_m, \sigma_1 \dots \sigma_m|Y_{.h})=m\log 2\pi \\ \label{qqq}
& +\log ({1}+\sum \limits_{j=1}^m \frac{\gamma_j^2}{\sigma_j^2})+\sum \limits_{j=1}^m \log
\sigma_j^2+\sum \limits_{j=1}^m \frac{(Y_{jh}-\mu_j)^2}{\sigma_j^2}-\frac{(\sum \limits_{j=1}^m\frac{(Y_{jh}-\mu_j)\gamma_j}{\sigma_j^2})^2}{1+\sum \limits_{j=1}^m \frac{\gamma_j^2}{\sigma_j^2}}, \end{align} since all $w_{jh}=1$. The pseudo log-likelihood becomes log-likelihood.\\
The asymptotic behavior of the latent variable model follows from the result of a toy example:
\begin{ex}
Let $m=3$ and $Z_{jh} \sim N(\mu_j, \sigma_j^2+\gamma_j^2), j=1,\dots,3, h=1,\dots,H$, and let the covariance between $Z_{i.}=[Z_{i1},\dots,Z_{iH}]$ and $Z_{j.}=[Z_{j1},\dots,Z_{jH}]$ be $\gamma_i\gamma_j$ $(1 \le i, j \le 3)$. Without missing values, twice the negative log-likelihood of $Z$ is the same as \eqref{qqq} when $m=3$. \end{ex}
Example 1 shows that, with uniform weights and no missing values, the latent variable model is the same as the confirmatory factor analysis with a single factor, since they both have the same log-likelihood. However, compared to obtaining parameter estimates through the calculation of the inverse variance-covariance matrix in the confirmatory factor model with a multivariate normal distribution, estimation through the likelihood of the latent variable formulation of the factor model is easier when $m>3$. We then have the following results.
\begin{thm}
When there are no missing values, as $H \to \infty$, the expected negative marginal log-likelihood ($\text{ENMLL}_H$) satisfies \begin{align}\nonumber \text{ENMLL}_H & \xrightarrow{a.s.} \frac{1}{2} \log \big[ (1+\sum \limits _{j=1}^m \frac{\gamma_j^2}{\sigma_j^2})\prod_{j=1}^m \sigma_j^2 \big]+\frac{m}{2}(\log 2\pi+1), \end{align} and $\sqrt{H}(\frac{1}{H}\sum \limits_{h=1}^H \text{NMLL}_h-\text{ENMLL}_H)$ has a normal distribution with mean zero and finite variance. \end{thm}
Theorem 1 gives the asymptotic behavior of the marginal likelihood of the latent variable model with uniform weights.
\begin{thm} Let the weight matrix be $W=[w_{jh}]_{m\times H}$, where the $w_{jh}=w_j$ are positive constants. Then, by the definition of the NMLL, the expected pseudo marginal log-likelihood (ENWMLL) satisfies \begin{align}\nonumber
2\text{ENWMLL}_H &\xrightarrow{a.s.} m+\log \big[(1+\sum \limits _{j=1}^m\frac{w_j\gamma_j^2}{\sigma_j^2})\prod_{j=1}^m\frac{\sigma_j^2}{w_j} \big]+\sum\limits_{j=1}^m (w_j-1)\log \sigma_j^2\\ \nonumber
&+\sum \limits_{j=1}^m \log w_j+\sum \limits_{j=1}^m (w_{j}-1) \log 2\pi, \end{align} without missing values, as $H \to \infty$, and the central limit theorem also holds for the NWMLL. \end{thm}
Theorem 2 gives the asymptotic behavior of the marginal likelihood of the latent variable model with indicator-specific constant weights.
\subsection{Variance Bounded From Zero} This subsection mainly addresses the issue that, in the latent variable model, certain $\sigma_j^2$ may become zero. In that case, the validity of the latent variable model can be endangered. However, the $\sigma^2_j$ can be bounded away from zero by adjusting the weights in a simple way presented in this section. \\
It is easy to observe that each $\sigma_j$ should be bounded away from zero in order to keep the negative marginal log (pseudo) likelihood valid. At the same time, the computation for estimating the parameters slows down heavily in the region where some $\sigma^2$ is tiny, since there are no closed form solutions for \eqref{lin}. In the following, we discuss approaches that prevent the estimated standard errors from going to zero when we incorporate varying weights.
\subsubsection{Uniform Weight} We will focus on three indicators ($m=3$), since if the number of indicators exceeds three, we can always pick three of them to study.
For $j=1,2,3$, let $Y_{jh} \sim N(\mu_j, \sigma_j^2+\gamma_j^2)$, where $h=1,\dots, H$, and let the covariance between $Y_{i.}$ and $Y_{j.}$ be $\gamma_i\gamma_j>0, (1 \le i, j \le 3)$. Assume $Y_{1.}, Y_{2.}, Y_{3.}$ have the same variance; moreover, assume $$\text{Corr}(2,3)=\gamma_2\gamma_3/\sqrt{(\gamma_2^2+\sigma_2^2)(\gamma_3^2+\sigma_3^2)}$$ is the smallest correlation. Then $\sigma_1^2$ is the smallest among all the $\sigma^2$s, since $\gamma_1$ is the largest among the three $\gamma$s. When all the weights equal one in the latent variable model, two extreme examples may cause $\sigma^2_1$ to be exactly zero:
\begin{ex} Assume $Y_{1.}$ and $Y_{2.}$ are identical, then we have $\sigma_1=\sigma_2=0$. \end{ex}
\textbf{Proof:} It is easy to verify that $$\text{Corr}(1,2)=\frac{\gamma_1\gamma_2}{\sqrt{\sigma_1^2+\gamma_1^2}\sqrt{\sigma_2^2+\gamma_2^2}}=\frac{\gamma_1^2}{\gamma_1^2+\sigma_1^2}=1.$$
Then we have $\sigma_1=\sigma_2=0$.\\
\begin{ex} Assume $Y_{1.}, Y_{2.}$ and $Y_{3.}$ satisfy Corr(1,2)$\times$Corr(1,3) $>$ Corr(2,3); then we have $\sigma_1^2 = 0$. \end{ex}
\textbf{Proof:} By the result in Example 1, we have
$$\text{Corr}(1,2)\times \text{Corr}(1,3)=\frac{\gamma_1^2}{\sigma_1^2+\gamma_1^2}\text{Corr}(2,3)\le \text{Corr}(2,3),$$
thus the latent variable model outputs $\sigma_1=0$ as its minimizer.\\
When the situation of Example 2 or Example 3 occurs, the likelihood estimates of the latent variable model will lie on the boundary, and thus the posterior variance of $\alpha_h$ may become zero. In order to prevent this, we need proper weights to keep the posterior variance bounded away from zero.\\
\subsubsection{Varying Weights}\label{ccc} Following Theorem 2, let $W=[w_{jh}]_{m\times H}$, where the $w_{jh}=w_j$ are positive constants. We have \begin{align}\nonumber
2\text{ENWMLL}_H &\xrightarrow{a.s.} m+\log \big[(1+\sum \limits _{j=1}^m\frac{w_j\gamma_j^2}{\sigma_j^2})\prod_{j=1}^m\frac{\sigma_j^2}{w_j} \big]+\sum\limits_{j=1}^m (w_j-1)\log \sigma_j^2\\ \nonumber
&+\sum \limits_{j=1}^m \log w_j+\sum \limits_{j=1}^m w_{j} \log 2\pi, \end{align}
as $H \to \infty$. We can see that if we set the sum of the weights equal to the sample size ($\bar{w}_{j.}=w_j=1$), we have $\text{ENWMLL}_H = \text{ENMLL}_H$ since all the weights are equal to one. Both Example 2 and Example 3 can then cause a $\sigma$ to be zero.\\
Assuming there are no identical indicators among $Y_{1.}, \dots , Y_{m.}$, at most one $\sigma$ can be zero. Without loss of generality, assume $\sigma_1^2$ is the smallest among all the $\sigma^2$s; then we have \begin{align}\nonumber
|\log \big[(1+\sum \limits _{j=1}^m\frac{w_j\gamma_j^2}{\sigma_j^2})\prod_{j=1}^m\frac{\sigma_j^2}{w_j} \big]| &= |\log\big[( \gamma_1^2 \prod_{j=2}^m\frac{\sigma_j^2}{w_j})+\frac{\sigma_1^2}{w_1}(1+\sum \limits _{j=2}^m\frac{w_j\gamma_j^2}{\sigma_j^2})\prod_{j=2}^m\frac{\sigma_j^2}{w_j} \big]| \\ \nonumber
&< |\log( \gamma_1^2 \prod_{j=2}^m\frac{\sigma_j^2}{w_j})|< \infty. \end{align}
Thus we have for $M=\sum \limits_{j=1}^m \log w_j+\sum \limits_{j=1}^m w_j \log 2\pi$ which is a constant, \begin{align}\nonumber
2\text{ENWMLL}_H-M& \xrightarrow{a.s.} m+\sum\limits_{j=2}^m (w_j-1)\log \sigma_j^2+(w_1-1)\log\sigma_1^2\\ \label{sss}
&+\log\big[( \gamma_1^2 \prod_{j=2}^m\frac{\sigma_j^2}{w_j})+\frac{\sigma_1^2}{w_1}(1+\sum \limits _{j=2}^m\frac{w_j\gamma_j^2}{\sigma_j^2})\prod_{j=2}^m\frac{\sigma_j^2}{w_j} \big] \\ \nonumber
&\xrightarrow{a.s.} m+\sum\limits_{j=2}^m (w_j-1)\log \sigma_j^2+\log( \gamma_1^2 \prod_{j=2}^m\frac{\sigma_j^2}{w_j})+(w_1-1)\log\sigma_1^2. \end{align}
If $w_1-1<0$, the expected weighted marginal likelihood is penalized against $\sigma_1$ being zero, since $(w_1-1)\log\sigma_1^2 \to \infty$ as $\sigma_1^2 \to 0$ while the remaining terms are bounded.\\
Furthermore, if we let \begin{align}\nonumber
S_3= ( \gamma_1^2 \prod_{j=2}^m\frac{\sigma_j^2}{w_j})/\big[\frac{1}{w_1}(1+\sum \limits _{j=2}^m\frac{w_j\gamma_j^2}{\sigma_j^2})\prod_{j=2}^m\frac{\sigma_j^2}{w_j}\big]>0, \end{align}
by the Dominated Convergence Theorem, taking derivative to \eqref{sss} with respect to $\sigma_1^2$ yields \begin{align}\nonumber
\frac{\partial{\text{ENWMLL}_H}}{\partial{\sigma_1^2}}\xrightarrow{a.s.}\frac{1}{2(\sigma_1^2+S_3)}+\frac{w_1-1}{2\sigma_1^2}<0 \end{align} in a right neighborhood of zero for $\sigma_1^2$ when $w_1-1<0$. This proves that $\sigma_1^2$ is bounded away from zero.\\
Similarly, if $w_1>1$, then $\text{ENWMLL}_H \to -\infty$ as $\sigma_1$ approaches zero, and $\sigma_1=0$ becomes an optimal estimate since the pseudo likelihood then goes to infinity. Therefore, we have shown that if $w_j<1, j=1, \dots, m$, then all the estimated standard errors are bounded away from zero.\\
Both the numerical study and the data analysis will show that, by setting the mean of the weights smaller than one, the $\sigma$s remain bounded away from zero even when $W=[w_{jh}]_{m\times H}$ is a weight matrix with arbitrary values.\\
\section{Estimation} Two approaches, the EM algorithm \cite{dempster1977maximum} and the marginal approach, are provided in this section.
\subsection{The EM Algorithm}
\subsubsection{E-Step:}
Since \eqref{cdu} is quadratic in $\alpha_h$ and thus has the exact form of a normal log-density, the posterior mean of $\alpha_h$ is
$$x_h=E(\alpha_h|Y_{.h},\mu,\gamma,\sigma^2)=\frac{\sum \limits_{j=1}^m\frac{w_{jh}(Y_{jh}-\mu_j)\gamma_j}{\sigma_j^2}}{1+\sum \limits_{j=1}^m \frac{w_{jh}\gamma_j^2}{\sigma_j^2}},$$ and its posterior variance is
$$y_h=Var(\alpha_h|Y_{.h},\mu,\gamma,\sigma^2)=({1}+\sum \limits_{j=1}^m \frac{w_{jh}\gamma_j^2}{\sigma_j^2})^{-1},$$
by the definition of the normal pdf. Along with these, we also need to calculate the posterior second moment in our EM approach:
\begin{align}\nonumber z_h=E(\alpha_h^2|Y_{.h},\mu,\gamma,\sigma^2)&=E^2(\alpha_h|Y_{.h},\mu,\gamma,\sigma^2)+Var(\alpha_h|Y_{.h},\mu,\gamma,\sigma^2)\\ \nonumber &=({1}+\sum \limits_{j=1}^m \frac{w_{jh}\gamma_j^2}{\sigma_j^2})^{-1}+\left \{\frac{\sum \limits_{j=1}^m\frac{w_{jh}(Y_{jh}-\mu_j)\gamma_j}{\sigma_j^2}}{1+\sum \limits_{j=1}^m \frac{w_{jh}\gamma_j^2}{\sigma_j^2}}\right \}^2 \end{align} given $Y_{jh}, \mu_j, \gamma_j$ and $\sigma_j^2, \quad j = 1,\dots, m, h=1,\dots, H$ .\\
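A minimal Python sketch of the E-step (an illustrative implementation of the formulas above, not production code) is the following; missing indicators can be handled by setting the corresponding $w_{jh}=0$.
\begin{verbatim}
import numpy as np

def e_step(mu, gamma, sigma2, Y, W):
    """Posterior mean x_h, variance v_h and second moment z_h of alpha_h
    for every hospital, given current parameter values.
    Y, W: (H, m) arrays; missing indicators can be encoded by w_jh = 0."""
    prec = 1.0 + np.sum(W * gamma**2 / sigma2, axis=1)   # 1 + sum_j w_jh gamma_j^2 / sigma_j^2
    num = np.sum(W * (Y - mu) * gamma / sigma2, axis=1)  # sum_j w_jh (Y_jh - mu_j) gamma_j / sigma_j^2
    x = num / prec                                       # posterior means
    v = 1.0 / prec                                       # posterior variances
    z = v + x**2                                         # posterior second moments
    return x, v, z
\end{verbatim}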
\subsubsection{M-Step:}
Note that directly minimizing \eqref{cdu} is computationally difficult; an alternative is to maximize \eqref{czhang} with $\alpha_h=x_h, h=1,\dots,H$, and then repeat the E-step.\\
We adopt an iterative method, first taking derivatives of \eqref{czhang} summed over all $H$ hospitals with respect to $\mu_j, \gamma_j$ and $\sigma_j,$ $j = 1,\dots, m$. \\
Therefore, our iterative method in M step is
\begin{align*}
\hat{\mu}_j&= \mathop{\mathrm{argmax}}\limits_{\mu_j} \sum \limits_{h=1}^Hw_{jh}\log P(Y_{jh}|x_h,z_h) \\ &=\sum \limits_{h=1}^H w_{jh}(Y_{jh}-\gamma_jx_h)/\sum \limits_{h=1}^H {w_{jh}}, \\
\hat{\gamma}_j&= \mathop{\mathrm{argmax}}\limits_{\gamma_j} \sum \limits_{h=1}^Hw_{jh}\log P(Y_{jh}|x_h,z_h) \\ &=\sum \limits_{h=1}^H {w_{jh}}x_h(Y_{jh}-\mu_j)/\sum \limits_{h=1}^H {w_{jh}z_h} \end{align*}
\begin{align}\nonumber
&\text{and} \quad \hat{\sigma}_j^2 = \mathop{\mathrm{argmax}}\limits_{\sigma_j} \sum \limits_{h=1}^Hw_{jh}\log P(Y_{jh}|x_h,z_h) =\\ \nonumber &\sum \limits_{h=1}^H w_{jh}\{(Y_{jh}-\mu_j)^2-2(Y_{jh}-\mu_j)\gamma_jx_h+\gamma_j^2z_h\}/\sum \limits_{h=1}^H w_{jh}, \quad j=1,\dots,m. \end{align}
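A corresponding Python sketch of the M-step updates, applied coordinate-wise with the posterior quantities $x_h$ and $z_h$ from the E-step held fixed, is the following (again an illustrative implementation under the notation above, not production code).
\begin{verbatim}
import numpy as np

def m_step(Y, W, x, z):
    """One sweep of the M-step updates; Y, W are (H, m) arrays, x, z are length-H."""
    wsum = np.sum(W, axis=0)                              # sum_h w_jh for each indicator j
    # first update gamma with the current residuals around the weighted column means
    mu = np.sum(W * Y, axis=0) / wsum                     # provisional mu_j ignoring gamma_j x_h
    gamma = (np.sum(W * x[:, None] * (Y - mu), axis=0)
             / np.sum(W * z[:, None], axis=0))
    mu = np.sum(W * (Y - gamma * x[:, None]), axis=0) / wsum
    resid = Y - mu
    sigma2 = np.sum(W * (resid**2 - 2 * resid * gamma * x[:, None]
                         + gamma**2 * z[:, None]), axis=0) / wsum
    return mu, gamma, sigma2
\end{verbatim}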
Once the Expectation-Maximization algorithm converges, we can update the latent variables $\alpha_h$ by $\frac{\sum \limits_{j=1}^m\frac{w_{jh}(Y_{jh}-\hat{\mu}_j)\hat{\gamma}_j}{\hat{\sigma}_j^2}}{1+\sum \limits_{j=1}^m \frac{w_{jh}\hat{\gamma}_j^2}{\hat{\sigma}_j^2}}, h=1,\dots,H$. We repeat this procedure several times until every $\alpha_h$ becomes stable.\\
In the M-step, the solutions for the $\hat{\mu}$s, $\hat{\gamma}$s and $\hat{\sigma}^2$s are consistent regardless of the choice of initial values, since \begin{align}\nonumber
\frac{\partial^2 \eqref{cdu} }{\partial \mu_j^2} &=\sum \limits _{h=1}^H\frac{w_{jh}}{\sigma_j^2}>0, \\ \nonumber
\frac{\partial^2 \eqref{cdu} }{\partial \gamma_j^2} &=\sum \limits _{h=1}^H\frac{w_{jh}\alpha_h^2}{\sigma_j^2}>0, \quad j=1,\dots, m. \end{align}
At the same time, the values of $x_h$ in the E-step also maximize the joint pseudo log-likelihood with respect to $\alpha$, and \begin{align}\nonumber \frac{\partial^2 \eqref{cdu} }{\partial \alpha_h^2} &=1+\sum \limits _{j=1}^m\frac{w_{jh}\gamma_j^2}{\sigma_j^2}>0, \quad h=1,\dots,H, \end{align}
suggests that \eqref{cdu} is convex for all the $\mu$s, $\gamma$s and $\alpha$s. \\
Moreover, if we assume $\sigma_j^2 \ge \epsilon>0$ holds for some positive number $\epsilon$, then \begin{align}\label{condition}
{\sigma}_j^2<2\sum \limits _{h=1}^Hw_{jh}(Y_{jh}-\mu_j-\gamma_j\alpha_h)^2 /\sum \limits _{h=1}^Hw_{jh} \end{align}
holds for every indicator, since by the result of the M-step the estimated value of $\sigma_j^2$ is close to $\sum \limits _{h=1}^Hw_{jh}(Y_{jh}-\mu_j-\gamma_j\alpha_h)^2 /\sum \limits _{h=1}^Hw_{jh}.$ Then \eqref{condition} implies
\begin{align}\nonumber
\frac{1}{{\sigma}_j^2}\sum \limits _{h=1}^Hw_{jh}(Y_{jh}-\mu_j-\gamma_j\alpha_h)^2 > \frac{1}{2}\sum \limits _{h=1}^Hw_{jh} \end{align} holds. Thus we have at $(0,\epsilon)$, \begin{align}\nonumber \frac{\partial^2 \eqref{cdu} }{\partial (\sigma_j^2)^2} &=-\sum \limits _{h=1}^H \frac{w_{jh}}{\sigma_j^4}+2\sum \limits _{h=1}^H\frac{w_{jh}}{\sigma_j^6}(Y_{jh}-\mu_j-\gamma_j\alpha_h)^2 \\ \nonumber &=\frac{1}{\sigma_j^4}\sum \limits _{h=1}^H w_{jh}\big[\frac{2(Y_{jh}-\mu_j-\gamma_j\alpha_h)^2}{\sigma_j^2}-1\big]>0. \end{align}
Therefore, the EM approach is equivalent to a coordinate descent method, and we can find the minimizer for $\sigma_j^2 \in (0,\epsilon)$ \cite{daubechies2004iterative}.\\
In addition, if there are more than two indicators that contribute to the latent variable, then the local maximum is the global maximum for \eqref{cdu} \cite{yong2013beginner}.
\subsection{The Marginal Pseudo Likelihood} Given $W$ as constant and conditional on $Y$, we can get the parameter estimates directly by minimizing \eqref{lin} summed over the hospitals, i.e. \begin{align}\nonumber
&-2\log \mathcal{L}^*(\mu_1 \dots \mu_m, \gamma_1 \dots \gamma_m, \sigma_1 \dots \sigma_m|Y_{.h}) =\sum \limits_{j=1}^m w_{jh} \log 2\pi \\ \nonumber
& +\log ({1}+\sum \limits_{j=1}^m \frac{w_{jh}\gamma_j^2}{\sigma_j^2})+\sum \limits_{j=1}^m w_{jh} \log \sigma_j^2+\sum \limits_{j=1}^m \frac{w_{jh}(Y_{jh}-\mu_j)^2}{\sigma_j^2}-\frac{(\sum \limits_{j=1}^m\frac{w_{jh}(Y_{jh}-\mu_j)\gamma_j}{\sigma_j^2})^2}{1+\sum \limits_{j=1}^m \frac{w_{jh}\gamma_j^2}{\sigma_j^2}}, \end{align} then use the estimates to get all $\alpha_h$s by
$$\hat{\alpha}_h=E(\alpha_h|Y_{.h},\mu,\gamma,\sigma^2)=\frac{\sum \limits_{j=1}^m\frac{w_{jh}(Y_{jh}-\mu_j)\gamma_j}{\sigma_j^2}}{1+\sum \limits_{j=1}^m \frac{w_{jh}\gamma_j^2}{\sigma_j^2}},$$ and their posterior variances by
$$\hat{Var}(\alpha_h)=Var(\alpha_h|Y_{.h},\mu,\gamma,\sigma^2)=({1}+\sum \limits_{j=1}^m \frac{w_{jh}\gamma_j^2}{\sigma_j^2})^{-1},$$ where $1 \le h \le H$. We implement this algorithm through the NLMIXED procedure in SAS.
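Outside of SAS, the same marginal approach can be sketched in Python by numerically minimizing the summed $-2\log\mathcal{L}^*$, reusing the illustrative functions \texttt{neg2\_marginal\_pseudo\_loglik} and \texttt{e\_step} sketched above; the optimizer and parameterization below are our own choices, not those of the NLMIXED procedure.
\begin{verbatim}
import numpy as np
from scipy.optimize import minimize

def fit_marginal(Y, W):
    """Sketch of the marginal approach: minimize the summed -2 log L* over
    (mu, gamma, log sigma^2), then recover posterior means of alpha_h.
    Relies on neg2_marginal_pseudo_loglik and e_step defined above."""
    m = Y.shape[1]

    def objective(theta):
        mu, gamma = theta[:m], theta[m:2 * m]
        sigma2 = np.exp(theta[2 * m:])       # log-parameterization keeps sigma^2 > 0
        return sum(neg2_marginal_pseudo_loglik(mu, gamma, sigma2, y_h, w_h)
                   for y_h, w_h in zip(Y, W))

    theta0 = np.concatenate([Y.mean(axis=0), np.full(m, 0.5), np.zeros(m)])
    res = minimize(objective, theta0, method="L-BFGS-B")
    mu, gamma = res.x[:m], res.x[m:2 * m]
    sigma2 = np.exp(res.x[2 * m:])
    alpha_hat, _, _ = e_step(mu, gamma, sigma2, Y, W)   # posterior means of alpha_h
    return mu, gamma, sigma2, alpha_hat
\end{verbatim}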
\section{Numerical Study}
In this section, we study two cases based on measure-specific weights. The weights in every indicator are initially set to have mean equal to one in both cases. For every $j$, the weights $w_{jh}$, $h=1,\dots,H$, are either exponentially shaped (the extreme case) or mound shaped (the regular case). We show that in the regular case the latent variable model gives consistent results that incorporate the weight information. In the extreme case, we demonstrate that by setting the sum of the weights less than the sample size in each indicator, or, equivalently, the mean of the weights to be less than one, the estimates of the $\sigma$s are always bounded away from zero. We can then obtain both the estimates of the latent variables and their variance estimates. This is consistent with the results in Section \ref{ccc}. We also compare the performance of the latent variable model with different sample sizes through our algorithm in the extreme case.
\subsection{A Regular Case}\label{bbb} Assume there are three indicators in the group and let the sample size be $H=1000$. The inputs $Y_{i.}=[Y_{i1},\dots, Y_{iH}]$, $i=1,2,3$, are generated from a multivariate normal distribution with mean zero and variance one, and the correlation between each pair of indicators is set to $0.5$.\\
We first generate $H$ random numbers from the Gamma distribution $G(3/2, 1/2)$ to get $W_{1.}=[w_{11},\dots, w_{1H}]$, where $3/2$ is the shape parameter and $1/2$ is the scale parameter. For $W_{2.}$ and $W_{3.}$, we separately generate $H$ random numbers from the Gamma distribution $G(3, 1/3)$. Then we divide $W_{i.}, i=1,2,3$, by their respective sample means. Thus the sample mean of the weights of every indicator is the same, namely one.\\
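This weight-generating step can be sketched in Python as follows, assuming that NumPy's shape/scale parameterization of the Gamma distribution matches the $G(\text{shape},\text{scale})$ notation used here.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)
H = 1000

# Gamma draws with (shape, scale) as described in the text
W1 = rng.gamma(shape=1.5, scale=0.5, size=H)   # G(3/2, 1/2), more skewed
W2 = rng.gamma(shape=3.0, scale=1/3, size=H)   # G(3, 1/3)
W3 = rng.gamma(shape=3.0, scale=1/3, size=H)   # G(3, 1/3)

# rescale so that each indicator's weights have sample mean one
W1, W2, W3 = (w / w.mean() for w in (W1, W2, W3))
\end{verbatim}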
We replicate the study based on the above setting $100$ times, and focus on both the average loading ($\gamma$) and the average standard deviation ($\sigma$) of the three indicators. For contrast, we also replicate the study with uniform weights $100$ times. Table 1 shows the results:
\begin{table} \caption{Parameter Estimates with Varying Weights vs.\ Uniform Weights} \centering \begin{tabular}{lllllllll}
\hline \hline
& & \multicolumn{2}{c}{Weight} & & & \multicolumn{2}{c}{No Weight}&\\ \hline
$\gamma_1$ & 0.8520 & $\sigma_1$ & 0.4801 & $\gamma_1$ & 0.7084 & $\sigma_1$ & 0.7057 \\
$\gamma_2$ & 0.6675 & $\sigma_2$ & 0.7391 & $\gamma_2$ & 0.7080 & $\sigma_2$ & 0.7088 \\
$\gamma_3$ & 0.6700 & $\sigma_3$ & 0.7356 & $\gamma_3$ & 0.7107 & $\sigma_3$ & 0.7053 \\ \hline \hline \end{tabular} \end{table}
We can see that with uniform weights, by design, all three indicators have similar average loadings and average root mean squared errors. By Theorem 1, the result with uniform weights is the same as the confirmatory factor analysis with one factor. When the weights are moderately skewed, the average loading for the first indicator is larger than those of the second and the third. This is meaningful since, although all three sets of weights have the same mean, $W_{1.}$ is more skewed than $W_{2.}$ and $W_{3.}$, and the maximum value in $W_{1.}$ is larger than those of $W_{2.}$ and $W_{3.}$. Thus the latent variable model can incorporate different weights into its results.
\subsection{An Extreme Case} We use the same generating method for $Y_{jh}$ as in Section \ref{bbb}, where $j=1,2,3, h=1,\dots,H$, and we use Gamma distributions whose shape parameters are smaller than their scale parameters to generate more skewed weights.\\
We first generate $H$ random numbers from the Gamma distribution $G(1/2, 2)$ to get $W_{1.}$. For $W_{2.}$ and $W_{3.}$, we separately generate $H$ random numbers from the Gamma distribution $G(1, 2)$. Again, we divide $W_{i.}, i=1,2,3$, by their respective sample means. We will find that $\sigma_1 \to 0$, as Section 3 suggests; thus we focus on the result for $\sigma_1$ here. For comparison, we test the performance of the latent variable model under $0.99*W, 0.9*W, 0.8*W$ and $0.7*W$.\\
\begin{figure}
\caption{Values of ARMSE1 under different settings of weights for increasing sample size}
\end{figure}
We compare the performance of all five weight matrices with the sample size $H$ varying from $300$ to $5000$, replicating the study $100$ times for each value of the sample size. Figure 1 shows the average values of $\sigma_1$.\\
Figure 1 shows that, with the original weights whose means equal one, the smallest average root mean squared error (ARMSE1) tends to zero. However, the smaller the means of the weights of the indicators, the larger the ARMSE1. As the sample size increases, the ARMSE1 becomes stable. Therefore, if the mean of the weights of an indicator is smaller than one, we prevent the $\sigma$s from being zero, and we can then obtain estimates of the posterior variances of the latent variables.
\section{Data Analysis}
We applied our latent variable model to CMS's Overall Hospital Quality Rating database from the CMS 2019 public data across the country. This database consists of seven indicator groups: Mortality; Readmission; Safety of Care (Safety); Patient Experience; Effectiveness; Timeliness; and Imaging Efficiency. In this section, we first analyze two of the three outcome groups: Mortality and Readmission. We then discuss the Safety group.\\
In each indicator group, hospitals report indicator scores. For each indicator, the scores from the available hospitals are standardized to mean zero and variance one. There are also measure-specific weights (CMS calls them the denominator weights) for the hospitals, reflecting their volumes of admissions. As with sample weights, the mean of the denominator weights in every indicator is standardized to be just below one. Note that these weights vary at both the indicator level and the hospital level; therefore, the latent variable model is appropriate for the data.
\subsection{Mortality (regular)}
For the group of mortality, seven indicators among $4573$ hospitals are presented:
1. MORT-30-AMI: Acute Myocardial Infarction (AMI) 30-Day Mortality Rate;
2. MORT-30-CABG: Coronary Artery Bypass Graft (CABG) 30-Day Mortality Rate;
3. MORT-30-COPD: Chronic Obstructive Pulmonary Disease (COPD) 30-Day Mortality Rate;
4. MORT-30-HF: Heart Failure (HF) 30-Day Mortality Rate;
5. MORT-30-PN: Pneumonia (PN) 30-Day Mortality Rate;
6. MORT-30-STK: Acute Ischemic Stroke (STK) 30-Day Mortality Rate;
7. PSI-4-SURG-COMP: Death Among Surgical Patients with Serious Treatable Complications.\\
We apply both the EM approach and the marginal approach to the mortality data and calculate the maximum absolute difference between the predicted latent variables from the two approaches; the difference is only 2.2208e-04. This suggests that the EM and the marginal approaches are essentially identical. Table 2 shows the parameter estimates of the latent variable model. We found that the loadings are balanced across indicators and all the estimated variances are bounded away from zero. This result is the same as the result from CMS via the SAS quadrature method \cite{venkateshoverall}.
\begin{table} \caption{Parameter Estimates in the Mortality Group with Un-adjusted Weights} \centering \begin{tabular}{llllll}
\hline \hline
$\mu$ & Un-adj & $\gamma$ & Un-adj & $\sigma$ & Un-adj \\ \hline $\mu_1$ & 0.113 & $\gamma_1$ & 0.508 & $\sigma_1$ & 0.927 \\ $\mu_2$ & 0.131 & $\gamma_2$ & 0.333 & $\sigma_2$ & 0.894 \\ $\mu_3$ & 0.002 & $\gamma_3$ & 0.676 & $\sigma_3$ & 0.822 \\ $\mu_4$ & 0.107 & $\gamma_4$ & 0.713 & $\sigma_4$ & 0.682 \\ $\mu_5$ & -0.007 & $\gamma_5$ & 0.665 & $\sigma_5$ & 0.740 \\ $\mu_6$ & -0.049 & $\gamma_6$ & 0.484 & $\sigma_6$ & 0.975 \\ $\mu_7$ & -0.061 & $\gamma_7$ & 0.281 & $\sigma_7$ & 1.049 \\ \hline \hline \end{tabular} \end{table}
We also multiplied all the weights in the mortality data by $0.99$, and we found no difference in the parameter estimates between $0.99*W$ and $W$. Moreover, the root mean squared error of the latent variable $\alpha_h$ between the original weights and the $0.99$-multiplied weights is $0.0025$. Therefore, the $0.99$-multiplied weights perform very closely to the un-adjusted weights in the mortality group.
\subsection{Readmission (Extreme)} In the data of the readmission group, there are nine indicators among $4573$ hospitals:
1. EDAC-30-AMI: Excess Days in Acute Care (EDAC) after hospitalization for Acute Myocardial Infarction (AMI);
2. EDAC-30-HF: Excess Days in Acute Care (EDAC) after hospitalization for Heart Failure (HF);
3. EDAC-30-PN: Excess Days in Acute Care (EDAC) after hospitalization for Pneumonia (PN);
4. OP-32: Facility 7-Day Risk Standardized Hospital Visit Rate after Outpatient Colonoscopy;
5. READM-30-CABG: Coronary Artery Bypass Graft (CABG) 30-Day Readmission Rate;
6. READM-30-COPD: Chronic Obstructive Pulmonary Disease (COPD) 30-Day Readmission Rate;
7. READM-30-Hip-Knee: Hospital-Level 30-Day All-Cause Risk-Standardized Readmission Rate (RSRR) Following Elective Total Hip Arthroplasty (THA)/Total Knee Arthroplasty (TKA);
8. READM-30-HOSP-WIDE: HWR Hospital-Wide All-Cause Unplanned Readmission;
9. READM-30-STK: Stroke (STK) 30-Day Readmission Rate.\\
After running the latent variable model, we found that the estimated $\sigma_8$ for the 30-day hospital-wide readmission indicator is zero. This is because, for this indicator, the number of admissions varies from $25$ to $23915$, which is much larger than for the remaining indicators in the group. After standardization, the distribution of the denominator weights is skewed much more heavily for the 30-day hospital-wide readmission indicator than for the remaining indicators. Thus we apply the method of multiplying the weights of the 30-day hospital-wide readmission indicator by $0.99$, in order to force its standard error to be larger than zero while keeping the parameter estimates as close as possible. The parameter estimates are shown in Table 3.
\begin{table} \caption{Parameter Estimates in the Readmission Group via Un-adjusted and Adjusted Weights} \centering \begin{tabular}{lllllllll}
\hline \hline
$\mu$ & Un-adj & Adj & $\gamma$ & Un-adj & Adj & $\sigma$ & Un-adj & Adj\\ \hline $\mu_1$ & 0.031 & 0.031 & $\gamma_1$ & 0.316 & 0.318 & $\sigma_1$ & 0.710 & 0.710 \\ $\mu_2$ & -0.175 & -0.175 & $\gamma_2$ & 0.427 & 0.430 & $\sigma_2$ & 0.748 & 0.747 \\ $\mu_3$ & -0.243 & -0.243 & $\gamma_3$ & 0.410 & 0.413 & $\sigma_3$ & 0.775 & 0.774 \\ $\mu_4$ & 0.198 & 0.198 & $\gamma_4$ & -0.002 & -0.002 & $\sigma_4$ & 1.228 & 1.228 \\ $\mu_5$ & 0.106 & 0.106 & $\gamma_5$ & 0.303 & 0.304 & $\sigma_5$ & 1.024 & 1.024 \\ $\mu_6$ & -0.068 & -0.067 & $\gamma_6$ & 0.522 & 0.525 & $\sigma_6$ & 0.972 & 0.971 \\ $\mu_7$ & 0.194 & 0.194 & $\gamma_7$ & 0.388 & 0.390 & $\sigma_7$ & 1.043 & 1.042 \\ $\mu_8$ & 0.000 & 0.001 & $\gamma_8$ & 0.975 & 0.978 & $\sigma_8$ & 0.000 & 0.056 \\ $\mu_9$ & -0.051 & -0.051 & $\gamma_9$ & 0.499 & 0.502 & $\sigma_9$ & 0.983 & 0.982 \\ \hline \hline \end{tabular} \end{table}
We can see that, except for $\sigma_8$, all the parameters from the original-weight and adjusted-weight methods differ by less than 0.001. Moreover, the 0.99-adjusted method ensures that all the variances in the readmission group are bounded away from zero. This is consistent with the previous numerical and theoretical results.
\subsection{Safety (Extreme)} Modeling the Safety group has been challenging over the years because of its unbalanced loadings and bi-peak parameter estimates in latent variable modeling.\\
In the Safety of Care group, there are eight indicators among $4573$ hospitals:
1. COMP-HIP-KNEE: Hospital-Level Risk-Standardized Complication Rate (RSCR) Following Elective Primary Total Hip Arthroplasty (THA) and Total Knee Arthroplasty (TKA);
2. HAI-1: Central-Line Associated Bloodstream Infection (CLABSI);
3. HAI-2: Catheter-Associated Urinary Tract Infection (CAUTI);
4. HAI-3: Surgical Site Infection from colon surgery (SSI-colon);
5. HAI-4: Surgical Site Infection from abdominal hysterectomy (SSI-abdominal hysterectomy);
6. HAI-5: MRSA Bacteremia;
7. HAI-6: Clostridium Difficile (C. difficile);
8. PSI-90-Safety: Complication/Patient Safety for Selected Indicators (PSI).\\
Similar to the readmission group, the Safety group is also an extreme case, since both the COMP-HIP-KNEE and PSI-90-Safety indicators have a much larger variance in the numbers of admissions than the remaining six indicators. After running the latent variable model with the un-adjusted sample weights, we found that the loadings for HAI-1 to HAI-6 are close to zero. This causes the bi-peak issue of the parameter estimates: since there are only two indicators loaded onto the latent factor, one or both of $\sigma_1$ and $\sigma_8$ can have zero variance. We provide two methods to solve this issue in the Safety group.
\subsubsection{Solution One} One way to solve the problem is to compare the marginal pseudo log-likelihood between the two peaks. We found that when PSI-90-Safety ($\sigma_8$) turns out to be zero, the marginal log-likelihood is larger. Similarly to the readmission group, we apply the method of multiplying the weights by $0.99$ (denoted by Adj) to both the COMP-HIP-KNEE and PSI-90-Safety indicators. Table 4 shows the parameter estimates with the original weights and with the $0.99$-multiplied weights in the Safety group. Table 5 shows the root mean squared errors of the latent variable estimates between the $0.9$-, $0.8$- and $0.7$-multiplied weights and the $0.99$-multiplied un-adjusted weights (since under the un-adjusted weights the latent variable variances cannot be calculated). \\
\begin{table} \caption{Parameter Estimates in the Safety of Care Group via Different Weightings} \centering \begin{tabular}{lllllllll}
\hline \hline
$\mu$ & Un-adj & Adj & $\gamma$ & Un-adj & Adj & $\sigma$ & Un-adj & Adj\\ \hline $\mu_1$ & 0.287 & 0.287 & $\gamma_1$ & 0.188 & 0.189 & $\sigma_1$ & 1.039 & 1.039 \\ $\mu_2$ & -0.007 & -0.007 & $\gamma_2$ & 0.007 & 0.007 & $\sigma_2$ & 0.723 & 0.723 \\ $\mu_3$ & -0.010 & -0.010 & $\gamma_3$ & 0.008 & 0.008 & $\sigma_3$ & 0.757 & 0.757 \\ $\mu_4$ & -0.055 & -0.055 & $\gamma_4$ & 0.045 & 0.046 & $\sigma_4$ & 0.837 & 0.837 \\ $\mu_5$ & 0.010 & 0.010 & $\gamma_5$ & 0.060 & 0.060 & $\sigma_5$ & 0.867 & 0.867 \\ $\mu_6$ & 0.032 & 0.032 & $\gamma_6$ & 0.037 & 0.037 & $\sigma_6$ & 0.796 & 0.796 \\ $\mu_7$ & 0.003 & 0.003 & $\gamma_7$ & 0.025 & 0.025 & $\sigma_7$ & 0.622 & 0.622 \\ $\mu_8$ & 0.016 & 0.016 & $\gamma_8$ & 0.897 & 0.901 & $\sigma_8$ & 0.000 & 0.033 \\ \hline \hline \end{tabular} \end{table}
\begin{table} \caption{Summary Statistics Under Different Weights} \centering \begin{tabular}{lllll}
\hline \hline
& 0.99*W & 0.9*W & 0.8*W & 0.7*W \\ \hline
RMSE & & 0.0861 & 0.2123 & 0.3552 \\
Mean & 0.0000 & 0.0000 & 0.0000 & 0.0000 \\
Std & 0.8354 & 0.7711 & 0.6754 & 0.5604 \\
\hline \hline \end{tabular} \end{table}
We found that the parameter estimates are very close between the original and adjusted weights, and that as the weight coefficient decreases from 0.99 to 0.7, the standard deviation of the predicted latent variables moves farther away from the prior value (one).
\subsubsection{Solution Two} Another idea for modeling the Safety group is to smooth the weights, thus preventing the dominance of either COMP-HIP-KNEE or PSI-90-Safety. Considering that the HAIs are similar in both admission volumes and indicator scores, this method seeks balanced loadings from the latent variable model.
\begin{table} \caption{Parameter Estimates in the Safety Group via log transformation} \centering \begin{tabular}{llllll}
\hline \hline
$\mu$ & log-W & $\gamma$ & log-W & $\sigma$ & log-W \\ \hline $\mu_1$ & 0.048 & $\gamma_1$ & 0.105 & $\sigma_1$ & 0.995 \\ $\mu_2$ & 0.036 & $\gamma_2$ & 0.528 & $\sigma_2$ & 0.738 \\ $\mu_3$ & 0.016 & $\gamma_3$ & 0.372 & $\sigma_3$ & 0.846 \\ $\mu_4$ & 0.000 & $\gamma_4$ & 0.211 & $\sigma_4$ & 0.919 \\ $\mu_5$ & 0.022 & $\gamma_5$ & 0.279 & $\sigma_5$ & 0.897 \\ $\mu_6$ & 0.033 & $\gamma_6$ & 0.359 & $\sigma_6$ & 0.867 \\ $\mu_7$ & 0.019 & $\gamma_7$ & 0.078 & $\sigma_7$ & 0.865 \\ $\mu_8$ & 0.008 & $\gamma_8$ & 0.134 & $\sigma_8$ & 0.938 \\ \hline \hline \end{tabular} \end{table}
One option is to apply a logarithm transformation to the admission volumes for all indicators in the Safety group. This helps to reduce the skewness of the un-adjusted weights in the Safety group. The result is in Table 6. We can see that the loadings are balanced and that there are more than three indicators with relatively high loadings on the latent variable. Thus taking the logarithm of the admission volumes in the Safety group can help make the result balanced and thus identifiable.
\section{Summary and Discussion} In this work we present a latent variable model that incorporates measure-specific sample weights via pseudo-likelihood estimation. The latent variable model can handle missing values as well. The estimates obtained through the algorithms have desirable asymptotic properties. We gave examples, in both the numerical study and the real data analysis, where the latent variable model can produce zero standard error estimates for certain indicators. We showed that if the sample weight means of those indicators are less than one, the estimates of the variance components are bounded away from zero. We also showed that applying a log transformation to the sample weights can help to obtain nonzero variance components.\\
For future work, we plan to investigate the pseudo likelihood and the estimation algorithm of the latent variable model under random weights with a Gamma distribution, so that we can discover the threshold between the choice of the shape (scale) parameters and the bounded-away-from-zero estimates of the variance components. Moreover, we would also like to investigate the behavior of the latent variable model under various distributions of the weights, such as the Poisson distribution, the beta distribution, etc.
\section{Appendix} We organize this section as follows: Sections \ref{7.1} and \ref{7.2} contain the proof of Example 1, Section \ref{7.3} the proof of Theorem 1, and Section \ref{7.4} the proof of Theorem 2.\\
Recall that for any hospital $h$ there are $m$ indicators. Assume there exists an overall latent score $\alpha_h$ for the $m$ indicators; conditional on this score, each indicator has an independent normal distribution with
$$Y_{jh}|\alpha_h \sim N(\mu_j+\gamma_j\alpha_h, \sigma_j^2), \quad \quad 1\le j\le m$$ where $Y_{jh}$ is the response for the $j$th indicator in hospital $h$. $\mu_j, \gamma_j$ and $\sigma_j^2$ are unknown parameters. \\
We also assume $\alpha_h$ has a normal distribution with mean zero and variance one. By the log of the normal pdf, the marginal log-likelihood of all the parameters for hospital $h$ satisfies \begin{align}\nonumber
&-2\log \mathcal{L}(\mu_1 \dots \mu_m, \gamma_1 \dots \gamma_m, \sigma_1 \dots \sigma_m|Y_{.h}) = m\log 2\pi \\ \nonumber
& +\log ({1}+\sum \limits_{j=1}^m \frac{\gamma_j^2}{\sigma_j^2})+\sum \limits_{j=1}^m \log
\sigma_j^2+\sum \limits_{j=1}^m \frac{(Y_{jh}-\mu_j)^2}{\sigma_j^2}-\frac{(\sum \limits_{j=1}^m\frac{(Y_{jh}-\mu_j)\gamma_j}{\sigma_j^2})^2}{1+\sum \limits_{j=1}^m \frac{\gamma_j^2}{\sigma_j^2}}. \end{align}
\subsection{Asymptotic Behavior of LVM} \label{7.1}
We start with $m=3$. Based on the marginal log-likelihood with uniform weight, we have negative marginal log-likelihood for hospital $h$ satisfies \begin{align}\nonumber 2\text{NMLL}_h &= \log ({1}+\sum \limits_{j=1}^3 \frac{\gamma_j^2}{\sigma_j^2})+\sum \limits_{j=1}^3 \log \sigma_j^2+\sum \limits_{j=1}^3 \frac{(Y_{jh}-\mu_j)^2}{\sigma_j^2}-\frac{(\sum \limits_{j=1}^3\frac{(Y_{jh}-\mu_j)\gamma_j}{\sigma_j^2})^2}{1+\sum \limits_{j=1}^3 \frac{\gamma_j^2}{\sigma_j^2}}\\ \nonumber &+3\log 2 \pi=3\log 2\pi +\log(\sigma_1^2\sigma_2^2\sigma_3^2[1+\frac{\gamma_1^2}{\sigma_1^2}+\frac{\gamma_2^2}{\sigma_2^2}+\frac{\gamma_3^2}{\sigma_3^2}]) \\ \nonumber &+\bigg\{(Y_{1h}-\mu_1)^2(\sigma_2^2\sigma_3^2+\frac{\sigma_2^2\sigma_3^2\gamma_1^2}{\sigma_1^2}+\sigma_3^2\gamma_2^2+\sigma_2^2\gamma_3^2-\frac{\gamma_1^2\sigma_1^2\sigma_2^2\sigma_3^2}{\sigma_1^2\sigma_1^2}) \\ \nonumber &+(Y_{2h}-\mu_2)^2(\sigma_1^2\sigma_3^2+\sigma_1^2\gamma_3^2+\sigma_3^2\gamma_1^2) +(Y_{3h}-\mu_3)^2(\sigma_1^2\sigma_2^2+\sigma_1^2\gamma_2^2+\sigma_2^2\gamma_1^2) \\ \nonumber &-2(Y_{1h}-\mu_1)(Y_{2h}-\mu_2)\gamma_1\gamma_2\sigma_3^2-2(Y_{2h}-\mu_2)(Y_{3h}-\mu_3)\gamma_2\gamma_3\sigma_1^2 \\ \nonumber &-2(Y_{1h}-\mu_1)(Y_{3h}-\mu_3)\gamma_1\gamma_3\sigma_2^2 \bigg\}\bigg/(\sigma_1^2\sigma_2^2\sigma_3^2[1+\frac{\gamma_1^2}{\sigma_1^2}+\frac{\gamma_2^2}{\sigma_2^2}+\frac{\gamma_3^2}{\sigma_3^2}]). \\ \nonumber
\end{align} Let \begin{align} \nonumber A &= \sigma_1^2\sigma_2^2\sigma_3^2[1+\frac{\gamma_1^2}{\sigma_1^2}+\frac{\gamma_2^2}{\sigma_2^2}+\frac{\gamma_3^2}{\sigma_3^2}], \\ \nonumber B_h &=(Y_{1h}-\mu_1)^2(\sigma_2^2\sigma_3^2+\sigma_3^2\gamma_2^2+\sigma_2^2\gamma_3^2) +(Y_{2h}-\mu_2)^2(\sigma_1^2\sigma_3^2+\sigma_1^2\gamma_3^2+\sigma_3^2\gamma_1^2) \\ \nonumber & +(Y_{3h}-\mu_3)^2(\sigma_1^2\sigma_2^2+\sigma_1^2\gamma_2^2+\sigma_2^2\gamma_1^2)-2(Y_{1h}-\mu_1)(Y_{3h}-\mu_3)\gamma_1\gamma_3\sigma_2^2 \\ \nonumber &-2(Y_{1h}-\mu_1)(Y_{2h}-\mu_2)\gamma_1\gamma_2\sigma_3^2-2(Y_{2h}-\mu_2)(Y_{3h}-\mu_3)\gamma_2\gamma_3\sigma_1^2 ,
\end{align} then we have \begin{align} \label{du} 2\text{NMLL}_h = \log A + \frac{B_h}{A}+3\log 2\pi \end{align} and the expected negative marginal log-likelihood for $H$ hospitals (denoted by $\text{ENMLL}_H$) is \begin{align} \label{du1} \text{ENMLL}_H = \frac{1}{H}\sum \limits_{h=1}^H \text{NMLL}_h = \frac{1}{2}\log A + \frac{\frac{1}{H}\sum \limits_{h=1}^H B_h}{2A}+\frac{3}{2}\log 2\pi. \end{align}
Recall our model is
$$Y_{jh}|\alpha_h \sim N(\mu_j+\gamma_j\alpha_h, \sigma_j^2) \quad \quad 1\le j\le m,$$
by the strong law of large numbers, we have \begin{align}\label{zhang}
\frac{1}{H}\sum \limits_{h=1}^H (Y_{ih}-\mu_i)^2 \xrightarrow{a.s.} \sigma_i^2+\gamma_i^2 \end{align} holds for $i=1,\dots,m$. Also note that
\begin{align}\label{deng} \frac{1}{H}\sum \limits_{h=1}^H (Y_{ih}-\mu_i)(Y_{jh}-\mu_j) \xrightarrow{a.s.} \gamma_i\gamma_j
\end{align} holds for all $1 \le i \neq j \le m$.\\
Therefore, plugging \eqref{zhang} and \eqref{deng} into $B_h$,
we have \begin{align} \nonumber \frac{1}{H}\sum \limits_{h=1}^H B_h &\xrightarrow{a.s.} \sum \limits _{i,j,k \in \{1,2,3\}}\big[(\sigma_i^2+\gamma_i^2)(\sigma_j^2\sigma_k^2+\sigma_j^2\gamma_k^2+\sigma_k^2\gamma_j^2)-2\gamma_i^2\gamma_j^2\sigma_k^2\big] \\ \nonumber &=3(\sigma_1^2\sigma_2^2\sigma_3^2 + \gamma_1^2\sigma_2^2\sigma_3^2 +\sigma_1^2\gamma_2^2\sigma_3^2+\sigma_1^2\sigma_2^2\gamma_3^2)=3A \end{align}
as $H\to\infty$, where the sum over $i,j,k\in\{1,2,3\}$ is understood as running over the three cyclic permutations $(i,j,k)$ of $(1,2,3)$. Therefore, we have $$\text{ENMLL}_H \xrightarrow{a.s.} \frac{1}{2}\log A+\frac{3}{2}(1+\log 2\pi),$$ as $H \to \infty$.
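As a numerical sanity check of this limit (a sketch added here, not from the paper; all parameter values are illustrative), one can simulate data from the $m=3$ model and compare the empirical $\text{ENMLL}_H$ with the closed-form limit:
\begin{verbatim}
# Sketch: Monte-Carlo check of ENMLL_H -> (1/2) log A + (3/2)(1 + log 2 pi), m = 3.
import numpy as np
from scipy.stats import multivariate_normal

rng = np.random.default_rng(1)
mu     = np.array([0.5, -1.0, 2.0])
gamma  = np.array([1.0,  0.7, -0.4])
sigma2 = np.array([1.5,  0.8,  2.0])
H = 200_000

alpha = rng.normal(size=H)                                    # latent scores
Y = mu + np.outer(alpha, gamma) + rng.normal(size=(H, 3))*np.sqrt(sigma2)

mvn = multivariate_normal(mean=mu, cov=np.diag(sigma2) + np.outer(gamma, gamma))
enmll = -np.mean(mvn.logpdf(Y))                               # (1/H) sum_h NMLL_h
A = np.prod(sigma2) * (1 + np.sum(gamma**2 / sigma2))
print(enmll, 0.5*np.log(A) + 1.5*(1 + np.log(2*np.pi)))       # close for large H
\end{verbatim}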
\subsection{Multivariate Normal Distribution} \label{7.2}
Let $Z_{jh} \sim N(\mu_j, \sigma_j^2+\gamma_j^2)$, $1\le j\le 3$, $1\le h\le H$, and assume the covariance between $Z_{i.}$ and $Z_{j.}$ is $\gamma_i\gamma_j$ for $i \neq j$. Then the variance-covariance matrix of $Z_{1.}, Z_{2.}, Z_{3.}$ is\\
$$\Sigma = \begin{bmatrix}
\sigma_1^2+\gamma_1^2 & \gamma_1\gamma_2 & \gamma_1\gamma_3\\
\gamma_2\gamma_1 & \sigma_2^2+\gamma_2^2 & \gamma_2\gamma_3 \\
\gamma_3\gamma_1 & \gamma_3\gamma_2 & \sigma_3^2+\gamma_3^2 \end{bmatrix}, $$\\
we can show that the determinant of $\Sigma$ satisfies
\begin{align}\nonumber
|\Sigma|&=(\sigma_1^2+\gamma_1^2)[(\sigma_2^2+\gamma_2^2)(\sigma_3^2+\gamma_3^2)-\gamma_2^2\gamma_3^2]-\gamma_1\gamma_2[\gamma_1\gamma_2(\sigma_3^2+\gamma_3^2)-\gamma_1\gamma_2\gamma_3^2] \\ \nonumber
&+\gamma_1\gamma_3[\gamma_1\gamma_2^2\gamma_3-\gamma_1\gamma_3(\sigma_2^2+\gamma_2^2)] \\ \nonumber
&=(\sigma_1^2+\gamma_1^2)(\sigma_2^2\sigma_3^2+\sigma_3^2\gamma_2^2+\sigma_2^2\gamma_3^2)-\gamma_1^2\gamma_2^2\sigma_3^2-\gamma_1^2\gamma_3^2\sigma_2^2\\ \nonumber
&=\sigma_1^2\sigma_2^2\sigma_3^2 + \gamma_1^2\sigma_2^2\sigma_3^2 +\sigma_1^2\gamma_2^2\sigma_3^2+\sigma_1^2\sigma_2^2\gamma_3^2=A, \end{align}
and the adjugate matrix $\Sigma^*$ of $\Sigma$, which satisfies $\Sigma^{-1}=\Sigma^*/A$, is $$\Sigma^* = \begin{bmatrix}
\sigma_2^2\sigma_3^2+\sigma_3^2\gamma_2^2+\sigma_2^2\gamma_3^2 & -\gamma_1\gamma_2\sigma_3^2 & -\gamma_1\gamma_3\sigma_2^2\\
-\gamma_2\gamma_1\sigma_3^2 & \sigma_1^2\sigma_3^2+\sigma_3^2\gamma_1^2+\sigma_1^2\gamma_3^2 & -\gamma_2\gamma_3\sigma_1^2 \\
-\gamma_3\gamma_1\sigma_2^2 & -\gamma_3\gamma_2\sigma_1^2 & \sigma_2^2\sigma_1^2+\sigma_1^2\gamma_2^2+\sigma_2^2\gamma_1^2 \end{bmatrix}, $$\\
Let ${Z_{.h}} = [Z_{1h}, Z_{2h}, Z_{3h}]'$ and ${\mu} = [\mu_1, \mu_2, \mu_3]'$. Then the negative joint log-likelihood of the multivariate normal distribution for hospital $h$ satisfies
\begin{align} \nonumber
\text{NLL}_h &\propto \log A+({Z_{.h}}-{\mu})'\Sigma^{-1}({Z_{.h}}-{\mu}) \\ \nonumber
& = \log A+({Z_{.h}}-{\mu})'\frac{\Sigma^*}{A}({Z_{.h}}-{\mu}). \end{align}
Denote $C_h=({Z_{.h}}-{\mu})'\Sigma^*({Z_{.h}}-{\mu})$. By direct matrix calculation, we have \begin{align}\nonumber
C_h&=(Z_{1h}-\mu_1)^2(\sigma_2^2\sigma_3^2+\sigma_3^2\gamma_2^2+\sigma_2^2\gamma_3^2) -2(Z_{1h}-\mu_1)(Z_{3h}-\mu_3)\gamma_1\gamma_3\sigma_2^2 \\ \nonumber &+(Z_{2h}-\mu_2)^2(\sigma_1^2\sigma_3^2+\sigma_1^2\gamma_3^2+\sigma_3^2\gamma_1^2) +(Z_{3h}-\mu_3)^2(\sigma_1^2\sigma_2^2+\sigma_1^2\gamma_2^2+\sigma_2^2\gamma_1^2) \\ \nonumber &-2(Z_{1h}-\mu_1)(Z_{2h}-\mu_2)\gamma_1\gamma_2\sigma_3^2-2(Z_{2h}-\mu_2)(Z_{3h}-\mu_3)\gamma_2\gamma_3\sigma_1^2. \end{align}
Since the only difference between $C_h$ and $B_h$ is the notation ($Z$ in place of $Y$), it follows that for any hospital $h$ the negative multivariate normal log-likelihood coincides with the negative marginal log-likelihood of the latent variable model.
\subsection{Generalized LVM} \label{7.3}
We can generalize the latent variable model with uniform weights from $3$ indicators to $m$ indicators. Note that, up to the additive constant $m\log 2\pi$, \begin{align}\nonumber 2\text{NMLL}_h &= \log ({1}+\sum \limits_{j=1}^m \frac{\gamma_j^2}{\sigma_j^2})+\sum \limits_{j=1}^m \log \sigma_j^2+\sum \limits_{j=1}^m \frac{(Y_{jh}-\mu_j)^2}{\sigma_j^2}-\frac{(\sum \limits_{j=1}^m\frac{(Y_{jh}-\mu_j)\gamma_j}{\sigma_j^2})^2}{1+\sum \limits_{j=1}^m \frac{\gamma_j^2}{\sigma_j^2}}\\ \nonumber &=\log \big[(1+\sum \limits _{j=1}^m \frac{\gamma_j^2}{\sigma_j^2}) \prod_{j=1}^m \sigma_j^2 \big] + \Big \{ \sum \limits _{j=1}^m \big[ (Y_{jh}-\mu_j)^2 (1+\sum \limits _{k \neq j} \frac{\gamma_k^2}{\sigma_k^2})\prod_{k \neq j} \sigma_k^2 \big] \\ \nonumber & - 2\sum \limits_{i \neq j}\big[ (Y_{ih}-\mu_i)(Y_{jh}-\mu_j)\gamma_i \gamma_j \prod_{k \neq i, k\neq j}\sigma_k^2 \big] \Big\}/\big[(1+\sum \limits _{j=1}^m \frac{\gamma_j^2}{\sigma_j^2})\prod_{j=1}^m \sigma_j^2 \big] \\ \nonumber
\end{align} By \eqref{zhang} and \eqref{deng}, with $m$ indicators we have, as $H \to \infty$, \begin{align}\nonumber 2\text{ENMLL}_H & \xrightarrow{a.s.} \log \big[(1+\sum \limits _{j=1}^m \frac{\gamma_j^2}{\sigma_j^2})\prod_{j=1}^m \sigma_j^2 \big] + \Big \{ \sum \limits_{j=1}^m\big[(\sigma_j^2+\gamma_j^2)(1+\sum \limits _{k \neq j} \frac{\gamma_k^2}{\sigma_k^2})\prod_{k \neq j} \sigma_k^2\big]
\\ \nonumber
&-2\sum \limits_{i \neq j}\big[\gamma_i^2 \gamma_j^2 \prod_{k \neq i, k\neq j}\sigma_k^2 \big]\Big\} /\big[(1+\sum \limits _{j=1}^m \frac{\gamma_j^2}{\sigma_j^2})\prod_{j=1}^m \sigma_j^2 \big] +{m}\log 2\pi\\ \nonumber &= \log \big[(1+\sum \limits _{j=1}^m \frac{\gamma_j^2}{\sigma_j^2})\prod_{j=1}^m \sigma_j^2 \big] + (S_1-S_2)/\big[(1+\sum \limits _{j=1}^m \frac{\gamma_j^2}{\sigma_j^2})\prod_{j=1}^m \sigma_j^2 \big] +m\log 2\pi. \end{align}
Note that
\begin{align}\nonumber
S_1&=\sum \limits_{j=1}^m\big[(\sigma_j^2+\gamma_j^2)(1+\sum \limits _{k \neq j} \frac{\gamma_k^2}{\sigma_k^2})\prod_{k \neq j} \sigma_k^2\big] \\ \nonumber
&=\sum \limits_{j=1}^m\big[\sigma_j^2(1+\sum \limits _{k \neq j} \frac{\gamma_k^2}{\sigma_k^2})\prod_{k \neq j} \sigma_k^2\big]+\sum \limits_{j=1}^m\big[\gamma_j^2(1+\sum \limits _{k \neq j} \frac{\gamma_k^2}{\sigma_k^2})\prod_{k \neq j} \sigma_k^2 \big]\\ \nonumber
&=\sum \limits_{j=1}^m (\prod_{k =1}^m \sigma_k^2+\sum \limits _{k \neq j} \frac{\gamma_k^2}{\sigma_k^2}\prod_{k =1}^m \sigma_k^2+\gamma_j^2\prod_{k \neq j} \sigma_k^2)+\sum \limits_{j=1}^m (\gamma_j^2 \sum \limits _{k \neq j}\frac{\gamma_k^2}{\sigma_k^2}\prod_{k \neq j} \sigma_k^2), \end{align}
by symmetry, \begin{align}\nonumber
S_1&=m(\prod_{k =1}^m \sigma_k^2+\sum \limits _{k \neq j} \frac{\gamma_k^2}{\sigma_k^2}\prod_{k =1}^m \sigma_k^2+\frac{\gamma_j^2}{\sigma_j^2}\prod_{k=1}^m \sigma_k^2)+2\sum \limits_{i \neq j}\big[\gamma_i^2 \gamma_j^2 \prod_{k \neq i, k\neq j}\sigma_k^2 \big].\\ \nonumber
&=m(\prod_{k =1}^m \sigma_k^2+\sum \limits _{k=1}^m \frac{\gamma_k^2}{\sigma_k^2}\prod_{k =1}^m \sigma_k^2)+S_2, \end{align} which implies \begin{align}\nonumber
(S_1-S_2)/\big[(1+\sum \limits _{j=1}^m \frac{\gamma_j^2}{\sigma_j^2})\prod_{j=1}^m \sigma_j^2 \big]=m. \end{align}
Therefore, for $m$ indicators, we have
\begin{align}\label{ppp} \text{ENMLL}_H & \xrightarrow{a.s.} \frac{1}{2}\log \big[ (1+\sum \limits _{j=1}^m \frac{\gamma_j^2}{\sigma_j^2})\prod_{j=1}^m \sigma_j^2 \big]+\frac{m}{2}(\log 2\pi+1). \end{align}
Furthermore, denote the right-hand side of \eqref{ppp} by $f(m)$. Since each $Y_j$ has a normal distribution, its fourth moment exists, and hence by the Lindeberg--L\'evy central limit theorem,
\begin{align}\nonumber
\sqrt{H}(\frac{1}{H}\sum \limits_{h=1}^H \text{NMLL}_h-\text{ENMLL}_H)\xrightarrow{d} N(0,\boldsymbol{\sigma^2}) \end{align}
holds as $H\to \infty$, since $\text{ENMLL}_H$ converges almost surely for every fixed positive integer $m$, and \begin{align}\nonumber
\boldsymbol{\sigma^2}&= \lim \limits _{H \to \infty}\frac{1}{4H}\sum \limits_{h=1}^H (2\text{NMLL}_h-2\text{ENMLL}_H)^2 \\ \nonumber
&= \lim \limits _{H \to \infty}\frac{1}{4H}\sum \limits_{h=1}^H \Big \{2\text{NMLL}_h-m- \log \big[ (1+\sum \limits _{j=1}^m \frac{\gamma_j^2}{\sigma_j^2})\prod_{j=1}^m \sigma_j^2 \big]-m\log 2\pi\Big \}^2 \\ \nonumber
&= \lim \limits _{H \to \infty}\frac{1}{4H}\sum \limits_{h=1}^H\Big \{ \sum \limits _{j=1}^m \big[ (Y_{jh}-\mu_j)^2 (1+\sum \limits _{k \neq j} \frac{\gamma_k^2}{\sigma_k^2})\prod_{k \neq j} \sigma_k^2 \big] \\ \nonumber & - 2\sum \limits_{i \neq j}\big[ (Y_{ih}-\mu_i)(Y_{jh}-\mu_j)\gamma_i \gamma_j \prod_{k \neq i, k\neq j}\sigma_k^2 \big]\Big\}^2/\big[(1+\sum \limits _{j=1}^m \frac{\gamma_j^2}{\sigma_j^2})\prod_{j=1}^m \sigma_j^2 \big]^2-m^2 \\ \nonumber & < m^4P^{m}-m^2 <\infty, \end{align} where $P=\max \limits_{1 \le j \le m} E(Y_j-\mu_j)^4=\max \limits_{1 \le j \le m}3(\sigma_j^2+\gamma_j^2)^2.$ \subsection{Weighted LVM} \label{7.4}
Let the weight matrix be $W=[w_{jh}]_{m\times H}$, where $w_{jh}=w_j$ and the $w_j$ are positive constants, and let
$$Y_{jh}|\alpha_h \sim N(\mu_j+\gamma_j\alpha_h, \frac{\sigma_j^2}{w_j}), \quad \quad 1\le j\le m.$$ Then, by the definition of the NMLL, the marginal log-likelihood for hospital $h$ satisfies \begin{align}\nonumber
& \quad -2\log \mathcal{L}(\mu_1 \dots \mu_m, \gamma_1 \dots \gamma_m, \frac{\sigma_1}{\sqrt{w_1}} \dots \frac{\sigma_m}{\sqrt{w_m}}|Y_{.h}) -m\log 2\pi \\ \nonumber
&= \log ({1}+\sum \limits_{j=1}^m \frac{w_j\gamma_j^2}{\sigma_j^2})+\sum \limits_{j=1}^m \log \frac{\sigma_j^2}{w_j}+\sum \limits_{j=1}^m \frac{w_j(Y_{jh}-\mu_j)^2}{\sigma_j^2}-\frac{(\sum \limits_{j=1}^m\frac{w_j(Y_{jh}-\mu_j)\gamma_j}{\sigma_j^2})^2}{1+\sum \limits_{j=1}^m \frac{w_j\gamma_j^2}{\sigma_j^2}} \\ \nonumber
&= \log ({1}+\sum \limits_{j=1}^m \frac{w_j\gamma_j^2}{\sigma_j^2})+\sum \limits_{j=1}^m w_j\log \sigma_j^2+\sum \limits_{j=1}^m \frac{w_j(Y_{jh}-\mu_j)^2}{\sigma_j^2}-\frac{(\sum \limits_{j=1}^m\frac{w_j(Y_{jh}-\mu_j)\gamma_j}{\sigma_j^2})^2}{1+\sum \limits_{j=1}^m \frac{w_j\gamma_j^2}{\sigma_j^2}} \\ \nonumber
&\quad -\sum \limits_{j=1}^m \log w_j -\sum \limits_{j=1}^m (w_j-1)\log \sigma_j^2\\ \nonumber
& = -2\log \mathcal{L}^*(\mu_1 \dots \mu_m, \gamma_1 \dots \gamma_m, {\sigma_1} \dots \sigma_m|Y_{.h}) -\sum \limits_{j=1}^m \log w_j -\sum \limits_{j=1}^m (w_j-1)\log \sigma_j^2\\ \nonumber
&-\sum \limits_{j=1}^m w_{j} \log 2\pi. \end{align} By Theorem 2, \begin{align} \nonumber
-\frac{2}{H}\sum \limits _{h=1}^H\log \mathcal{L}(\mu_1 \dots \mu_m, \gamma_1 \dots \gamma_m, \frac{\sigma_1}{\sqrt{w_1}} \dots \frac{\sigma_m}{\sqrt{w_m}}|Y_{.h})-m\log 2\pi\\ \nonumber \xrightarrow{a.s.}m+\log \big[(1+\sum \limits _{j=1}^m\frac{w_j\gamma_j^2}{\sigma_j^2})\prod_{j=1}^m\frac{\sigma_j^2}{w_j} \big]. \end{align}
Therefore, the expected negative weighted (pseudo) marginal log-likelihood ($\text{ENWMLL}_H$) for $m$ indicators satisfies
\begin{align}\nonumber
2\text{ENWMLL}_H &\xrightarrow{a.s.} m+\log \big[(1+\sum \limits _{j=1}^m\frac{w_j\gamma_j^2}{\sigma_j^2})\prod_{j=1}^m\frac{\sigma_j^2}{w_j} \big]+\sum\limits_{j=1}^m (w_j-1)\log \sigma_j^2\\ \nonumber
&+\sum \limits_{j=1}^m \log w_j+\sum \limits_{j=1}^m w_{j} \log 2\pi, \end{align}
as $H \to \infty$. Similarly, the conditions for the Lindeberg--L\'evy central limit theorem hold for the NWMLL for every $m$.
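A similar simulation check is possible for the weighted case (again a sketch, not from the paper; the weights and parameter values are illustrative). After expanding $\log\prod_{j=1}^m(\sigma_j^2/w_j)$, the displayed limit simplifies to $m+\log(1+\sum_{j=1}^m w_j\gamma_j^2/\sigma_j^2)+\sum_{j=1}^m w_j\log\sigma_j^2+\sum_{j=1}^m w_j\log 2\pi$, which is the form used below:
\begin{verbatim}
# Sketch: Monte-Carlo check of the weighted-LVM limit for 2*ENWMLL_H.
import numpy as np

rng = np.random.default_rng(2)
m, H = 3, 200_000
mu     = np.array([0.5, -1.0, 2.0])
gamma  = np.array([1.0,  0.7, -0.4])
sigma2 = np.array([1.5,  0.8,  2.0])
w      = np.array([0.6,  1.0,  2.5])          # fixed positive weights w_j

alpha = rng.normal(size=H)
Y = mu + np.outer(alpha, gamma) + rng.normal(size=(H, m))*np.sqrt(sigma2 / w)

r = Y - mu
S = 1 + np.sum(w * gamma**2 / sigma2)
cross = r @ (w * gamma / sigma2)
two_nwmll = (np.log(S) + np.sum(w*np.log(sigma2)) + r**2 @ (w/sigma2)
             - cross**2 / S + np.sum(w)*np.log(2*np.pi))
limit = m + np.log(S) + np.sum(w*np.log(sigma2)) + np.sum(w)*np.log(2*np.pi)
print(two_nwmll.mean(), limit)                # close for large H
\end{verbatim}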
\end{document}
\begin{document}
\title{A note on the joint measurability of POVMs and its implications for contextuality} \author{Ravi Kunjwal} \email{[email protected]} \affiliation{Optics \& Quantum Information Group, The Institute of Mathematical Sciences, C.I.T Campus, Tharamani, Chennai 600 113, India}
\date{\today}
\begin{abstract} The purpose of this note is to clarify the logical relationship between joint measurability and contextuality for quantum observables in view of recent developments \cite{LSW,KG,KHF,FyuOh}. \end{abstract}
\pacs{03.65.Ta, 03.65.Ud}
\maketitle
\section*{Introduction}
In a recent work \cite{KG}, a new proof of contextuality---in the generalized sense of Spekkens \cite{Spe05, LSW}---was provided using positive operator-valued measures (POVMs) and the connection between joint measurability of POVMs and contextuality was explicated. It was later shown in \cite{KHF} that any joint measurability structure can be realized in quantum theory, leaving open the question of whether contextuality can always be demonstrated in these joint measurability structures. Subsequent to these two developments, in Ref. \cite{FyuOh} a peculiar feature of POVMs with respect to joint measurability was pointed out: that there exist three measurements which are pairwise jointly measurable and triplewise jointly measurable but for which there exist pairwise joint measurements which do not admit a triplewise joint measurement. In this note, I will briefly put these results in context and point out the logical relationship between joint measurability and the possibility of contextuality. Also, throughout this note, `sharp measurement' will be synonymous with projection-valued measures (PVMs) and `unsharp measurement' will be synonymous with POVMs that are not PVMs. \section*{Uniqueness of joint measurement for Projection-Valued Measures} Since the peculiarity of positive-operator valued measures (POVMs) in cases of interest here arises from the nonuniqueness of joint measurements, I will first prove the uniqueness of joint measurements for projection-valued measures (PVMs). This will help clarify how the distinction between sharp and unsharp measurements comes to play a role in Specker's scenario \cite{KG}.
Consider a nonempty set $\Omega_i$ and a $\sigma$-algebra $\mathcal{F}_i$ of subsets of $\Omega_i$, for $i\in\{1,\dots,N\}$. The POVM $M_i$ is defined as the map $M_i: \mathcal{F}_i\rightarrow \mathcal{B}_+(\mathcal{H})$, where $\sum_{X_i\in\mathcal{F}_i}M_i(X_i)=I$ and $\mathcal{B}_+(\mathcal{H})$ denotes the set of positive semidefinite operators on a Hilbert space $\mathcal{H}$. $I$ is the identity operator on $\mathcal{H}$. Therefore: $M_i\equiv\{M_i(X_i)|X_i\in\mathcal{F}_i\}$, where $X_i$ labels the elements of POVM $M_i$. $M_i$ becomes a projection-valued measure (PVM) under the additional constraint $M_i(X_i)^2=M_i(X_i)$ for all $X_i\in \mathcal{F}_i$.
\begin{theorem}\label{uniqueness} Given a set of POVMs, $\{M_1,\dots,M_N\}$, all of which except at most one---say $M_N$---are PVMs, so that for $i\in\{1,\dots,N-1\}$
$$M_i\equiv\{M_i(X_i)|X_i\in\mathcal{F}_i, M_i(X_i)^2=M_i(X_i)\}$$ and $$M_N\equiv\{M_N(X_N)|X_N\in\mathcal{F}_N\},$$ the set of POVMs, $\{M_1,\dots,M_N\}$, is jointly measurable if and only if they commute pairwise, i.e., $$M_j(X_j)M_k(X_k)=M_k(X_k)M_j(X_j),$$ for all $j,k\in\{1,\dots,N\}$ and $X_j\in\mathcal{F}_j, X_k\in\mathcal{F}_k$. In this case, there exists a unique joint POVM $M$, defined as a map $$M:\mathcal{F}_1\times\mathcal{F}_2\times\dots\times\mathcal{F}_N \rightarrow \mathcal{B}_+(\mathcal{H}),$$ such that $$M(X_1\times\dots\times X_N)=M_1(X_1)M_2(X_2)\dots M_N(X_N),$$ for all $X_1\times\dots\times X_N \in\mathcal{F}_1\times\dots\times \mathcal{F}_N.$ \end{theorem}
\emph{Proof.}---This proof is adapted from, and is a generalization of, the proof of Proposition 8 in the Appendix of Ref. \cite{heinosaari}.
The first part of the proof is for the implication: joint measurability $\Rightarrow$ pairwise commutativity---A joint POVM for $\{M_1,\dots,M_N\}$ is defined as a map $M:\mathcal{F}_1\times\mathcal{F}_2\times\dots\times\mathcal{F}_N \rightarrow \mathcal{B}_+(\mathcal{H})$, such that \begin{equation}
M_i(X_i)=\sum_{\{X_j\in\mathcal{F}_j|j\neq i\}}M(X_1\times\dots\times X_N) \end{equation} for all $X_i\in\mathcal{F}_i$, $i\in\{1\dots N\}$. Also, $M(X_1\times\dots\times X_N)\leq M_1(X_1)$, so the range of $M(X_1\times\dots\times X_N)$ is contained in the range of $M_1(X_1)$, and therefore: \begin{equation} M_1(X_1)M(X_1\times\dots\times X_N)=M(X_1\times\dots\times X_N). \end{equation} Using this relation for the complement $\Omega_1\backslash X_1 \in \mathcal{F}_1$: \begin{eqnarray} &&M_1(X_1)M(\Omega_1\backslash X_1\times\dots\times X_N)\nonumber\\ &&=(I-M_1(\Omega_1\backslash X_1))M(\Omega_1\backslash X_1\times\dots\times X_N)\nonumber\\ &&=0. \end{eqnarray} Taking the adjoints, it follows that \begin{equation} M(X_1\times\dots\times X_N)M_1(X_1)=M(X_1\times\dots\times X_N), \end{equation} and \begin{equation} M(\Omega_1\backslash X_1\times\dots\times X_N)M_1(X_1)=0. \end{equation} Denoting
$$M^{(i)}(X_{i+1}\times\dots\times X_N)\equiv\sum_{\{X_j\in\mathcal{F}_j|j\leq i\}}M(X_1\times\dots\times X_N),$$ this implies: \begin{eqnarray} &&M_1(X_1)M^{(1)}(X_2\times\dots\times X_N)\nonumber\\ &=&M_1(X_1)M(X_1\times\dots\times X_N)\nonumber\\&&+M_1(X_1)M(\Omega_1\backslash X_1\times\dots\times X_N)\nonumber\\ &=&M_1(X_1)M(X_1\times\dots\times X_N)\nonumber\\ &=&M(X_1\times\dots\times X_N). \end{eqnarray} Taking the adjoint, \begin{equation} M^{(1)}(X_2\times\dots\times X_N)M_1(X_1)=M(X_1\times\dots\times X_N). \end{equation} Therefore: \begin{eqnarray} &&M_1(X_1)M^{(1)}(X_2\times\dots\times X_N)\nonumber\\ &=&M^{(1)}(X_2\times\dots\times X_N)M_1(X_1)\nonumber\\ &=&M(X_1\times\dots\times X_N). \end{eqnarray} Noting that $M^{(i-1)}(X_i\times\dots\times X_N)\leq M_i(X_i)$, one can repeat the above procedure for $M_i$, $i\in\{2,\dots,N-1\},$ to obtain: \begin{eqnarray} &&M^{(i-1)}(X_i\times\dots\times X_N)\nonumber\\ &=&M_i(X_i)M^{(i)}(X_{i+1}\times\dots\times X_N)\nonumber\\ &=&M^{(i)}(X_{i+1}\times\dots\times X_N)M_i(X_i). \end{eqnarray} Doing this recursively until $i=N-1$ and noting that $M^{(N-1)}(X_N)=M_N(X_N)$, it follows: \begin{eqnarray} &&M(X_1\times\dots\times X_N)\nonumber\\ &=&M_1(X_1)M^{(1)}(X_2\times\dots\times X_N)\nonumber\\ &=&M^{(1)}(X_2\times\dots\times X_N)M_1(X_1)\nonumber\\ &&\vdots\nonumber\\ &=&M_1(X_1)M_2(X_2)\dots M_{N-1}(X_{N-1})M_N(X_N)\nonumber\\ &=&M_N(X_N)M_{N-1}(X_{N-1})\dots M_2(X_2)M_1(X_1).\nonumber\\
\end{eqnarray} For the last equality to hold, the POVM elements must commute pairwise, so that \begin{equation} M(X_1\times\dots\times X_N)=\prod_{i=1}^N M_i(X_i). \end{equation} This concludes the proof of the implication, joint measurability $\Rightarrow$ pairwise commutativity. The converse is easy to see since the joint POVM defined by taking the
product of commuting POVM elements, $$\{M(X_1\times\dots\times X_N)=\prod_{i=1}^N M_i(X_i)|X_i\in\mathcal{F}_i\},$$ is indeed a valid POVM which coarse-grains to the given POVMs, $\{M_1,\dots,M_N\}$.
\hfill$\blacksquare$\\
Indeed, pairwise commutativity $\Rightarrow$ joint measurability for any arbitrary set of POVMs, $\{M_1,\dots,M_N\}$, and it is only when all but (at most) one of these POVMs are PVMs that the converse---and the uniqueness of the joint POVM---holds.
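As a concrete finite-dimensional illustration of Theorem \ref{uniqueness} and of this remark (a sketch added here for illustration only), consider two commuting diagonal PVMs on $\mathbb{C}^3$; their element-wise products form the unique joint PVM, which marginalizes back to the original measurements:
\begin{verbatim}
# Sketch: two commuting PVMs on C^3 and their (unique) product joint measurement.
import numpy as np

def proj(pattern):                 # diagonal projector from a 0/1 pattern
    return np.diag(np.array(pattern, dtype=float))

# PVM M1 with outcomes {a, b}, PVM M2 with outcomes {c, d}; all elements commute.
M1 = {'a': proj([1, 1, 0]), 'b': proj([0, 0, 1])}
M2 = {'c': proj([1, 0, 0]), 'd': proj([0, 1, 1])}

joint = {(x1, x2): M1[x1] @ M2[x2] for x1 in M1 for x2 in M2}

assert np.allclose(sum(joint.values()), np.eye(3))            # elements sum to I
assert all(np.allclose(M1[x1], sum(joint[(x1, x2)] for x2 in M2)) for x1 in M1)
assert all(np.allclose(M2[x2], sum(joint[(x1, x2)] for x1 in M1)) for x2 in M2)
print({k: np.diag(v).tolist() for k, v in joint.items()})
\end{verbatim}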
\section*{Specker's scenario} Specker's scenario requires a set of three POVMs, $\{M_1,M_2,M_3\}$, that are pairwise jointly measurable, i.e., $\exists$ POVMs $M_{12}$, $M_{23}$, and $M_{31}$ which measure the respective pairs jointly. An immediate consequence of the requirement of pairwise joint measurability of $\{M_1,M_2,M_3\}$ is that in quantum theory these three measurements cannot be realized as projective measurements (PVMs) and still be expected to show any contextuality. This is because for projective measurements or projection-valued measures (PVMs), a set of three measurements that are pairwise jointly measurable---and therefore admit \emph{unique} pairwise joint measurements---are also triplewise jointly measurable in the sense that there exists a \emph{unique} triplewise joint measurement which coarse-grains to each pairwise implementation of the three measurements and therefore also to the single measurements.
From Theorem \ref{uniqueness}, it follows that if $M_i$, $i\in\{1,2,3\}$, are PVMs then they admit unique pairwise and triplewise joint PVMs: \begin{eqnarray} M_{ij}(X_i\times X_j)&=&M_i(X_i)M_j(X_j),\\ M(X_1\times X_2\times X_3)&=&M_1(X_1)M_2(X_2)M_3(X_3), \end{eqnarray}
corresponding to the maps $M_{ij}:\mathcal{F}_i\times\mathcal{F}_j\rightarrow \mathcal{B}_+(\mathcal{H})$ and $M:\mathcal{F}_1\times\mathcal{F}_2\times\mathcal{F}_3\rightarrow \mathcal{B}_+(\mathcal{H})$, respectively. Intuitively, this is easy to see since joint measurability is equivalent to pairwise commutativity for a set of projective measurements and the joint measurement for each pair is unique \cite{heinosaari}. The existence of a unique joint measurement implies that there exists a joint probability distribution realizable via this joint measurement, thus explaining the pairwise statistics of the triple of measurements noncontextually in the traditional Kochen-Specker sense.\footnote{KS-noncontextuality just means that there exists a joint probability distribution over the three measurement outcomes which marginalizes to the pairwise measurement statistics. Violation of a KS inequality---obtained under the assumption that a global joint distribution exists---rules out KS-noncontextuality.}
Clearly, then, the three measurements $\{M_1, M_2, M_3\}$ must necessarily be unsharp for Specker's scenario to exhibit KS-contextuality. The uniqueness of joint measurements (pairwise or triplewise) need not hold in this case. I will refer to pairwise joint measurements as ``2-joints'' and triplewise joint measurements as ``3-joints''. Also, I will use the phrases `joint measurability' and `compatibility' interchangeably since they will refer to the same notion. Consider the four propositions regarding the three measurements:
\begin{itemize}
\item $\exists$ 2-joint: $\{M_1,M_2,M_3\}$ admit 2-joints,
\item $\nexists$ 2-joint: $\{M_1,M_2,M_3\}$ do not admit 2-joints,
\item $\exists$ 3-joint: $\{M_1,M_2,M_3\}$ admit a 3-joint,
\item $\nexists$ 3-joint: $\{M_1,M_2,M_3\}$ do not admit a 3-joint. \end{itemize}
The possible pairwise-triplewise propositions for the three measurements are: \begin{itemize}
\item $(\exists \text{ 2-joint}, \exists \text{ 3-joint})$,
\item $(\exists \text{ 2-joint}, \nexists \text{ 3-joint})$,
\item $(\nexists \text{ 2-joint}, \nexists \text{ 3-joint})$. \end{itemize}
Note that the proposition $(\nexists \text{ 2-joint}, \exists \text{ 3-joint})$ is trivially excluded because triplewise compatibility implies pairwise compatibility. Of the three remaining propositions, the ones of interest for contextuality are $(\exists \text{ 2-joint}, \exists \text{ 3-joint})$ and $(\exists \text{ 2-joint}, \nexists \text{ 3-joint})$, since the remaining one is simply about observables that do not admit any joint measurement at all and hence no nontrivial measurement contexts exist for this proposition.\footnote{ It is worth noting that, if $\{M_1,M_2,M_3\}$ were PVMs, then there are only two possibilities: $(\exists \text{ 2-joint}, \exists \text{ 3-joint})$ and $(\nexists \text{ 2-joint}, \nexists \text{ 3-joint})$, since for three PVMs, $\exists \text{ 2-joint} \Leftrightarrow \exists \text{ 3-joint}$, because pairwise commutativity is equivalent to joint measurability and the joint measurements are unique on account of Theorem \ref{uniqueness}. This is why KS-contextuality is impossible with PVMs in this scenario.}
It may seem that for purposes of contextuality even the proposition $(\exists \text{ 2-joint}, \exists \text{ 3-joint})$ is of no interest, but there is a subtlety involved here: one is only considering whether 2-joints or a 3-joint exist for the set $\{M_1, M_2, M_3\}$. Since the statistics that is of relevance for Specker's scenario is the pairwise statistics \cite{LSW, KG},
one also needs to consider whether a given choice of 2-joints, $\{M_{12}, M_{23}, M_{31}\}$, admits a 3-joint, i.e., the proposition $(\exists \text{ 3-joint}|\text{ a choice of 2-joints})$ or its negation $(\nexists \text{ 3-joint}|\text{ a choice of 2-joints})$. The four possible conjunctions are:
\begin{itemize}
\item $(\exists \text{ 2-joint}, \exists \text{ 3-joint})\bigwedge(\exists \text{ 3-joint}|\text{ a choice of 2-joints}),$
\item $(\exists \text{ 2-joint}, \exists \text{ 3-joint})\bigwedge(\nexists \text{ 3-joint}|\text{ a choice of 2-joints}),$
\item $(\exists \text{ 2-joint}, \nexists \text{ 3-joint})\bigwedge(\exists \text{ 3-joint}|\text{ a choice of 2-joints}),$
\item $(\exists \text{ 2-joint}, \nexists \text{ 3-joint})\bigwedge(\nexists \text{ 3-joint}|\text{ a choice of 2-joints}).$ \end{itemize}
Of these, the first conjunction rules out the possibility of KS-contextuality, so it is not of interest for the present purpose. The third conjunction is false since the existence of a 3-joint for a given choice of 2-joints would also imply the existence of a 3-joint for the three measurements, hence contradicting the fact that these admit no 3-joints. Thus the two remaining conjunctions of interest are:
\begin{itemize}
\item \emph{Proposition 1}:\\$(\exists \text{ 2-joint}, \exists \text{ 3-joint})\bigwedge(\nexists \text{ 3-joint}|\text{ a choice of 2-joints})$,
\item \emph{Proposition 2}:\\$(\exists \text{ 2-joint}, \nexists \text{ 3-joint})\bigwedge(\nexists \text{ 3-joint}|\text{ a choice of 2-joints})$\\ $\Leftrightarrow (\exists \text{ 2-joint}, \nexists \text{ 3-joint})$. \end{itemize}
These two possibilities lead to the following propositions:
\begin{itemize}
\item \emph{Weak}: $(\exists \text{ 2-joint})\bigwedge(\nexists \text{ 3-joint}|\text{ a choice of 2-joints})$,
\item \emph{Strong}:\\$(\exists \text{ 2-joint})\bigwedge(\nexists \text{ 3-joint}|\text{ for all choices of 2-joints})$\\ $\Leftrightarrow (\exists \text{ 2-joint}, \nexists \text{ 3-joint})$. \end{itemize}
where \emph{Weak} $\Leftrightarrow$ \emph{Proposition 1} $\bigvee$ \emph{Proposition 2}, and \emph{Strong} $\Leftrightarrow$ \emph{Proposition 2}. The proposition \emph{Weak} relaxes the requirement of proposition \emph{Strong} that the three measurements should themselves be incompatible to only the requirement that there exists a choice of 2-joints that do not admit a 3-joint. Obviously, under \emph{Strong}, there exists no 3-joint for all possible choices of 2-joints: \emph{Strong} $\Rightarrow$ \emph{Weak}.\footnote{
Note that for the case of PVMs, only the conjunction $(\exists \text{ 2-joint}, \exists \text{ 3-joint})\bigwedge(\exists \text{ 3-joint}|\text{ a choice of 2-joints})$ makes sense and that it is, in fact, equivalent to the proposition $(\exists \text{ 2-joint}, \exists \text{ 3-joint})$ since there is no ``choice of 2-joints'' available: the 2-joints, if they exist, are unique and admit a unique 3-joint (cf. Theorem \ref{uniqueness}). Consequently, the propositions \emph{Weak} and \emph{Strong} are not admissible for PVMs.}
\subsection{Comment on Ref. \cite{FyuOh} vis-\`a-vis Ref. \cite{KG}}
In Ref. \cite{KG}, contextuality---in the generalized sense of Spekkens \cite{Spe05} and by implication in the Kochen-Specker sense---was shown keeping in mind the proposition \emph{Strong}, i.e., requiring that the three measurements $\{M_1,M_2,M_3\}$ are pairwise jointly measurable but not triplewise jointly measurable. This was in keeping with the approach adopted in Ref. \cite{LSW}, where the construction used did not violate the LSW inequality \cite{LSW, KG}. Indeed, as shown in Theorem 1 of Ref. \cite{KG}, the construction used in Ref. \cite{LSW} could not have produced a violation because it sought a state-independent violation.
In Ref. \cite{FyuOh}, the authors---under \emph{Proposition 1}---use the construction first obtained in \cite{KG} to show a higher violation of the LSW inequality than reported in \cite{KG}. It is easy to check that the construction in Ref. \cite{KG} recovers the violation reported in Ref. \cite{FyuOh} when the proposition \emph{Strong} is relaxed to the proposition \emph{Weak}: the expression for the quantum probability of anticorrelation in Ref.\cite{KG} is given by
\begin{equation}\label{anti} R_3^Q=\frac{C}{6}+(1-\frac{\eta}{3}) \end{equation} where $C>0$ for a state-dependent violation of the LSW inequality \cite{LSW,KG}. Given a coplanar choice of measurement directions $\{\hat{n}_1,\hat{n}_2,\hat{n}_3\}$, and $\eta$ satisfying $\eta_l<\eta\leq\eta_u$, the optimal value of $C$ ---denoted as $C^{\{\hat{n}_i\},\eta}_{\max}$---is given by \begin{eqnarray}\label{Cmax}\nonumber
&&C^{\{\hat{n}_i\},\eta}_{\max}=2\eta\\&+&\sum_{(ij)}\left(\sqrt{1+\eta^4(\hat{n}_i.\hat{n}_j)^2-2\eta^2}-(1+\eta^2 \hat{n}_i.\hat{n}_j)\right). \end{eqnarray}
For trine measurements, $\hat{n}_i.\hat{n}_j=-\frac{1}{2}$ for each pair of measurement directions, $\{\hat{n}_i,\hat{n}_j\}$. Also, $\eta_l=\frac{2}{3}$ and $\eta_u=\sqrt{3}-1$. $\eta>\eta_l$ ensures that the three measurements corresponding to $\{\hat{n}_1,\hat{n}_2,\hat{n}_3\}$ do not admit a 3-joint while $\eta\leq\eta_u$ is necessary and sufficient for 2-joints to exist: that is, $\eta_l<\eta\leq\eta_u$ corresponds to the proposition \emph{Strong}, $(\exists \text{ 2-joint}, \nexists \text{ 3-joint})$. On relaxing the requirement $\eta_l<\eta$, we have $0\leq\eta\leq\eta_u$. This allows room for the proposition $(\exists \text{ 2-joint}, \exists \text{ 3-joint})$ when $0\leq\eta\leq \eta_l$.
The quantity to be maximized is the quantum violation: $R_3^Q-(1-\frac{\eta}{3})=\frac{C}{6}$. Substituting the value $\hat{n}_i.\hat{n}_j=-\frac{1}{2}$ in Eq. (\ref{Cmax}), the quantum probability of anticorrelation from Eq. (\ref{anti}) for trine measurements is given by: \begin{equation}
R_3^Q=\frac{1}{2}+\frac{\eta^2}{4}+\frac{1}{2}\sqrt{1-2\eta^2+\frac{\eta^4}{4}}, \end{equation} which is the same as the bound in Eq. (11) in Theorem 3 of Ref. \cite{FyuOh}. The quantum violation is given by: \begin{equation}
R_3^Q-(1-\frac{\eta}{3})=-\frac{1}{2}+\frac{\eta}{3}+\frac{\eta^2}{4}+\frac{1}{2}\sqrt{1-2\eta^2+\frac{\eta^4}{4}}. \end{equation}
In Ref. \cite{KG}, this expression was maximized under the proposition \emph{Strong} ($\eta_l<\eta\leq \eta_u$) and the quantum violation was seen to approach a maximum of $0.0336$ for $R_3^Q\rightarrow0.8114$ as $\eta\rightarrow \eta_l=\frac{2}{3}$. In Ref. \cite{FyuOh}, the same expression was maximized while relaxing proposition \emph{Strong} to proposition \emph{Weak} (allowing $\eta\leq\eta_l$) and the maximum quantum violation was seen to be $0.0896$ for $R_3^Q=0.9374$ and $\eta\approx 0.4566$.
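Both reported optima can be reproduced by a direct numerical search over $\eta$ (a sketch added here, not taken from either reference; the grid resolution is arbitrary):
\begin{verbatim}
# Sketch: maximize the quantum violation f(eta) = R_3^Q - (1 - eta/3) for trine
# measurements (n_i . n_j = -1/2) under the propositions Weak and Strong.
import numpy as np

def violation(eta):
    return -0.5 + eta/3 + eta**2/4 + 0.5*np.sqrt(1 - 2*eta**2 + eta**4/4)

eta_l, eta_u = 2/3, np.sqrt(3) - 1
etas_weak   = np.linspace(0.0,   eta_u, 200_001)   # Weak:   0     <= eta <= eta_u
etas_strong = np.linspace(eta_l, eta_u, 200_001)   # Strong: eta_l <  eta <= eta_u

i = np.argmax(violation(etas_weak))
print(etas_weak[i], violation(etas_weak[i]))   # ~0.4566, ~0.0896 (Ref. FyuOh)
print(violation(etas_strong).max())            # ~0.0336, approached as eta -> eta_l
\end{verbatim}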
Another comment in Ref. \cite{FyuOh} is the following:
\emph{``Interestingly, there are three observables that are not triplewise jointly measurable but cannot violate LSW's inequality no matter how each two observables are jointly measured.''}
That is, \emph{Strong} $\nRightarrow$ Violation of LSW inequality. Equally, it is also the case that \emph{Weak} $\nRightarrow$ Violation of LSW inequality. Neither of these is surprising given the discussion in this note. In particular, note the following implications ($0\leq\eta\leq 1$):
\begin{enumerate}
\item Violation of LSW inequality, i.e., $R_3^Q>1-\frac{\eta}{3}$ $\Rightarrow$ Violation of KS inequality, i.e., $R_3^Q>\frac{2}{3}$,
\item Violation of KS inequality, i.e., $R_3^Q>\frac{2}{3}$ $\Rightarrow$ \emph{Weak}: $(\exists \text{ 2-joint})\bigwedge(\nexists \text{ 3-joint}|\text{ a choice of 2-joints})$,
\item \emph{Strong} $\Rightarrow$ \emph{Weak}. \end{enumerate}
Therefore, \emph{Weak} is a necessary condition for a violation of the LSW inequality. It can be satisfied either under \emph{Proposition 1} (as done in \cite{FyuOh}) or under \emph{Proposition 2} (or \emph{Strong}, as done in \cite{KG}).
\section*{Joint measurability structures}
I end this note with a comment on the result proven in Ref. \cite{KHF}, where it was shown constructively that any conceivable joint measurability structure for a set of $N$ observables is realizable via binary POVMs. With regard to contextuality, this result proves the admissibility in quantum theory of contextuality scenarios that are not realizable with PVMs alone. This should be easy to see, specifically, from the example of Specker's scenario, where PVMs do not suffice to demonstrate contextuality, primarily because they possess a very rigid joint measurability structure dictated by pairwise commutativity and their joint measurements are unique (Theorem \ref{uniqueness}). If one can demonstrate contextuality given the scenarios obtained from more general joint measurability structures then a relaxation of a sort similar to the case of Specker's scenario (from \emph{Strong} to \emph{Weak}) will also lead to contextuality. In this sense, an implication of the result of Ref. \cite{KHF} is that it allows one to consider the question of contextuality for joint measurability structures which admit no PVM realization in quantum theory on account of Theorem \ref{uniqueness}.
In particular, for PVMs, \emph{pairwise compatibility} $\Leftrightarrow$ \emph{global compatibility} because commutativity is a necessary and sufficient criterion for compatibility. On the other hand, POVMs allow for a failure of the implication \emph{pairwise compatibility} $\Rightarrow$ \emph{global compatibility} because pairwise compatibility is not equivalent to pairwise commutativity for POVMs: \emph{pairwise commutativity} $\Rightarrow$ \emph{pairwise compatibility}, but not conversely.
\section*{Conclusion} I hope this note clarifies issues that may have escaped analysis in Refs. \cite{LSW,KG,KHF,FyuOh}. In particular, the logical relationship between admissible joint measurability structures and the possibility of contextuality should be clear from the discussion here.
\section*{Acknowledgment} I would like to thank Sibasish Ghosh and Prabha Mandayam for comments on earlier drafts of this article.
\end{document}
\begin{document}
\title{Sequence entropy tuples and mean sensitive tuples} \author[J. Li]{Jie Li } \address[Jie Li]{School of Mathematics and Statistics, Jiangsu Normal University, Xuzhou, Jiangsu, 221116, P.R. China} \email{[email protected]}
\author[C. Liu]{Chunlin Liu } \address[Chunlin Liu]{CAS Wu Wen-Tsun Key Laboratory of Mathematics, School of Mathematical Sciences, University of Science and Technology of China, Hefei, Anhui, 230026, P.R. China} \email{[email protected]}
\author[S. Tu]{Siming Tu } \address[Siming Tu]{ School of Mathematics (Zhuhai), Sun Yat-sen University,
Zhuhai, Guangdong 519082, P.R. China} \email{[email protected]}
\author[T. Yu]{Tao Yu } \address[Tao Yu]{Department of Mathematics, Shantou University, Shantou 515063, P. R. China} \email{[email protected]}
\begin{abstract} Using the idea of local entropy theory, we characterize the sequence entropy tuple via mean forms of the sensitive tuple in both topological and measure-theoretical senses. For the measure-theoretical sense, we show that for an ergodic measure-preserving system, the $\mu$-sequence entropy tuple, the $\mu$-mean sensitive tuple and the $\mu$-sensitive in the mean tuple coincide, and give an example to show that the ergodicity condition is necessary. For the topological sense, we show that for a certain class of minimal systems, the mean sensitive tuple is the sequence entropy tuple. \end{abstract} \date{\today} \subjclass[2020]{37A35, 37B05} \keywords{Sequence entropy tuples; mean sensitive tuples; sensitive in the mean tuples} \maketitle
\section{Introduction} By a {\it topological dynamical system} ({\it t.d.s.} for short) we mean a pair $(X,T)$, where $X$ is a compact metric space with a metric $d$ and $T$ is a homeomorphism from $X$ to itself. A point $x\in X$ is called a \textit{transitive point} if ${\mathrm{Orb}(x,T)}=\{x,Tx,\ldots\}$ is dense in $X$. A t.d.s. $(X,T)$ is called \textit{minimal} if all points in $X$ are transitive points. Denote by $\B_X$ all Borel measurable subsets of $X$. A Borel (probability) measure $\mu$ on $X$ is called $T$-\textit{invariant} if $\mu(T^{-1}A)=\mu(A)$ for any $A\in \mathcal{B}_X$. A $T$-invariant measure $\mu$ on $X$ is called \textit{ergodic} if $B\in \mathcal{B}_X$ with $T^{-1}B=B$ implies $\mu(B)=0$ or $\mu(B)=1$. Denote by $M(X, T)$ (resp. $M^e(X, T)$) the collection of all $T$-invariant measures (resp. all ergodic measures) on $X$. For $\mu \in M(X,T)$, the \textit{support} of $\mu$ is defined by $\supp(\mu )=\{x\in X\colon \mu (U)>0\text{ for any neighbourhood }U\text{ of }x\}$. Each measure $\mu\in M(X,T)$ induces a {\it measure-preserving system} ({\it m.p.s.} for short) $(X,\B_X,\mu, T)$.
It is well known that the entropy can be used to measure the local complexity of the structure of orbits in a given system. One may naturally ask how to characterize the entropy in a local way. The related research started from the series of pioneering papers of Blanchard et al \cite{B1992, B1993, B1997, B1995}, in which the notions of entropy pairs and entropy pairs for a measure were introduced. From then on entropy pairs have been intensively studied by many researchers. Huang and Ye \cite{HY06} extended the notions from pairs to finite tuples, and showed that if the entropy of a given system is positive, then there are entropy $n$-tuples for any $n\in \mathbb{N}$ in both topological and measurable settings.
The sequence entropy was introduced by Ku\v shnirenko \cite{Kus} to establish the relation between spectral theory and entropy theory. As in classical local entropy theory, the sequence entropy can also be localized. In \cite{HLSY03, HMY04} the authors investigated sequence entropy pairs, sequence entropy tuples and sequence entropy tuples for a measure, respectively. Using tools from combinatorics, Kerr and Li \cite{KL07, KL09} studied (sequence) entropy tuples, (sequence) entropy tuples for a measure and IT-tuples via independence sets. Huang and Ye \cite{HY09} showed that a system has a sequence entropy $n$-tuple if and only if its maximal pattern entropy is no less than $\log n$ in both topological and measurable settings. For more background and applications of local entropy theory, we refer to the survey \cite{GY09}.
In addition to entropy, sensitivity is another candidate to describe the complexity of a system; it was first used by Ruelle \cite{Ruelle1977}. In \cite{X05}, Xiong introduced a multi-variate version of sensitivity, called $n$-sensitivity. \begin{comment}
According to Auslander and Yorke \cite{AY80}
a t.d.s. $(X,T)$ is called \emph{sensitive}
if there exists $\delta>0$ such that
for every opene (open and non-empty)
subset $U$, there exist $x_1,x_2\in U$ and $m\in\mathbb{N}$ with $d(T^mx_1,T^mx_2)>\delta$.
In \cite{X05}, Xiong introduced a multi-variate version of sensitivity, called $n$-sensitivity. \end{comment} Motivated by the local entropy theory, Ye and Zhang \cite{YZ08} introduced the notion of sensitive tuples. Particularly, they showed that a transitive t.d.s. is $n$-sensitive if and only if it has a sensitive $n$-tuple; and a sequence entropy $n$-tuple of a minimal t.d.s. is a sensitive $n$-tuple. For the converse, Maass and Shao \cite{MS07} showed that in a minimal t.d.s., if a sensitive $n$-tuple is a minimal point of the $n$-fold product t.d.s. then it is a sequence entropy $n$-tuple.
\begin{comment}
They introduced the notions of $n$-sensitivity for a measure $\mu$ and sensitive $n$-tuple for $\mu$ and showed that a t.d.s. with an ergodic measure $\mu$ is $n$-sensitive for $\mu$ if and only if it has a sensitive $n$-tuple for $\mu$;
and for a t.d.s. with an ergodic
measure $\mu$, sequence entropy $n$-tuple for $\mu$ is a sensitive $n$-tuple for $\mu$. \end{comment}
Recently, Li, Tu and Ye \cite{LTY15} studied sensitivity in the mean form. Li, Ye and Yu \cite{LY21,LYY22} further studied the multi-variate version of mean sensitivity and its local representation, namely, the mean $n$-sensitivity and the mean $n$-sensitive tuple. One naturally wonders if there is still a characterization of sequence entropy tuples via mean sensitive tuples. By the results of \cite{FGJO, GJY21,KL07,LYY22} one can see that a sequence entropy tuple is not always a mean sensitive tuple even in a minimal t.d.s. Nonetheless, the works of \cite{DG16,Huang06,LTY15} yield that every minimal mean sensitive t.d.s. (i.e., one with a mean sensitive pair, by \cite{LYY22}) is not tame (i.e., it admits an IT pair, by \cite{KL07}). So, in general, we conjecture that for any minimal t.d.s., a mean sensitive $n$-tuple is an IT $n$-tuple and hence a sequence entropy $n$-tuple by \cite[Theorem 5.9]{KL07}. Now we can answer this question under an additional condition. Namely,\begin{thm}\label{thm:ms=>it}
Let $(X,T)$ be a minimal t.d.s. and $\pi: (X,T)\rightarrow (X_{eq},T_{eq})$ be the factor map
to its maximal equicontinuous factor which is almost one to one. Then for $2\le n\in\mathbb{N}$,
$$MS_n(X,T)\subset IT_n(X,T),$$
where $MS_n(X,T)$ denotes all the mean sensitive $n$-tuples and $IT_n(X,T)$ denotes all the IT $n$-tuples. \end{thm}
In the parallel measure-theoretical setting, Huang, Lu and Ye \cite{HLY11} studied measurable sensitivity and its local representation. The notion of $\mu$-mean sensitivity for an invariant measure $\mu$ on a t.d.s. was studied by Garc\'{\i}a-Ramos \cite{G17}. Li \cite{L16} introduced the notion of the $\mu$-mean $n$-sensitivity, and showed that an ergodic m.p.s. is $\mu$-mean $n$-sensitive if and only if its maximal pattern entropy is no less than $\log n$. The authors in \cite{LYY22} introduced the notion of the $\mu$-$n$-sensitivity in the mean, which was \begin{comment}
if there is $\delta>0$ such that for any Borel subset $A$ of $X$ with $\mu(A)>0$ there are $m\in \mathbb{N}$ and $n$ pairwise distinct points $x_1^m,x_2^m,\dots,x_n^m\in A$ such that
$$
\frac{1}{m}\sum_{k=0}^{m-1}\min_{1\le i\neq j\le n} d(T^k x_i^m, T^k x_j^m)>\delta.
$$
By definitions $\mu$-sensitivity in the mean tuple seems weaker than $\mu$-mean sensitivity tuple, however, they are \end{comment}
proved to be equivalent to the $\mu$-mean $n$-sensitivity in the ergodic case.
Using the idea of localization, the authors \cite{LY21} introduced the notion of the $\mu$-mean sensitive tuple and showed that every $\mu$-entropy tuple of an ergodic m.p.s. is a $\mu$-mean sensitive tuple. A natural question is left open in \cite{LY21}: \begin{ques}
Is there a characterization of $\mu$-sequence entropy tuples via $\mu$-mean sensitive tuples? \end{ques} The authors in \cite{LT20} introduced a weaker notion named the density-sensitive tuple and showed that every $\mu$-sequence entropy tuple of an ergodic m.p.s. is a $\mu$-density-sensitive tuple. In this paper, we give a positive answer to this question. Namely, \begin{thm}\label{cor:se=sm}
Let $(X,T)$ be a t.d.s., $\mu\in M^e(X,T)$ and $2\le n\in \mathbb{N}$. Then
the $\mu$-sequence entropy $n$-tuple, the $\mu$-mean sensitive $n$-tuple and the
$\mu$-$n$-sensitive in the mean tuple coincide. \end{thm} By the definitions, it is easy to see that a $\mu$-mean sensitive $n$-tuple must be a $\mu$-$n$-sensitive in the mean tuple. Thus, Theorem \ref{cor:se=sm} is a direct corollary of the following two theorems. \begin{thm}\label{thm:sm=>se}
Let $(X,T)$ be a t.d.s., $\mu\in M(X,T)$ and $2\le n\in \mathbb{N}$. Then
each $\mu$-$n$-sensitive in the mean tuple is a $\mu$-sequence entropy $n$-tuple. \end{thm}
\begin{thm}\label{thm:se=>ms}
Let $(X,T)$ be a t.d.s., $\mu\in M^e(X,T)$ and $2\le n\in \mathbb{N}$. Then
each $\mu$-sequence entropy $n$-tuple is a $\mu$-mean sensitive $n$-tuple. \end{thm} In fact, Theorem \ref{thm:sm=>se} shows a bit more than Theorem \ref{cor:se=sm}, as for a $T$-invariant measure $\mu$ which is not ergodic, every $\mu$-$n$-sensitive in the mean tuple is still a $\mu$-sequence entropy $n$-tuple. However, the following result shows that ergodicity of $\mu$ in Theorem \ref{thm:se=>ms} is necessary. \begin{thm}\label{thm:sm=/=se}
For every $2\le n\in \mathbb{N}$, there exist a t.d.s. $(X,T)$ and $\mu\in M(X,T)$ such that there is a $\mu$-sequence entropy $n$-tuple but it is not a $\mu$-$n$-sensitive in the mean tuple. \end{thm}
It is fair to note that Garc{\'i}a-Ramos told us that, at the same time, he and Mu{\~n}oz-L{\'o}pez had also obtained a completely independent proof of the equivalence of the sequence entropy pair and the mean sensitive pair in the ergodic case \cite{GM22}. Their proof relies on the deep equivalent characterization of measurable sequence entropy pairs developed by Kerr and Li \cite{KL09} using the combinatorial notion of independence. Our results provide more information in the general case, and our proofs work with the classical definition of sequence entropy pairs introduced in \cite{HMY04}. It is worth noting that the proofs depend on a new interesting ergodic measure decomposition result (Lemma \ref{0726}), which was applied to prove profound conjectures of Erd\H{o}s in number theory by Kra, Moreira, Richter and Robertson \cite{KMRR,KMRR1}. This decomposition may have more applications because it combines topological and Borel structures.
The outline of the paper is the following. In Sec. \ref{sec2}, we recall some basic notions that we will use in the paper. In Sec. \ref{sec3}, we prove Theorem \ref{thm:sm=>se}. In Sec. \ref{sect:proof of thm se=>ms}, we show Theorem \ref{thm:se=>ms} and Theorem \ref{thm:sm=/=se}. In Sec. \ref{sec5}, we study the mean sensitive tuple and the sequence entropy in the topological sense and show Theorem \ref{thm:ms=>it}. \section{Preliminaries}\label{sec2} Throughout the paper, denote by $\mathbb{N}$ and ${\mathbb{Z}}_{+}$ the collections of natural numbers $\{1,2,\dots\}$ and non-negative integers $\{0,1,2,\dots\}$, respectively.
For $F\subset \mathbb{Z}_+$, denote by $\#\{F\}$ (or simply write $\#F$ when it is clear from the context) the cardinality of $F$. The \emph{upper density} $\overline{D}(F)$ of $F$ is defined by $$ \overline{D}(F)=\limsup_{n\to\infty} \frac{\#\{F\cap[0,n-1]\}}{n}. $$
Similarly, the \emph{lower density} $\underline{D}(F)$ of $F$ is given by $$ \underline{D}(F)=\liminf_{n\to\infty} \frac{\#\{F\cap[0,n-1]\}}{n}. $$ If $\overline{D}(F)=\underline{D}(F)$, we say that the \textit{density} of $F$ exists and equals the common value, which is written as $D(F)$.
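For instance, the set of even numbers has density $\tfrac{1}{2}$, and the set of perfect squares $\{k^2\colon k\in\mathbb{N}\}$ has density $0$, while the set $F=\bigcup_{k\ge 1}\big([2^{2k},2^{2k+1})\cap\mathbb{Z}_+\big)$ satisfies $\overline{D}(F)=\tfrac{2}{3}$ and $\underline{D}(F)=\tfrac{1}{3}$, so its density does not exist.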
Given a t.d.s. $(X,T)$ and $n\in \mathbb{N}$, denote by $X^{(n)}$ the $n$-fold product of $X$. Let $\Delta_n(X)=\{(x,x,\dots, x)\in X^{(n)}\colon x\in X\}$ be the diagonal of $ X^{(n)}$ and $\Delta_n^\prime(X)=\{(x_1,x_2,...,x_n)\in X^{(n)}: x_i=x_j \text{ for some } 1\le i\neq j\le n \}$.
If a closed subset $Y\subset X$ is $T$-invariant in the sense of $TY= Y$, then the restriction $(Y, T|_Y)$ (or simply write $(Y,T)$ when it is clear from the context) is also a t.d.s., which is called a \textit{subsystem} of $(X,T)$.
Let $(X,T)$ be a t.d.s., $x\in X$ and $U,V\subset X$. Denote $$ N(x,U)=\{n\in\mathbb{Z}_+ \colon T^n x\in U\} \ \text{ and }\ N(U,V)=\{n\in\mathbb{Z}_+: U\cap T^{-n}V\neq\emptyset\}. $$ A t.d.s. $(X,T)$ is called \textit{transitive} if $N(U,V)\neq\emptyset$ for all non-empty open subsets $U,V$ of $X$. It is well known that the set of all transitive points in a transitive t.d.s. forms a dense $G_\delta$ subset of $X$.
Given two t.d.s. $(X, T)$ and $(Y,S)$, a map $\pi\colon X\to Y$ is called a \textit{factor map} if $\pi$ is surjective and continuous with $\pi\circ T=S\circ\pi$, in which case $(Y,S)$ is referred to as a \textit{factor} of $(X, T)$. Furthermore, if $\pi$ is a homeomorphism, we say that $(X,T)$ is \textit{conjugate} to $(Y,S)$.
A t.d.s. $(X,T)$ is called \textit{equicontinuous} (resp. \textit{mean equicontinuous}) if for any $\epsilon>0$ there is $\delta>0$ such that if $x,y\in X$ with $d(x,y)<\delta$ then $\sup_{k\in\mathbb{Z}_+}d(T^kx,T^ky)<\epsilon$ (resp. $\limsup_{n\to\infty}\frac{1}{n}\sum_{k=0}^{n-1}d(T^kx,T^ky)<\epsilon$). Every t.d.s. $(X, T)$ is known to have a maximal equicontinuous factor (or a maximal mean equicontinuous factor \cite{LTY15}). For more on mean equicontinuous systems, see the recent survey \cite{LYY}.
In the rest of this section, we fix a t.d.s. $(X,T)$ with a measure $\mu\in M(X,T)$. The {\it entropy of a finite measurable partition $\alpha=\left\{A_1, A_2, \ldots, A_k\right\}$ of $X$ } is defined by $ H_\mu(\alpha)=-\sum_{i=1}^k \mu\left(A_i\right) \log \mu\left(A_i\right), $ where $0 \log 0$ is defined to be 0. Moreover, we define the {\it sequence entropy of $T$ with respect to $\alpha$ along an increasing sequence $S=\left\{s_i\right\}_{i=1}^{\infty}$ of $\mathbb{Z}_+$ } by $$ h_\mu^{S}(T, \alpha)=\limsup _{n\rightarrow \infty} \frac{1}{n} H_\mu\left(\bigvee_{i=1}^n T^{-s_i} \alpha\right). $$ The {\it sequence entropy of $T$ along the sequence $S$} is $$ h_\mu^{S}(T)=\sup _{\alpha} h_\mu^{S}(T, \alpha), $$ where the supremum is taken over all finite measurable partitions. Correspondingly, the {\it topological sequence entropy of $T$ with respect to $S$ and a finite open cover $\mathcal{U}$ } is $$ h^{S}(T, \mathcal{U})=\limsup _{n \rightarrow\infty} \frac{1}{n} \log N\left(\bigvee_{i=1}^n T^{-s_i} \mathcal{U}\right), $$ where $N\left(\bigvee_{i=1}^n T^{-s_i} \mathcal{U}\right)$ is the minimum among the cardinalities of all sub-families of $\bigvee_{i=1}^n T^{-s_i} \mathcal{U}$ covering $X$. The {\it topological sequence entropy of $T$ with respect to $S$ } is defined by $$h^{S}(T)=\sup _{\mathcal{U}} h^{S}(T, \mathcal{U}),$$ where the supremum is taken over all finite open covers.
Let $(x_i)_{i=1}^n\in X^{(n)}$. A finite cover $\mathcal{U}=\{U_1,U_2,\ldots,U_k\}$ of $X$ is said to be an {\it admissible cover} with respect to $(x_i)_{i=1}^n$ if for each $1\leq j\leq k$ there exists $1\leq i_j\leq n$ such that $x_{i_j}\notin\overline{U_j}$. Analogously, we define admissible partitions with respect to $(x_i)_{i=1}^n$.
\begin{defn}[\cite{HMY04},\cite{MS07}]An $n$-tuple $(x_i)_{i=1}^n\in X^{(n)}\setminus \Delta_n(X)$, $n\geq 2$ is called
\begin{itemize}
\item a sequence entropy $n$-tuple for $\mu$ if for any admissible finite Borel measurable partition $\alpha$ with respect to $(x_i)_{i=1}^n$, there exists a sequence $S=\{m_i\}_{i=1}^{\infty}$ of $\mathbb{Z}_+$ such that $h^{S}_{\mu}(T,\alpha)>0$. Denote by $SE_n^{\mu}(X,T)$ the set of all sequence entropy $n$-tuples for $\mu$.
\item a sequence entropy $n$-tuple if for any admissible finite open cover $\mathcal{U}$ with respect to $(x_i)_{i=1}^n$, there exists a sequence $S=\{m_i\}_{i=1}^{\infty}$ of $\mathbb{Z}_+$ such that $h^{S}(T,\mathcal{U})>0$. Denote by $SE_n(X,T)$ the set of all sequence entropy $n$-tuples.
\end{itemize} \end{defn}
We say that $f\in L^2(X,\B_X,\mu)$ is {\it almost periodic} if $\{f\circ T^n : n\in \mathbb{Z}_+\}$ is precompact in $L^2(X,\B_X,\mu)$. The set of all almost periodic functions is denoted by $H_c$, and there exists a $T$-invariant $\sigma$-algebra $\mathcal{K}_\mu \subset \B_X$
such that $H_c= L^2(X,\mathcal{K}_\mu,\mu)$; the $\sigma$-algebra $\mathcal{K}_\mu$ is called the Kronecker algebra of $(X, \B_X,\mu, T)$. The product $\sigma$-algebra of $X^{(n)}$ is denoted by $\mathcal{B}_X^{(n)}$. Define the measure $\lambda_n(\mu)$ on $\mathcal{B}_X^{(n)}$ by letting $$\lambda_n(\mu)(\prod_{i=1}^nA_i)=\int_{X}\prod_{i=1}^n\mathbb{E}(1_{A_i}|\mathcal{K}_\mu)d\mu.$$ Note that $SE_n^{\mu}(X,T)=\supp(\lambda_n(\mu))\setminus \Delta_n(X)$ \cite[Theorem 3.4]{HMY04}.
\section{Proof of Theorem \ref{thm:sm=>se}}\label{sec3} \begin{defn}[\cite{LY21}]\label{defn:mu mean n-sensitive tuple} For $2\le n\in \mathbb{N}$ and a t.d.s. $(X,T)$ with $\mu\in M(X,T)$, we say that the $n$-tuple $(x_1,x_2,\dotsc,x_n)\in X^{(n)}\setminus \Delta_n(X)$ is \begin{enumerate} \item a \textit{$\mu$-mean $n$-sensitive tuple} if for any open neighbourhoods $U_i$ of $x_i$ with $i=1,2,\dotsc,n$, there is $\delta> 0$ such that for any $A\in \B_X$ with $\mu(A)>0$ there are $y_1,y_2,\dotsc,y_n\in A$ and a subset $F$ of $\mathbb{Z}_+$ with $\overline{D}(F)>\delta$ such that $T^k y_i \in U_i$ for all $i=1,2,\dots,n$ and $k\in F$. \item a \textit{$\mu$-$n$-sensitive in the mean tuple} if for any $\tau>0$, there is $\delta=\delta(\tau)> 0$ such that for any $A\in\B_X$ with $\mu(A)>0$ there is $m\in \mathbb{N}$ and $y_1^m,y_2^m,\dotsc,y_n^m\in A$ such that $$ \frac{\#\{0\le k\le m-1: T^ky_i^m\in B(x_i,\tau), i=1,2,\ldots,n\}}{m}>\delta. $$ \end{enumerate} \end{defn} We denote the set of all $\mu$-mean $n$-sensitive tuples (resp. $\mu$-$n$-sensitive in the mean tuples) by $MS_n^\mu(X,T)$ (resp. $SM_n^\mu(X,T)$). We call an $n$-tuple $(x_1,x_2,\dotsc,x_n)\in X^{(n)}$ \textit{essential} if $x_i\neq x_j$ for each $1\le i<j\le n$, and at this time we write the collection of all essential $n$-tuples in $MS_n^\mu(X,T)$ (resp. $SM_n^\mu(X,T)$) as $MS_n^{\mu,e}(X,T)$ (resp. $SM_n^{\mu,e}(X,T)$).
\begin{comment} \begin{defn}[\cite{LYY22}]\label{defn:mu-n-sensitive in the mean} For $2\le n\in \mathbb{N}$ and a t.d.s. $(X,T)$ with $\mu\in M(X,T)$, we say that $(X,T)$ is \textit{$\mu$-$n$-sensitive in the mean} if there is $\delta>0$ such that for any Borel subset $A$ of $X$ with $\mu(A)>0$ there are $m\in \mathbb{N}$ and $n$ pairwise distinct points $x_1^m,x_2^m,\dots,x_n^m\in A$ such that $$ \frac{1}{m}\sum_{k=0}^{m-1}\min_{1\le i\neq j\le n} d(T^k x_i^m, T^k x_j^m)>\delta. $$ \end{defn} \end{comment}
\begin{proof}[Proof of Theorem \ref{thm:sm=>se}] It suffices to prove $SM_n^{\mu,e}(X,T)\subset SE_n^{\mu,e}(X,T)$. Let $(x_1,\ldots,x_n)\in SM_n^{\mu,e}(X,T)$. Take $\alpha=\{A_1,\ldots,A_l\}$ as an admissible partition of $(x_1,\ldots,x_n)$. Then for each $1\le k\le l$, there is $i_k\in \{1,\ldots,n\}$ such that $x_{i_k}\notin \overline{A_k}$. Put $E_i=\{1\le k\le l: x_i\not\in \overline{A_k}\}$ for $1\le i\le n$. Obviously, $\cup_{i=1}^n E_i=\{1,\ldots,l\}$. Set $$B_1=\cup_{k\in E_1}A_k, B_2=\cup_{k\in E_2\setminus E_1}A_k, \ldots, B_n=\cup_{k\in E_n\setminus(\cup_{j=1}^{n-1}E_j)}A_k. $$ Then $\beta=\{B_1,\ldots,B_n\}$ is also an admissible partition of $(x_1,\ldots,x_n)$ such that $x_i\notin \overline{B_i}$ for all $1\le i\le n$. Without loss of generality, we assume $B_i\neq \emptyset$ for $1\le i\le n$. It suffices to show that there exists a sequence $S=\{m_i\}_{i=1}^{\infty}$ of $\mathbb{Z}_+$ such that $h^{S}_{\mu}(T,\beta)>0,$ as $\alpha\succ\beta$. Let $$h^*_\mu(T,\beta)=\sup \{h^{S}_{\mu}(T,\beta): S \ \text{is a sequence of } \mathbb{Z}_+ \}.$$
By \cite[Lemma 2.2 and Theorem 2.3]{HMY04}, we have $h^*_\mu(T,\beta)=H_\mu(\beta|\mathcal{K}_\mu)$, where $\mathcal{K}_\mu$ is the Kronecker algebra of $(X,\B_X,\mu,T)$. So it suffices to show $\beta\nsubseteq \mathcal{K}_\mu$.
We prove it by contradiction. Now we assume that $\beta\subseteq \mathcal{K}_\mu$. Then for each $i=1,\ldots,n$, $1_{B_i}$ is an almost periodic function. By \cite[Theorems 4.7 and 5.2]{Y19}, $1_{B_i}$ is a $\mu$-equicontinuous in the mean function. That is, for each $1\le i\le n$ and any $\tau>0$, there is a compact $K\subset X$ with $\mu(K)>1-\tau$ such that for any $\epsilon'>0$, there is $\delta'>0$ such that for all $m\in\mathbb{N}$, whenever $x,y\in K$ with $d(x,y)<\delta'$, \begin{equation}\label{3}
\frac{1}{m}\sum_{t=0}^{m-1}|1_{B_i}(T^tx)- 1_{B_i}(T^ty)|<\epsilon'. \end{equation}
On the other hand, take $\epsilon>0$ such that $B_\epsilon(x_i)\cap B_i=\emptyset$ for $i=1,\ldots,n$. Since $(x_1,\ldots,x_n)\in SM_n^{\mu,e}(X,T)$, there is $\delta:=\delta(\epsilon)>0$ such that for any $A\in \B_X$ with $\mu(A)>0$ there are $m\in\mathbb{N}$ and $y_1^m,\ldots,y_n^m\in A$ such that if we denote $C_m=\{0\le t\le m-1:T^ty_i^m\in B_\epsilon(x_i)\text{ for all }i=1,2,\ldots,n\}$ then $\#C_m \ge m\delta$. Since $ B_\epsilon(x_1)\cap B_1=\emptyset$, then $ B_\epsilon(x_1)\subset \cup_{i=2}^nB_i$. This implies that there is $i_0\in \{2,\ldots,n\}$ such that $$ \# \{t\in C_m: T^ty_1^m\in B_{i_0} \}\ge \frac{\#C_m}{n-1}. $$ For any $t\in C_m$, we have $T^ty_{i_0}^m\in B_\epsilon(x_{i_0})$, and then $T^ty_{i_0}^m\notin B_{i_0}$, as $B_\epsilon(x_{i_0})\cap B_{i_0}=\emptyset$. This implies that \begin{equation}\label{e1}
\frac{1}{m}\sum_{t=0}^{m-1}|1_{B_{i_0}}(T^ty_1^m)-1_{B_{i_0}}(T^ty_{i_0}^m)|\ge\frac{\#C_m}{m(n-1)}\ge \frac{\delta}{n-1}. \end{equation} Choose a measurable subset $A\subset K$ such that $\mu(A)>0$ and $\diam(A)=\sup\{d(x,y):x,y\in A\}<\delta'$, and $\epsilon'=\frac{\delta}{2(n-1)}$. Then by \eqref{3}, for any $m\in\mathbb{N}$ and $x,y\in A$, $$
\frac{1}{m}\sum_{t=0}^{m-1}|1_{B_{i_0}}(T^tx)- 1_{B_{i_0}}(T^ty)|<\frac{\delta}{2(n-1)}, $$ a contradiction with \eqref{e1}. Thus, $SM_n^{\mu,e}(X,T)\subset SE_n^{\mu,e}(X,T)$. \end{proof}
\section{Proof of Theorem \ref{thm:se=>ms}}\label{sect:proof of thm se=>ms} In Section 4.1, we first reduce Theorem \ref{thm:se=>ms} to the case of an ergodic m.p.s.
with a continuous factor map to its Kronecker factor, and then we prove Theorem \ref{thm:se=>ms} under this assumption. In Section 4.2, we show that the assumption that $\mu$ is ergodic is necessary. \subsection{Ergodic case} Throughout this section, we will use the following two types of factor maps between two m.p.s. $(X, \B_X,\mu, T)$ and $(Z, \B_Z,\nu, S)$. \begin{enumerate} \item \emph{Measurable factor maps:} a measurable map $\pi: X \rightarrow Z$ such that $\mu\circ\pi^{-1}=\nu$ and $\pi \circ T=S \circ \pi$ $\mu$-a.e.; \item \emph{Continuous factor maps:} a topological factor map $\pi: X \rightarrow Z$ such that $\mu\circ\pi^{-1}=\nu$. \end{enumerate} If a continuous factor map $\pi$ satisfies $\pi^{-1}(\B_Z)=\mathcal{K}_\mu$,
then $\pi$ is called a continuous factor map to its Kronecker factor.
The following result is a weaker version of \cite[Proposition 3.20]{KMRR}. \begin{lem}\label{lem3} Let $(X, \mathcal{B}_X,\mu, T)$ be an ergodic m.p.s. Then there exists an ergodic m.p.s. $(\tilde{X},\tilde{B}, \tilde{\mu}, \tilde{T})$ and a continuous factor map $\tilde{\pi}: \tilde{X} \rightarrow X$ such that $(\tilde{X},\tilde{B}, \tilde{\mu}, \tilde{T})$ has a continuous factor map to its Kronecker factor. \end{lem}
The following result shows that we only need to prove $SE_n^{\mu}(X,T)\subset MS_n^{\mu}(X,T)$ for all ergodic m.p.s. with a continuous factor map to its Kronecker factor. \begin{lem}\label{lem5} If $SE_n^{\tilde{\mu}}(\tilde{X},\tilde T)\subset MS_n^{\tilde{\mu}}(\tilde{X},\tilde T)$ for all ergodic m.p.s. $(\tilde{X},\tilde{B}, \tilde{\mu}, \tilde{T})$ with a continuous factor map to its Kronecker factor, then $SE_n^{\mu}(X,T)\subset MS_n^{\mu}(X,T)$ for all ergodic m.p.s. $(X, \mathcal{B}_X,\mu, T)$. \end{lem} \begin{proof} By Lemma \ref{lem3}, there exists an ergodic m.p.s. $(\tilde{X},\tilde{B}, \tilde{\mu}, \tilde{T})$ and a continuous factor map $\tilde{\pi}: \tilde{X} \rightarrow X$ such that $(\tilde{X},\tilde{B}, \tilde{\mu}, \tilde{T})$ has a continuous factor map to its Kronecker factor. Thus $SE_n^{\tilde\mu}(\tilde{X},\tilde T)\subset MS_n^{\tilde\mu}(\tilde{X},\tilde T)$, by the assumption.
For any $(x_1,\dotsc,x_n)\in SE_n^{\mu}(X,T)\setminus \Delta_n'(X)$, by \cite[Theorem 3.7]{HMY04} there exists an $n$-tuple $(\tilde{x_1},\dots,\tilde{x_n})\in SE_n^{\tilde\mu}(\tilde{X},\tilde T)\setminus \Delta_n'(\tilde{X})$ such that $\tilde\pi(\tilde{x_i})=x_i$. For any open neighborhood $U_1\times \dots \times U_n$ of $(x_1,\dotsc,x_n)$ with $U_i\cap U_j=\emptyset$ for $i\neq j$, the set $\tilde\pi^{-1}(U_1)\times \dots \times \tilde\pi^{-1}(U_n)$ is an open neighborhood of $(\tilde{x_1},\dots,\tilde{x_n})$. Since $(\tilde{x_1},\dots,\tilde{x_n})\in SE_n^{\tilde\mu}(\tilde{X},\tilde T)\setminus \Delta_n'(\tilde{X})\subset MS_n^{\tilde\mu}(\tilde{X},\tilde T)\setminus \Delta_n'(\tilde{X})$, there exists $\delta>0$ such that for any $A\in\mathcal{B}_X$ with $\tilde{\mu}(\tilde\pi^{-1}(A))=\mu(A)>0$, there exist $F\subset \mathbb{N}$ with $\overline{D}(F)\ge \delta$ and $\tilde{y_1},\dots,\tilde{y_n}\in \tilde\pi^{-1}(A)$ such that for any $m\in F$, $$ (\tilde T^m\tilde{y_1},\dots,\tilde T^m\tilde{y_n}) \in \tilde\pi^{-1}(U_1)\times \dots \times \tilde\pi^{-1}(U_n)$$ and hence $(T^m\tilde\pi(\tilde{y_1}),\dots,T^m\tilde\pi(\tilde{y_n}))\in U_1\times \dots \times U_n$. Note that $\tilde\pi(\tilde{y_i})\in A$ for each $i=1,2,\ldots,n$. Thus we have $(x_1,\dotsc,x_n)\in MS_n^{\mu}(X,T)$.
According to the above lemma, in the rest of this section we fix an ergodic m.p.s. with a continuous factor map $\pi:(X,\mathcal{B}_X, \mu, T)\rightarrow (Z,\mathcal{B}_Z, \nu, R)$
to its Kronecker factor. Moreover, we fix a measure disintegration $z \to \eta_{z}$ of $\mu$ over $\pi$, i.e. $\mu = \int_Z \eta_{z} d\nu(z)$.
The following lemma plays a crucial role in our proof. In \cite[Proposition 3.11]{KMRR}, the authors proved it for $n=2$; the general case is similar in spirit. For readability, we defer the somewhat involved proof to Appendix \ref{APPENDIX}.
\begin{lem}\label{0726} Let $\pi:(X,\mathcal{B}_X, \mu, T)\rightarrow (Z,\mathcal{B}_Z, \nu, R)$ be
a continuous factor map to its Kronecker factor. Then for each $n\in\mathbb{N}$, there exists a continuous map $\textbf{x}\mapsto \lambda_{\textbf{x}}^n$ from $X^{(n)}$ to $M(X^{(n)})$ which is an ergodic decomposition of $\mu^{(n)}$, where $\mu^{(n)}$ is the $n$-fold product of $\mu$ and $$ \lambda^n_\textbf{x} = \int_Z \eta_{z + \pi(x_1)} \times\dots\times \eta_{z+\pi(x_n)} d\nu(z), \text{ for }\textbf{x}=(x_1,x_2,\ldots,x_n).$$ \end{lem}
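For orientation, we record a simple sanity check (included only for the reader's convenience and not used later): for $n=1$, since the Kronecker factor $(Z,\mathcal{B}_Z,\nu,R)$ is a rotation on a compact abelian group and $\nu$ is its Haar measure, translation invariance of $\nu$ gives
$$ \lambda^1_{x_1}=\int_Z \eta_{z+\pi(x_1)}\, d\nu(z)=\int_Z \eta_{w}\, d\nu(w)=\mu \quad\text{for every } x_1\in X, $$
which is indeed the (trivial) ergodic decomposition of the ergodic measure $\mu$.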
The following two lemmas can be viewed as generalizations of Lemma 3.3 and Theorem 3.4 in \cite{HMY04}, respectively. \begin{lem}\label{lem1} Let $\pi:(X,\mathcal{B}_X, \mu, T)\rightarrow (Z,\mathcal{B}_Z, \nu, R)$ be a continuous factor map to its Kronecker factor. Assume that $\mathcal{U}=\{U_1, U_2, \dots, U_n\}$ is a measurable cover of $X$. Then for any measurable partition $\alpha$ finer than $\mathcal{U}$ as a cover, there exists an increasing sequence $S\subset\mathbb{Z}_+$ with $h_{\mu}^{S}(T,\alpha)>0$, if and only if $\lambda_\textbf{x}^n (U_1^c\times\dots\times U_n^c)>0$ for all $\textbf{x}=(x_1,\dotsc, x_n)\in X^{(n)}$. \end{lem} \begin{proof} $(\Rightarrow)$ Suppose, to the contrary, that $\lambda_\textbf{x}^n(U_1^c\times\dots\times U_n^c)=0$ for some $\textbf{x}=(x_1,\dotsc, x_n)\in X^{(n)}$. Let $C_i=\{z\in Z: \eta_{z+\pi(x_i)}(U_i^c)>0\}$ for $i=1,\dotsc,n$. Then $$\mu(U_i^c\setminus \pi^{-1}(C_i))=\int_{Z}\eta_{z+\pi(x_i)}(U_i^c\cap \pi^{-1}(C_i^c))d \nu(z)=0.$$ Put $D_i=\pi^{-1}(C_i)\cup (U_i^c\setminus \pi^{-1}(C_i))$. Then $D_i\in \pi^{-1}(\mathcal{B}_Z)= \mathcal{K}_\mu$ and $D_i^c\subset U_i$, where $\mathcal{K}_\mu$ is the Kronecker factor of $X$.
For any $\textbf{s}=(s(1),\dotsc,s(n))\in \{0,1\}^n$, let $D_{\textbf{s}}=\cap_{i=1}^nD_i\left(s(i)\right)$, where $D_i(0)=D_i$ and $D_i(1)=D_i^c$. Set $E_1=\left(\cap_{i=1}^nD_i\right)\cap U_1 $ and $E_j=\left(\cap_{i=1}^nD_i\right)\cap( U_j\setminus \bigcup_{i=1}^{j-1}U_i)$ for $j=2,\dotsc,n$.
Consider the measurable partition $$\alpha=\left\{D_\textbf{s}:\textbf{s}\in\{0,1\}^n\setminus\{(0,\dotsc,0)\}\right\}\cup\{E_1, \dotsc, E_n\}.$$ For any $\textbf{s}\in \{0,1\}^n\setminus\{(0,\dotsc,0)\}$, we have $s(i)=1$ for some $i=1,\dotsc,n$, then $D_\textbf{s}\subset D_i^c\subset U_i$. It is straightforward that for all $1\leq j\leq n$, $E_j\subset U_j$. Thus $\alpha$ is finer than $\mathcal{U}$ and by hypothesis there exists an increasing sequence $S$ of $\mathbb{Z}_+$ with $h_{\mu}^{S}(T,\alpha)>0$.
On the other hand, since $\lambda_{\textbf{x}}^n(U_1^c\times\dots\times U_n^c)=0$, we deduce $\nu\left(\cap_{i=1}^nC_i\right)=0$ and hence $\mu\left(\cap_{i=1}^nD_i\right)=0$. Thus we have $E_1,\dotsc, E_n\in \mathcal{K}_\mu$. It is also clear that $D_\textbf{s}\in \mathcal{K}_\mu$ for all $\textbf{s}\in\{0,1\}^n\setminus\{(0,\dotsc,0)\}$, as $D_1,\dotsc,D_n\in \mathcal{K}_\mu.$ Therefore each element of $\alpha$ is $\mathcal{K}_\mu$-measurable, by \cite[Lemma 2.2]{HMY04}, $$h^{S}_{\mu}(T,\alpha)\leq H_{\mu}(\alpha|\mathcal{K}_\mu)=0,$$ a contradiction.
$(\Leftarrow)$ Assume $\lambda_\textbf{x}^n(U_1^c\times\dots\times U_n^c)>0$ for any $\textbf{x}\in X^{(n)}$. In particular, we take $\textbf{x}=(x,\ldots,x)$ such that $\pi(x)$ is the identity element of the group $Z$. Without loss of generality, we may assume that any finite measurable partition $\alpha$ which is finer than $\mathcal{U}$ as a cover is of the type $\alpha=\left\{A_1, A_2, \ldots, A_n\right\}$ with $A_i \subset U_i$, for $1 \leqslant i \leqslant n$. Let $\alpha$ be such a partition. We observe that \begin{equation*} \begin{split} \int_Z \eta_{z}({A_1^c}) \dots\eta_{z}(A_n^c) d\nu(z) \ge \int_Z \eta_{z }({U_1^c}) \dots\eta_{z}(U_n^c) d\nu(z)=\lambda_\textbf{x}^n(U_1^c\times\dots\times U_n^c)>0. \end{split} \end{equation*} Therefore, $A_j \notin \mathcal{K}_\mu$ for some $1 \leqslant j \leqslant n$: otherwise each $A_i$ would coincide, modulo $\mu$-null sets, with $\pi^{-1}(B_i)$ for some $B_i\in\B_Z$, so that $\eta_z(A_i^c)=1_{B_i^c}(z)$ for $\nu$-a.e. $z$, and since the sets $B_i$ cover $Z$ up to a $\nu$-null set, the integrand on the left-hand side would vanish $\nu$-a.e. It follows from \cite[Theorem 2.3]{HMY04} that there exists a sequence $S \subset \mathbb{Z}_+$ such that $h_\mu^{S}(T, \alpha)=H_\mu\left(\alpha \mid \mathcal{K}_\mu\right)>0$. This finishes the proof. \end{proof}
\begin{lem}\label{lem2} For any $\textbf{x}=(x_1,\dotsc,x_n)\in X^{(n)}$, \[SE_{n}^\mu(X,T)= \operatorname{supp}\lambda_{\textbf{x}}^n\setminus \Delta_n(X).\] \end{lem} \begin{proof} On the one hand, let $\textbf{y}=(y_1,\dotsc,y_n)\in SE_n^{\mu}(X,T)$. We show that $\textbf{y}\in\operatorname{supp}\lambda_{\textbf{x}}^n\setminus \Delta_n(X)$. It suffices to prove that for any measurable neighborhood $U_1\times \dots \times U_n$ of $\textbf{y}$, $$\lambda_{\textbf{x}}^n\left(U_1\times U_2\times \dots \times U_n\right)> 0.$$ Without loss of generality, we assume that $U_i\cap U_j=\emptyset$ if $y_i\not= y_j$. Then $\mathcal{U}=\{U_1^c, U_2^c, \dots, U_n^c\}$ is a finite cover of $X$. It is clear that any finite measurable partition $\alpha$ finer than $\mathcal{U}$ as a cover is an admissible partition with respect to $\textbf{y}$. Therefore, there exists an increasing sequence $S\subset\mathbb{Z}_+$ with $h_{\mu}^{S}(T,\alpha)>0$. By Lemma \ref{lem1}, we obtain that $$\lambda_\textbf{x}^n\left(U_1\times U_2\times \dots \times U_n\right)> 0,$$ which implies that $\textbf{y}\in \operatorname{supp}\lambda_{\textbf{x}}^n$. Since $\textbf{y}\notin \Delta_n(X)$, $\textbf{y}\in \operatorname{supp}\lambda_{\textbf{x}}^n\setminus \Delta_n(X)$.
On the other hand, let $\textbf{y}=(y_1,\ldots,y_n) \in \operatorname{supp}\lambda_\textbf{x}^n\setminus \Delta_n(X)$. We show that for any admissible partition $\alpha=\left\{A_1, A_2, \ldots, A_k\right\}$ with respect to $\textbf{y}$ there exists an increasing sequence $S \subset \mathbb{Z}_+$ such that $h_\mu^{S}(T, \alpha)>0$. Since $\alpha$ is an admissible partition with respect to $\textbf{y}$ there exist closed neighborhoods $U_i$ of $y_i, 1 \leqslant i \leqslant n$, such that for each $j \in\{1,2, \ldots, k\}$ we find $i_j \in\{1,2, \ldots, n\}$ with $A_j \subset U_{i_j}^c$. That is, $\alpha$ is finer than $\mathcal{U}=\left\{U_1^c, U_2^c, \ldots, U_n^c\right\}$ as a cover. Since $$\lambda_\textbf{x}^n\left(U_1\times U_2\times \dots \times U_n\right)>0,$$ by Lemma \ref{lem1}, there exists an increasing sequence $S \subset \mathbb{Z}_+$ such that $h_\mu^{S}(T, \alpha)>0$. \end{proof}
Now we are ready to give the proof of Theorem \ref{thm:se=>ms}. \begin{proof}[Proof of Theorem \ref{thm:se=>ms}] We only need to prove that $SE_n^{\mu,e}(X,T)\subset MS_n^{\mu,e}(X,T)$. We let $\pi:(X,\mathcal{B}_X, \mu, T)\rightarrow (Z,\mathcal{B}_Z, \nu, R)$ be a continuous factor map to its Kronecker factor. For any $\textbf{y}=(y_1,\ldots,y_n)\in SE_n^{\mu,e}(X,T)$, let $U_1\times U_2\times \dots \times U_n$ be an open neighborhood of $\textbf{y}$ such that $U_i\cap U_j=\emptyset$ for $1\le i\not=j \le n$. By Lemma \ref{lem2}, one has $\lambda_\textbf{x}^n\left(U_1\times U_2\times \dots \times U_n\right)> 0$ for any $\textbf{x}=(x_1,\dotsc,x_n)\in X^{(n)}$. Since the map $\textbf{x} \mapsto \lambda_\textbf{x}^n$ is continuous, $X$ is compact and $U_1, U_2, \dotsc, U_n$ are open sets, it follows that there exists $\delta>0$ such that for any $\textbf{x}\in X^{(n)}$, $\lambda_\textbf{x}^n\left(U_1\times U_2\times \dots \times U_n\right)\ge \delta$. As the map $\textbf{x} \mapsto \lambda_\textbf{x}^n$ is an ergodic decomposition of $\mu^{(n)}$, there exists $B\subset X^{(n)}$ with $\mu^{(n)}(B)=1$ such that $\lambda_\textbf{x}^n$ is ergodic on $X^{(n)}$ for any $\textbf{x}\in B$.
For any $A\in\mathcal{B}_X$ with $\mu (A)>0$, there exists a subset $C$ of $X^{(n)}$ with $\mu^{(n)}(C)>0$ such that for any $\textbf{x}\in C$, \[\lambda_\textbf{x}^n(A^n)>0.\] Take $\textbf{x}\in B\cap C$, by the Birkhoff pointwise ergodic theorem, for $\lambda_\textbf{x}^n$-a.e. $(x_1',\dotsc,x_n')\in X^{(n)}$ \[\lim_{N\to \infty}\frac{1}{N}\sum_{m=0}^{N-1}1_{U_1\times U_2\times\dots\times U_n}(T^mx_1',\dotsc,T^mx_n')=\lambda_\textbf{x}^n\left(U_1\times U_2\times\dots\times U_n\right)\ge \delta.\] Since $\lambda_\textbf{x}^n\left(A^n\right)>0$, there exists $(x_1'',\dotsc,x_n'')\in A^n$ such that \begin{equation*} \begin{split} &\lim_{N\to \infty}\frac{1}{N}\#\{m\in[0,N-1]:(T^mx_1'',\dotsc,T^mx_n'')\in U_1\times U_2\times\dots\times U_n\}\\ &=\lim_{N\to \infty}\frac{1}{N}\sum_{m=0}^{N-1}1_{U_1\times U_2\times\dots\times U_n}(T^mx_1'',\dotsc,T^mx_n'')\\ &=\lambda_\textbf{x}^n\left(U_1\times U_2\times\dots\times U_n\right)\ge \delta. \end{split} \end{equation*} Let $F=\{m\in\mathbb{Z}_+:(T^mx_1'',\dotsc,T^mx_n'')\in U_1\times U_2\times\dots\times U_n\}$. Then $D(F)\ge \delta$ and hence $\textbf{y}\in MS_n^{\mu,e}(X,T).$ This finishes the proof. \end{proof}
\subsection{Non-ergodic case} \begin{lem}\label{lem4} Let $(X,T)$ be a t.d.s. For any $\mu\in M(X,T)$ of the form $\mu=\sum_{i=1}^{m}\lambda_i\nu_i$, where $\nu_i\in M^e(X,T)$, $\sum_{i=1}^m\lambda_i=1$ and $\lambda_i>0$, one has \begin{equation}\label{1} \bigcup_{i=1}^mSE_n^{\nu_i}(X,T)\subset SE_n^{\mu}(X,T) \end{equation} and \begin{equation}\label{2} \bigcap_{i=1}^mSM_n^{\nu_i}(X,T)= SM_n^{\mu}(X,T). \end{equation} \end{lem} \begin{proof} We first prove \eqref{1}. For any $\textbf{x}=(x_1,\dotsc,x_n)\in\bigcup_{i=1}^mSE_n^{\nu_i}(X,T)$, there exists $i_0\in\{1,2,\ldots,m\}$ such that $\textbf{x}\in SE_n^{\nu_{i_0}}(X,T)$, and then for any admissible partition $\alpha$ with respect to $\textbf{x}$, there exists a sequence $S=\{s_j\}_{j=1}^\infty$ in $\mathbb{Z}_+$ such that $h_{\nu_{i_0}}^S(T,\alpha)>0.$ By the definition of sequence entropy and the concavity of the entropy function in the measure, \[h_{\mu}^S(T,\alpha)=\limsup_{N\to \infty}\frac{1}{N}H_{\mu}(\bigvee_{j=0}^{N-1}T^{-s_j}\alpha)\ge \limsup_{N\to \infty}\frac{1}{N}\sum_{i=1}^m\lambda_i H_{\nu_i}(\bigvee_{j=0}^{N-1}T^{-s_j}\alpha)\ge \lambda_{i_0}h_{\nu_{i_0}}^S(T,\alpha)>0.\] So $\textbf{x}\in SE_n^{\mu}(X,T)$, which finishes the proof of \eqref{1}.
Next, we show \eqref{2}. For this, we only need to note that for any $A\in\mathcal{B}_X$, $\mu(A)>0$ if and only if $\nu_j(A)>0$ for some $j\in\{1,2,\ldots,m\}$. \end{proof}
\begin{proof}[Proof of Theorem \ref{thm:sm=/=se}] We first claim that there is a t.d.s. $(X,T)$ with $\mu_1,\mu_2\in M^e(X,T)$ such that $SE_n^{\mu_1}(X,T)\neq SE_n^{\mu_2}(X,T)$. For example, consider the full shift on two symbols with the Bernoulli measure determined by the probability vector $(1/2,1/2)$. This system has completely positive entropy and the measure has full support, so every non-diagonal $n$-tuple is a sequence entropy $n$-tuple for this measure. In particular, we consider two such full shifts $(X_1,T_1,\mu_1)=\left(\{0,1\}^{\mathbb{Z}},\sigma_1,\mu_1\right)$ and $(X_2,T_2,\mu_2)=\left(\{2,3\}^{\mathbb{Z}},\sigma_2,\mu_2\right)$
and define a new system $(X,T)$ as $X=X_1\bigsqcup X_2$, $T|_{X_i}=T_i, i=1,2$. Then $\mu_1,\mu_2\in M^e(X,T)$ and $SE_n^{\mu_1}(X,T)=X_1^{(n)}\setminus\Delta_n(X_1)\neq X_2^{(n)}\setminus\Delta_n(X_2)=SE_n^{\mu_2}(X,T).$
Let $\mu=\frac{1}{2}\mu_1+\frac{1}{2}\mu_2\in M(X,T)$. By Lemma \ref{lem4}, if $SE_n^\mu(X,T)=SM_n^\mu(X,T)$ then we have \[\cup_{i=1}^2SE_n^{\mu_i}(X,T)\subset SE_n^{\mu}(X,T)=SM_n^\mu(X,T)=\cap_{i=1}^2SM_n^{\mu_i}(X,T).\] However, applying Theorem \ref{cor:se=sm} to each $\mu_i\in M^e(X,T)$, one has \[SE_n^{\mu_i}(X,T)=SM_n^{\mu_i}(X,T), \text{ for }i=1,2.\] So $\cup_{i=1}^2SE_n^{\mu_i}(X,T)\subset \cap_{i=1}^2SE_n^{\mu_i}(X,T)$, which forces $SE_n^{\mu_1}(X,T)= SE_n^{\mu_2}(X,T)$, contradicting the construction above. \end{proof}
\section{Topological sequence entropy and mean sensitive tuples}\label{sec5}
This section is devoted to providing some partial evidence for the conjecture that in a minimal system every mean sensitive tuple is a topological sequence entropy tuple.
It is known that topological sequence entropy tuples have the lifting property \cite{MS07}. We show that, under the minimality assumption, mean sensitive tuples also have the lifting property. Let us begin with some notions. For $2\le n\in\mathbb{N}$, we say that $(x_1,x_2,\dotsc,x_n)\in X^{(n)}\setminus \Delta_n(X)$ (resp. $(x_1,x_2,\dotsc,x_n)\in X^{(n)}\setminus \Delta'_n(X)$) is a \textit{mean $n$-sensitive tuple} (resp. an \textit{essential mean $n$-sensitive tuple}) if for any $\tau>0$, there is $\delta=\delta(\tau)> 0$ such that for any nonempty open set $U\subset X$ there exist $y_1,y_2,\dotsc,y_n\in U$ such that if we denote $F=\{k\in\mathbb{Z}_+\colon T^ky_i\in B(x_i,\tau),i=1,2,\ldots,n\}$ then $\overline{D}(F)>\delta$. Denote the set of all mean $n$-sensitive tuples (resp. essential mean $n$-sensitive tuples) by $MS_n(X,T)$ (resp. $MS^e_n(X,T)$).
\begin{thm}\label{lem:MSn-factor}
Let $\pi: (X,T)\rightarrow (Y,S)$ be a factor map between two t.d.s. Then
\begin{enumerate}
\item $\pi^{(n)} ( MS_n(X,T))\subset MS_n(Y,S)\cup \Delta_n(Y)$ for every $n\geq 2$;
\item $\pi^{(n)}\left(MS_n(X, T) \cup \Delta_n(X)\right)= MS_n(Y,S)\cup \Delta_n(Y)$ for every $n\geq 2$, provided that $(X,T)$ is minimal.
\end{enumerate} \end{thm} \begin{proof} Part (1) follows easily from the definition. We only prove (2).
Suppose that $(y_1,y_2,\cdots,y_n)\in MS_n(Y,S)$; we will show that there exists $(z_1,z_2,\cdots,z_n)\in MS_n(X,T)$ such that $\pi(z_i)=y_i$. Fix $x\in X$ and let $U_m=B(x,\frac{1}{m})$. Since $(X,T)$ is minimal, $\operatorname{int}(\pi(U_m))\not= \emptyset$, where $\operatorname{int}(\pi(U_m))$ is the interior of $\pi(U_m)$. Since $(y_1,y_2,\cdots,y_n)\in MS_n(Y,S)$, there exist $\delta>0$ and $y_m^1, \cdots, y_m^n\in \operatorname{int}(\pi(U_m))$ such that $$\overline{D}(\{k\in \mathbb{Z}_+: S^ky_m^i \in \overline{B(y_i, 1)} \text{ for }i=1,\ldots,n\})\ge \delta.$$ Then for any $m\in \mathbb{N}$ there exist $x_m^1, \cdots, x_m^n\in U_m$ with $\pi(x_m^i)=y_m^i$, and hence $$\overline{D}(\{k\in \mathbb{Z}_+: T^kx_m^i \in \pi^{-1}(\overline{B(y_i, 1)})\text{ for }i=1,\ldots,n\})\ge \delta.$$
Put $$ A=\prod_{i=1}^n \pi^{-1}(\overline{B(y_i, 1)}), $$ and it is clear that $A$ is a compact subset of $X^{(n)}$.
We can cover $A$ with finitely many nonempty open sets of diameter less than $1$, i.e., $A \subset \cup_{i=1}^{N_1}A_1^i$ and $\diam(A_1^i)<1$. Then for each $m\in \mathbb{N}$ there is $1\leq N_1^m\leq N_1$ such that $$\overline{D}(\{k\in \mathbb{Z}_+: (T^kx_m^1,\ldots, T^kx_m^n)\in \overline{A_1^{N_1^m}}\cap A \})\ge \delta/N_1.$$ Without loss of generality, we assume $N_1^m=1$ for all $m\in \mathbb{N}$. Namely, $$ \overline{D}(\{k\in \mathbb{Z}_+: (T^kx_m^1,\ldots, T^kx_m^n)\in \overline{A_1^{1}}\cap A\}) \ge \delta/N_1 \text{ for all }m\in\mathbb{N}. $$
Repeating the above procedure, for $l\ge 1$ we can cover $\overline{A_l^{1}}\cap A$ with finitely many nonempty open sets of diameter less than $\frac{1}{l+1}$, i.e., $\overline{A_l^{1}}\cap A \subset \cup_{i=1}^{N_{l+1}}A_{l+1}^i$ and $\diam(A_{l+1}^i)<\frac{1}{l+1}$. Then for each $m\in \mathbb{N}$ there is $1\leq N_{l+1}^m\leq N_{l+1}$ such that $$ \overline{D}(\{k\in \mathbb{Z}_+: (T^kx_m^1,\ldots, T^kx_m^n)\in \overline{A_{l+1}^{N_{l+1}^m} }\cap A \}) \ge \frac{\delta}{N_1N_2\cdots N_{l+1}}. $$ Without loss of generality, we assume $N_{l+1}^m=1$ for all $m\in \mathbb{N}$. Namely, $$ \overline{D}(\{k\in \mathbb{Z}_+: (T^kx_m^1,\ldots, T^kx_m^n)\in \overline{A_{l+1}^{1}}\cap A \}) \ge\frac{\delta}{N_1N_2\cdots N_{l+1}} \text{ for all }m\in\mathbb{N}. $$
It is clear that there is a unique point $(z_1^1,\ldots,z_n^1)\in \bigcap_{l=1}^{\infty} \overline{A_l^{1}}\cap A $. We claim that $(z_1^1,\ldots,z_n^1)\in MS_n(X, T)$. In fact, for any $\tau>0$, there is $l\in \mathbb{N}$ such that $\overline{A_{l}^{1}}\cap A \subset V_{1}\times\cdots \times V_{n}$, where $V_i=B(z_i^1,\tau)$ for $i=1,\ldots,n$. By the construction, for any $m\in\mathbb{N}$ there are $x_m^1,\ldots, x_m^n\in U_m$ such that $$ \overline{D}(\{k\in \mathbb{Z}_+: (T^kx_m^1,\ldots, T^kx_m^n)\in \overline{A_{l}^{1}}\cap A \}) \ge\frac{\delta}{N_1N_2\cdots N_{l}} $$ and so $$ \overline{D}(\{k\in \mathbb{Z}_+: (T^kx_m^1,\ldots, T^kx_m^n)\in V_{1}\times\cdots \times V_{n} \}) \ge \frac{\delta}{N_1N_2\cdots N_{l}} $$ for all $m\in \mathbb{N}$. For any nonempty open set $U\subset X$, since $x$ is a transitive point, there is $s\in \mathbb{Z}$ such that $T^sx\in U$. We can choose $m\in \mathbb{N}$ such that $T^sU_{m}\subset U$. This implies that $T^sx_{m}^1,\ldots, T^sx_{m}^n\in U$ and $$ \overline{D}(\{k\in \mathbb{Z}_+: (T^k(T^sx_{m}^1),\ldots, T^k(T^sx_{m}^n))\in V_{1}\times\cdots \times V_{n}\} ) \ge \frac{\delta}{N_1N_2\cdots N_{l}}. $$ So we have $(z_1^1,\ldots,z_n^1)\in MS_n(X, T)$.
Similarly, for each $p\in\mathbb{N}$, there exists $(z_1^p,\ldots,z_n^p)\in MS_n(X, T)\cap \prod_{i=1}^n \pi^{-1}(\overline{B(y_i, \frac{1}{p})})$. By compactness of $X$, passing to a subsequence we may assume that $z_i^p\rightarrow z_i$ as $p\rightarrow \infty$ for each $i$. Then $(z_1,\ldots,z_n)\in MS_n(X, T)\cup \Delta_n(X)$ and $\pi(z_i)=y_i$. \end{proof}
Denote by $\mathcal{A}(MS_2(X, T))$ the smallest closed $T\times T$-invariant equivalence relation containing $MS_2(X, T)$.
\begin{cor}\label{cor:max-me-factor} Let $(X,T)$ be a minimal t.d.s. Then $X/\mathcal{A}(MS_2(X, T))$ is the maximal mean equicontinuous factor of $(X,T)$. \end{cor}
\begin{proof} Let $Y=X/\mathcal{A}(MS_2(X, T))$ and $\pi:(X,T)\to (Y,S)$ be the corresponding factor map. We show that $(Y,S)$ is mean equicontinuous. Assume that $(Y,S)$ is not mean equicontinuous. Then by \cite[Corollary 5.5]{LTY15}, $(Y,S)$ is mean sensitive, and by \cite[Theorem 4.4]{LYY22}, $MS_2(Y,S)\not=\emptyset$. By Theorem \ref{lem:MSn-factor}, there exists $(x_1,x_2)\in MS_2(X, T)$ such that $(\pi(x_1),\pi(x_2))\in MS_2(Y,S)$. Then $(x_1,x_2)\not \in R_\pi:=\{(x,x')\in X\times X:\pi(x)=\pi(x')\}$, a contradiction, since $MS_2(X, T)\subset\mathcal{A}(MS_2(X, T))=R_\pi$.
Let $(Z,W)$ be a mean equicontinuous t.d.s. and $\theta: (X,T)\to (Z,W)$ be a factor map. Since $(X,T)$ is minimal, so is $(Z,W)$. Then by \cite[Corollary 5.5]{LTY15} and \cite[Theorem 4.4]{LYY22}, $MS_2(Z,W)=\emptyset$. By Theorem \ref{lem:MSn-factor} $MS_2(X,T)\subset R_\theta$, where $R_\theta$ is the corresponding equivalence relation with respect to $\theta$. This implies that $(Z,W)$ is a factor of $(Y,S)$ and so $(Y,S)$ is the maximal mean equicontinuous factor of $(X,T)$. \end{proof}
In the following we show Theorem \ref{thm:ms=>it}. Let us begin with some preparations. \begin{defn}[\cite{KL07}]Let $(X,T)$ be a t.d.s.
\begin{itemize} \item For a tuple $(A_1,A_2,\ldots, A_n)$ of subsets of $X$, we say that a set $J\subseteq \mathbb{Z}_+$ is an {\em independence set} for $A$ if for every nonempty finite subset $I\subseteq J$ and function $\sigma: I\rightarrow \{1,2,\ldots, n\}$ we have $\bigcap_{k\in I} T^{-k} A_{\sigma(k)}\neq \emptyset.$
\item For $n\ge2$, we call a tuple $\textbf{x}=(x_1,\ldots,x_n)\in X^{(n)}$ an {\em IT-tuple} if for any product neighbourhood $U_1\times U_2\times \ldots \times U_n$ of $\textbf{x}$ in $X^{(n)}$ the tuple $(U_1,U_2,\ldots, U_n)$ has an infinite independence set. We denote the set of IT-tuples of length $n$ by ${\rm IT}_n (X, T)$.
\item For $n\ge2$, we call an IT-tuple $\textbf{x}=(x_1,\ldots,x_n)\in X^{(n)}$ an essential {\em IT-tuple} if $x_i\neq x_j$ for any $i\neq j$. We denote the set of all essential IT-tuples of length $n$ by ${\rm IT}^e_n (X, T)$.
\end{itemize} \end{defn}
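To illustrate these notions, we recall a standard example (included only for intuition and not used in what follows). Let $(X,T)$ be the full shift on $\{1,2,\ldots,n\}^{\mathbb{Z}}$ and let $U_i=\{x\in X: x_0=i\}$ for $1\le i\le n$. Then every subset $J\subseteq \mathbb{Z}_+$ is an independence set for $(U_1,\ldots,U_n)$: given a nonempty finite $I\subseteq J$ and $a:I\to\{1,2,\ldots,n\}$, any point $x$ with $x_k=a(k)$ for all $k\in I$ lies in $\bigcap_{k\in I}T^{-k}U_{a(k)}$. Moreover, the tuple $(\bar 1,\ldots,\bar n)$ of pairwise distinct fixed points, where $\bar i$ denotes the constant sequence with symbol $i$, is an essential IT-tuple: any product neighbourhood of it contains a product of cylinder sets $C_i=\{x: x_j=i \text{ for } |j|\le N\}$ for some $N\in\mathbb{N}$, and the infinite set $\{(2N+1)l: l\in\mathbb{N}\}$ is an independence set for $(C_1,\ldots,C_n)$, since the blocks of constrained coordinates do not overlap.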
\begin{prop}\cite[Proposition 3.2]{HLSY}\label{independent sets} Let $X$ be a compact metric topological group with the left Haar measure $\mu$, and let $n\in \mathbb{N}$ with $n\ge 2$. Suppose that $V_{1},\ldots,V_{n}\subset X$ are compact subsets satisfying that \begin{enumerate} \item[(i)] $\overline{\operatorname{int} V_i}=V_i$ for $i=1,2,\cdots,n$,
\item[(ii)] $\operatorname{int}(V_{i})\cap \operatorname{int}(V_{j})=\emptyset$ for all $1\le i\neq j\le n$,
\item[(iii)] $\mu(\bigcap_{1\leq i\leq n}V_{i})>0$. \end{enumerate} Further, assume that $T: X\rightarrow X$ is a minimal rotation and $\mathcal{G}\subset X$ is a residual set. Then there exists an infinite set $I\subset \mathbb{Z}_+$ such that for all $a\in\{1,2,\ldots,n\}^{I}$ there exists $x \in\mathcal{G}$ with the property that \begin{equation}\label{eq: in the int} x\in \bigcap_{k\in I} T^{-k} {\rm int}(V_{a(k)}),\quad {\rm i.e.}\ T^kx\in \operatorname{int}(V_{a(k)}) \ \text{ for any }k\in I. \end{equation} \end{prop} A subset $Z\subset X$ is called {\it proper} if $Z$ is a compact subset with $\overline{\operatorname{int}(Z)} = Z$. The following lemma will help us complete the proof of Theorem \ref{thm:ms=>it}. \begin{lem}\label{lem:proper one to one} Let $(X,T)$ and $(Y,S)$ be two t.d.s., and $\pi:(X,T)\to (Y,S)$ be a factor map. Suppose that $(X,T)$ is minimal. Then the image of a proper subset of $X$ under $\pi$ is a proper subset of $Y$. \end{lem} \begin{proof} Given a proper subset $Z$ of $X$, we will show that $\pi(Z)$ is also proper. It is clear that $\pi(Z)$ is compact, as $\pi$ is continuous. Now we prove $\overline{\operatorname{int}(\pi(Z))} = \pi(Z)$.
It follows from the closedness of $\pi(Z)$ that $\overline{\operatorname{int}(\pi(Z))} \subset \pi(Z)$. On the other hand, for any $y\in \pi(Z)$, take $x\in \pi^{-1}(y)\cap Z$. Since $\pi^{-1}(y)\cap Z=\pi^{-1}(y)\cap\overline{\operatorname{int}(Z)}$, there exists a sequence $\{x_n\}_{n\in\mathbb{N}}$ such that $x_n\in \operatorname{int}(Z)$ and $\lim_{n\to \infty}x_n=x$. Let $\{r_n\}_{n\in\mathbb{N}}$ be a sequence of positive real numbers satisfying $$\lim_{n\to\infty}r_n=0\text{ and }B(x_n,r_n)\subset \operatorname{int}(Z).$$ By the minimality of $(X,T)$, the map $\pi$ is semi-open, and hence $\operatorname{int}(\pi(B(x_n,r_n)))\neq \emptyset$. Thus, there exists $x_n'\in B(x_n,r_n)$ such that $\pi(x_n')\in \operatorname{int}(\pi(B(x_n,r_n)))\subset\operatorname{int}(\pi(Z))$. Since $x_n'\in B(x_n,r_n)$ and $\lim_{n\to \infty}x_n=x$, one has $\lim_{n\to \infty}x_n'=x$, and hence $\lim_{n\to \infty}\pi(x_n')=\pi(x)=y.$ This implies that $y\in \overline{\operatorname{int}(\pi(Z))}$, which finishes the proof.
Inspired by \cite[Proposition 3.7]{HLSY}, we can give the proof of Theorem \ref{thm:ms=>it}.
\begin{proof} [Proof of Theorem \ref{thm:ms=>it}]
It suffices to prove $MS^e_n(X,T)\subset IT_n^e(X,T)$. Given $\textbf{x}=(x_1,\ldots,x_n)\in MS^e_n(X,T)$, we will show that $\textbf{x}\in IT^e_n(X,T).$
Since the minimal t.d.s. $(X_{eq},T_{eq})$ is the maximal equicontinuous factor of $(X,T)$, $X_{eq}$ can be viewed as a compact metric group with a $T_{eq}$-invariant metric $d_{eq}$. Let $\mu$ be the left Haar probability measure of $X_{eq}$, which is also the unique $T_{eq}$-invariant probability measure of $(X_{eq},T_{eq})$, and let $\pi:(X,T)\to (X_{eq},T_{eq})$ denote the factor map. Let $$X_1=\{x\in X: \#\{\pi^{-1}(\pi(x))\}=1\}, \quad Y_1=\pi(X_1).$$ Then $Y_1$ is a dense $G_\delta$-set, as $\pi$ is almost one to one.
Fix $\epsilon>0$ with $\epsilon\le\frac 14 \min_{1\le i\neq j\le n}d(x_i,x_j)$; since every product neighbourhood of $\textbf{x}$ contains a product of closed balls of such a radius, it suffices to consider neighbourhoods of this form. Let $U_i=\overline{B_\epsilon(x_i)}$ for $1\le i\le n$. Then $U_i$ is proper for each $1\le i\le n$. We will show that $(U_1,U_2,\ldots,U_n)$ has an infinite independence set, i.e. there is some infinite set $I\subseteq \mathbb{Z}_+$ such that $$\bigcap_{k\in I}T^{-k}U_{a(k)}\neq \emptyset, \ \text{for all} \ a\in \{1,2,\ldots,n\}^I.$$
Let $V_i=\pi(U_i)$ for $1\le i\le n$. By Lemma \ref{lem:proper one to one}, $V_i$ is proper for each $i\in \{1,2,\ldots,n\}$. We claim that ${\rm int }(V_i)\cap {\rm int}(V_j)=\emptyset$ for all $1\le i\neq j\le n$. In fact, if there is some $1\le i\neq j\le n$ such that ${\rm int }(V_i)\cap {\rm int}(V_j)\not=\emptyset$, then $${\rm int }(V_i)\cap {\rm int}(V_j)\cap Y_1\not=\emptyset,$$ as $Y_1$ is a dense $G_\delta$-set. Let $y\in {\rm int }(V_i)\cap {\rm int}(V_j)\cap Y_1$. Then there are $u_i\in U_i$ and $u_j\in U_j$ such that $y=\pi(u_i)=\pi(u_j)$; since $U_i\cap U_j=\emptyset$, the points $u_i$ and $u_j$ are distinct, which contradicts $y\in Y_1$.
Choose a nonempty open set $W_m\subset X$ with $\operatorname{diam}(\pi(W_m))<\frac{1}{m}$ for each $m\in \mathbb{N}$. Since $\textbf{x}\in MS^e_n(X,T)$, there exist $\delta>0$ and $\textbf{x}^m=(x_1^m, x_2^m,\cdots, x_n^m)\in W_m\times \dots \times W_m$ such that $\overline{D}(N(\textbf{x}^m, U_1\times U_2\times \cdots \times U_n))\ge \delta.$ Let $\textbf{y}^m=(y_1^m,y_2^m,\cdots,y_n^m)=\pi^{(n)} (\textbf{x}^m)$. Then $$\overline{D}(N(\textbf{y}^m, V_1\times V_2\times \cdots \times V_n))\ge \delta.$$ For $p\in N(\textbf{y}^m, V_1\times V_2\times \cdots \times V_n)$, $T_{eq}^py_i^m\in V_i$ for $1\le i\le n$. As $\operatorname{diam}(\pi(W_m))<\frac{1}{m}$, $d_{eq}(y_1^m,y_i^m)<\frac{1}{m}$ for $1\le i\le n$. Note that $$d_{eq}(T_{eq}^py_1^m,T_{eq}^py_i^m)=d_{eq}(y_1^m,y_i^m)<\frac{1}{m}\quad\text{ for }1\le i\le n.$$ Let $V_i^m=B_{\frac{1}{m}}(V_i)=\{y\in X_{eq}:d_{eq}(y,V_i)<\frac{1}{m}\}$. Then $T_{eq}^py_1^m\in \cap_{i=1}^n V_i^m$ and $$\overline{D}(N(y_1^m, \cap_{i=1}^n V_i^m))\ge \delta.$$ Since $(X_{eq},T_{eq})$ is uniquely ergodic with respect to the measure $\mu$, $\mu(\cap_{i=1}^n V_i^m)\ge \delta$. Letting $m\to \infty$, one has $\mu(\cap_{i=1}^n V_i)\ge \delta>0.$
By Proposition \ref{independent sets}, there is an infinite $I\subseteq \mathbb{Z}_+$ such that for all $a\in\{1,2,\ldots,n\}^{I}$ there exists $y_0\in Y_1$ with the property that \begin{equation*} y_0\in \bigcap_{k\in I} T_{eq}^{-k} {\rm int}(V_{a(k)}). \end{equation*}
Write $\pi^{-1}(y_0)=\{x_0\}$. Then \begin{equation*} x_0\in \bigcap_{k\in I} T^{-k} U_{a(k)}, \end{equation*} which implies that $(x_1,x_2,\cdots,x_n)\in IT_n(X,T)$; since the points $x_1,\ldots,x_n$ are pairwise distinct, it follows that $(x_1,x_2,\cdots,x_n)\in IT^e_n(X,T)$. \end{proof}
\section*{Acknowledgments}
We thank the referee for a very careful reading and many useful comments, which helped us to improve the paper. Research of Jie Li is supported by NNSF of China (Grant No. 12031019); Chunlin Liu is partially supported by NNSF of China (Grant No. 12090012); Siming Tu is supported by NNSF of China (Grant No. 11801584 and No. 12171175); and Tao Yu is supported by NNSF of China (Grant No. 12001354) and STU Scientific Research Foundation for Talents (Grant No. NTF19047).
\begin{appendix} \section{Proof of Lemma \ref{0726}}\label{APPENDIX} In this section, we give the proof of Lemma \ref{0726}.
\begin{lem}\label{0724} For an m.p.s. $(X,\mathcal{B}_X,\mu,T)$ with Kronecker factor $\mathcal{K}_\mu$, $n\in\mathbb{N}$ and $f_i\in L^\infty(X,\mu)$, $i=1,\dotsc,n$, we have, for $\mu^{(n)}$-a.e. $(x_1,\dotsc,x_n)\in X^{(n)}$, \[ \lim_{M \to \infty} \dfrac{1}{M} \sum_{m=1}^M \prod_{i=1}^{n} f_i( T^m x_i) =
\lim_{M \to \infty} \dfrac{1}{M} \sum_{m=1}^M \prod_{i=1}^{n}\mathbb{E}(f_i | \mathcal{K}_\mu)(T^m x_i). \] \end{lem} \begin{proof} Let $F(\textbf{x})=F(x_1,\dots,x_n)=\prod_{i=1}^{n} f_i(x_i)$ for $\textbf{x}=(x_1,\dotsc,x_n)\in X^{(n)}$. On the one hand, by the Birkhoff ergodic theorem, for $\mu^{(n)}$-a.e. $\textbf{x}\in X^{(n)}$, \[\lim_{M \to \infty} \dfrac{1}{M} \sum_{m=1}^M \prod_{i=1}^{n} f_i( T^m x_i) =\lim_{M\to\infty}\dfrac{1}{M} \sum_{m=1}^{M} F\left(\left(T^{(n)}\right)^m\textbf{x}\right)=
\mathbb{E}_{\mu^{(n)}}(\prod_{i=1}^{n}f_i|I_{\mu^{(n)}})(\textbf{x}),\] where $I_{\mu^{(n)}}=\{A\in \mathcal{B}^{(n)}_X: T^{(n)}A=A\}$.
On the other hand, following \cite[Lemma 4.4]{HMY04}, we have $(\mathcal{K}_\mu)^{\bigotimes n}=\mathcal{K}_{\mu^{(n)}}$. Then for $\textbf{x}=(x_1,\dotsc,x_n)\in X^{(n)}$,
\[\prod_{i=1}^{n}\mathbb{E}_{\mu}(f_i|\mathcal{K}_\mu)(x_i)
=\mathbb{E}_{\mu^{(n)}}(\prod_{i=1}^{n}f_i|(\mathcal{K}_\mu)^{\bigotimes n})(\textbf{x})
=\mathbb{E}_{\mu^{(n)}}(\prod_{i=1}^{n}f_i|\mathcal{K}_{\mu^{(n)}})(\textbf{x}).\] This implies that \begin{align*}
\lim_{M\to\infty}\dfrac{1}{M} \sum_{m=1}^{M} \prod_{i=1}^{n} \mathbb{E}_{\mu}(f_i|\mathcal{K}_\mu)(T^mx_i)
= & \mathbb{E}_{\mu^{(n)}}(\prod_{i=1}^{n}\mathbb{E}_{\mu}(f_i|\mathcal{K}_\mu)|I_{\mu^{(n)}})(\textbf{x})\\
= &\mathbb{E}_{\mu^{(n)}}(\mathbb{E}_{\mu^{(n)}}(\prod_{i=1}^{n}f_i|\mathcal{K}_{\mu^{(n)}})|I_{\mu^{(n)}})(\textbf{x})\\
= &\mathbb{E}_{\mu^{(n)}}(\prod_{i=1}^{n}f_i|I_{\mu^{(n)}})(\textbf{x}), \end{align*} where the last equality follows from the fact that $I_{\mu^{(n)}}\subset\mathcal{K}_{\mu^{(n)}}.$ \end{proof}
\begin{lem}\label{0725} Let $(Z,\B_Z,\nu,R)$ be a minimal rotation on a compact abelian group. Then for any $n\in\mathbb{N}$ and $\phi_i\in L^\infty(Z,\nu)$, $i=1,\dotsc,n$, \[\lim_{M\to\infty}\dfrac{1}{M} \sum_{m=1}^{M} \prod_{i=1}^{n} \phi_i (R^mz_i) = \int_Z \prod_{i=1}^{n} \phi_i (z_i+z)d \nu(z) \quad\text{ for }\nu^{(n)}\text{-a.e. }(z_1,\ldots, z_n). \] \end{lem} \begin{proof} Since $(Z,\B_Z,\nu,R)$ is a minimal rotation on a compact abelian group, there exists $a\in Z$ such that $R^mz=z+ma$ for any $z\in Z$.
Let $F(z)=\prod_{i=1}^{n} \phi_i (z_i+z)$. Then $F(R^me_Z)=F(ma)$, where $e_Z$ is the identity element of $Z$. Since $(Z,R)$ is minimal and equicontinuous, $(Z,\B_Z,\nu,R)$ is uniquely ergodic.
By an approximation argument, we have, for $\nu^{(n)}$-a.e. $(z_1,\ldots, z_n)$, \begin{align*} \lim_{M\to\infty}\dfrac{1}{M} \sum_{m=1}^{M}\prod_{i=1}^{n} \phi_i(R^mz_i) =&\lim_{M\to\infty}\dfrac{1}{M} \sum_{m=1}^{M}\prod_{i=1}^{n} \phi_i (z_i+ma)\\ =&\lim_{M\to\infty}\dfrac{1}{M} \sum_{m=1}^{M} F(ma) =\lim_{M\to\infty}\dfrac{1}{M} \sum_{m=1}^{M} F(R^me_Z)\\ =&\int_Z F(z) d \nu(z) = \int_Z \prod_{i=1}^{n} \phi_i (z_i+z) d \nu(z). \end{align*} The proof is completed. \end{proof}
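As a concrete illustration of Lemma \ref{0725} (recorded only for the reader's convenience), let each $\phi_i=\chi_i$ be a character of $Z$ and put $\chi=\chi_1\cdots\chi_n$. Then for every $(z_1,\ldots,z_n)$,
\[ \dfrac{1}{M}\sum_{m=1}^{M}\prod_{i=1}^{n}\chi_i(z_i+ma)=\prod_{i=1}^{n}\chi_i(z_i)\cdot\dfrac{1}{M}\sum_{m=1}^{M}\chi(a)^m \longrightarrow \begin{cases} \prod_{i=1}^{n}\chi_i(z_i) & \text{if }\chi(a)=1,\\ 0 & \text{otherwise,}\end{cases} \]
while $\int_Z \prod_{i=1}^{n}\chi_i(z_i+z)\,d\nu(z)=\prod_{i=1}^{n}\chi_i(z_i)\int_Z\chi(z)\,d\nu(z)$ equals $\prod_{i=1}^{n}\chi_i(z_i)$ if $\chi$ is trivial and $0$ otherwise. Since $a$ generates a dense subgroup of $Z$ by minimality, $\chi(a)=1$ exactly when $\chi$ is trivial, so the two sides agree for characters and, by linearity, for trigonometric polynomials.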
\begin{proof}[Proof of Lemma \ref{0726}.] Let $z \mapsto \eta_z$ be the disintegration of $\mu$ over the continuous factor map $\pi$ from $(X,\B_X,\mu,T)$ to its Kronecker factor $(Z,\B_Z,\nu,R)$. For $n\in\mathbb{N}$,
define
\begin{equation*}
\label{eqn:lambda_2_dim_definition_for_section_2_is_this_unqiue_yet}
\lambda^n_{\textbf{x}} = \int_Z \eta_{z + \pi(x_1)} \times\dots\times \eta_{z+\pi(x_n)} d\nu(z)
\end{equation*}
for every $\textbf{x}=(x_1,\dotsc,x_n) \in X^{(n)}$.
We first note that for each $\textbf{x} \in X^{(n)}$ the measures $\eta_{z + \pi(x_i)}$ are defined for $\nu$-a.e. $z \in Z$, and therefore $\lambda^n_{\textbf{x}}$ is well-defined.
To prove that $\textbf{x} \mapsto \lambda^n_\textbf{x}$ is continuous, first note that uniform continuity implies that the map
\[
(u_1,\dotsc,u_n) \mapsto \int_Z \prod_{i=1}^{n}f_i(z + u_i) d\nu(z)
\]
from $Z^{(n)}$ to $\mathbb{C}$ is continuous whenever $f_i \colon Z \to \mathbb{C}$ are continuous.
An approximation argument then gives continuity for every $f_i \in L^\infty(Z,\nu)$.
In particular,
\[
\textbf{x} \mapsto \int_Z \prod_{i=1}^{n}\mathbb{E}(f_i \mid \B_Z)(z + \pi(x_i)) d\nu(z)
\]
from $X^{(n)}$ to $\mathbb{C}$ is continuous whenever $f_i \in L^\infty(X,\mu)$, which in turn implies continuity of $\textbf{x} \mapsto \lambda_{\textbf{x}}^n$.
To prove that $\textbf{x}\mapsto \lambda_{\textbf{x}}^n$ is an ergodic decomposition we first calculate
\begin{equation*}
\int_{X^{(n)}} \int_Z \prod_{i=1}^{n}\eta_{z + \pi(x_i)}d \nu(z) d \mu^{(n)}(\textbf{x}) =\int_Z \prod_{i=1}^{n}\int_X \eta_{z + \pi(x_i)} d \mu(x_i) d \nu(z),
\end{equation*}
which is equal to $\mu^{(n)}$ because all inner integrals are equal to $\mu$.
We conclude that
\begin{equation*}
\label{eq_continuousergodicdecompositionofmu1}
\mu^{(n)} = \int_{X^{(n)}}\lambda^n_\textbf{x} d \mu^{(n)}(\textbf{x}),
\end{equation*}
which shows $\textbf{x} \mapsto \lambda^n_\textbf{x}$ is a disintegration of $\mu^{(n)}$.
We are left with verifying that
\[
\int_{X^{(n)}} F d \lambda^n_\textbf{x} = \mathbb{E}_{\mu^{(n)}}(F \mid I_{\mu^{(n)}})(\textbf{x})
\]
for $\mu^{(n)}$-a.e. $\textbf{x}\in X^{(n)}$ whenever $F \colon X^{(n)} \to \mathbb{C}$ is measurable and bounded.
Recall that $I_{\mu^{(n)}}$ denotes the $\sigma$-algebra of $T^{(n)}$-invariant sets.
Fix such an $F$.
It follows from the pointwise ergodic theorem that
\[
\lim_{M \to \infty} \dfrac{1}{M} \sum_{m=1}^M F( T^m x_1,\dotsc, T^m x_n)
=
\mathbb{E}_{\mu^{(n)}}(F \mid I_{\mu^{(n)}})(\textbf{x})
\]
for $\mu^{(n)}$-a.e. $\textbf{x}\in X^{(n)}$.
We therefore wish to prove that
\[
\int_{X^{(n)}} F d \lambda^n_\textbf{x} = \lim_{M \to \infty} \dfrac{1}{M} \sum_{m=1}^M F( T^m x_1,\dotsc, T^m x_n)
\]
holds for $\mu^{(n)}$-a.e. $\textbf{x} \in X^{(n)}$.
By an approximation argument it suffices to verify that
\begin{equation*}
\label{eqn:proving_ergodic_kk}
\int_{X^{(n)}} f_1 \otimes\dots\otimes f_n d \lambda^n_\textbf{x} = \lim_{M \to \infty} \dfrac{1}{M} \sum_{m=1}^M \prod_{i=1}^{n} f_i( T^m x_i)
\end{equation*}
holds for $\mu^{(n)}$-a.e. $\textbf{x} \in X^{(n)}$ whenever $f_i$ belongs to $L^\infty(X,\mu)$ for $i=1,...,n$.
By Lemma \ref{0724},
\[
\lim_{M \to \infty} \dfrac{1}{M} \sum_{m=1}^M \prod_{i=1}^{n} f_i( T^m x_i)
=
\lim_{M \to \infty} \dfrac{1}{M} \sum_{m=1}^M \prod_{i=1}^{n}\mathbb{E}(f_i \mid \B_Z)(T^m x_i)
\]
for $\mu^{(n)}$-a.e. $\textbf{x}\in X^{(n)}$.
By Lemma \ref{0725}, for every $\phi_i$ in $L^\infty(Z,\nu)$,
\[\lim_{M \to \infty}\dfrac{1}{M} \sum_{m=1}^{M} \prod_{i=1}^{n} \phi_i (R^mz_i) = \int_Z \prod_{i=1}^{n} \phi_i (z_i+z)d \nu(z)\] for $\nu^{(n)}$-a.e. $\textbf{z}\in Z^{(n)}$.
Taking $\phi_i = \mathbb{E}(f_i \mid \B_Z)$ gives
\[
\lim_{M \to \infty} \dfrac{1}{M} \sum_{m=1}^M \prod_{i=1}^{n}\mathbb{E}(f_i \mid \B_Z)(T^m x_i)
=
\int_{X^{(n)}} f_1 \otimes \dots \otimes f_n d \lambda^n_{\textbf{x}}
\]
for $\mu^{(n)}$-a.e. $\textbf{x}\in X^{(n)}$. \end{proof}
\end{appendix}
\end{document}
\begin{document}
\begin{abstract} This is the second in a pair of papers developing a framework to apply logarithmic methods in the study of stable maps and singular curves of genus $1$. This volume focuses on logarithmic Gromov--Witten theory and tropical geometry. We construct a logarithmically nonsingular and proper moduli space of genus $1$ curves mapping to any toric variety. The space is a birational modification of the principal component of the Abramovich--Chen--Gross--Siebert space of logarithmic stable maps and produces logarithmic analogues of Vakil and Zinger's genus one reduced Gromov--Witten theory. We describe the non-archimedean analytic skeleton of this moduli space and, as a consequence, obtain a full resolution to the tropical realizability problem in genus $1$. \end{abstract}
\maketitle
\setcounter{tocdepth}{1} \tableofcontents
\section{Introduction}
This paper is the second in a pair, exploring the interplay between tropical geometry, logarithmic moduli theory, stable maps, and moduli spaces of genus~$1$ curves. In the first volume, we used this interplay to construct new nonsingular moduli spaces compactifying the space of elliptic curves in projective space via \textbf{radially aligned} stable maps and quasimaps. In this paper, we focus on applications to logarithmic Gromov--Witten theory and tropical geometry.
\noindent {\bf I. Realizability of tropical curves.} We give a complete characterization of genus $1$ tropical maps that can be realized as tropicalizations of genus $1$ curves mapping to tori, completing a study initiated in Speyer's thesis. We show that a combinatorial condition identified by Baker--Payne--Rabinoff is always sufficient. Our proof is independent of these previous results, and is based on the geometry of logarithmic maps.
\noindent {\bf II. Logarithmic stable maps.} We construct a toroidal moduli space parametrizing maps from pointed genus $1$ curves to any toric variety with prescribed contact orders along the toric boundary. This is a desingularization of the principal component of the space of logarithmic stable maps. The boundary complex of this compactification is identified as a space of realizable tropical maps.
\subsection{Superabundant tropical geometries} The realization problem is the crux of the relationship between tropical geometry and algebraic geometry, and is unavoidable in enumerative applications. Given an abstract tropical curve\footnote{The first author continues his efforts to popularize Dan Abramovich's convention that algebraic curves be denoted by $C$, $\mathscr C$, while tropical curves be denoted $\plC$, approximating their appearance in nature.} $\plC$ of genus $g$ and a balanced piecewise linear map \[ F: \plC\to \RR^r, \] we ask, \textit{does there exist a non-archimedean field $K$ extending $\CC$, a smooth algebraic curve $C$ over $K$ and a map \[ \varphi: C\to \mathbf G_m^r, \] such that $\varphi^{\mathrm{trop}}$ coincides with $F$?}
{ When $\plC$ has genus $0$, the only obstruction to lifting is the local balancing condition, and all tropical curves satisfying that condition are realizable. This is reflected in the logarithmic smoothness of the moduli space of genus $0$ logarithmic maps~\cite{NS06,R15b,Tyo12}. In genus $1$, nonlocal obstructions already appear } for maps $\plC\to \RR$. The obstructions appear when the circuit of $\plC$ is contained in a proper affine subspace of $\RR^r$. Speyer discovered a sufficient condition for realizability in 2005~\cite{Sp-thesis,Sp07}. A weaker necessary condition was identified in~\cite[Section 6]{BPR16}. We provide a characterization of the realizable tropical curves in genus $1$ in Theorem~\ref{thm: tropical} in terms of the geometry of the skeleton of an analytic space of maps.
Let $\Gamma$ be a marked tropical curve of genus $1$ with a unique vertex and $n$ half-edges. Fix a balanced map $\Gamma\to \RR^r$. Let ${\cal M}_{\Gamma}(\mathbf G_m^r)$ be the moduli space of maps \[ \varphi: C\to \mathbf G_m^r, \] where $C$ is a non-compact smooth algebraic curve of genus $1$ with $n$ punctures, and the vanishing orders at infinity of these punctures are specified by the slopes along the edges of $\Gamma$ in $\RR^r$. Let $W_\Gamma(\RR^r)$ be the corresponding set of tropical maps \[ \plC\to \RR^r \] whose recession fan is given by $\Gamma\to \RR^r$, and satisfy the \textbf{well-spacedness condition}, as defined in Section~\ref{sec: tropical-moduli}.\footnote{We caution the reader that the meaning attributed by Speyer to well-spacedness is stronger than the one we use here; see Warning~\ref{warning: diff-defs}.} This set can be given the structure of a generalized cone complex.
Given a map $\varphi:\mathscr C\to \mathbf G_m^r$ over a valued field, one obtains a balanced piecewise linear map from a Berkovich skeleton $\plC$ of the punctured general fiber curve $\mathscr C_\eta$ to $\RR^r$, i.e., to the skeleton of the torus~\cite{R16}. This piecewise linear map is the \textbf{tropicalization} of $\varphi$ and is denoted $\varphi^{\mathrm{trop}}$.
\begin{customthm}{A}\label{thm: tropical} There exists a continuous and proper tropicalization map \[ \mathrm{trop}: {\cal M}_\Gamma^{\mathrm{an}}(\mathbf G_m^r)\to W_\Gamma(\RR^r) \] sending a map $[\varphi]$ over a valued field to its tropicalization. There is a generalized cone complex $P_\Gamma(\RR^r)$ and a finite morphism \[ \mathrm{trop}_{\mathfrak S}: P_\Gamma(\RR^r) \to W_\Gamma(\RR^r), \] which is an isomorphism upon restriction to each cone of the source. The degree of this finite morphism is explicitly computable and the complex $P_{\Gamma}(\RR^r)$ is a skeleton of the analytic moduli space ${\cal M}_\Gamma^{\mathrm{an}}(\mathbf G_m^r)$. \end{customthm}
{ The theorem is proved as Theorem~\ref{thm:nonarch} of the text. }
The statement that the tropicalization has a finite cover that is a skeleton is a toroidal version of the {sch\"on} condition, frequently cited in tropical geometry. The skeleton $P_\Gamma(\RR^r)$ functions as a {parametrizing complex} for the tropicalization, as in work of Helm and Katz~\cite{HK12,Tev07}.
\subsection{Logarithmic stable maps} Our tropical investigation leads naturally to an understanding of the geometry of the space of logarithmic stable maps to toric varieties in genus $1$. The open moduli problem we consider is that of maps \[ (C,p_1,\ldots, p_m) \to Z, \] where $C$ is a smooth pointed curve of genus $1$, the target $Z$ is a toric variety, and the contact orders of the points $p_i$ with the boundary divisors on $Z$ are fixed. There is a natural modular compactification of this space via the theory of logarithmic stable maps, due to Abramovich--Chen and Gross--Siebert~\cite{AC11,Che10,GS13}. When the genus of the source curve is $0$, the resulting moduli space is logarithmically smooth, but in genus $1$ it can be highly singular and non-equidimensional. We use the insights of Theorem~\ref{thm: tropical} to construct a logarithmically smooth modular compactification, in parallel with the desingularization of the ordinary stable maps space due to Vakil and Zinger~\cite{RSW17A,VZ08}.
Let $Z$ be a proper complex toric variety and $\mathscr L_\Gamma(Z)$ the moduli space of genus $1$ logarithmic stable maps to $Z$ with discrete data $\Gamma$, i.e., $\Gamma$ records the genus and the contact orders of the marked points with the toric boundary of $Z$. Let $\mathscr L_\Gamma^\circ(Z)$ be the locus parametrizing maps {of positive degree} from smooth domains, and let $\overnorm{\mathscr L_\Gamma^\circ}(Z)$ be the closure.
\begin{customthm}{B}\label{thm: toric-targets} Consider the following data as a moduli problem on logarithmic schemes: \begin{enumerate} \item a family of $n$-marked, radially aligned logarithmic curves $C\to S$, \item a logarithmic stable map $f:C\to Z$ with contact order $\Gamma$, \end{enumerate} such that the map $f$ is \textbf{well-spaced} (see Definition~\ref{def:well-spaced}). This moduli problem is represented by a proper and logarithmically smooth stack with logarithmic structure $\mathcal W_\Gamma(Z)$ and the natural morphism \[ \mathcal W_\Gamma(Z)\to \overnorm{\mathscr L_\Gamma^\circ}(Z) \] is proper and birational. \end{customthm}
{See Theorem~\ref{thm: realizability} for the proof.}
The well-spacedness property above is efficiently stated in tropical language, and this is done later in the paper. At a first approximation it may be thought of as forcing a factorization property after composing $C\to Z$ with any rational map $Z\dashrightarrow \mathbf{P}^1$ induced by a character. These logarithmic maps are precisely the ones that have \textbf{well-spaced} tropicalizations. A prototype for practical calculations on this space may be found in~\cite{LR15}.
\subsection{Motivation for the construction} The combinatorics of logarithmic stable maps are essentially part of tropical geometry. Indeed, if the variety $Z$ is a toric variety, taken with its toric boundary, the analytification of the moduli space of logarithmic maps maps continuously to a polyhedral complex parametrizing tropical curves~\cite{R16}. The connection is especially transparent in genus $0$, see~\cite{R15b}. In genus $1$, the tropical realizability problem can be used to predict the desingularization above, as we now explain. The moduli space $\mathscr L_\Gamma(Z)$ of genus $1$ logarithmic stable maps is highly singular, however, it maps naturally to a logarithmically smooth Artin stack. More precisely, if $\mathscr A_Z = [Z/T]$ is the Artin fan of $Z$ obtained by performing a stack quotient on $Z$ by its dense torus, there is a natural map \[ \mathscr L_\Gamma(Z)\to \mathscr L_\Gamma(\mathscr A_Z), \] where the latter is the space of prestable logarithmic maps to the Artin fan. This space is a logarithmically smooth Artin stack~\cite{AW}. Moreover, the toroidal skeleton of this space is naturally identified with the moduli space of all (not necessarily realizable {or even balanced}) tropical maps from genus $1$ curves to the fan of $Z$, see~\cite{R16}. The locus of realizable curves is a sublocus in the moduli space. After subdividing this cone complex, this sublocus is supported on a subcomplex. This subdivision induces a birational modification of $\mathscr L_\Gamma(\mathscr A_Z)$, and thus a modification of $\mathscr L_\Gamma(Z)$. This modification can naturally be identified with the moduli of well-spaced logarithmic maps $\mathcal W_\Gamma(Z)$ defined above. The radial alignments developed in~\cite{RSW17A} and recalled in Section~\ref{sec: prelims-from-prequel} give rise to the modular interpretation.
The construction of $\mathcal W_\Gamma(Z)$ is not a formal lifting of our previous results on ordinary stable maps to the logarithmic category~\cite{RSW17A}. Given an absolute genus $1$ stable map $[C\to \mathbf{P}^r]$, if no genus $1$ subcurve is contracted, then $[C\to \mathbf{P}^r]$ is a smooth point of the moduli space. However, for a toric variety $Z$ and a genus $1$ logarithmic map $[C\to Z]$, the deformations of the map can be obstructed even if no component of $C$ is contracted. This is true even if $Z= \mathbf{P}^r$ with its toric logarithmic structure. This behaviour is akin to the genus $1$ absolute stable maps theory for semipositive targets. While the tangent bundle of $\mathbf{P}^r$ is ample, the logarithmic tangent bundle of a toric variety is trivial. This allows for a larger space of obstructions to deforming genus $1$ logarithmic maps than in the absolute theory. We overcome this by identifying and forcing the stronger factorization property above.
\subsection{Tropical enumerative geometry and realizability} The realizability problem for tropical curves is a combinatorial shadow of the problem of characterizing the closure of the main component in the space of logarithmic maps. The difficulty of the problem has limited tropical enumerative techniques to low target dimensions~\cite{BBM14,CJM1,CMR14a, Mi03} or to genus $0$ curves~\cite{Gro14,Gro15,MR16,NS06,R15b}.
In the higher genus, higher dimensional situation, there are two directions in which one may generalize the picture above. The first is to develop a systematic method to decompose logarithmic Gromov--Witten invariants, as a sum of virtual invariants over tropical curves~\cite{ACGS15,Par11,R19}. The second is to analyze the tropical lifting problem and produce a ``reduced'' curve counting theory that captures the principal component contribution to the virtual count. This paper addresses the second of these in genus one. The realizability theorem in genus $1$ allows us to decompose these reduced invariants of any toric variety over tropical curves. The degeneration formula for these invariants is work that we hope to return to. Note that the analogous problem for smooth pairs has recently been treated in~\cite{BNR19}.
There have been a number of interesting partial results on tropical realizability in the last decade, thanks to the efforts of many~\cite{BPR16,CFPU,JR17,KatLift,Mi03,Ni09,NS06,R16,R15a,Sp07}. The genus $1$ story alone has seen heavy interest. Speyer identified the sufficiency of a strong form of well-spacedness condition for superabundant genus $1$ tropical curves using Tate's uniformization theory. Using the group law on the analytification of an elliptic curve, Baker--Payne--Rabinoff show that a weaker condition was necessary. The existence of genus $1$ tropical curves which failed Speyer's condition but were nonetheless realizable was established in~\cite{R16}.
In higher genus, very few results are known. That non-superabundant higher genus tropical curves are realizable was established by Cheung--Fantini--Park--Ulirsch~\cite{CFPU}, and limits of realizable curves can be shown to be realizable~\cite{R15a,R16}. Katz showed that the logarithmic tangent/obstruction complex for degenerate maps gives rise to necessary combinatorial conditions for realizability in higher genus, including a version of well-spacedness~\cite{KatLift}. These methods do not prove sufficiency in any cases. A sufficient condition for realizability for some superabundant chain of cycles geometries has recently been shown to hold and used to establish new results in Brill--Noether theory~\cite{JR17}.
\subsection{User's guide} We have written this paper so it may be read independently from the prequel, in which the space of ordinary stable maps to $\mathbf P^r$ was considered. In Section~\ref{sec: prelims-from-prequel} we recall the preliminary results on radial alignments and their contractions from~\cite{RSW17A}. The moduli space of well-spaced logarithmic maps is constructed in Section~\ref{sec: well-spaced-logmaps} and the logarithmic unobstructedness appears as Theorem~\ref{thm:toric-log-smooth}. The tropical well-spacedness condition is discussed and defined precisely in Section~\ref{sec: well-spacedness}. Finally, tropical realizability results are restated in Theorem~\ref{thm: realizability} and proved in Section~\ref{sec: realizability-proof}.
\subsection*{Funding} D.R. was partially supported by NSF grant DMS-1128155 (Institute for Advanced Study) and J.W. was partially supported by NSA Young Investigator's Grants H98230-14-1-0107 and H98230-16-1-0329.
\section{Preliminaries}
In this section, we recall some preliminaries on singularities of genus $1$ and on logarithmic and tropical geometry. There is some overlap between this section and the preliminary material appearing in the prequel to this article~\cite{RSW17A}, but we opt to include it for a more self-contained presentation.
\subsection{Genus 1 singularities}\label{sec: genus-1-singularities} Let $C$ be a reduced curve over an algebraically closed field $k$. For an isolated curve singularity $(C,p)$ with normalization $\pi: (\widetilde C,p_1,\ldots, p_m)\to (C,p)$, recall that $m$, the cardinality of $\pi^{-1} p$, is called the \textbf{number of branches of the singularity}. The \textbf{$\delta$-invariant} is defined as \[ \delta: = \dim_k \bigl( \pi_\star (\mathscr O_{\widetilde C})/\mathscr O_C \bigr). \]
Based on these two invariants, one defines the genus of $(C,p)$ as \[ g = \delta-m+1. \]
We will frequently make use of the \textbf{seminormalization} of $(C,p)$ in our arguments. The \textbf{seminormalization} is a partial resolution of $(C,p)$ to a singularity of genus $0$ that is homeomorphic to $(C,p)$. Explicitly, equip the underlying topological space of $(C,p)$ with the subring $\mathscr A$ of regular functions on the normalization $\widetilde C$ that are well-defined on the underlying topological space of $C$. In particular, there are $g$ additional conditions required for a function in $\mathscr A$ to descend to $(C,p)$, i.e., \begin{equation*} g = \dim_k \bigl( \mathscr A / \mathscr O_C \bigr) . \end{equation*}
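For instance (a standard example, included only to illustrate these invariants), consider the cusp $(C,p)=\mathbf{V}(y^2-x^3)$ at the origin. Its normalization is $\widetilde C=\mathbf A^1$ with $t\mapsto (t^2,t^3)$, so $m=1$, and
\[ \pi_\star(\mathscr O_{\widetilde C})/\mathscr O_C \cong k[t]/k[t^2,t^3] \]
is spanned by the class of $t$, so $\delta=1$ and $g=\delta-m+1=1$. Since the cusp is unibranch, the seminormalization coincides with the normalization, and $\mathscr A/\mathscr O_C$ is again spanned by the class of $t$, in agreement with the displayed formula. By contrast, the node $\mathbf V(y^2-x^2)$ has $\delta=1$ and $m=2$, hence genus $0$.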
Let $E$ be a proper Gorenstein curve of genus~$1$, smooth away from a unique genus $1$ singularity. Let $\nu : F \to E$ be the seminormalization and let $\mu : G \to F$ be the normalization. We have inclusions: \begin{gather*} \mathscr O_E \subset \nu_\star \mathscr O_F \subset \nu_\star \mu_\star \mathscr O_G \subset K \\ J \supset \omega_E \supset \nu_\star \omega_F \supset \nu_\star \mu_\star \omega_G \end{gather*} Here $K$ is the sheaf of meromorphic functions on $E$ and $J$ is the sheaf of meromorphic differentials. For each $X = E, F, G$, the pairs $\omega_X$ and $\mathscr O_X$ are dual to one another with respect to the residue pairing $K \otimes J \to k$, in the sense that each is the annihilator of the other~\cite[Proposition~1.16~(ii)]{AK}.
Consider the exact sequence~\eqref{eqn:seminorm-seq}: \begin{equation} \label{eqn:seminorm-seq} 0 \to \mathscr O_E \to \nu_\star \mathscr O_F \to \nu_\star(\mathscr O_F) / \mathscr O_E \to 0 \end{equation} In the long exact cohomology sequence~\eqref{eqn:seminorm-les} \begin{equation} \label{eqn:seminorm-les} 0 \to H^0(E, \mathscr O_E) \to H^0(F, \mathscr O_F) \to \nu_\star(\mathscr O_F) / \mathscr O_E \to H^1(E, \mathscr O_E) \to H^1(F, \mathscr O_F) \end{equation} the map $H^0(E, \mathscr O_E) \to H^0(F, \mathscr O_F)$ is an isomorphism because both $E$ and $F$ are proper, connected, and reduced; furthermore $H^1(F, \mathscr O_F) = 0$ since $F$ has genus~$0$. By Serre duality, $H^1(E, \mathscr O_E)$ is dual to $H^0(E, \omega_E)$. Since both are $1$-dimensional, the choice of a nonzero $\alpha \in H^0(E, \omega_E)$ induces an isomorphism $H^1(E, \mathscr O_E) \to k$. The composition \begin{equation*} \nu_\star(\mathscr O_F) / \mathscr O_E \to H^1(E, \mathscr O_E) \to k \end{equation*} may be identified with the residue pairing, sending $f \bmod{\mathscr O_E}$ to $\res f\alpha$. This follows, for example, by the construction of the dualizing sheaf in~\cite[Remark~1.9 and Remark~1.12]{AK}.
We know that $\omega_F / \mu_\star(\omega_G)$ is spanned by the differentials \begin{equation} \label{eqn:omega-F} \frac{dx_i}{x_i} - \frac{dx_j}{x_j} \end{equation} where the $x_i$ are local coordinates of the branches of $E$ at the singular point. As $\omega_E / \nu_\star(\omega_F)$ is $1$-dimensional, $\omega_E$ is generated relative to $\nu_\star (\omega_F)$ by a differential of the following form: \begin{equation} \label{eqn:omega-E} \sum_i \frac{c_i d x_i}{x_i^2} + \frac{c' dx_1}{x_1} \end{equation} If $f \in \mathscr O_E$ has the expansion $f(0) + b_i x_i + \cdots$ on the $i$th component of $F$ then this differential imposes the constraint \begin{equation*} c' f(0) + \sum b_i c_i = 0 . \end{equation*} In order for $E$ to be Gorenstein, $\omega_E$ must be a line bundle, so the generators~\eqref{eqn:omega-F} of $\omega_F$ must be multiples of the generator~\eqref{eqn:omega-E}. This immediately implies $c' = 0$ and that all of the $c_i$ are nonzero. Conversely, if $c' = 0$ and all of the $c_i$ are nonzero, then $c_j x_i - c_i x_j \in \mathscr O_E$ and \begin{equation*} (c_j x_i - c_i x_j) \sum_k \frac{c_k d x_k}{x_k^2} = c_j c_i \frac{d x_i}{x_i} - c_i c_j \frac{d x_j}{x_j} \end{equation*} implies that the generators~\eqref{eqn:omega-F} are multiples of~\eqref{eqn:omega-E}. This proves the following proposition:
\begin{proposition} \label{prop:dualizing-generator} If $E$ is a Gorenstein curve with a genus~$1$ singularity then $\omega_E$ is generated in a neighborhood of its singular point by a meromorphic form~\eqref{eqn:omega-E}, with $c' = 0$, where the $x_i$ are local parameters for the branches of $E$ at the singular point. \end{proposition}
By consideration of the residue condition imposed by the form~\eqref{eqn:omega-E}, we can also obtain a local description of the Gorenstein, genus~$1$ curve singularities. A more conceptual proof of this result can be found in \cite[Proposition~A.3]{Smyth}.
\begin{proposition} For each integer $m\geq 1$, there exists a unique Gorenstein singularity $(C,p)$ of genus $1$ with $m$ branches. If $m = 1$ then $(C,p)$ can be identified with the cusp $\mathbf{V}(y^2-x^3)$, if $m = 2$ then $(C,p)$ can be identified with the ordinary tacnode $\mathbf V(y^2-yx^2)$, and if $m\geq 3$, then $(C,p)$ is the germ at the origin of the union of $m$ general lines through the origin in $\mathbf A^{m-1}$. \end{proposition}
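To illustrate the case $m = 2$ (a sketch, with the coordinates rescaled so that $c_1 = c_2 = 1$): the generator~\eqref{eqn:omega-E} becomes $dx_1/x_1^2 + dx_2/x_2^2$, and the residue condition says that a function with expansions $f(0) + b_i x_i + \cdots$ on the two branches belongs to the local ring precisely when $b_1 + b_2 = 0$. After the coordinate change $x_2 \mapsto -x_2$, this is the condition that the two branches share a common tangent direction, recovering the ordinary tacnode $\mathbf V(y^2 - yx^2)$.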
\subsection{Tropical curves} We follow standard conventions and definitions for tropical curves and tropical stable maps.
\begin{definition} An \textbf{$n$-marked tropical curve} $\plC$ is a finite graph $G$ with vertex and edge sets $V$ and $E$, enhanced by \begin{enumerate} \item a \textbf{marking function} $m: \{1,\ldots,n\}\to V$, \item a \textbf{genus function} $g:V\to \NN$, \item a \textbf{length function} $\ell: E\to \RR_{+}$. \end{enumerate} The \textbf{genus} of a tropical curve $\plC$ is defined to be \[ g(\plC) = h_1(G)+\sum_{v\in V} g(v) \] where $h_1(G)$ is the first Betti number of the geometric realization of $G$. An $n$-marked tropical curve is \textbf{stable} if (1) every genus $0$ vertex has valence at least $3$ and (2) every genus $1$ vertex has valence at least $1$. \end{definition}
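For example (a hypothetical tropical curve, included only to illustrate the conventions): if $G$ consists of two vertices $v_1$ and $v_2$ joined by two edges, with $g(v_1) = 1$ and $g(v_2) = 0$, then $h_1(G) = 1$ and \[ g(\plC) = h_1(G) + g(v_1) + g(v_2) = 1 + 1 + 0 = 2. \]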
More generally, one may permit the length function $\ell$ above to take values in an arbitrary toric monoid $P$. This presents us with a natural notion of a family of tropical curves.
\begin{definition} Let $\sigma$ be a rational polyhedral cone with dual cone $S_\sigma$. A \textbf{family of $n$-marked prestable tropical curves over $\sigma$} is a tropical curve whose length function takes values in $S_\sigma$. \end{definition}
We note that given a tropical curve over $\sigma$, each point of $\sigma$ determines a tropical curve in the usual sense. Indeed, choosing a point of $\sigma$ is equivalent to choosing a monoid homomorphism \[ \varphi: S_\sigma \to \RR_{\geq 0}. \] Applying this homomorphism to the edge length $\ell(e)\in S_\sigma$ produces a real and positive length for each edge.
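For example (a hypothetical family, to make the evaluation explicit): let $\sigma = \RR_{\geq 0}^2$, so that $S_\sigma = \NN^2$ with generators $u_1, u_2$ dual to the coordinates of $\sigma$, and consider a family with a single edge $e$ of length $\ell(e) = u_1 + u_2$. The point $(a,b) \in \sigma$ corresponds to the homomorphism $\varphi$ with $\varphi(u_i)$ equal to the $i$th coordinate of the point, and it assigns $e$ the length $\varphi(\ell(e)) = a + b$.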
\subsection{Logarithmic geometry: working definitions} Let $N$ be a free abelian group of finite rank and $X^\circ$ be a subscheme of a torus $T = \mathbf G_m\otimes N$ over a field $k$ equipped with the trivial valuation. Let $K$ be a valued field extending $k$, with valuation surjective onto $\RR$. Then, the tropicalization of $X^\circ$ is the image of $X^\circ(K)$ under the coordinatewise valuation map \[ T(K)\to \RR\otimes N. \] This set is denoted $X^{\mathrm{trop}}$, and can be given the structure of a fan. This fan distinguishes a partial compactification of $T$ to a toric variety $Y$. The embedding $X\hookrightarrow Y$ of the closure $X$ of $X^\circ$ determines, locally on $X$, a natural class of \textit{monomial} functions obtained by restricting the monomials on $T$. These monomials form a sheaf of monoids $M_X$ under multiplication, and there is a tautological map of monoids \[ \mathcal O_X^\star\subset M_X\to \mathcal O_X. \] The quotient is another sheaf of monoids $\overline M_X:=M_X/\mathcal O_X^\star$, and amounts to considering monomial functions up to scalars.
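As an example (with the conventions above, so $k$ is trivially valued and the valuation on $K$ surjects onto $\RR$): take $X^\circ = \mathbf V(x + y + 1) \subset \mathbf G_m^2$. A point of $X^\circ(K)$ satisfies $\operatorname{val}(y) = \operatorname{val}(x)$ when $\operatorname{val}(x) < 0$, $\operatorname{val}(y) = 0$ when $\operatorname{val}(x) > 0$, and $\operatorname{val}(y) \geq 0$ when $\operatorname{val}(x) = 0$. The tropicalization $X^{\mathrm{trop}}$ is therefore the union of the three rays spanned by $(1,0)$, $(0,1)$, and $(-1,-1)$: the standard tropical line.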
Sections of the groupification $\overline M_X^{\mathrm{gp}}$ can be interpreted as piecewise linear functions on $X^{\mathrm{trop}}$. Just as in the toric case, piecewise linear functions on $X^{\mathrm{trop}}$ give rise to line bundles on $X$. Specifically, given a piecewise linear function, that is, a section of $\overline M^{\mathrm{gp}}_X$, the set of its lifts to $M_X^{\mathrm{gp}}$ forms a torsor under $\mathcal O_X^\star$, and therefore determines a line bundle. This is explained more precisely in Section~\ref{sec: lb-from-pl} below.
A logarithmic scheme is an object that possesses the main features present above. The requirement that $X$ be embedded in a toric variety can be dropped. Instead, one need only assume that $X$ (locally) admits a morphism to a toric stack. The data of the sheaf $\overline M_X$ may be thought of as the sheaf of piecewise linear functions on $X$.
To be more precise, it is convenient to reverse the logical order and specify the monomials first. Given a scheme $S$, a logarithmic structure is a sheaf of monoids $M_S$ in its \'etale topology and a sharp homomorphism $\varepsilon : M_S \to \mathcal O_S$ (the codomain given its multiplicative monoid structure). Sharpness means that each local section of $\mathcal O_S^\star$ has a unique preimage along $\varepsilon$. The quotient $M_S / \varepsilon^{-1} \mathcal O_S^\star$ is called the \emph{characteristic monoid} and is denoted $\overnorm M_S$ with its operation denoted \emph{additively}; the image of a section $\alpha$ of $M_S^{\rm gp}$ in $\overnorm M_S^{\rm gp}$ is denoted $\overnorm\alpha$. We assume all logarithmic structures are integral ($M_S$ is contained in its associated group $M_S^{\rm gp}$) and saturated (if $\alpha \in M_S^{\rm gp}$ and $n\alpha \in M_S$ for some integer $n \geq 1$ then $\alpha \in M_S$).
Such objects may be assembled into a category. The category of logarithmic schemes has the analogous constructions and notions from scheme theory, keeping track of the tropical data through the sheaves of piecewise linear functions.
For more of the general theory of logarithmic structures, we refer the reader to Kato's original article~\cite{Kat89}. A detailed study of the relationship between tropical and logarithmic geometry from a categorical point of view is undertaken in~\cite{CCUW}.
\subsection{Curves {\it \&} logarithmic structures} Let $(S,M_S)$ be a logarithmic scheme. A \textbf{family of logarithmically smooth curves over $S$} is a logarithmically smooth, flat, and proper morphism \[ \pi: (C,M_C) \to (S,M_S), \] with connected and reduced geometric fibers of dimension $1$. We recall F. Kato's structure theorem for logarithmic curves~\cite{Kat00}.
\begin{theorem} Let $C\to S$ be a family of logarithmically smooth curves. If $x\in C$ is a geometric point, then there is an \'etale neighborhood of $x$ in $C$, with a strict morphism over $S$ to an \'etale-local model $V\to S$, where $V\to S$ is one of the following: \begin{itemize} \item { (the smooth germ)} $V = \mathbf A^1_S\to S$, and the logarithmic structure on $V$ is pulled back from the base; \item { (the germ of a marked point)} $V = \mathbf A^1_S\to S$, with logarithmic structure pulled back from the toric logarithmic structure on $\mathbf A^1$; \item { (the node)} $V = \operatorname{Spec}_S \mathscr O_S[x,y]/(xy - t)$, for $t\in \mathscr O_S$. The logarithmic structure on $V$ is pulled back from the multiplication map $\mathbf A^2 \to \mathbf A^1$ of toric varieties along a morphism $t : S \to \mathbf A^1$ of logarithmic schemes. \end{itemize}
The image of $t\in M_S$ in $\overnorm M_S$ is referred to as the \textbf{deformation parameter of the node.}
\end{theorem}
Associated to a logarithmic curve $C\to S$ is a family of tropical curves.
\begin{definition} Let $C\to S$ be a family of logarithmically smooth curves and assume that the underlying scheme of $S$ is the spectrum of an algebraically closed field. Then, \textbf{the tropicalization of $C$}, denoted $\plC$, is obtained as follows: (1) the underlying graph is the marked dual graph of $C$ equipped with the standard genus and marking functions, and (2) given an edge $e$, the generalized length $\ell(e) = \delta_e\in \overnorm M_S$ is the deformation parameter of the corresponding node of $C$. \end{definition}
For more about logarithmic curves and their relationship to tropical curves, the reader may consult~\cite{CCUW}.
\subsection{Geometric interpretation of the sections of a logarithmic structure} \label{sec:char-sect} Given a logarithmic curve $C\to S$, it will be helpful to interpret sections of the sheaves $M_C^{\rm gp}$ and $\overnorm M_C^{\rm gp}$ geometrically.
\numberwithin{theorem}{subsubsection} \subsubsection{The affine and projective lines} \label{sec:monoid-sections}
Let $(X,\varepsilon: M_X\to \mathscr O_X)$ be a logarithmic scheme. A section of $M_X$ corresponds to a map $X \to \mathbf A^1$, the target given its toric logarithmic structure. Let $\alpha$ be such a section and $\overnorm\alpha$ be its image in $\overnorm M_X$. Then $\varepsilon(\alpha)$ is a unit if and only if $\overnorm\alpha = 0$.
With its logarithmic structure, $\mathbf{P}^1$ can be constructed as the quotient of $\mathbf A^2 - \{ 0 \}$ by $\mathbf{G}_m$. Any map $X \to \mathbf{P}^1$ lifts locally to $\mathbf A^2 - \{ 0 \}$ and can therefore be represented by a pair of sections $(\xi,\eta)$ of $M_X$. The ratio $\xi^{-1} \eta$, which is a section of $M_X^{\rm gp}$, is invariant under the action of $\mathbf{G}_m$, since $\mathbf{G}_m$ acts with the same weight on $\xi$ and $\eta$.
Therefore a map $X \to \mathbf{P}^1$ gives a well-defined section $\alpha$ of $M_X^{\rm gp}$. Not every section of $M_X^{\rm gp}$ arises this way, because the map $(\xi, \eta) : X \to \mathbf A^2$ from which $\alpha$ was derived cannot meet the origin. This condition implies that, for each geometric point $x$ of $X$, either $\overnorm\xi_x = 0$ or $\overnorm\eta_x = 0$. In terms of $\overnorm\alpha$, this means that $\overnorm\alpha_x \geq 0$ or $\overnorm\alpha_x \leq 0$. We term this property being \textbf{locally comparable to $0$}.
{Our observations prove the following proposition:}
\begin{proposition} Let $X$ be a logarithmic scheme. Maps $X \to \mathbf{P}^1$, the latter given its toric logarithmic structure, may be identified with sections $\alpha$ of $M_X^{\rm gp}$, whose images $\overnorm\alpha$ in $\overnorm M_X^{\rm gp}$ are locally comparable to $0$. \end{proposition}
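A non-example may help clarify the condition: if $x$ is a geometric point of $X$ at which $\overnorm M_{X,x} \cong \NN^2$ (for instance, the stalk at a node of a logarithmic curve over the standard logarithmic point), then a section $\overnorm\alpha$ with $\overnorm\alpha_x = (1,-1)$ lies neither in $\overnorm M_{X,x}$ nor in $-\overnorm M_{X,x}$, so it is not locally comparable to $0$ and does not arise from a map to $\mathbf{P}^1$.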
Because it has charts, the sheaf $\overnorm M_X^{\rm gp}$ locally admits a surjection from a constant sheaf, so the condition on $\overnorm\alpha$ in the proposition is open on the base: if $X$ is a family of logarithmic schemes over $S$ and a section $\overnorm\alpha$ of $\overnorm M_X^{\rm gp}$ satisfies $\overnorm\alpha_x \geq 0$ or $\overnorm\alpha_x \leq 0$ for all $x$ in a geometric fiber $X_s$ of $X$ over $S$, then it also satisfies that condition on the fibers $X_t$ for all $t$ in some open neighborhood of $s$.
This observation is particularly useful for studying infinitesimal deformations of logarithmic maps to $\mathbf{P}^1$, since deforming such a map is equivalent to deforming the corresponding section $\alpha$ of $M_X^{\rm gp}$.
\begin{definition} For any logarithmic scheme $X$, we define $\mathbf{G}_{\log}(X) = \Gamma(X, M_X^{\rm gp})$. Identifying $X$ with its functor of points, we also write $\mathbf{G}_{\log}(X) = \operatorname{Hom}(X, \mathbf{G}_{\log})$. \end{definition}
\begin{remark} The functor $\mathbf{G}_{\log}$ is not representable by a logarithmic scheme; it is analogous to an algebraic space (see \cite{logpic}, for example). The above considerations may be seen as a demonstration that $\mathbf{P}^1$ is logarithmically \'etale over $\mathbf{G}_{\log}$. A discussion of the simpler space of rational curves in $\mathbf{G}_{\log}$ may be found in~\cite{RW19}.
We prefer to avoid a discussion of the geometric structure of $\mathbf{G}_{\log}$ in this paper. The reader should feel free to regard maps to $\mathbf{G}_{\log}$ as a convenient shorthand for sections of $M_X^{\rm gp}$ and nothing more. {The advantage of treating $\mathbf G_{\log}$ as a geometric object, and not merely an abstract sheaf, and working with that object instead of geometric models like $\mathbf P^1$, is that the latter approach necessitates an apparently endless process of subdivision and refinement that obscures the geometric essence of our arguments.} \end{remark}
\subsubsection{Maps to toric varieties} \label{sec:to-toric}
The observations above concerning logarithmic maps to $\mathbf{P}^1$ may be extended to all toric varieties. Indeed, if $Z = \operatorname{Spec} k[S_\sigma]$ is an affine toric variety defined by a cone $\sigma$ and character lattice $N^\vee$, then there is a canonical map \begin{equation*} S_\sigma \to \Gamma(Z, M_Z) , \end{equation*} which extends to a map \begin{equation*} N^\vee \to \Gamma(Z, M^{\rm gp}_Z) . \end{equation*} The construction of this map commutes with restriction to open torus invariant subvarieties, and therefore glues to a well-defined map on any toric variety.
\begin{proposition} Let $X$ be a logarithmic scheme and let $Z$ be a toric variety with fan $\Sigma$ and character lattice $N^\vee$. Morphisms $X \to Z$ may be identified with morphisms $N^\vee \to \Gamma(X, M_X^{\rm gp})$ such that, for each geometric point $x$ of $X$, there is a cone $\sigma \in \Sigma$, such that the map \begin{equation*} S_\sigma \to \Gamma(X, M_X^{\rm gp}) \to \Gamma(X, \overnorm M_X^{\rm gp}) \to \overnorm M_{X,x}^{\rm gp} \end{equation*} factors through $\overnorm M_{X,x}$. \end{proposition}
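For instance, when $Z = \mathbf A^1$ the fan consists of a single ray with $S_\sigma = \NN$, and the condition in the proposition asks that $\overnorm\alpha_x \geq 0$ at every geometric point $x$, recovering the description of maps to $\mathbf A^1$ from Section~\ref{sec:monoid-sections}; when $Z = \mathbf{P}^1$ it recovers local comparability to $0$.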
\begin{definition} Let $N$ be a finitely generated free abelian group. We write $(N \otimes \mathbf{G}_{\log})(X) = \operatorname{Hom}(N^\vee, \Gamma(X, M_X^{\rm gp}))$ and use $\operatorname{Hom}(X, N \otimes \mathbf{G}_{\log})$ for the same notion. \end{definition}
\begin{remark} The discussion above shows that, if $Z$ is a toric variety with cocharacter lattice $N$, then there is a canonical logarithmic modification $Z \to N \otimes \mathbf{G}_{\log}$. \end{remark}
\subsubsection{Sections of the characteristic monoid} \label{sec:char-mon}
Since logarithmic maps $X \to \mathbf A^1$ correspond to sections of $M_X$, maps $X \to [ \mathbf A^1 / \mathbf{G}_m ]$ correspond to sections of $M_X / \mathscr O_X^\star = \overnorm M_X$. The quotient $[ \mathbf A^1 / \mathbf{G}_m ]$ is usually denoted $\mathscr A$ and is called the \textbf{Artin fan} of $\mathbf A^1$.
It is shown in \cite[Remark~7.3]{CCUW} that, if $X$ is a logarithmic curve over $S$, and the underlying scheme of $S$ is the spectrum of an algebraically closed field, then sections of $\overnorm M_X$ (which is to say, maps $X \to \mathscr A$) may be interpreted as piecewise linear functions on the tropicalization of $X$ that are valued in $\overnorm M_S$ and are linear along the edges with integer slopes.
Similar reasoning, combined with the discussion in Section~\ref{sec:monoid-sections} shows that maps $X \to [ \mathbf P^1 / \mathbf{G}_m ]$ correspond to sections $\alpha$ of $\overnorm M_X^{\rm gp}$ that are locally comparable to $0$. If $X$ is a curve, then these sections are the piecewise linear functions on the tropicalization that are everywhere valued in $\overnorm M_S$ or in $- \overnorm M_S$.
\begin{remark} Even though its underlying ``space'' is an algebraic stack, $[\mathbf A^1 / \mathbf{G}_m]$ represents a \textbf{functor} on logarithmic schemes. { This contrasts with the more common situation, where algebraic stacks typically only represent categories fibered in groupoids over schemes.} \end{remark}
\numberwithin{theorem}{subsection} \subsection{Line bundles from piecewise linear functions}\label{sec: lb-from-pl} For any logarithmic scheme $X$, there is a short exact sequence \begin{equation*} 0 \to \mathscr O_X^\star \to M_X^{\rm gp} \to \overnorm M_X^{\rm gp} \to 0 \end{equation*} of the sheaves associated to the logarithmic structure. Given a section $\alpha \in \Gamma(X, \overnorm M_X^{\rm gp})$, the image of $\alpha$ under the coboundary map \begin{equation*} H^0(X, \overnorm M_X^{\rm gp}) \to H^1(X, \mathscr O_X^\star) \end{equation*} is represented by an $\mathscr O_X^\star$-torsor $\mathscr O_X^\star(-\alpha)$ on $X$ and gives rise to an associated line bundle. Thus, to each piecewise linear function $f$ on $\plC$ that is linear on the edges with integer slopes and takes values in $\overnorm M_S$, we have an associated line bundle $\mathscr O(-f)$.
The explicit line bundle obtained by this construction is recorded in~\cite[Section 2]{RSW17A}.
\numberwithin{theorem}{subsection} \subsection{Tropicalization of morphisms to toric varieties} \label{sec:tropicalization}
Let $Z$ be a toric variety with dense torus $T$, equipped with its standard logarithmic structure, and let $N$ and $N^\vee$ be the cocharacter and character lattices of $Z$.
Let $C$ be a logarithmic curve over $S$, and assume that the underlying scheme of $S$ is the spectrum of an algebraically closed field. A logarithmic map $\varphi : C \to Z$ induces a map \begin{equation} \label{eqn:map-to-tropGm} N^\vee \to \Gamma(Z, \overnorm M_Z) \to \Gamma(C, \overnorm M_C) \end{equation} by the discussion in Section~\ref{sec:to-toric}.
As remarked in Section~\ref{sec:char-sect}, the sections of $\overnorm M_C$ are piecewise linear functions on the tropicalization $\plC$ of $C$ that are linear with integer slopes along the edges and are valued in $\overnorm M_S^{\rm gp}$. If we assume in addition that $\overnorm M_S = \RR_{\geq 0}$ then we obtain a piecewise linear map \begin{equation*} \plC \to \operatorname{Hom}(N^\vee,\RR) = N_\RR \end{equation*} that we call the \emph{tropicalization} of $C \to Z$. It will sometimes be convenient to think of this as a map from $\plC \to \Sigma$, where $\Sigma$ is the fan of $Z$.
\begin{lemma} The map $\plC \to N_\RR$, constructed above, satisfies the balancing condition. \end{lemma} \begin{proof} This is proved in~\cite[Section 1.4]{GS13}.
\end{proof}
\subsection{Minimality} \label{sec:minimality}
\numberwithin{theorem}{subsubsection} \subsubsection{Minimal logarithmic structures} \label{sec:minlog}
A crucial concept in the theory of logarithmic moduli problems is that of \textbf{minimality}. Let $\mathbf{LogSch}$ denote the category of fine and saturated logarithmic schemes. Given a moduli stack $\mathfrak{M}$ over $\mathbf{LogSch}$ and a logarithmic scheme $S$, the fiber $\mathfrak{M}(S)$ of the fibered category $\mathfrak{M}$ over $S$ is the groupoid of logarithmic geometric objects $[\mathscr X\to S]$ defined over $S$, as specified by the moduli problem.
Logarithmic geometric objects are algebraic schemes or stacks with the additional structure of a sheaf of monoids. The description of $\mathfrak{M}$ as a category fibered in groupoids over $\mathbf{LogSch}$ does not furnish such an object: if $\underline S$ is a scheme without a chosen logarithmic structure, it does not make mathematical sense to consider the fiber of $\mathfrak{M}$ over $\underline S$. Said differently, there is no ``underlying scheme, or underlying stack, or underlying category fibered in groupoids over schemes'' of $\mathfrak{M}$.
The difficulty that must be overcome is that given an ordinary scheme $\underline S$, there are many choices for logarithmic schemes $(S,M_S)$ enhancing $\underline S$, and it is unclear which one to pick. The notion of minimality, introduced by F. Kato and recently clarified and expanded~\cite{AC11,Che10,Gi12,GS13,Wis16a,Wis16b}, identifies the correct logarithmic structures to allow on $\underline S$ as those satisfying a universal property, recalled below.
Assuming that $\mathfrak{M}$ does have an underlying scheme, we arrive at a \emph{necessary} condition for $\mathfrak{M}$ to be representable by a logarithmic scheme. Suppose that $S \to \mathfrak{M}$ is a morphism of logarithmic schemes. Then the logarithmic structure of $\mathfrak{M}$ pulls back to a logarithmic structure $M$ on the underlying scheme $\underline S$ of $S$. Moreover, there is a factorization \begin{equation*} \xymatrix@R=10pt{
S \ar[dr] \ar[dd] \\
& \mathfrak{M} \\
(\underline S, M) \ar[ur] } \end{equation*} that is \emph{final} among all such factorizations. This finality condition can be phrased entirely in terms of the moduli problem defining $\mathfrak{M}$, and Gillam shows that if minimal factorizations exist for all $S \to \mathfrak{M}$, and are preserved by base change, then $\mathfrak{M}$ comes from a logarithmic structure on a moduli problem over \emph{schemes}~\cite{Gi12,Wis16a}.
\begin{theorem}[Gillam] \label{thm:gillam} When $\mathfrak M$ is a category fibered in groupoids over logarithmic schemes that comes from a logarithmic structure on a category fibered in groupoids $\mathfrak N$ over schemes, $\mathfrak N$ can be recovered from $\mathfrak M$ as the subcategory of minimal objects. \end{theorem}
Throughout this paper, we present logarithmic moduli problems and indicate monoidal and tropical (see Section~\ref{sec:mintrop}) characterizations of their minimal objects to recover the underlying schematic moduli problems.
\subsubsection{Minimality as tropical representability} \label{sec:mintrop}
We explain the concept in the case of stable maps for concreteness, where it becomes a tropical concept. This expands on~\cite[Remark~1.21]{GS13}. Let $\mathfrak{M}_{g,n}(Z)$ denote the stack over $\mathbf{LogSch}$ parametrizing logarithmic maps from genus $g$, $n$-pointed curves to a toroidal scheme $Z$. Let $\Sigma$ be the fan of $Z$.
Let $S$ be a standard logarithmic point $\spec(\NN\to \CC)$ and let $[C\to Z]$ be a logarithmic map over $S$. As explained in Section~\ref{sec:tropicalization}, the morphisms on sheaves of monoids may be dualized to produce a tropical map \[ \plC\to \Sigma. \] Replacing $\NN$ with an arbitrary toric monoid, one obtains a \textbf{family} of tropical maps.
From our discussion of minimality, we see that given a logarithmic stable map over $\spec(P\to \CC)$, the monoid $P$ cannot be arbitrary, since by pulling back via a morphism $P\to \RR_{\geq 0}$, we must obtain a tropical map. With this observation, there is a clear choice for a universal $P^{\min}$ such that all other enhancements $\spec(P\to \CC)$ of the same underlying map must be pulled back from $\spec(P^{\min}\to \CC)$. That is, we may choose $P^{\min}$ to be the monoid whose dual cone $\operatorname{Hom}(P^{\min},\RR_{\geq 0})$ is the cone of \textbf{all} tropical maps of the given combinatorial type. Succinctly, a logarithmic structure is minimal for a given moduli problem if it represents the \emph{tropical deformation space}.
In Figure~\ref{fig: non-minimal-family} below, taking $Z = pt$, we depict the duals of the characteristic monoid on the base of a non-minimal family. If one drops the condition that $\ell_1$ and $\ell_2$ coincide, we obtain the corresponding minimal monoid.
\begin{figure}
\caption{Consider the cone of tropical curves whose underlying graph is shown on the right, such that the edge lengths of $e_1$ and $e_2$ are equal. This cone is $3$-dimensional. A family of logarithmic curves whose minimal monoid is dual to this cone is non-minimal, due to the relation that these two edge lengths coincide.}
\label{fig: non-minimal-family}
\end{figure}
Applying this reasoning at each geometric fiber gives a criterion to check whether any given family of logarithmic maps $C \to Z$ over a logarithmic scheme $S$ is minimal. With the minimal objects identified, we construct a moduli stack as a fibered category over \textbf{Sch}, whose fiber over a scheme $\underline S$ is the groupoid of \textbf{minimal} logarithmic maps over $S$.
\numberwithin{theorem}{subsection} \subsection{Preliminaries from the prequel: radial alignments}\label{sec: prelims-from-prequel} The results of this paper rely on the notion of a radially aligned logarithmic curve and its canonical contraction to a curve with elliptic singularities. These concepts were developed in the companion article~\cite{RSW17A}, and we briefly recall the statements that we require.
Let $S$ be a logarithmic scheme enhancing the spectrum of an algebraically closed field and let $C\to S$ be a logarithmic curve over it whose fibers have genus~$1$ and let $\plC$ be its tropicalization. Given an edge $e$, we write $\ell(e)\in \overnorm M_S$ for the generalized edge length of this edge. For each vertex $v\in \plC$, there is a unique path from the circuit of $\plC$, namely the smallest subgraph of genus $1$, to the chosen vertex $v$. Write this path as $e_1,\ldots, e_n$. Define \[ \lambda(v) = \sum_{i=1}^n \ell(e_i). \] The resulting function $\lambda$ is a piecewise linear function on $\plC$ with integer slopes, and thus, a global section of $\overnorm M_C$. When $S$ is a general logarithmic scheme and $\pi: C\to S$ a curve, this section glues along specialization morphisms to give rise to a well-defined and canonical global section in $\Gamma(S,\pi_\star \overnorm M_C)$.
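For instance (a hypothetical configuration): suppose $\plC$ consists of a circuit supported at a single vertex $v_0$, an edge $e_1$ of length $\ell_1$ joining $v_0$ to a vertex $v_1$, and an edge $e_2$ of length $\ell_2$ joining $v_1$ to a vertex $v_2$. Then \[ \lambda(v_0) = 0, \qquad \lambda(v_1) = \ell_1, \qquad \lambda(v_2) = \ell_1 + \ell_2. \]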
Given a logarithmic curve $C\to S$ and a geometric point $s\in S$, we let $\plC_s$ denote the corresponding tropical curve associated to $C_s$. Recall also that we view a monoid $P$ as being the positive elements in a partially ordered group, with the partial order defined by $a\geq b$ if $a-b\in P\subset P^{\mathrm{gp}}$.
\begin{definition} We say that a logarithmic curve $C\to S$ is \textbf{radially aligned} if $\lambda(v)$ and $\lambda(w)$ are comparable for all geometric points $s$ of $S$ and all vertices $v, w \in \plC_s$.
We write ${\mathfrak M}_{1,n}^{\mathrm{rad}}$ for the category fibered in groupoids over logarithmic schemes whose fiber over $S$ is the groupoid of radially aligned logarithmic curves over $S$ having arithmetic genus~$1$ and~$n$ marked points. \end{definition}
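To see what the condition excludes (a hypothetical example): if $\overnorm M_S = \NN^2$ and the tropicalization of the fiber has vertices $v$ and $w$ with $\lambda(v) = (1,0)$ and $\lambda(w) = (0,1)$, then $\lambda(v)$ and $\lambda(w)$ are incomparable and the curve is not radially aligned. Informally, radial alignment is only achieved after subdividing the dual cone of $\overnorm M_S$ along the locus where these two radii agree, which is one way to see where the logarithmic blowup in the theorem below comes from.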
The following result is proved in~\cite[Section 3]{RSW17A}.
\begin{theorem} The category of radially aligned, prestable, logarithmic curves of genus~$1$ with $n$~marked points is represented by an algebraic stack with logarithmic structure $\mathfrak{M}_{1,n}^{\mathrm{rad}}$. The natural map \[ \mathfrak{M}_{1,n}^{\mathrm{rad}}\to \mathfrak{M}_{1,n}, \] is a logarithmic blowup. \end{theorem}
The second major construction in op.\ cit.\ is that of a contraction to a curve with elliptic singularities, starting from the data of a radially aligned curve with a chosen ``radius of contraction''. Let $C\to S$ be a radially aligned logarithmic curve of genus~$1$. We say that a section $\delta\in \overnorm M_S$ is \textbf{comparable to the radii of $C$} if for each geometric point $s\in S$, the section $\delta$ is comparable to $\lambda(v)$ for all vertices $v\in \plC_s$, in the monoid $\overnorm M_S$.
\begin{theorem} Let $C\to S$ be a radially aligned logarithmic curve and $\delta\in \overnorm M_S$ a section comparable to the radii of $C$. Then, there exists a partial destabilization \[ \widetilde C\to C, \] and a contraction \[ \widetilde C\to \overnorm C, \] where $\overnorm C\to S$ is a family of genus~$1$ curves with at worst Gorenstein genus~$1$ singularities, such that, for every geometric point $s$ of $S$, if $E$ is a component of $C_s$ such that $\lambda(E) < \delta_s$ then $E$ is contracted to a point in $\overnorm C$. \end{theorem}
An intuitive discussion of these concepts is presented in~\cite[Section~3.1]{RSW17A}. For working knowledge, the reader may visualize the section $\delta$ as giving rise to a \textbf{circle of radius $\delta$} around the circuit of the tropical curve $\plC$. By subdividing the edges of $\plC$, one may produce a new tropical curve $\widetilde \plC$ such that every point of $\widetilde \plC$ at radius $\delta$ from the circuit is a vertex. This introduces valency $2$ vertices into the tropicalization, and induces the partial destabilization. By contracting the interior of the circle of radius $\delta$ in a versal family, one produces a curve with a Gorenstein singularity.
\numberwithin{theorem}{section} \section{Logarithmic maps to toric varieties} \label{sec:log-maps-to-toric}
We construct the space of radially aligned logarithmic maps to a toric variety. The framework of radial alignments, together with the well-spacedness condition from tropical geometry, will lead to a proof of Theorem~\ref{thm: toric-targets}, which is the main result of this section. The symbol $Z$ will denote a proper toric variety with fan $\Sigma$.
Recall that a morphism of polyhedral complexes $\mathscr P\to \mathscr Q$ is a continuous map of the underlying topological spaces sending every polyhedron of $\mathscr P$ linearly to a polyhedron of $\mathscr Q$.
\begin{definition}\label{def: tsm} A \textbf{tropical prestable map}, or \textbf{tropical map} for short, is a morphism of polyhedral complexes \[ F:\plC\to \Sigma \] where $\plC$ is an $n$-marked tropical curve, and the following conditions are satisfied. \begin{enumerate} \item For each edge $e\in \plC$, the direction of $F(e)$ is an integral vector. When restricted to $e$, the map has integral slope $w_e$, taken with respect to this integral direction. This integral slope is referred to as the \textbf{expansion factor} of $F$ along $e$. The expansion factor and primitive edge direction are together referred to as the \textbf{contact order} of the edge. \item The map $F$ is \textbf{balanced}: at every point of $\plC$, the sum over all tangent directions of the directional derivatives of $F$ is zero. \end{enumerate} The map is \textbf{stable} if it satisfies the following condition: if $v\in \plC$ is a vertex of valence $2$, then the image of $\mathrm{Star}(v)$ is not contained in the relative interior of a single cone of $\Sigma$. \end{definition}
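As an illustration of the balancing condition (a hypothetical local picture): suppose $\Sigma$ is a complete fan in $N_\RR = \RR$ and $v$ is a vertex with three adjacent edges, mapped by $F$ with directional derivatives $2$, $-1$, and $-1$ along the three outgoing directions at $v$. Their sum is zero, so $F$ is balanced at $v$; replacing the expansion factor $2$ by $3$ would violate the condition.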
Following Section~\ref{sec:tropicalization}, given a logarithmic prestable map to a toric variety \[ \begin{tikzcd} (C,M_C) \arrow{d}\arrow{r}{f} & Z \\ (S,M_S), & \end{tikzcd} \] there is an associated family $\plC$ of tropical curves together with a map $[F: \plC\to \Sigma]$, satisfying the axioms of a tropical prestable map.
\setcounter{subsection}{\value{theorem}} \numberwithin{theorem}{subsection} \subsection{Radial logarithmic maps} We begin with a construction of the stack of radially aligned logarithmic maps.
\begin{proposition} \label{prop:W-alg} Let $Z$ be a toric variety. There is an algebraic stack with logarithmic structure, $\mathfrak W(Z)$, parameterizing families of radially aligned curves $C$ and logarithmic morphisms $C \to Z$.
The underlying algebraic stack of $\mathfrak W(Z)$ is locally quasifinite over the stack of ordinary prestable maps from radially aligned curves to $Z$, and its restriction to the open and closed substack of maps with fixed contact orders is quasifinite. \end{proposition}
\begin{proof} Let $\mathfrak{M}^{\mathrm{rad}}_{1,n}$ be the stack of radially aligned, $n$-marked, genus~$1$ logarithmic curves (Section~\ref{sec: prelims-from-prequel}) and let $C$ be its universal curve. Then $\mathfrak W(Z)$ is the space of logarithmic prestable maps from $C$ to $Z$, and this is representable by an algebraic stack with a logarithmic structure~\cite[Corollary~1.1.1]{Wis16a}. The local quasifiniteness is a consequence of~\cite[Theorem~1.1]{Wis16b} or~\cite[Proposition~3.6.3]{R15b}; under the assumption of fixed contact orders, the combinatorial types of a map $[C \to Z]$ are bounded~\cite[Theorem~3.8]{GS13}, and therefore $\mathfrak W(Z)$ is quasifinite over the space of maps of underlying schemes. \end{proof}
Stability in $\mathfrak W(Z)$ is defined in terms of the underlying schematic map:
\begin{definition} A radial map $[f:C\to Z]$ in $\mathfrak W(Z)$ over $\spec(\CC)$ is said to be \textbf{stable} if it satisfies the following conditions. \begin{enumerate} \item If $D\subset C$ is an irreducible component of genus $0$ contracted by $f$, then $D$ supports at least $3$ special points. \item If $C$ is a smooth curve of genus $1$, then $C$ is not contracted. \end{enumerate} A family of ordered logarithmic maps is stable if each geometric fiber is stable. \end{definition}
\setcounter{subsubsection}{\value{theorem}} \subsubsection{Minimal monoids}
We give a tropical description of the logarithmic structure of $\mathfrak W(Z)$. We leave it to the reader to verify that this description is correct, either using \cite[Appendix~C.3]{Wis16a} or adapting the arguments from~\cite[Section 3]{Che10} or~\cite[Proposition~1.22]{GS13}.
The minimality condition may be checked on geometric fibers, so we assume that the underlying scheme of $S$ is $\operatorname{Spec} \CC$. Let $\sigma_S$ be the corresponding dual cone $\operatorname{Hom}(\overnorm M_S,\RR_{\geq 0})$ of the characteristic monoid of $S$. By forgetting the alignment, a radial map $[f]$ above produces a usual logarithmic map with combinatorial type $\Theta$. Letting $\sigma_\Theta$ be the associated cone of tropical maps, we have a morphism of cones \[ \sigma_S \to \sigma_\Theta. \] In the tropical moduli cone $\sigma_\Theta$ above, the locus of tropical curves whose vertices are ordered in the same manner as $C$ forms a cone $\sigma(f)$.
\setcounter{theorem}{\value{subsubsection}} \begin{definition} Let $f:C\to Z$ be a family of ordered logarithmic maps over a logarithmic base $S$. The map $[f]$ is a \textbf{minimal} ordered logarithmic map if for each geometric point $\overnorm s\in S$, there is an isomorphism of cones \[ \operatorname{Hom}(\overnorm M_{S,\overnorm s},\RR_{\geq 0})\cong \sigma(f_{\overnorm s}). \] \end{definition}
\subsection{The factorization property} \label{sec:factorization} To detect the curves that smooth to the main component, we will need to identify certain contractions of the source curve constructed from the tropical maps and use the methods developed in~\cite{RSW17A}.
Let $\overnorm C$ be a Gorenstein curve of arithmetic genus $1$. We will refer to $E$, the smallest connected subcurve of $\overnorm C$ of arithmetic genus $1$, as the \textbf{circuit component} of $\overnorm C$. Given a family $\overnorm C\to S$, we give the nodes and markings the standard logarithmic structure, and we give $\overnorm C$ the trivial logarithmic structure near any genus~$1$ singularities.
Given an aligned logarithmic curve $C$ of genus $1$ and a contraction $C\to \overnorm C$, we may equip $\overnorm C$ with the logarithmic structure defined above. This enhances $C\to \overnorm C$ to a logarithmic morphism.
Let $(C,M_C)\to (S,M_S)$ be a radially aligned logarithmic curve and let $Z$ be a toric variety with cocharacter lattice $N$. We associate a section $\delta_f \in \overnorm M_S$ to a logarithmic map $f:C\to Z$ over $S$. Let $\plC$ be the tropicalization of the curve $C$ with circuit $\plC_0$. Consider the associated family of tropical maps \[ \varphi: \plC\to N_{\RR} \] If $\varphi$ does not contract the circuit, then let $\delta_f = 0$. Otherwise, let $\delta_f$ be the minimum distance from $\plC_0$ to a $1$-valent vertex of $\varphi^{-1}(\varphi(\plC_0))$. That is, the distance to the closest vertex supporting a flag that is not contracted by $\varphi$. See Figure~\ref{fig: tropical-cricles} for an example.
\begin{figure}\label{fig: circle-around-circuit}
\label{fig: tropical-cricles}
\end{figure}
These data will produce a rational bubbling of the source curve and a contraction thereof. First, subdivide $\plC$ such that every edge of $\varphi^{-1}(\varphi(\plC_0))$ terminates at a vertex; in Figure~\ref{fig: circle-around-circuit}, this amounts to introducing a vertex where the dotted circle crosses the lower vertical edge. This induces a logarithmic modification \[ \tau: \widetilde C\to C. \] By the constructions of Section~\ref{sec: prelims-from-prequel}, we obtain a section comparable to the radii of $C$, and there is now an induced contraction \[ \gamma: \widetilde C\to \overnorm C, \] to a curve with a Gorenstein elliptic singularity.
\begin{definition} Keeping the notations above, an ordered logarithmic map $C\to Z$ as above is said to have the \textbf{factorization property} if the associated map $\widetilde C\to C\to Z$ factorizes as \[ \begin{tikzcd} \widetilde{C} \arrow{rr}{f}\arrow{rd} & & Z \\ & \overnorm{C}\arrow[swap]{ur}{\overnorm f}. \end{tikzcd} \] \end{definition}
\begin{remark} Note that it need not be the case that the map $\overnorm f$ is nonconstant on a branch of the elliptic singularity. This is because the map $\overnorm C\to Z$ may have highly degenerate contact order with the boundary of $Z$; it could be that the entire elliptic component is contracted. However, one may always replace $Z$ with a logarithmic modification $\mathscr Z$, i.e., a toric degeneration, such that the genus $1$ component maps to the dense torus of one of the components of $\mathscr Z$. In such an expansion, there will be at least one branch of the singularity along which the map is nonconstant. For example, if $Z = \mathbf{P}^1$, it could be that the genus $1$ subcurve is contracted to one of the relative points $0$ or $\infty$ on $\mathbf{P}^1$. In this case, one must first expand $\mathbf{P}^1$ to a chain $\mathbf{P}^1_{\mathrm{exp}}$, until the curve maps to the dense torus of a component in $\mathbf{P}^1_{\mathrm{exp}}$. The choice of radius forces that the factorization is nonconstant on a branch of the singularity. \end{remark}
Recall from Section~\ref{sec:to-toric} that a map $f : C \to Z$, where $Z$ is a toric variety with character lattice $N^\vee$, induces a homomorphism \begin{equation*} \alpha : N^\vee \to \Gamma(C, M_C^{\rm gp}) . \end{equation*} The factorization property depends only on $\alpha$ and not specifically on the toric variety $Z$ itself. For example, if $Z$ were a toric modification of another toric variety $Z'$, then the factorization properties for $C \to Z$ and $C \to Z'$ would coincide. We offer a definition of the factorization property that makes this independence explicit.
\begin{definition} \label{def:factorization-no-toric} Let $N$ and $N^\vee$ be finitely generated, free abelian groups, dual to each other, and let $C$ be a logarithmic curve over $S$. Assume given $\alpha : N^\vee \to \Gamma(C, M_C^{\rm gp})$ and let $\overnorm\alpha$ be the induced morphism valued in $\Gamma(C, \overnorm M_C^{\rm gp})$. Let $\plC_s$ be the tropicalization of $C_s$, for each geometric point $s$ of $S$. Define $\delta_\alpha \in \Gamma(S, \overnorm M_S)$ fiberwise to be the largest $\lambda(v)$, among $v \in \plC$, such that $\overnorm\alpha$, viewed as a piecewise linear function on $\plC$, is constant within distance $\lambda(v)$ of the circuit.
Let $\upsilon : \widetilde C \to C$ and $\tau : \widetilde C \to \overnorm C$ be the destabilization and contraction constructed as above. We say that $\alpha$ \textbf{satisfies the factorization property} if $\upsilon^\star \alpha$ descends along $\tau$ to $N^\vee \to \Gamma(\overnorm C, M_{\overnorm C}^{\rm gp})$. \end{definition}
\begin{remark} The factorization property is equivalent to requiring that $\widetilde C \to C \to N\otimes \mathbf G_{\log}$ factor through the contracted curve $\overnorm C$. \end{remark}
\subsection{The stack of well-spaced logarithmic maps}\label{sec: well-spaced-logmaps} In this section, we construct a stack that we will later identify as the main component of the moduli space of genus~$1$ maps to a toric variety.
We begin with some geometric motivation. Let $H$ be a subtorus of the dense torus $T$ of $Z$. After replacing $Z$ with a toric modification, there is a toric compactification $Z_H$ of the quotient torus $T/H$ and a toric morphism \[ Z\to Z_H, \] extending the projection $T\to T/H$.
Let $f:C\to Z$ be a radial map over $S$, let $H$ be a subtorus of the dense torus $T$, and assume that $Z\to Z_H$ exists for some $T/H$-toric variety $Z_H$. We say that $[f]$ satisfies the \textbf{factorization property for $H$} if the induced logarithmic map \[ C\to Z\to Z_H \] satisfies the factorization property.
This definition cannot be applied to an arbitrary toric variety $Z$ and an arbitrary subtorus $H \subset T$, since there may not be a toric map from $Z$ to an equivariant compactification of $T/H$. For example, consider $Z = \mathbf{P}^2$ and let $H$ be any $1$-dimensional subtorus. Since there is no non-constant map from $\mathbf{P}^2$ to $\mathbf{P}^1$, the assumption fails.
There are two ways in which to overcome the issue. The first is to replace $Z$ with a logarithmic modification, which requires replacing $C$ with a logarithmic modification. This logarithmic modification may not be defined over the base $S$, until we perform a logarithmic modification of $S$ as well~\cite[Proposition~4.5.2]{ACMW}.
It is conceptually simpler to use Definition~\ref{def:factorization-no-toric}, which does not require the map $Z \to Z_H$, but only the map of tori $T \to T/H$. Indeed, let $N^\vee$ be the character lattice of $T$ and let $N^\vee_{T/H}$ be the character lattice of $T/H$. Then the factorization property for $C \to Z \to Z_H$ is equivalent to the factorization property for the composition \begin{equation*} N^\vee_{T/H} \to N^\vee \to \Gamma(C, M_C^{\rm gp}) . \end{equation*} With this as motivation, we arrive at our definition:
\begin{definition} \label{def:fact-subtorus} Let $f : C \to Z$ be a map from a radially aligned logarithmic curve to a toric variety $Z$ with dense torus $T$ and character lattice $N^\vee$. Let $H$ be a subtorus of $T$ and let $N^\vee_{T/H}$ be the character lattice of $T/H$. We say that $f$ \textbf{satisfies the factorization property} for $H$ if the map \begin{equation*} N^\vee_{T/H} \to N^\vee \to \Gamma(C, M_C^{\rm gp}) . \end{equation*} satisfies the factorization property of Definition~\ref{def:factorization-no-toric}. \end{definition}
Geometrically, the condition is that $C \to N\otimes \mathbf{G}_{\log}\to N_{T/H}\otimes \mathbf{G}_{\log}$ should factor through $\overnorm C_H$, where $\overnorm C_H$ is constructed from $C \to N_{T/H} \otimes \mathbf{G}_{\log}$ as in Section~\ref{sec:factorization}. The equivalence between the formulations is a tautology: $\mathbf{G}_{\log}$ may simply be defined as the object representing the functor $X \mapsto \Gamma(X, M_X^{\rm gp})$.
\begin{definition} \label{def:well-spaced} Let $Z$ be a toric variety. A radial logarithmic map $f:C\to Z$ is \textbf{well-spaced} if $f$ satisfies the factorization property for all subtori $H$ of $T$.
Let $\mathcal W(Z)$ denote the category fibered in groupoids, over logarithmic schemes, of stable, well-spaced, radially aligned logarithmic stable maps to $Z$. \end{definition}
Given a splitting $N_{T/H}^\vee \simeq \mathbf Z^r$, the factorization property for $H$ is the conjunction of the factorization properties with respect to the tori dual to the direct summands of $\mathbf Z^r$. When $N_{T/H}^\vee$ has rank~$1$, the factorization property asserts that a section of $M_{\tilde C}^{\rm gp}$ descends to a section of $M_{\overnorm C}^{\rm gp}$. It is in this form that we will verify the algebraicity of the factorization property, below.
Let $C$ be a family of radially aligned logarithmic curves over $S$, assume that a section $\delta \in \Gamma(S, \overnorm M_S)$ is given, that $\tilde C \to C$ and $\tau : \tilde C \to \overnorm C$ are the associated semistable model and contraction of the genus~$1$ component, respectively, and that $E \subset \tilde C$ is the exceptional locus of $\tau$. We write $\pi : C \to S$ and $\overnorm\pi : \overnorm C \to S$ for the projections. We fix a section $\alpha$ of $M_C^{\rm gp}$. We will use $F$ for the subfunctor of the functor represented by $S$ on logarithmic schemes consisting of those $f : T \to S$ such that the restriction of $\alpha$ along $f$ has the factorization property --- in other words, the pullback of $\alpha$ to $\tilde C_T$ descends along $\tilde C_T \to \overnorm C_T$.
In the following statements, we will say that the factorization property has a certain trait to mean that the functor $F$ has that trait, relative to the functor represented by $S$. To unclutter the notation slightly, we will also assume that $\tilde C \to C$ is an isomorphism; doing so entails no loss of generality.
We begin by showing that the moduli functor of factorized maps is representable.
\begin{theorem} \label{thm:fact-rep} The factorization property is representable by a (not necessarily strict) closed embedding of logarithmic schemes. \end{theorem}
The proof proceeds as in Artin's criteria for algebraicity, although we do not need to invoke Artin's criteria directly because the morphism in question will turn out to be a closed embedding.
The following two propositions refine \cite[Theorem~4.3]{RSW17A}, with essentially the same proof:
\begin{proposition} \label{prop:constructible} The set of points of $S$ where $\alpha$ satisfies the factorization property is constructible. \end{proposition} \begin{proof} We wish to show that the locus of points in $S$ where the section $\alpha$ of $M_C^{\rm gp}$ descends to $M_{\overnorm C}^{\rm gp}$ is constructible in $S$.
This assertion is local in the constructible topology on $S$, so we may assume that the dual graph of $C$ is locally constant over $S$. The assertion is also local in the \'etale topology, so we can even assume the dual graph is constant. Then there are two obstructions to descending $\alpha$ to a section of $M_{\overnorm C}^{\rm gp}$. Since $M_{\overnorm C}^{\rm gp}$ is pulled back from $S$ near $\tau(E)$, the first obstruction is that $\overnorm\alpha$ should be constant on $E$. But $\pi_\star \overnorm M_{C}^{\rm gp}$ is constant over $S$, so this holds on an open and closed subscheme of $S$. In particular, this locus is quasicompact and constructible. We may now restrict attention to this locus and assume the first obstruction vanishes.
Now $\alpha$ is a section of $\pi^\star M_S^{\rm gp} \subset M_C^{\rm gp}$ near $E$. Since $\overnorm\alpha$ is constant on $E$ by assumption, we may, after localization in $S$, divide off a section pulled back from $S$, to ensure that $\overnorm\alpha(E) = 0$ and $\alpha$ is therefore a section of $\mathcal O_C^\star$. We must show that the locus in $S$ where $\alpha$ is pulled back from $\mathcal O_{\overnorm C}^\star$ is constructible.
Regarding $\alpha$ instead as a section of $\mathcal O_C$, it is equivalent to show that the locus where $\alpha$ is pulled back from $\mathcal O_{\overnorm C}$ is constructible. The rest of the proof is now the same as in \cite[Theorem~4.3]{RSW17A}. \end{proof}
\begin{proposition} \label{prop:proper} The factorization property satisfies the valuative criterion for properness. \end{proposition} \begin{proof} We assume that $S$ is the spectrum of a valuation ring, with $j : \eta \to S$ the inclusion of the generic point. We wish to show that if $\alpha$ is a section of $M_C^{\rm gp}$ such that $j^\star \alpha$ is pulled back from a section of $M_{\overnorm C_\eta}$, then $\alpha$ is pulled back from a section of $M_{\overnorm C}$ over $\overnorm C$.
It suffices to assume that $S$ has the maximal logarithmic structure extending the logarithmic structure over $\eta$. That is, $M_S = \mathcal O_S \mathop\times_{j_\star \mathcal O_\eta} j_\star M_\eta$. This implies that $M_S^{\rm gp} = j_\star M_\eta^{\rm gp}$.
Our task is equivalent to showing that the square~\eqref{eqn:2} is cartesian: \begin{equation} \label{eqn:2} \vcenter{\xymatrix{ M_{\overnorm C}^{\rm gp} \ar[r] \ar[d] & j_\star M_{\overnorm C_\eta}^{\rm gp} \ar[d] \\ \tau_\star M_C^{\rm gp} \ar[r] & j_\star \tau_\star M_{C_\eta}^{\rm gp} }} \end{equation}
Away from $\tau(E)$, the map $\tau$ is an isomorphism and the square~\eqref{eqn:2} is evidently cartesian, so it suffices to verify that it is cartesian near $\tau(E)$, where $M_{\overnorm C} = \overnorm\pi^\star M_S$. Since the map $M_{\overnorm C}^{\rm gp} = \overnorm\pi^\star M_S^{\rm gp} \to \tau_\star M_C^{\rm gp}$ factors through $\tau_\star \pi^\star M_S^{\rm gp}$, it suffices to show that~\eqref{eqn:3} is cartesian: \begin{equation} \label{eqn:3} \vcenter{\xymatrix{ \overnorm\pi^\star M_S^{\rm gp} \ar[r] \ar[d] & j_\star \overnorm\pi^\star M_S^{\rm gp} \ar[d] \\ \tau_\star \pi^\star M_S^{\rm gp} \ar[r] & j_\star \tau_\star \pi^\star M_S^{\rm gp} }} \end{equation} This reduces to showing that both of the following two squares are cartesian: \begin{equation} \label{eqn:4} \vcenter{\hbox{\xymatrix{ \overnorm\pi^\star \overnorm M_S^{\rm gp} \ar[r] \ar[d] & j_\star \overnorm\pi^\star \overnorm M_S^{\rm gp} \ar[d] \\ \tau_\star \pi^\star \overnorm M_S^{\rm gp} \ar[r] & j_\star \tau_\star \pi^\star \overnorm M_S^{\rm gp} } \qquad\qquad \xymatrix{ \mathcal O_{\overnorm C}^\star \ar[r] \ar[d] & j_\star \mathcal O_{\overnorm C_\eta}^\star \ar[d] \\ \tau_\star \mathcal O_C^\star \ar[r] & j_\star \tau_\star \mathcal O_{C_\eta}^\star }}} \end{equation} We can check that the first square is cartesian on fibers, by proper base change for \'etale pushforward. In that situation it is immediate, because $\pi^\star \overnorm M_S^{\rm gp}$ is constant on the fibers over $S$ and the fibers of $\tau$ are connected. To see that the second square is cartesian, it is sufficient to see that~\eqref{eqn:5} is cartesian: \begin{equation} \label{eqn:5} \vcenter{\xymatrix{ \mathcal O_{\overnorm C} \ar[r] \ar[d] & j_\star \mathcal O_{\overnorm C_\eta} \ar[d] \\ \tau_\star \mathcal O_C \ar[r] & j_\star \tau_\star \mathcal O_{C_\eta} }} \end{equation} The rest of the proof is exactly the same as the end of the proof of \cite[Theorem~4.3]{RSW17A}.
Propositions~\ref{prop:constructible} and~\ref{prop:proper} combine to imply that the locus in $S$ where $\alpha \in M_C^{\rm gp}$ satisfies the factorization property is a closed subset of $S$. To give that closed subset a scheme structure, we have two more propositions, the first of which requires the notion of a homogeneous functor, cf.\ \cite[Definition~2.5]{Rim}, \cite[Section~2]{obs}:
\begin{definition} Let $F$ be a category fibered in groupoids over schemes. We say $F$ is homogeneous if the map \begin{equation*} F(S') \to F(S) \mathop\times_{F(T)} F(T') \end{equation*} is an equivalence whenever $S' = S \mathop\amalg_T T'$ is the pushout of an infinitesimal extension $T \subset T'$ and an affine morphism $T \to S$. \end{definition}
\begin{proposition} \label{prop:homogeneous} The factorization property is homogeneous. \end{proposition} \begin{proof} Let $\tau : C' \to \overnorm C'$ be a contraction of genus~$1$ components over $S'$, where $S' = T' \amalg_{T} S$ for a strict infinitesimal extension $T \subset T'$ and a strict affine morphism $\rho : T \to S$. We must show that if $\alpha' \in \Gamma(C', M_{C'}^{\rm gp})$ is a section whose restrictions $\beta'$ over $T'$ and $\alpha$ over $S$ satisfy the factorization property, then so does $\alpha'$. This is in fact immediate in view of~\eqref{eqn:6}: \begin{equation} \label{eqn:6} M_{S'}^{\rm gp} = \rho_\star M_{T'}^{\rm gp} \mathop\times_{\rho_\star M_T^{\rm gp}} M_S^{\rm gp} \end{equation} Indeed, this assertion holds trivially on characteristic monoids, since $\overnorm M_{T'}^{\rm gp} \to \overnorm M_T^{\rm gp}$ is an isomorphism, so it comes down to the identification~\eqref{eqn:7}, \begin{equation} \label{eqn:7} \mathcal O_{S'} = \rho_\star \mathcal O_{T'} \mathop\times_{\rho_\star \mathcal O_T} \mathcal O_S \end{equation} which is the definition of $S'$. \end{proof}
\begin{proposition} \label{prop:integration} The factorization property holds over a complete noetherian local ring if and only if it holds formally. \end{proposition} \begin{proof}
Here we must show that if $S$ is the spectrum of a complete noetherian local ring with maximal ideal $m$ and $S_i$ is the vanishing locus of $m^{i+1}$ then $\alpha \in \Gamma(C, M_C^{\rm gp})$ satisfies the factorization property if and only if its restriction to $S_i$ satisfies the factorization property for every $i$. It is certainly the case that if $\overnorm\alpha \in \Gamma(C, \overnorm M_C^{\rm gp})$ is pulled back from $\Gamma(\overnorm C, \overnorm M_{\overnorm C}^{\rm gp})$ modulo every (in fact, \emph{any}) power of $m$ then so is $\overnorm\alpha$ itself. Indeed, this claim amounts to the assertion that if $\overnorm\alpha$ is constant on $E \cap \pi^{-1}(S_0)$ then $\overnorm\alpha$ is constant on $E$. But $\overnorm M_C$ is an \'etale sheaf on $C$, so if $\overnorm\alpha$ vanishes at a point then it vanishes in an open neighborhood of that point. Since the only open subset of $E$ containing $E \cap \pi^{-1}(S_0)$ is $E$ itself, we conclude that $\overnorm\alpha$ descends to $\overnorm C$ if $\overnorm\alpha \big|_{S_0}$ descends to $\overnorm\pi^{-1}(S_0)$.
Dividing $\alpha$ by a section of $M_S^{\rm gp}$, we can assume that $\overnorm\alpha = 0$ on $E$ and therefore that $\alpha$ is a section of $\mathcal O_C^\ast$ near $E$. We can find an open subset $\overnorm U \subset \overnorm C$ containing $\tau(E)$ such that $\overnorm\alpha$ vanishes on $U = \tau^{-1} \overnorm U$. Then, by assumption, we have $\tau_\star \alpha \big|_{\overnorm U} \in \mathcal O_{\overnorm U} / m^{i+1} \mathcal O_{\overnorm U} \subset \tau_\star(\mathcal O_U / m^{i+1} \mathcal O_U)$. Passing to the limit, it follows that the image of $\tau_\star \alpha$ along $\tau_\star \mathcal O_U \to \varprojlim \tau_\star (\mathcal O_U / m^{i+1} \mathcal O_U)$ lies in $\varprojlim \mathcal O_{\overnorm U} / m^{i+1} \mathcal O_{\overnorm U} = \mathcal O_{\overnorm U}$. But the theorem on formal functions guarantees that $\tau_\star \mathcal O_U \to \varprojlim \tau_\star(\mathcal O_U / m^{i+1} \mathcal O_U)$ is an isomorphism, so $\alpha$ has the factorization property, as required. \end{proof}
As we will detail below, the propositions proved so far show that $F$ is representable by a closed subscheme of $S$ when restricted to the category of \emph{strict} logarithmic schemes over $S$. To show that it is representable on the category of all logarithmic schemes over $S$, we require one more proposition.
\begin{proposition} \label{prop:minimal} The factorization property has minimal monoids and the pullback of a minimal monoid is a minimal monoid. \end{proposition} \begin{proof} Suppose that $\alpha$ satisfies the factorization property over $S_0$, where $S_0 \to S$ is a morphism of logarithmic schemes that is an isomorphism on the underlying schemes. Write $C_0 = C \mathop\times_S S_0$ and let $\alpha_0$ be the image of $\alpha$ in $M_{C_0}^{\rm gp}$, which satisfies the factorization property by assumption. Working locally, we can assume that $S$ is atomic and that the dual graph of $C$ is constant on the closed stratum. Let $\plC$ be that dual graph. Adjusting $\alpha$ by a section of $M_S^{\rm gp}$, we can assume that $\overnorm\alpha_0$ vanishes on the components contracted by $\tau$. Let $Q$ be the set of all elements of $\overnorm M_S^{\rm gp}$ that arise as $\overnorm\alpha(v)$ as $v$ ranges over the vertices of $\plC$ contracted by $\tau$. Then $Q$ is contained in the kernel of $\overnorm M_S^{\rm gp} \to \overnorm M_{S_0}^{\rm gp}$. The minimal characteristic monoid on which we have the factorization property is therefore the saturation $\overnorm N_S$ of the image of $\overnorm M_S$ in the quotient of $\overnorm M_S^{\rm gp}$ by the subgroup generated by $Q$. Since we have a map $\overnorm N_S \to \overnorm M_{S_0}$, we can pull back the logarithmic structure of $S_0$ to get a logarithmic structure $N_S$ on $S$ on which $\alpha$ has the factorization property. It is immediate from the construction that $\overnorm N_S$ is minimal and that the construction commutes with pullback. \end{proof}
\begin{proof}[Proof of Theorem~\ref{thm:fact-rep}] Let $\tau : C \to \overnorm C$ be a contraction of genus~$1$ components over $S$ and let $\alpha$ be a section of $M_C^{\rm gp}$. We wish to show that there is a closed subscheme $S' \subset S$ such that the pullback of $\alpha$ along $f : T \to S$ has the factorization property if and only if $f$ factors through $S'$. We may assume that $S$ is of finite type, since all of the data in play are locally of finite presentation.
By Proposition~\ref{prop:constructible} and Proposition~\ref{prop:proper}, the subset of points of $S$ where $\alpha$ satisfies the factorization property is a closed subset $S_0$ of $S$. If $S_0$ is given the reduced subscheme structure then $\alpha$ has the factorization property over $S_0$. By Proposition~\ref{prop:homogeneous}, the subscheme structures on $S_0$ over which $\alpha$ has the factorization property are filtered. Taking the limit of these closed subschemes yields a scheme $S'$, the spectrum of a complete noetherian local ring, such that the factorization property holds formally over $S'$. But the underlying scheme of $S'$ must be the same as that of $S_0$, so the ideal of $S_0$ in $S'$ is nilpotent. Thus $S'$ is actually an infinitesimal neighborhood of $S_0$ in $S$ and $S'$ is the maximal closed subscheme of $S$ over which the factorization property holds.
Now suppose that $f : T \to S$ is a strict morphism and that $\alpha$ has the factorization property over $T$. We wish to show $f$ factors through $S'$. We may assume $T$ is of finite type. Certainly $f(T) \subset S_0$ as a set, so if $T_0$ is the reduced subscheme structure on $T$ then $f \big|_{T_0}$ factors through $S_0$. Then $T$ is an infinitesimal extension of $T_0$ so the pushout $S_1 = T \amalg_{T_0} S_0$ is an infinitesimal extension of $S_0$ and $\alpha$ has the factorization property over $S_1$ by Proposition~\ref{prop:homogeneous}. It follows that $S_1 \subset S'$ and therefore $f$ factors through $S'$ as required.
By Proposition~\ref{prop:minimal} the factorization property has minimal monoids, so by Theorem~\ref{thm:gillam}, $S'$ with its minimal logarithmic structure represents the factorization property. \end{proof}
\begin{theorem} \label{thm:w-rep-proper} The category $\mathcal W(Z)$ is representable by a logarithmic algebraic stack. After fixing the contact orders $\Gamma$, the substack $\mathcal W_\Gamma(Z)$ of maps with those contact orders is proper. \end{theorem}
\begin{proof} We have just seen that the factorization property is representable by closed (hence proper) morphisms. Since stability is an open condition, this shows $\mathcal W(Z)$ is a locally closed substack of $\mathfrak W(Z)$. It also shows that $\mathcal W(Z)$ is a closed substack of the space $\overnorm{{\cal M}}\vphantom{{\cal M}}^{\rm rad}_{1,n}(Z)$ of stable logarithmic maps from radially aligned curves to $Z$; this is a logarithmic modification of $\overnorm{{\cal M}}\vphantom{{\cal M}}_{1,n}(Z)$, the space of stable logarithmic maps to $Z$, and is therefore proper. It follows that $\mathcal W(Z)$ is proper. \end{proof}
\subsection{Logarithmic smoothness}\label{sec: log-smooth} The logarithmic tangent bundle of a toric variety $Z$ is trivial, and is naturally identified with $N\otimes_\ZZ\mathscr O_Z$, where $N = N(T) = \operatorname{Hom}(\mathbf{G}_m, T)$ is the cocharacter lattice of the dense torus. Given a radial map $[f: C\to Z]$, the obstructions to deforming the map $[f]$ fixing the deformation of $[C]$ lie in the group \begin{equation*} \operatorname{Obs}([f]) = H^1(C,f^\star T_Z^{\mathrm{log}}) = H^1(C,\mathscr O_C^{\dim Z}) \end{equation*} with dimension \begin{equation*} h^1(C,\mathscr O_C^{\dim Z}) = g(C)\cdot \dim Z. \end{equation*}
Consider a torus quotient $T\to T/H$ and choose a compatible equivariant compactification \[ Z\to Z_H, \] possibly passing from $Z$ to a modification, as in the previous section. The quotient map induces a projection map on logarithmic tangent bundles, extending scalars from \[ N(T)\to N(T/H) \] Choosing a splitting for the induced map on obstruction groups, we see that if the map $[\overnorm f: C\to \overnorm Z]$ is obstructed, then the map $[f]$ is also obstructed. The well-spacedness condition for radial logarithmic maps removes obstructions arising in this fashion. We now show that these obstructions are the only obstructions that arise.
\begin{theorem} \label{thm:toric-log-smooth} For any toric variety $Z$, the stack $\mathcal{W}(Z)$ is logarithmically smooth and unobstructed. \end{theorem}
The proof will require the following lemma.
\begin{lemma} \label{lem:gen-pic} Let $E$ be a connected Gorenstein curve of genus $1$ without genus~$0$ tails. Let $E_\circ$ be the smooth locus of $E$. The map $E_\circ \to \operatorname{Pic}^1(E)$ sending $x$ to $\mathcal O_E(x)$ is \'etale. \end{lemma}
\begin{proof} Consider the problem of deforming $x$, while fixing its image $\mathcal O_E(x)$ in the Picard group. The obstructions to these deformations lie in $H^1(E, \mathcal O_E(x))$. Since $E$ has no genus~$0$ tails, $\omega_E$ is trivial, and Serre duality yields the requisite vanishing.
To see that the map has relative dimension $0$, note that the relative tangent space may be identified with the quotient of $H^0(E, \mathcal O_E(x))$ by $H^0(E, \mathcal O_E)$. An application of Riemann--Roch shows that this quotient is trivial. \end{proof}
\begin{corollary} \label{cor:pic-vec} Let $E$ be a connected Gorenstein curve of genus $1$ without genus~$0$ tails and let $a_1, \ldots, a_n \in \mathbf Z^m$. Let $E_0$ be the smooth locus of $E$. If the $a_i$ span $\mathbf Q^m$ then the map $E_0^n \to \operatorname{Pic}(E)^m$ sending $(x_1, \ldots, x_n)$ to the tuple of line bundles associated to the divisor with $\mathbf Z^m$-coefficients $\sum a_ix_i$ is smooth. \end{corollary}
\begin{proof} By the elementary divisors theorem, we can assume that the $a_i$ are multiples of the standard basis vectors. Since $E_0$ is smooth, we may project off the factors of $E_0^n$ where $a_i$ vanishes. Then $E_0^m \to \operatorname{Pic}(E)^m$ is the product of the maps $E_0 \to \operatorname{Pic}^1(E) \xrightarrow{a_i} \operatorname{Pic}(E)$. The maps $E_0 \to \operatorname{Pic}^1(E)$ are \'etale by Lemma~\ref{lem:gen-pic} and multiplication by $a_i$ is \'etale because we work in characteristic zero. \end{proof}
\begin{proof}[Proof of Theorem~\ref{thm:toric-log-smooth}]
We will use the logarithmic infinitesimal criterion for smoothness. We must show that whenever $S \subset S'$ is a strict infinitesimal extension of logarithmic schemes, any morphism $S \to \mathcal W(Z)$ can be extended to $S'$, completing diagrams of the following form: \begin{equation} \label{eqn:log-lift2} \vcenter{ \xymatrix{ S \ar[r] \ar[d] & \mathcal W(Z) \\ S' \ar@{-->}[ur] }} \end{equation}
This assertion is local in $S$, so we can restrict to a neighborhood $U$ of a geometric point $s$, such that the map $\overline M^{\mathrm{gp}}_S|_U\to\overline M^{\mathrm{gp}}_{S,s}$ is an isomorphism. Let $\plC$ be the tropicalization of $C_s$.
\noindent {\sc Filtering the deformations.} Let $N$ and $N^\vee$ be the cocharacter and character lattices of $Z$, respectively. The moduli map $S \to \mathcal W(Z)$ gives the data of a curve $C$ and a section $\alpha \in N \otimes \Gamma(C, M_C^{\rm gp})$. For every torsion free quotient $N \to N'$ we obtain a map $\alpha_{N'} \in N' \otimes \Gamma(C, M_C^{\rm gp})$ by composition, which we also view as a map $C \to N' \otimes \mathbf G_{\log}$. For each such map $\plC\to N'\otimes \mathbf R$, there is a largest radius $\delta_{N'} \in \Gamma(S, \overnorm M_S)$ around the minimal circuit in $\plC$ whose interior is contracted by the map.
The radii $\delta_{N'}$ for varying $N'$ are totally ordered and necessarily finite in number. Rename these distinct radii $\delta_1 \geq \delta_2 \geq \cdots \geq \delta_k$.
Since $\overnorm\alpha$ is a piecewise linear function on $\plC$ valued in $\overnorm M_S^{\rm gp}$ we have $\overnorm\alpha(v) - \overnorm\alpha(w) \in N \otimes \ell \subset N \otimes \overnorm M_S^{\rm gp}$ whenever $v$ and $w$ are connected by an edge of $\plC$ of length $\ell$. We call $\frac{\overnorm\alpha(v) - \overnorm\alpha(w)}{\ell}$ the \emph{slope} of $\overnorm\alpha$ along that edge.
For each $i$, we take $N_i$ to be the quotient of $N$ by the saturated sublattice spanned by the slopes of $\overnorm\alpha$ along the edges of $\plC$ contained inside the circle of radius $\delta_i$. This gives a sequence of torsion free quotients $N \to N_k \to N_{k-1} \to \cdots \to N_1$.
For each $i$, let $Z_i = N_i \otimes \mathbf G_{\log}$. Then we obtain a sequence of maps: \begin{equation*} \mathcal W(Z) \to \mathcal W(Z_k) \to \mathcal W(Z_{k-1}) \to \cdots \to \mathcal W(Z_1) \end{equation*} The first map is logarithmically \'etale since $Z$ is logarithmically \'etale over $\operatorname{Hom}(N, \mathbf G_{\log})$. It will now suffice to show that $\mathcal W(Z_i) \to \mathcal W(Z_{i-1})$ is logarithmically smooth for all $i$. For each $i$, let $\alpha_i$ be the image of $\alpha$ in $N_i \otimes \Gamma(C, M_C^{\rm gp})$. We make the following observations: \begin{enumerate} \item We have $\delta_i \in \Gamma(S, \overnorm M_S)$ such that $\overnorm\alpha_i \in N_i \otimes \Gamma(C, \overnorm M_C^{\rm gp})$ is constant on the interior of the circle of radius $\delta_i$ around the central vertex of $\plC$; \item the images in $N_{i+1}$ of the slopes of $\overnorm\alpha$ on the edges of $\plC$ exiting the circle of radius $\delta_i$ span the kernel of $N_{i+1} \to N_i$ as a rational vector space. \end{enumerate} The second observation requires a slight argument. By definition, the slopes of $\overnorm\alpha$ within the circle of radius $\delta_i$ span the kernel of $N \to N_i$ rationally. But if the slopes along the edges inside the circle of radius $\delta_{i}$ together with those immediately exiting it spanned a smaller saturated subgroup than the kernel of $N \to N_{i+1}$ then there would have been another $\delta_j$ in between $\delta_{i}$ and $\delta_{i+1}$.
\noindent {\sc The iterative procedure.} The map $\alpha : S \to \mathcal W(Z)$ gives families $C \leftarrow \widetilde C \to \overnorm C$ where $\overnorm C$ is the contraction of the circle of radius $\delta$. We can regard $\alpha$ as an element of $N \otimes \Gamma(\overnorm C, M_{\overnorm C}^{\rm gp})$. We examine extensions of these data to $C' \leftarrow \widetilde C' \to \overnorm C'$ and $\alpha' \in N \otimes M_{\overnorm C'}^{\rm gp}$. The problem is addressed in two steps: first we choose a deformation of $C$ (entailing deformations of $\widetilde C$ and $\overnorm C$), which is an unobstructed problem, and then we try to lift $\alpha$, which can be obstructed for a fixed choice of $C'$. We then revise our choice of deformation $C'$ to eliminate the obstruction to lifting $\alpha$.
The choices of $C'$ form a torsor under $H^1(C, T_{C/S}^{\log})$. We will adjust $C'$ iteratively, lifting $\alpha_i$ to $\alpha'_i \in N_i \otimes \Gamma(\overnorm C', M_{\overnorm C'}^{\rm gp})$ based on an already selected lift of $\alpha_{i-1}$. At each step, we will adjust $C'$ by a class in $H^1(C, T_{C/S}^{\log})$ that vanishes on the interior of the circle of radius $\delta_i$, thereby ensuring that our earlier choices are not broken by the later adjustments.
\noindent {\sc The obstruction group.} Let $N'_i$ be the kernel of $N_i \to N_{i-1}$. We indicate how $N'_i \otimes H^1(\overnorm C, \mathscr O_{\overnorm C})$ functions as an obstruction group to deforming $\alpha_i$ once $\alpha_{i-1}$ and $C'$ are fixed.
By definition, the lifting problem~\eqref{eqn:log-lift2} is equivalent to the problem of extending $\alpha \in N_i \otimes \Gamma(\overnorm C, M_{\overnorm C}^{\rm gp})$ to $\alpha' \in N_i \otimes \Gamma(\overnorm C', M_{\overnorm C'}^{\rm gp})$. Recall that $\alpha$ gives an invertible sheaf, $\mathscr O_{\overnorm C}(-f(\overnorm\alpha))$ for each $f \in N_i^\vee$, and $f(\alpha)$ is a nowhere vanishing global section of $\mathscr O_{\overnorm C}(-f(\overnorm\alpha))$. We will abuse notation slightly and think of $\mathscr O_{\overnorm C}(-\overnorm\alpha)$ as a family of invertible sheaves indexed by $N_i^\vee$ and $\alpha$ as a trivialization of this family. Let $\overnorm\alpha'$ denote the unique extension of $\overnorm\alpha \in \Gamma(\overnorm C, \overnorm M_{\overnorm C}^{\rm gp})$ to $\overnorm M_{\overnorm C'}^{\rm gp}$. Our task is to extend $\alpha$ to a trivialization of $\mathscr O_{\overnorm C'}(\overnorm\alpha')$.
If it exists, an extension will necessarily be a trivialization, so the obstruction to the existence of an extension is the isomorphism class of the deformation $\mathscr O_{\overnorm C'}(\overnorm\alpha')$, which lies in $N_i \otimes H^1(\overnorm C, \mathscr O_{\overnorm C})$. By induction, the image of this obstruction in $N'_i \otimes H^1(\overnorm C, \mathscr O_{\overnorm C})$ vanishes, so our obstruction lies in $N'_i \otimes H^1(\overnorm C, \mathscr O_{\overnorm C})$.
\noindent {\sc Deformations of the curve.} This obstruction may well be nonzero, but we are still free to choose $C'$. The choice of $C'$ is a torsor under the deformation group $\operatorname{Def}(C) = H^1(C, T_{C/S}^{\log})$. This gives a homomorphism \begin{equation} \label{eqn:def-obs1} H^1(C, T_{C/S}^{\log}) = \operatorname{Def}(C) \to \operatorname{Obs}_C(f) = N_i \otimes H^1(\overnorm C, \mathscr O_{\overnorm C}) \end{equation} that we wish to show surjects onto $N'_i \otimes H^1(\overnorm C, \mathscr O_{\overnorm C})$. Once this is done, we can modify $C'$ to eliminate the obstruction.
Since $C$ is a curve, the formation of $H^1(C, T^{\log}_{C/S})$ and of $H^1(\overnorm C, \mathcal O_{\overnorm C})$ commutes with base change. By Nakayama's lemma, we may therefore demonstrate the surjectivity of~\eqref{eqn:def-obs1} by checking it on the fibers. We may therefore replace $S$ with a geometric point and assume that $S$ is the spectrum of an algebraically closed field.
We let $\overnorm\plC$ be the dual graph of $\overnorm C$, where the interior of the circle of radius $\delta_i$ is treated as a single vertex. For each vertex $v_j$ of $\overnorm\plC$ other than the central vertex $v_0$, let $e_j$ be the edge of $\overnorm\plC$ adjacent to $v_j$ that is closest to $v_0$ (in other words, the edge on which $\lambda$ has negative slope when it is oriented away from $v_j$). Let $p_j$ be the node of $\overnorm C$ corresponding to $e_j$. For $j \neq 0$, the corresponding component of $\overnorm C$ is rational, and therefore $T_{C_j}^{\log}$ has nonnegative degree. It follows that the maps \begin{equation*} H^0(C_j, T_{C_j}^{\log}) \to H^0(p_j, T_{C_j}^{\log}) \qquad \text{and} \qquad H^0(C_j, \mathcal O_{C_j}) \to H^0(p_j, \mathcal O_{C_j}) \end{equation*} are surjective for all $j \neq 0$. From the normalization sequence, we see that there are decompositions: \begin{equation*} H^1(C, T_C^{\log}) = \bigoplus_j H^1(C_j, T_{C_j}^{\log}) \qquad \text{and} \qquad H^1(\overnorm C, \mathcal O_C) = \bigoplus_j H^1(C_j, \mathcal O_{C_j}) \end{equation*} Since $H^1(C_j, \mathcal O_{C_j}) = 0$ for $j \neq 0$, it follows that we may reduce to the case $C = C_0$ and $\overnorm C = \overnorm C_0$. Note that in this case, $\overnorm C_0$ has no genus~$0$ tails.
\noindent {\sc The obstruction class.} Now let $p_1, \ldots, p_n$ be the external points of $C$ corresponding to the edges $e_1, \ldots, e_n$ of $\overnorm\plC$ adjacent to the central vertex. Then $H^1(C, T_{C/S}^{\log})$ contains a copy of $\sum_j T_{p_j/C}$ corresponding to deformation of $C$ as a logarithmic curve by moving the points $p_j$.
Let $a_j \in N'_i$ be the slope of $\overnorm\alpha$ on $e_j$ and recall that the $a_j$ span $N'_i$ as a rational vector space. Then the obstruction class in $N'_i \otimes H^1(\overnorm C, \mathcal O_{\overnorm C})$ is given by the following formula: \begin{equation*} \mathcal O_{\overnorm C}(\overnorm\alpha) = \mathcal O_{\overnorm C}(\sum a_j p_j) \in N'_i \otimes H^1(\mathcal O_{\overline C}) \end{equation*} Thus the obstruction map \begin{equation}\label{eqn: obs-class} \sum_{j = 1}^n T_{p_j/C} \to H^1(C, T_{C/S}^{\log}) \to N_i \otimes H^1(\overnorm C, \mathcal O_{\overnorm C}) \end{equation} restricts to the tangent map $\sum T_{p_j/C} \to N'_i \otimes H^1(\overnorm C, \mathscr O_{\overnorm C})$ considered in Corollary~\ref{cor:pic-vec}. Since the $a_j$ span $N'_i$ rationally and $\overnorm C$ has no genus~$0$ tails, that corollary implies the desired surjectivity. The points $p_j$ all lie on the boundary of the circle of radius $\delta_i$, so any class in $\sum T_{p_j/C} \subset H^1(C, T_{C/S}^{\log})$ vanishes on the interior of the circle of radius $\delta_i$, as required. \end{proof}
\begin{remark} The proof shows a stronger smoothness property, because we were able to cancel obstructions using only deformations of the marked points without needing to smooth any of the singularities of $\overnorm C$. \end{remark}
It is a consequence of the proof that we can make the following ``codimension $1$'' characterization of the moduli space.
\begin{proposition}\label{prop: reduction-of-dimension} The substack of $\mathfrak W(Z)$ parametrizing stable radial maps that satisfy the factorization property for all subtori of codimension $1$ coincides with the space $\mathcal W(Z)$ of well-spaced radial maps. \end{proposition}
\begin{proof} It suffices to treat the case of $Z = \GG_{\log}^n$. Assume that a map $C\to \GG_{\log}^n$ fails to satisfy the factorization property for some subtorus $H$ in $T$. In keeping with the notation of the previous proof, we let $N'_H\subset N$ be the associated cocharacter subspace, and let $N_H$ be the quotient of $N$ by $N'_H$. We demonstrate that the map fails to satisfy the factorization property for a codimension $1$ subtorus.
If the map is obstructed, then the cokernel in Equation~(\ref{eqn: obs-class}) is nonzero. It follows from Corollary~\ref{cor:pic-vec} that for the radius $\delta$ associated to the map $\plC\to N_H$, the exiting edge directions $a_i$ of $\plC\to N\otimes \RR$ at this radius do not span $N'_H$ rationally. The obstruction class described in the proof above therefore lies in the cokernel \[ N_H/\mathrm{Span}(a_i)\otimes H^1(\overline C,\mathcal O_{\overline C}). \] Given any such obstruction class, we may find a projection by a character $N_H/\mathrm{Span}(a_i)\to \RR$, such that the class remains nonzero, by projecting onto the $1$-dimensional span of the obstruction class. This gives rise to a composition $N\to N_H\to \mathbf R$, and an associated map on tori $\GG_{\log}^n\to \GG_{\log}$. The induced map on logarithmic tangent bundle is given by extending scalars from the projection $N\to \RR$. The obstruction to lifting the map to $\GG_{\log}$ is the image of the obstruction to lifting the map to $\GG_{\log}^n$, under the projection \[ N_H\otimes H^1(\overline C,\mathcal O_{\overline C})\to \RR \otimes H^1(\overline C,\mathcal O_{\overline C}). \] The resulting radial map $C\to \GG_{\log}$ therefore does not factorize, as the obstruction class is nonzero by construction. \end{proof}
\section{Realizability for genus one tropical curves}\label{sec: tropical-moduli}
In this section, we use the geometry of the moduli spaces $\mathcal W(Z)$ constructed in Section~\ref{sec:log-maps-to-toric} to resolve the tropical realizability problem in genus $1$. The results of this section give a precise description of the boundary complex of $\mathcal W(Z)$. As a consequence of the smoothness and properness of $\mathcal W(Z)$, tropical realizability reduces to a \emph{pointwise} calculation: we examine the unique non-topological condition characterizing the descent of a function from the normalization of a genus~$1$ singularity, and interpret it tropically as the realizability condition.
\subsection{Moduli of tropical maps} Fix a pair of dual lattices $N$ and $N^\vee$ of rank $r$ and a complete fan $\Sigma$ in the vector space $N_\RR$.
\begin{definition} The \textbf{combinatorial type} of a tropical stable map $[\plC\xrightarrow{f} \Sigma]$ consists of \begin{enumerate} \item The finite graph model $G$ underlying $\plC$. \item For each vertex $v\in G$, the cone $\sigma_v\in \Sigma$ containing the image of $v$. \item For each edge $e$, the slope $w_e$ and the primitive vector $u_e$ of $f(e)$. \end{enumerate} \end{definition}
For tropical maps, the discrete data can be captured by the ``least generic'' map, defined below.
\begin{definition} The \textbf{recession type} of a combinatorial type $\Theta$ is obtained from $[\plC\to \Sigma]$ by collapsing all bounded edges of $\plC$ to a single vertex, retaining the contact orders on the unbounded edges. \end{definition}
As explained in~\cite[Section 2]{R16}, once one fixes the recession type, there are finitely many combinatorial types of tropical stable maps with this recession type. This boundedness of combinatorial types is the essential content of~\cite[Section~3.1]{GS13}.
Given a type $\Theta$, there is a polyhedral cone $\sigma_\Theta$, whose relative interior parameterizes tropical stable maps with a fixed combinatorial type. This cone serves as a deformation space for maps of type $\Theta$. In~\cite[Section 2.2]{R16}, a generalized cone complex $T_\Gamma(\Sigma)$ is constructed, by taking a colimit of the cones above over a natural gluing operation. This is a coarse moduli space for maps of fixed recession type.
\begin{remark}{\it (A moduli stack of tropical maps).} It is possible to promote this construction to a fine moduli stack of tropical maps. By replacing the real edge lengths in $\plC$ with monoid-valued edge lengths, one obtains an appropriate notion of a family of tropical stable maps over a cone $\sigma$. With this notion of family, the framework in \cite{CCUW} produces a cone stack $\mathcal{T}_\Gamma(\Sigma)$, with well-defined evaluation morphisms. The addition of a marked point with trivial contact order functions as a universal curve in this context. We avoid further discussion of this for two reasons. First, we will not need the stacks directly in this work, and can make do with the less conceptually natural, but more concrete generalized cone complex. Second, and more importantly, the precise relationship between the analytification of the moduli space of maps --- which coincides with the analytification of the coarse moduli space --- and the cone stack $\mathcal{T}_\Gamma(\Sigma)$ remains unclear at present. \end{remark}
\subsection{Traditional tropicalization {\it \&} realizability} The tropicalization procedure discussed in the early parts of the paper uses the logarithmic structure, and differs from the one involving non-archimedean geometry. Accounting for the difference is the \textbf{tropical realizability problem}, which is the focus of this final section.
Let $K$ be a non-archimedean field extending $\CC$, where the latter is equipped with the trivial valuation. Let $Y$ be a $K$-scheme or stack, locally of finite type. The \textbf{Berkovich analytification} $Y^{\mathrm{an}}$ is a locally compact, Hausdorff topological space whose points are naturally identified with equivalence classes of pairs $(L,y)$ where $L$ is a valued field extension of $K$ and $y$ is an $L$-valued point of $Y$. The equivalence is the one generated by identifying two such pairs $(L,y)\sim (L',y')$ whenever there is an embedding of valued extensions $L\hookrightarrow L'$ sending $y$ to $y'$. See~\cite{Ber90,U14b,Yu14a} for Berkovich spaces and stacks and~\cite{ACMUW} for an introduction to analytic spaces in the context of logarithmic geometry.
Given a torus $\mathbf G_m^r = \spec(K[N^\vee])$, the \textbf{tropicalization} map is the continuous map \[ \mathrm{trop}: \mathbf G_{m,\mathrm{an}}^r\to N\otimes \RR, \] that associates to an $L$-valued point of $\mathbf G_m^r$ its coordinatewise valuation. The tropicalization of a subvariety is defined by restriction.
Let $C\to \mathbf G_m^r$ be a map to a torus from a smooth curve of genus $g$. There is a natural factorization of topological spaces \[ \begin{tikzcd} C^{\mathrm{an}} \arrow{d}\arrow[end anchor={[xshift=4.3em]west}]{r} & \hskip4.3em \mathbf G_{m,\mathrm{an}}^r \arrow[d,shift left=2.5em] \\ \plC \arrow{r} & \mathrm{trop}(C^{\mathrm{an}})\subset \RR^r, \end{tikzcd} \] The left vertical map is a deformation retraction onto a \textbf{skeleton}; see~\cite{BPR16} for details. There are at least two natural ways to extract the tropical curve $\plC$ from $[C\to \mathbf G_m^r]$.
\subsubsection{Abstract stable reduction}
After choosing coordinates on the target, the map $[\varphi: C\to \mathbf G_m^r]$ is given by $r$ invertible functions on $C$. Let $\widehat C$ be the smooth projective model for $C$, and $q_1,\ldots, q_n$ the points at which these invertible functions acquire zeros or poles. If the map $\varphi$ is nonconstant, the pair $(\widehat C,q_1,\ldots, q_n)$ has negative Euler characteristic and thus admits a minimal model $\mathscr C\to \spec(R)$ over the valuation ring of $K$. Take the underlying graph of $\plC$ to be the dual graph of the special fiber of $\mathscr C$. Given an edge $e$ of $\plC$, the corresponding node $q_e$ of $\mathscr C$ has a local equation \[ xy = \omega, \ \ \omega\in R. \] Set the length $\ell(e)$ equal to the valuation of the parameter $\omega$.
\subsubsection{Universal property of minimality} Let $\widehat C\supset C$ be the projective model of $C$ with boundary $\partial \widehat C = \{q_1,\ldots, q_n\}$ and choose a toric compactification $Z$ of $\mathbf G_m^r$ such that the morphism \[ (\widehat C,\partial \widehat C) \to (Z,\partial Z) \] is a logarithmic map. Letting $\mathscr L(Z)$ be the space of logarithmic stable maps to $Z$, this gives rise to a moduli map $\spec(K)\to \mathscr L(Z)$, which, after a base change, extends to a map \[ \spec(R)\to \mathscr L(Z), \] from the valuation ring. Let $k$ denote the residue field and $\Gamma$ the value group. Consider the logarithmic map \[ \spec(\Gamma\to k)\to \mathscr L(Z), \] from the closed point, endowed with the (not necessarily coherent) logarithmic structure from the value group. By the universal property of minimality, this induces a factorization \[ \spec(\Gamma\to k)\to \spec(P^{\mathrm{min}}\to k), \] where $P^{\mathrm{min}}$ is the stalk of the minimal monoid of $\mathscr L(Z)$ at the image of the closed point. We obtain a point of the dual cone $\operatorname{Hom}(P^{\mathrm{min}},\Gamma)$, which, as was previously discussed, is identified with a point in the cone of tropical maps of a fixed combinatorial type. See~\cite[Section 2]{R16} for details.
\subsection{Expected dimension {\it \&} superabundance} Every tropical stable map $[f]$ of combinatorial type $\Theta$ has a deformation space, the moduli cone $\sigma_\Theta$. Superabundance is the phenomenon wherein this deformation space is larger than expected.
The \textit{overvalence} of a type $\Theta$ with underlying graph $G$ is defined as \[ \mathrm{ov}(\Theta) = \sum_{p\in G:\val(p)\geq 4} \left(\val(p)-3\right). \] The overvalence allows us to determine an expected topological dimension of the tropical deformation space as: \[ \operatorname{exp.dim} \sigma_\Theta = (\dim(\Sigma)-3)(1-b_1(G))+n-\mathrm{ov}(\Theta), \] where $b_1(G)$ is the first Betti number of $G$. The actual dimension of $\sigma_\Theta$ cannot be less than the expected dimension, but may exceed it. For further details, see~\cite{Mi03,NS06,R16}.
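To illustrate the formula in the simplest case: for a genus~$1$ type $\Theta$ whose underlying graph $G$ is trivalent, we have $b_1(G) = 1$ and $\mathrm{ov}(\Theta) = 0$, so \[ \operatorname{exp.dim} \sigma_\Theta = (\dim(\Sigma)-3)\cdot 0 + n - 0 = n, \] independently of the dimension of $\Sigma$.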
\begin{definition} A combinatorial type $\Theta$ is \textbf{superabundant} if the dimension of $\sigma_\Theta$ is strictly larger than the expected dimension. \end{definition}
\setcounter{subsubsection}{\value{theorem}} \subsubsection{Superabundance as tropical obstructedness} The deformation space of a map $[\varphi: C\to \mathbf{P}^r]$ can be larger than expected because deformations can be obstructed. The dimension of the deformation space can be estimated using Riemann--Roch and the tangent-obstruction complex~\cite[Section 24.4]{Hori03}. One examines the restrictions on the complex structure of the curve that are forced by the map. In some cases, such as when $\varphi$ multiply covers its image or contracts a component, there are fewer such restrictions than expected.
The situation in tropical geometry is similar. Given a tropical stable map $[f: \plC\to \Sigma]$ and a cycle of edges in $\plC$, the piecewise linearity of $f$ imposes restrictions on the edge lengths of this cycle. In particular, the edge lengths of a cycle are constrained by the condition that the total displacement around each cycle must vanish. If $\dim \Sigma = r$ the map is expected to impose $r$ conditions on the edge lengths of $\plC$ for each cycle, and the conditions imposed by different cycles are expected to be independent. However, if cycles are mapped to linear subspaces, or contracted altogether, there are fewer than the expected number of restrictions.
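Concretely, and only as a sketch of the constraint just described: if a cycle consists of edges $e_1, \ldots, e_s$, oriented consistently around the cycle, with lengths $\ell_1, \ldots, \ell_s$, slopes $w_1, \ldots, w_s$, and primitive directions $u_1, \ldots, u_s$, then the vanishing of the total displacement reads \[ \sum_{j=1}^{s} \ell_j\, w_j\, u_j = 0 \ \in N_\RR. \] When $\dim \Sigma = r$ this is $r$ linear conditions on the lengths $\ell_j$, but if the vectors $w_ju_j$ span only a proper subspace of $N_\RR$ (for instance, if the cycle is contracted), some of these conditions are vacuous.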
In genus $1$, superabundance can be stated in a simplified form. In the following proposition, and the rest of the section, it will sometimes be convenient to forget the precise fan structure of $\Sigma$ and consider the map of metric spaces $\plC\to N_\RR$.
\setcounter{theorem}{\value{subsubsection}} \begin{proposition} Let $f:\plC\to \Sigma$ be a tropical map from a tropical curve of genus $1$. Then, $f$ is superabundant if and only if the image of the circuit $\plC_0$ is contained in a proper affine subspace of $\Sigma$. Equivalently, $f$ is superabundant if and only if there exists a character $\chi:N_\RR\to \RR$ such that the circuit $\plC_0$ is contracted under the composition \[ \plC\to N_\RR\xrightarrow{\chi} \RR \] \end{proposition}
\begin{proof} The first formulation is well known~\cite{KatLift,R16,Sp07}. For the second, choose a hyperplane containing the circuit and quotient by it. \end{proof}
\subsection{Tropical realizability {\it \&} well-spacedness}\label{sec: well-spacedness}
The tropical realizability problem is as follows.
\begin{question} Given a tropical stable map $f: \plC\to N_\RR$, does there exist a smooth curve $C$ over a non-archimedean field $K$ and a map \[ \varphi : C\to \mathbf G_m^r, \] such that $\varphi^\mathrm{trop} = f$? \end{question}
\noindent Such a tropical map is said to be \textbf{realizable}. Superabundance is intimately related to realizability, as the following result shows. For proofs, see~\cite{CFPU,R16,Sp07}.
\begin{theorem} Let $f: \plC\to \Sigma$ be a tropical stable map of genus $1$ and combinatorial type $\Theta$. If $\plC$ has a vertex $v$ of genus $1$, assume that the local map \[ \mathrm{Star}(v)\to\Sigma \] is realizable. If the combinatorial type $\Theta$ is non-superabundant, then $f$ is realizable. \end{theorem}
When a combinatorial type $\Theta$ is superabundant, there are additional constraints that are required to characterize the realizable locus.
A \textit{flag} of a tropical curve $\plC$ is a vertex $v$ together with a choice of tangent direction along an edge incident to $v$. The vertex $v$ will be referred to as the \textit{base} of the flag. Given a piecewise-linear function $f$ on a tropical curve $\plC$, we may speak of the \textbf{slope} of $f$ along a flag.
\begin{definition}\label{def: well-spacedness} Let $\plC$ be a tropical curve and let $\plC_0$ be its circuit. Given a flag $t\in \plC$, let $d(t,\plC_0)$ be the distance from the circuit to the base of the flag. A tropical stable map \[ F: \plC\to \RR \] of genus $1$ is \textbf{well-spaced} if one of the following two conditions is met: either \begin{enumerate} \item no open neighborhood of the circuit of $\plC$ is contracted, or \item if a neighborhood of the circuit is contracted, let $t_1,\ldots,t_k$ be the flags whose base is mapped to $F(\plC_0)$ but along which $F$ has nonzero slope. Then, the minimum of the distances $\{d(t_i,\plC_0)\}_{i=1}^k$ occurs at least three times. \end{enumerate} \end{definition}
Well-spacedness when the target is a general fan is formulated by considering projections to $\RR$.
\begin{definition} A tropical stable map $\plC\to \Sigma$ of genus $1$ is \textbf{well-spaced} if for each character \[ \chi: N_\RR\to \RR, \] the induced map $\plC\to \RR$ is well-spaced. \end{definition}
\begin{figure}
\caption{A superabundant tropical stable map to a Hirzebruch surface. The circuit is depicted to be flattened, indicating that its image is a line segment. Projection onto the vertical axis contracts the circuit. The curve is well-spaced if and only if $\ell_1 = \ell_2$.}
\end{figure}
\begin{warning}\label{warning: diff-defs} The condition we call well-spacedness is a strictly weaker condition than the one given originally by Speyer. In particular, the definition allows that the set of flags with nonzero $F$-slope $\{t_i\}$ can all be based at the same vertex. In Speyer's definition, there must be distinct vertices achieving this minimum. It has already been shown that Speyer's condition is not a necessary condition in the nontrivalent case~\cite[Theorem C]{R16}. The two definitions coincide when working with trivalent tropical curves whose vertex function is identically zero. To see this, observe that by the balancing condition, if a vertex supports one flag of nonzero $F$-slope, then it supports at least two such flags. Thus, if two distinct vertices support flags with nonzero $F$-slope, then there are at least $4$ such flags. We will refer to this stronger condition as \textbf{Speyer's condition}; see Figure~\ref{fig: well-spaced-not-speyer}. \end{warning}
\begin{remark} We have chosen to state well-spacedness in terms of projections to $1$-dimensional vector spaces, as this is closest to the existing versions of the condition present in the literature. A reader who wishes to see the parallelism with Section~\ref{sec:factorization} could instead impose an appropriate condition on the quotient by any real subspace of $N_\RR$. \end{remark}
\begin{remark} We make note of a consequence of this condition that is often useful in calculations. Let $\plC\to N_\RR$ be a tropical map from a genus $1$ curve. Let $L\subset N_\RR$ be the real span of the edge directions of the circuit of $\plC$. Let $\delta$ be the minimal radius around the circuit such that the edge directions inside the circle of radius $\delta$ span a subspace $L'$ strictly containing $L$. Let $m$ be the difference in dimensions of $L$ and $L'$. If the tropical map is well-spaced, then at the circle of radius $\delta$, the curve $\plC$ exits the circle along at least $m+2$ flags. \end{remark}
This brings us to the main result of this section.
\begin{theorem}[Realizability of genus $1$ tropical curves]\label{thm: realizability} Let $[\plC\to \Sigma]$ be a tropical stable map of genus $1$, and assume there is a minimal logarithmic map $[C\to Z]$ whose combinatorial type is that of $[\plC\to \Sigma]$. Then $[\plC\to \Sigma]$ is realizable if and only if it is well-spaced. \end{theorem}
The proof will be completed in Section~\ref{sec: realizability-proof} after we establish some preliminaries in Section~\ref{sec:moduli-well-spaced}.
\begin{figure}
\caption{A tropical stable map that is well-spaced, but fails Speyer's condition. This map is well-spaced provided $\ell_1\leq\ell_2$, as there are three flags with nonzero slope based at the point of minimum distance to the circuit. Speyer's condition forces the equality $\ell_1 = \ell_2$.}
\label{fig: well-spaced-not-speyer}
\end{figure}
\subsection{Moduli of well-spaced tropical stable maps} \label{sec:moduli-well-spaced} Let $T_\Gamma(\Sigma)$ be the moduli space of genus $1$ tropical stable maps with a fixed recession type $[\Gamma\to \Sigma]$. We abuse notation by understanding that the map to $\Sigma$ is part of the notation $\Gamma$. The well-spacedness condition is invariant under automorphisms of tropical curves, and thus descends to a well-defined subset $W_\Gamma(\Sigma)$ of well-spaced tropical stable maps. We specify a subdivision of $T_\Gamma(\Sigma)$ such that $W_\Gamma(\Sigma)$ becomes an equidimensional subcomplex of the expected dimension.
\begin{definition} A \textbf{radially aligned} combinatorial type for a genus $1$ tropical stable map is a combinatorial type $\Theta$ for a tropical stable map, together with a choice of a reflexive and transitive binary relation $\preccurlyeq$ on the vertices such that, if $P$ is a path from the circuit of $G$ to a vertex $v$ that passes through a vertex $v'$, then we have the relation \[ v'\preccurlyeq v. \] Such a \textbf{radial combinatorial type} will be denoted $(\Theta,\preccurlyeq)$. \end{definition}
\setcounter{subsubsection}{\value{theorem}} \subsubsection{Constructing the tropical moduli space} Let $\widetilde W_\Gamma(\Sigma)$ denote the coarse moduli space of radially aligned tropical stable maps with recession type $\Gamma$. Given a radial combinatorial type $(\Theta,\preccurlyeq)$, tropical maps of this type are parametrized by a face of a subdivision of the moduli cone $\sigma_\Theta$. There is a specialization relation among ordered combinatorial types: a cone $\sigma_{(\Theta',\preccurlyeq')}$ is a face of a cone $\sigma_{(\Theta,\preccurlyeq)}$ if and only if the following conditions hold. \begin{enumerate} \item Let $G$ and $G'$ be the underlying graphs of $\Theta$ and $\Theta'$ respectively. Then, $G'$ is obtained from $G$ by a (possibly trivial) sequence of edge contractions $\alpha: G\to G'$. \item The edge contraction $G\to G'$ is order preserving: if $v\preccurlyeq w$ then $\alpha(v)\preccurlyeq \alpha(w)$. \item If $v\in G$ is a vertex with $\alpha(v) = v'\in G'$, then the cone $\sigma_{v'}$ is a face of $\sigma_v$. \end{enumerate}
Let $W_\Gamma(\Sigma)$ be the subcomplex of $\widetilde W_\Gamma(\Sigma)$ parameterizing well-spaced radial tropical maps.
\setcounter{theorem}{\value{subsubsection}} \begin{lemma} The locus $W_\Gamma(\Sigma)$ is a subcomplex of $\widetilde W_\Gamma(\Sigma)$, and thus, is itself a generalized cone complex. \end{lemma}
\begin{proof} The well-spacedness condition can be described in terms of equalities among the distances from the circuit to the vertices at minimum distance, and the locus where these equalities hold is a union of cones of the generalized cone complex. The result follows immediately from this observation. \end{proof}
\begin{remark} A close relative of the space $W_\Gamma(\Sigma)$ appears in the thesis of Carolin Torchiani, namely the dense open set of $W_\Gamma(\Sigma)$ parametrizing curves with identically zero genus function. In particular, it is proved that this subcomplex is pure-dimensional of the expected dimension~\cite[Theorem 3.2.10]{Torch-thesis}. It follows from this that $W_\Gamma(\Sigma)$ is also pure-dimensional. It would be interesting to examine the fine structure of $W_\Gamma(\Sigma)$ further. What can one say, for instance, about its homotopy type and connectivity properties? \end{remark}
\subsection{Proof of Theorem~\ref{thm: realizability}}\label{sec: realizability-proof} We know from Section~\ref{sec: log-smooth} that the moduli space of well-spaced logarithmic stable maps $\mathcal W_\Gamma(Z)$ is proper and smooth. By definition, it is the locus of stable maps in $\mathfrak W_\Gamma(Z)$ that satisfy the factorization property for every subtorus of the dense torus of $Z$. Our task is to show that the logarithmic well-spacedness condition is equivalent to the tropical well-spacedness condition. By Proposition~\ref{prop: reduction-of-dimension}, the logarithmic well-spacedness condition is the conjunction of the factorization properties for all $1$-dimensional quotients of $Z$. Since the tropical well-spacedness condition was formulated in terms of $1$-dimensional quotients, it suffices to check the equivalence for every subtorus $H$ in $Z$ of codimension $1$. Replacing $Z$ with a modification and passing to the quotient, our obligation reduces to checking that \emph{a tropical map $\plC \to \RR$, in which all vertices of $\plC$ have genus~$0$, is well-spaced if and only if it is the tropicalization of a radial map $C \to \mathbf{P}^1$ satisfying the factorization property}.
Let $C$ be a logarithmic curve with tropicalization $\plC$. The map $[\plC \to \RR]$ induces a destabilization $\upsilon : \widetilde C \to C$ and a contraction $\tau : \widetilde C \to \overnorm C$. The map itself can be regarded as a section $\overnorm\alpha$ of $\overnorm M_C^{\rm gp}$. This pulls back to $\overnorm M_{\widetilde C}^{\rm gp}$ and then descends to $\overnorm M_{\overnorm C}^{\rm gp}$, since it is constant on the components collapsed by $\tau$. Adding a constant to $\overnorm\alpha$ does not change whether it is well-spaced in either the logarithmic or the tropical sense, so we assume that $\overnorm\alpha$ takes the value $0$ on the circuit component of $\overnorm C$.
We must show that $\overnorm\alpha$ lifts to a section $\alpha$ of $M_{\overnorm C}^{\rm gp}$ if and only if $\plC \to \RR$ is well-spaced. Indeed, if $\alpha$ is a section of $M_{\overnorm C}^{\rm gp}$ then $\upsilon_\star \tau^\star \alpha$ is a section of $\upsilon_\star M_{\widetilde C}^{\rm gp} = M_C^{\rm gp}$ by~\cite[Appendix~B]{AMW12}, and gives a map $C \to \mathbf{P}^1$ with the factorization property.
Let $E$ denote the circuit component of $\overnorm C$ and $E^\circ$ its interior, excluding the nodes where $E$ is joined to the rest of $\overnorm C$ (in other words, the locus in $E$ where the logarithmic structure is pulled back from the base). Since $\overnorm\alpha(E) = 0$, the lift $\alpha \big|_{E^\circ}$, if it exists, will be in $\mathcal O_{E^\circ}^\star \subset M_{E^{\circ}}$. Regarded as a rational function on $E$, this lift must have zeroes and poles along the points of attachment between $E$ and the rest of $\overnorm C$ as specified by the outgoing slopes of $\overnorm\alpha$ along the corresponding edges (see Section~\ref{sec:tropicalization}). Once $\alpha \big|_{E^\circ}$ has been found, there is no obstruction to extending it to all of $\overnorm C$, since the rest of the curve is a forest of rational curves and $\overnorm\alpha$ is balanced. The following lemma determines whether $\alpha \big|_{E^\circ}$ can be found, and completes the proof of the theorem.
\begin{lemma} Let $E$ be a Gorenstein, genus~$1$ curve with no nodes and $m$ branches, let $a_1, \ldots, a_n$ be nonzero integers, and let $P$ be a partition of $1, \ldots, n$ into $m$ parts. Assume that, for each $p \in P$, we have $\sum_{i \in p} a_i = 0$. If $n \geq 3$ then there is a configuration of distinct points $x_1, \ldots, x_n$ on $E$, with each point lying in the component corresponding to its part of the partition, such that $\mathcal O_E(\sum a_i x_i)$ is trivial. If $n = 2$ then there is no such configuration. \end{lemma} \begin{proof} Let $\nu : F \to E$ be the seminormalization and let $\omega_E$ be the dualizing sheaf. For any configuration of the $x_i$, subject to the degree constraint in the statement, there is a rational function $f$ on $F$ with divisor $\sum a_i x_i$, and $f$ is unique up to scaling. We wish to determine whether $f$ descends to~$E$.
Let $y \in F$ be the preimage of the singular point of $E$ and let $\phi$ be a nonzero global differential on $E$. Let $F_j$ be the components of $F$ and let $\nu_j : F_j \to E$ be the restrictions of $\nu$ and let $f_j$ be the restriction of $f$ to $F_j$. Let $t_j$ be a local parameter for $F_j$ at $y$ and let $b_j$ be the linear term of the expansion of $f_j$ in terms of $t_j$. It was shown in Section~\ref{sec: genus-1-singularities} that there are nonzero constants $c_j$ such that $f$ descends to $E$ if and only if \begin{equation} \label{eqn:descent-obstruction} \sum_j c_j b_j = 0 . \end{equation} We argue that under the hypothesis of the Lemma it is possible to configure the $x_i$ on each component $F_j$ to make $b_j$ take any value we like. Indeed, if we decide $f(y)$ should be $1$ then $f_j$ has the formula \begin{equation*} f_j = \prod_i (1 - x_i^{-1} t_j)^{a_i} \end{equation*} with the product taken over those $i$ such that $x_i$ lies on $F_j$. The linear part is \begin{equation*} b_j = - \sum a_i x_i^{-1} . \end{equation*} By adjusting the positions of the $x_i$, we can arrange for $b_j$ to have any nonzero value we like. If $p_j$ consists of at least $3$ points $x_i$ then it is possible to achieve any value for $b_j$, including $0$, but if $p_j$ consists of only two points, $x_{i}$ and $x_{i'}$ then $a_{i'} = -a_{i}$ and it is impossible for $b_j$ to take the value $0$.
Thus we can solve~\eqref{eqn:descent-obstruction} provided either that there are at least two branches at $y$ or there is one branch containing at least $3$ of the $x_i$. The one remaining case is where there is one branch containing $2$ points. In that case, $c_1 \neq 0$ and the remarks in the last paragraph show there is no solution to~\eqref{eqn:descent-obstruction}.
\end{proof}
The above result determines the dual complex of the space $\mathcal W_\Gamma(Z)$, and we obtain the following as a consequence of general structural results about tropicalizations of logarithmic schemes. Let $\mathcal W^\circ_\Gamma(Z)$ denote the locus of maps with trivial logarithmic structure.
\begin{theorem} \label{thm:nonarch} There is a continuous tropicalization map \[ \mathrm{trop}: \mathcal W^{\circ,\mathrm{an}}_{\Gamma}(Z) \to W_\Gamma(\Sigma), \] functorial with respect to evaluation morphisms and forgetful morphisms to the moduli space of curves. Set theoretically, this map sends a family of logarithmic stable maps to its tropicalization. There is a factorization \[ \begin{tikzcd} \mathcal W^{\circ,\mathrm{an}}_{\Gamma}(Z) \arrow{rr}{\mathrm{trop}} \arrow[swap]{dr}{\bm p} & & W_\Gamma(\Sigma) \\ & \mathrm{P}_\Gamma(\Sigma) \arrow[swap]{ur}{\mathrm{trop}_{\mathfrak{S}}}, & \end{tikzcd} \] where the map $\bm p$ is a deformation retraction onto a generalized cone complex, and admits a canonical continuous section. The map $\mathrm{trop}_{\mathfrak{S}}$ is finite and is an isomorphism of cones upon restriction to any face. \end{theorem}
\begin{proof} With the identification of the tropical maps that arise as tropicalizations of one-parameter families, the proof of the result is a cosmetic variation on similar results in the literature~\cite{CMR14a,R15b,R16}. By Theorem~\ref{thm: realizability}, the tropicalization of any family of logarithmic stable maps over a valuation ring is well-spaced. Once this is established, the continuity, functoriality, and finiteness of $\mathrm{trop}_{\mathfrak S}$ follow from~\cite[Theorem 2.6.2]{R16} and the uniqueness of minimal morphisms of logarithmic schemes up to saturation~\cite{Wis16b}. The saturation index of a combinatorial type $(\Theta,\preccurlyeq)$ is equal to the cardinality of the fibers of $\mathrm{trop}_{\mathfrak S}$, as explained in~\cite{R15b,R16}. Since $\mathcal W_\Gamma(Z)$ is a toroidal compactification, the existence of a section from the skeleton follows from results of Thuillier~\cite{ACP,Thu07}. Compatibility with forgetful and evaluation morphisms follows from~\cite[Theorem 1.1]{U13}. \end{proof}
\end{document}
\begin{document}
\title[Centrality of the congruence kernel]{Centrality of the congruence kernel for elementary subgroups of Chevalley groups of rank $> 1$ over noetherian rings}
\begin{abstract} Let $G$ be a universal Chevalley-Demazure group scheme associated to a reduced irreducible root system of rank $>1.$ For a commutative ring $R$, we let $\Gamma = E(R)$ denote the elementary subgroup of the group of $R$-points $G(R).$ The congruence kernel $C(\Gamma)$ is then defined to be the kernel of the natural homomorphism $\widehat{\Gamma} \to \overline{\Gamma},$ where $\widehat{\Gamma}$ is the profinite completion of $\Gamma$ and $\overline{\Gamma}$ is the congruence completion defined by ideals of finite index. The purpose of this note is to show that for an arbitrary noetherian ring $R$ (with some minor restrictions if $G$ is of type $C_n$ or $G_2$), the congruence kernel $C(\Gamma)$ is central in $\widehat{\Gamma}.$ \end{abstract}
\author[A.S.~Rapinchuk]{Andrei S. Rapinchuk}
\address{Department of Mathematics, University of Virginia, Charlottesville, VA 22904}
\email{[email protected]}
\author[I.A.~Rapinchuk]{Igor A. Rapinchuk}
\address{Department of Mathematics, Yale University, New Haven, CT 06502}
\email{[email protected]}
\maketitle
\section{Introduction}\label{S:I}
Let $G$ be a universal Chevalley-Demazure group scheme associated to a reduced irreducible root system $\Phi$ of rank $> 1$. Given a commutative ring $R$, we let $G(R)$ denote the group of $R$-points of $G$, and let $E(R) \subset G(R)$ be the corresponding elementary subgroup. (We recall that $E(R)$ is defined as the subgroup generated by the images $e_{\alpha} (R) =: U_{\alpha} (R)$ for all $\alpha \in \Phi$, where $e_{\alpha} \colon \mathbb{G}_a \to G$ is the canonical 1-parameter subgroup corresponding to a root $\alpha \in \Phi$ --- see \cite{Bo1} for details.) The goal of this note is to make a contribution to the analysis of the congruence subgroup problem for $E(R)$ over a general commutative noetherian ring $R$ (with some minor restrictions if $\Phi$ is of type $C_n$ $(n \geq 2)$ or $G_2$).
While the congruence subgroup problem for $S$-arithmetic groups is a well-established subject (see \cite{PR} for a recent survey), its analysis over general rings, at least from the point of view we adopt in this note, has been rather limited, despite a large number of results dealing with arbitrary normal subgroups of Chevalley groups over commutative rings. For this reason, we begin with a careful description of our set-up. Let $R$ be a commutative ring and $n \geq 1.$ Then to every ideal $\mathfrak{a} \subset R$, one associates the congruence subgroup $GL_n (R, \mathfrak{a}) = \ker (GL_n (R) \to GL_n (R/ \mathfrak{a}))$, where the map is the one induced by the canonical homomorphism $R \to R/ \mathfrak{a}$. Clearly, if $\mathfrak{a}$ is of {\it finite index} (i.e. the quotient $R/ \mathfrak{a}$ is a finite ring), then $GL_n (R, \mathfrak{a})$ is a normal subgroup of $GL_n (R)$ of {\it finite index}. Given a subgroup $\Gamma \subset GL_n (R),$ we set $\Gamma (\mathfrak{a}) = \Gamma \cap GL_n (R, \mathfrak{a}).$ Then, by the congruence subgroup problem for $\Gamma$, we understand the following question: \vskip3mm
(CSP) \hskip2mm \parbox{14.9cm}{Does every normal subgroup $\Delta \subset \Gamma$ of {\it finite index} contain the congruence subgroup $\Gamma (\mathfrak{a})$ for some ideal $\mathfrak{a} \subset R$ of {\it finite index}?}
\vskip3mm
\noindent The affirmative answer would give us information about the profinite completion $\widehat{\Gamma}$, which is precisely what is needed for the analysis of representations of $\Gamma$, as well as other issues (cf. \cite{BMS}, \cite{KN}, \cite{Sh}). However, even when $\Gamma$ is $S$-arithmetic, the answer to (CSP) is often negative. So one is instead interested in the computation of the congruence kernel, which measures the deviation from a~positive solution. For this, just as in the arithmetic case, we introduce two topologies on $\Gamma$: the profinite topology $\tau_p^{\Gamma}$ and the congruence topology $\tau_c^{\Gamma}.$ The fundamental system of neighborhoods of the identity for the former consists of all normal subgroups $N \subset \Gamma$ of finite index, and for the latter of the congruence subgroups $\Gamma (\mathfrak{a}),$ where $\mathfrak{a}$ runs through all ideals of $R$ of finite index. The corresponding completions are then given by $$ \widehat{\Gamma} = \lim_{\longleftarrow} \Gamma / N, \ \ \ \text{where} \ N \lhd \Gamma \ \text{and} \ [\Gamma :N ] < \infty $$ and $$ \overline{\Gamma} = \lim_{\longleftarrow} \Gamma / \Gamma (\mathfrak{a}), \ \ \ \text{where} \ \vert R/ \mathfrak{a} \vert < \infty. $$ As $\tau_p^{\Gamma}$ is stronger than $\tau_c^{\Gamma}$, there exists a continuous surjective homomorphism $\pi^{\Gamma} \colon \widehat{\Gamma} \to \overline{\Gamma},$ whose kernel is called the {\it congruence kernel} and denoted $C(\Gamma).$ Clearly, $C(\Gamma)$ is trivial if and only if the answer to (CSP) is affirmative; in general, its size measures the extent of deviation from the affirmative answer. Unfortunately, as remarked above, in many situations, $C(\Gamma)$ is nontrivial, and the focus of this note is on a different property, viz. the {\it centrality} of $C(\Gamma)$ (which means that $C(\Gamma)$ is contained in the center of $\widehat{\Gamma}$). We note that in some cases, centrality is almost as good as triviality (cf. \cite{KN}, \cite{Sh}), and in arithmetic cases actually implies the finiteness of $C (\Gamma).$
Returning to Chevalley groups, we observe that congruence subgroups $G(R, \mathfrak{a}) \subset G(R)$ can be defined either as pullbacks of the congruence subgroups $GL_n (R, \mathfrak{a})$ under a faithful representation of group schemes $G \hookrightarrow GL_n$ over ${\mathbb Z}$, or, intrinsically, as the kernel of the natural homomorphism $G(R) \to G(R/\mathfrak{a}).$
Our main result concerns the congruence kernel of the elementary group $\Gamma = E(R).$ We note that the congruence topology on $\Gamma$ is induced by that on $G(R)$, i.e. is defined by the intersections $\Gamma \cap G(R, \mathfrak{a})$, where $\mathfrak{a}$ runs over all ideals $\mathfrak{a} \subset R$ of finite index. On the other hand, the profinite topology on $\Gamma$ may {\it a priori} be different from the topology induced by the profinite topology of $G(R)$ (cf. the remarks at the end of \S 4).
\vskip2mm
\noindent {\bf Main Theorem.} {\it Let $G$ be a universal Chevalley-Demazure group scheme corresponding to a reduced irreducible root system $\Phi$ of rank $>1.$ Furthermore, let $R$ be a noetherian commutative ring such that $2 \in R^{\times}$ if $\Phi$ is of type $C_n$ ($n \geq 2$) or $G_2$, and let $\Gamma = E(R)$ be the corresponding elementary subgroup. Then the congruence kernel $C(\Gamma)$ is central.}
\vskip2mm
The centrality of the congruence kernel for $SL_n$ ($n \geq 3$) and $Sp_{2n}$ ($n \geq 2$) over rings of algebraic integers was proved by Bass, Milnor, and Serre \cite{BMS}. Their result was generalized to arbitrary Chevalley groups of rank $> 1$ over rings of algebraic integers by Matsumoto \cite{M1}. The only known result for general rings is due to Kassabov and Nikolov \cite{KN}, where centrality was established for $SL_n ({\mathbb Z} [x_1, \dots, x_k])$, with $n \geq 3$, and hence for the elementary group $E_n (R)$ over any finitely generated ring $R$, using $K$-theoretic methods. Although our proof shares some elements with the argument in \cite{KN}, it is purely group-theoretic and is inspired by the proof of centrality for $SL_n$ ($n \geq 3$) over arithmetic rings given in \cite{AR}; in addition, we do not use any results of Matsumoto \cite{M1}.
\vskip2mm
\noindent {\bf Conventions and notations.} All of our rings will be assumed to be commutative and unital. Unless explicitly stated otherwise, $G$ will always denote a universal Chevalley-Demazure group scheme corresponding to a reduced irreducible root system $\Phi$ of rank $> 1$. Furthermore, if $R$ is a commutative ring, then for a subgroup $\Gamma \subset G(R)$, we let $\widehat{\Gamma}$ and $\overline{\Gamma}$ denote the profinite and congruence completions of $\Gamma$, respectively.
\section{Structure of $\overline{G(R)}$}
Let $\mathcal{I}$ be the set of all ideals $\mathfrak{a} \subset R$ of finite index, and let $\mathcal{M} \subset \mathcal{I}$ be the subset of maximal ideals. It is not difficult to see (cf. the proof of Proposition \ref{P-2}) that $\overline{G(R)}$ can be identified with the closure of the image of $G(R)$ in $G(\widehat{R})$, where $$ \widehat{R} = {\lim_{\longleftarrow}}_{\mathfrak{a} \in {\mathcal{I}}} R/ \mathfrak{a} $$ is the profinite completion of $R$. The proof of the Main Theorem relies on the fact that $G(\widehat{R})$ has the bounded generation property with respect to the set $\widehat{S} = \{ e_{\alpha} (t) \mid t \in \widehat{R}, \ \alpha \in \Phi \}$ of elementaries, which we will establish at the end of this section (cf. Corollary \ref{C-1}). We begin, however, by describing the structure of $\widehat{R}$ itself. For each $\mathfrak{m} \in {\mathcal{M}}$, we let $$R_{\mathfrak{m}} = \lim_{\longleftarrow} R/ \mathfrak{m}^n$$ denote the ${\mathfrak{m}}$-adic completion of $R$ (cf. \cite{At}, Chapter 10).
\begin{lemma}\label{L-1} Let $R$ be a noetherian ring.
\vskip1mm
\noindent {\rm (1)} \parbox[t]{15cm}{There exists a natural isomorphism of topological rings $$ \widehat{R} = \prod_{{\mathfrak{m}} \in {\mathcal{M}}} R_{{\mathfrak{m}}}. $$}
\vskip1mm
\noindent {\rm (2)} \parbox[t]{15cm}{Each $R_{{\mathfrak{m}}}$ is a complete local ring.} \end{lemma} \begin{proof} (1) Since $R$ is noetherian, for any $\mathfrak{a} \in {\mathcal{I}}$ and any $n \geq 2$, the quotient $\mathfrak{a}^{n-1}/ \mathfrak{a}^n$ is a finitely generated $R/ \mathfrak{a}$-module, hence finite. It follows that $R/ \mathfrak{a}^n$ is finite for any $n \geq 1.$ In particular, for any ${\mathfrak{m}} \in {\mathcal{M}}$ and $n \geq 1,$ there exists a natural continuous surjective projection $$ \rho_{{\mathfrak{m}}, n} \colon \widehat{R} \to R/ {\mathfrak{m}}^n. $$ For a fixed ${\mathfrak{m}}$, the inverse limit of the $\rho_{{\mathfrak{m}}, n}$ over all $n \geq 1$ yields a continuous ring homomorphism $\rho_{{\mathfrak{m}}} \colon \widehat{R} \to R_{{\mathfrak{m}}}.$ Taking the direct product of the $\rho_{{\mathfrak{m}}}$ over all ${\mathfrak{m}} \in {\mathcal{M}},$ we obtain a continuous ring homomorphism $$ \rho \colon \widehat{R} \to \prod_{{\mathfrak{m}} \in {\mathcal{M}}} R_{{\mathfrak{m}}} =: \overline{R}. $$ We claim that $\rho$ is the required isomorphism.
Note that ideals of the form $$ \overline{{\mathfrak{a}}} = {\mathfrak{m}}_1^{\alpha_1} R_{{\mathfrak{m}}_1} \times \cdots \times {\mathfrak{m}}_n^{\alpha_n} R_{{\mathfrak{m}}_n} \times \prod_{{\mathfrak{m}} \neq {\mathfrak{m}}_i} R_{{\mathfrak{m}}}, $$ where $\{ {\mathfrak{m}}_1, \dots, {\mathfrak{m}}_n \} \subset {\mathcal{M}}$ is a finite subset and $\alpha_i \geq 1,$ form a base of neighborhoods of zero in $\overline{R}$, with $$ \overline{R}/ \overline{{\mathfrak{a}}} = R / {\mathfrak{m}}_1^{\alpha_1} \times \cdots \times R/ {\mathfrak{m}}_{n}^{\alpha_n} $$ (cf. \cite{At}, Proposition 10.15). Set ${\mathfrak{a}} = {\mathfrak{m}}_1^{\alpha_1} \cdots {\mathfrak{m}}_n^{\alpha_n}.$ By the Chinese Remainder Theorem, $$ R / {\mathfrak{a}} \simeq R/ {\mathfrak{m}}_1^{\alpha_1} \times \cdots \times R/ {\mathfrak{m}}_n^{\alpha_n}, $$ which implies that the composite map $$ \widehat{R} \to \overline{R} \to \overline{R}/ \overline{{\mathfrak{a}}} $$ is surjective. Since this is true for all $\overline{{\mathfrak{a}}},$ we conclude that the image of $\rho$ is dense. On the other hand, $\widehat{R}$ is compact, so the image is closed, and we obtain that $\rho$ is in fact surjective.
To prove the injectivity of $\rho$, we observe that for any ${\mathfrak{a}} \in {\mathcal{I}}$, the quotient $R/ {\mathfrak{a}}$, being a finite, hence artinian ring, is a product of finite local rings $R_1, \dots, R_r$ (\cite{At}, Theorem 8.7). Furthermore, for each maximal ideal ${\mathfrak{n}}_i \subset R_i$, there exists $\beta_i \geq 1$ such that ${\mathfrak{n}}_i^{\beta_i} = 0$ (cf. \cite{At}, Proposition 8.4). Letting ${\mathfrak{m}}_i$ denote the pullback of ${\mathfrak{n}}_i$ in $R$, we obtain that ${\mathfrak{a}}$ contains $\mathfrak{b} := {\mathfrak{m}}_1^{\beta_1} \cdots {\mathfrak{m}}_r^{\beta_r} \in {\mathcal{I}}.$ It follows that any nonzero $x \in \widehat{R}$ will have a nonzero projection to some $R/ \mathfrak{b} = R/ {\mathfrak{m}}_1^{\beta_1} \times \cdots \times R/ {\mathfrak{m}}_r^{\beta_r}$, and hence to some $R_{{\mathfrak{m}}_i}$, as required.
\vskip1mm
\noindent (2) It is well-known that $R_{{\mathfrak{m}}}$ is both complete and local (cf. \cite{At}, Propositions 10.5 and 10.16). \end{proof}
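\vskip1mm

\noindent {\bf Example.} For orientation, we record the most familiar instance of Lemma \ref{L-1}(1): for $R = \mathbb{Z}$ every nonzero ideal has finite index, the maximal ideals are the ideals $(p)$ with $p$ prime, and the lemma recovers the classical decomposition
$$
\widehat{\mathbb{Z}} \; = \; {\lim_{\longleftarrow}}_{n}\, \mathbb{Z}/ n\mathbb{Z} \; \simeq \; \prod_{p} \mathbb{Z}_p
$$
of the profinite completion of $\mathbb{Z}$ into the product of the rings of $p$-adic integers.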
As a first step towards establishing bounded generation of $G(\widehat{R})$ with respect to the set of elementaries, we prove
\begin{prop}\label{P-1} There exists an integer $N = N(\Phi)$, depending only on the root system $\Phi$, such that for any commutative local ring ${\mathcal{R}}$, any element of $G({\mathcal{R}})$ is a product of $\leq N$ elements of $S = \{ e_{\alpha} (r) \mid r \in {\mathcal{R}}, \ \alpha \in \Phi \}.$ \end{prop} \begin{proof} Fix a system of simple roots $\Pi \subset \Phi,$ and let $\Phi^+$ and $\Phi^-$ be the corresponding sets of positive and negative roots. Let $T \subset G$ be the canonical maximal torus, and $U^+$ and $U^-$ be the canonical unipotent ${\mathbb Z}$-subschemes corresponding to $\Phi^+$ and $\Phi^-.$ It is well-known (see, for example, \cite{Bo1}, Lemma 4.5) that the product map $\mu \colon U^- \times T \times U^+ \to G$ is an isomorphism onto a principal open subscheme $\Omega \subset G$ defined by some $d \in {\mathbb Z}[G].$ We have decompositions $$ U^{\pm} = \prod_{\alpha \in \Phi^{\pm}} U_{\alpha} \ \ \ \text{and} \ \ \ T = \prod_{\alpha \in \Pi} T_{\alpha}, $$ where $T_{\alpha}$ is the maximal diagonal torus in $G_{\alpha} = <U_{\alpha}, U_{-\alpha}> = SL_2.$ So, the identity $$ \left( \begin{array}{cl} a & 0 \\ 0 & a^{-1} \end{array} \right) = \left( \begin{array}{lr} 1 & -1 \\ 0 & 1 \end{array} \right) \left( \begin{array}{cc} 1 & 0 \\ 1-a & 1 \end{array} \right) \left( \begin{array}{ll} 1 & a^{-1} \\ 0 & 1 \end{array} \right) \left( \begin{array}{cc} 1 & 0 \\ a(a-1) & 1 \end{array} \right) $$ shows that there exists $N_1 = N_1 (\Phi)$ such that any element of $\Omega (R)$ is a product of $\leq N_1$ elementaries, for {\it any} ring ${\mathcal{R}}.$
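For the reader's convenience, the displayed identity is verified by multiplying out from the left:
$$
\left( \begin{array}{lr} 1 & -1 \\ 0 & 1 \end{array} \right) \left( \begin{array}{cc} 1 & 0 \\ 1-a & 1 \end{array} \right) = \left( \begin{array}{cc} a & -1 \\ 1-a & 1 \end{array} \right), \ \ \ \left( \begin{array}{cc} a & -1 \\ 1-a & 1 \end{array} \right) \left( \begin{array}{ll} 1 & a^{-1} \\ 0 & 1 \end{array} \right) = \left( \begin{array}{cc} a & 0 \\ 1-a & a^{-1} \end{array} \right),
$$
and multiplying the result on the right by the fourth factor restores the lower-left entry to $(1-a) + a^{-1} \cdot a(a-1) = 0$, yielding $\mathrm{diag}(a, a^{-1})$.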
On the other hand, it follows from the existence of the Bruhat decomposition in Chevalley groups over fields that there exists $N_2 = N_2 (\Phi)$ such that any element of $G(k)$ is a product of $\leq N_2$ elementaries, for any field $k.$ We will now show that $N:= N_1 + N_2$ has the required property for any local ring ${\mathcal{R}}.$ Indeed, let ${\mathfrak{m}} \subset {\mathcal{R}}$ be the maximal ideal, and $k = {\mathcal{R}}/ {\mathfrak{m}}$ be the residue field. As $G(k)$ is generated by elementaries, the canonical homomorphism $\omega \colon G({\mathcal{R}}) \to G(k)$ is surjective. Given $g \in G({\mathcal{R}})$, there exists $h \in G({\mathcal{R}})$ that is a product of $\leq N_2$ elementaries and for which we have $\omega (g) = \omega (h).$ Then, for $t = gh^{-1}$, we have $\omega (t) = 1$ (in particular, $\omega (t) \in \Omega (k)$), and therefore $d(t) \not\equiv 0 (\text{mod} \ {\mathfrak{m}})$. Since ${\mathcal{R}}$ is local, this means that $d(t) \in {\mathcal{R}}^{\times}$, and therefore $t \in \Omega ({\mathcal{R}})$. Thus, $t$ is a product of $\leq N_1$ elementaries, and the required fact follows. \end{proof}
Next, we have the following \begin{lemma}\label{L-2} Let ${\mathcal{R}}_i$ ($i \in I$) be a family of commutative rings such that there exists an integer $N$ with the property that for any $i \in I,$ any $x_i \in G({\mathcal{R}}_i)$ is a product of $\leq N$ elementaries. Set ${\mathcal{R}} = \prod_{i \in I} {\mathcal{R}}_i.$ Then any $x \in G({\mathcal{R}})$ is a product of $\leq N \cdot \vert \Phi \vert$ elementaries. \end{lemma} \begin{proof} It is enough to observe that any element of the form $$ (e_{\alpha_i} (r_i)) \in G({\mathcal{R}}) = \prod_{i \in I} G({\mathcal{R}}_i), $$ with $\alpha_i \in \Phi,$ $r_i \in {\mathcal{R}}_i$, can be written as $$ \prod_{\alpha \in \Phi} e_{\alpha} (t_{\alpha}) $$ for some $t_{\alpha} \in {\mathcal{R}}.$ \end{proof} Using this result, together with Lemma \ref{L-1} and Proposition \ref{P-1}, we obtain \begin{cor}\label{C-1} Let $R$ be a noetherian ring. Then there exists an integer $M > 0$ such that any element of $G(\widehat{R})$ is a product of $\leq M$ elementaries from the set $\widehat{S} = \{e_{\alpha} (t) \mid t \in \widehat{R}, \alpha \in \Phi \}.$ \end{cor} As we noted earlier, one can identify the congruence completion $\overline{G(R)}$ with the closure of the image of $G(R)$ in $G(\widehat{R})$. The following proposition gives more precise information.
\begin{prop}\label{P-2} Let $R$ be a noetherian ring. Then $\overline{E(R)} = \overline{G(R)}$ can be naturally identified with $G(\widehat{R})$. Furthermore, there exists an integer $M > 0$ such that any element of $\overline{E(R)} = \overline{G(R)}$ is a product of $\leq M$ elements of the set $\overline{S} := \overline{ \{ e_{\alpha} (r) \mid \alpha \in \Phi, r \in R \} } $ (closure in the congruence topology). \end{prop} \begin{proof} For any $\mathfrak{a} \in {\mathcal{I}}$, there exists a natural injective homomorphism $\omega_{\mathfrak{a}} \colon G(R) / G(R, \mathfrak{a}) \to G(R/ \mathfrak{a}),$ where as before, $G(R, \mathfrak{a})$ is the principal congruence subgroup of level $\mathfrak{a}.$ Taking the inverse limit over all $\mathfrak{a} \in {\mathcal{I}},$ we obtain a continuous injective homomorphism $$ \omega \colon \overline{G(R)} \to G(\widehat{R}). $$ Clearly, the image of $\omega$ coincides with the closure of the image of the natural homomorphism $G(R) \to G(\widehat{R})$. From the definitions, one easily sees that if $\overline{e_{\alpha} (r)}$ is the image of $e_{\alpha} (r)$ ($\alpha \in \Phi, r \in R$) in $\overline{G(R)}$, then $$ \omega (\overline{e_{\alpha} (r)}) = e_{\alpha} (\hat{r}), $$ where $\hat{r}$ is the image of $r$ in $\widehat{R}.$ It follows that $\omega$ maps $\overline{S}$ onto $\widehat{S} = \{ e_{\alpha} (t) \mid \alpha \in \Phi, \ t \in \widehat{R} \}.$ Since by Corollary \ref{C-1}, $\widehat{S}$ generates $G(\widehat{R}),$ we obtain that $\omega (\overline{E(R)}) = G(\widehat{R})$, and consequently $\omega$ identifies $\overline{E(R)} = \overline{G(R)}$ with $G(\widehat{R})$. Furthermore, if $M$ is the same integer as in Corollary \ref{C-1}, then since every element of $G(\widehat{R})$ is a product of $\leq M$ elements of $\widehat{S}$, our second claim follows. \end{proof}
\vskip1mm
\noindent {\bf Remark.} Recall that a group ${\mathcal G}$ is said to have {\it bounded generation} with respect to a generating set $X \subset {\mathcal G}$ if there exists an integer $N > 0$ such that every $g \in {\mathcal G}$ can be written as $g = x_1^{\varepsilon_1} \cdots x_d^{\varepsilon_d}$ with $x_i \in X$, $d \leq N$, and $\varepsilon_i = \pm 1.$ It follows from the Baire category theorem (cf. \cite{Mun}, Theorem 48.2) that if a compact topological group ${\mathcal G}$ is (algebraically) generated by a compact subset $X$, then in fact, ${\mathcal G}$ is automatically {\it boundedly} generated by $X$. Indeed, replacing $X$ by $X \cup X^{-1} \cup \{1 \},$ we may assume that $X = X^{-1}$ and $1 \in X.$ Set $X^{(n)} = X \cdots X$ ($n$-fold product). Then the fact that ${\mathcal G} = < X>$ means that $$ {\mathcal G} = \bigcup_{n \geq 1} X^{(n)}. $$ Since each $X^{(n)}$ is compact, hence closed, we conclude from Baire's theorem that for some $n \geq 1,$ $X^{(n)}$ contains an open set. Then ${\mathcal G}$ can be covered by finitely many translates of $X^{(n)}$, and therefore there exists $M > 0$ such that $X^{(M)} = {\mathcal G}$, as required. This remark shows, in particular, that (algebraic) generation of $\overline{G (R)}$ by $\overline{S}$, or that of $G(\widehat{R})$ by $\widehat{S},$ automatically yields bounded generation.
\vskip2mm
We would like to point out that the fact that $\overline{G(R)} = \overline{E(R)}$ is not used in the proof of the Main Theorem; all we need is that $\overline{E(R)}$ is boundedly generated by $\overline{S}.$ So, we will indicate another way to prove this based on some ideas of Tavgen (cf. \cite{Tav}, Lemma 1), which also gives an explicit bound on the constant $M$ in Proposition \ref{P-2}. First we observe that it is enough to establish the bounded generation of $E(\widehat{R})$ by $\widehat{S} = \{ e_{\alpha} (t) \mid \alpha \in \Phi, \ t \in \widehat{R} \}$ (indeed, this will show that $E(\widehat{R})$ is a continuous image of $\widehat{R}^N$ for some $N > 0$, hence compact, implying that the map $\omega$ from the proof of Proposition \ref{P-2} identifies $\overline{E(R)}$ with $E(\widehat{R})$, and also $\overline{S}$ with $\widehat{S}$). In turn, by the same argument as above, we see that to prove bounded generation of $E(\widehat{R})$, it suffices to show that there exists an integer $N >0$ depending only on $\Phi$ such that for any local ring $R$, any element of $E(R)$ is a product of $\leq N$ elementaries. We will show that in fact \begin{equation}\label{E-BG} E(R) = (U^+ (R) U^- (R))^4, \end{equation} so one can take $N = 4 \cdot \vert \Phi \vert.$ Let us now prove (\ref{E-BG}) by induction on the rank $\ell$ of $\Phi$. If $\ell = 1,$ then $G = SL_2$, and one easily checks that $$G(R) = E(R) = (U^+ (R) U^- (R))^4.$$ Now, we assume that (\ref{E-BG}) is valid for every reduced irreducible root system of rank $\leq \ell-1$, with $\ell \geq 2$, and prove it for a root system $\Phi$ of rank $\ell.$ Set $X = (U^+(R) U^-(R))^4$, and let $\Delta \subset \Phi$ be a system of simple roots. Since the group $E(R)$ is generated by $e_{\pm \beta} (t)$ for $\beta \in \Delta$ and $t \in R$ (cf. the proof of (\ref{E:St-1}) in \S 4), to prove (\ref{E-BG}), it suffices to show that $$ e_{\pm \beta} (t) X \subset X. $$ Pick $\alpha \in \Delta,$ $\alpha \neq \beta$, that corresponds to an extremal node in the Dynkin diagram of $\Phi.$ Let $\Phi_0$ (resp., $\Phi_1$) be the set of roots in $\Phi$ that do not contain (resp., contain) $\alpha$, and let $\Phi_i^{\pm} = \Phi_i \cap \Phi^{\pm}.$ Then $\Phi_0$ is an irreducible root system having $\Delta_0 = \Delta \setminus \{ \alpha \}$ as a system of simple roots; in particular, $\Phi_0$ has rank $\ell - 1.$ If we let $G_0$ denote the corresponding universal Chevalley-Demazure group scheme, then by the induction hypothesis $$ E_0 (R) = (U_0^+ (R) U_0^- (R))^4, $$ with the obvious notations. Let $U_1^{\pm} (R)$ be the subgroup generated by $e_{\alpha} (r)$ for $\alpha \in \Phi_1^+$ (resp., $\alpha \in \Phi_1^-$) and $r \in R.$ Then $U^{\pm} (R) = U_0^{\pm} (R) U_1^{\pm} (R)$, and according to (\cite{Stb}, Lemma 17), $$ U_0^{\pm} (R) U_1^{\mp} (R) = U_1^{\mp} (R) U_0^{\pm} (R). $$ So, $$ X = (U_0^+ (R) U_1^+ (R) U_0^- (R) U_1^- (R))^4 = (U_0^+ (R) U_0^- (R))^4 (U_1^+ (R) U_1^- (R))^4 = E_0 (R) (U_1^+ (R) U_1^- (R))^4. $$ Since $e_{\pm \beta} (t) \in E_0 (R),$ we obtain that $$ e_{\pm \beta} (t) X = e_{\pm \beta} (t) E_0 (R) (U_1^+ (R) U_1^- (R))^4 = X, $$ as required.
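\vskip1mm

\noindent {\bf Remark.} For the rank-one base case invoked above, the equality $G(R) = E(R) = (U^+(R) U^-(R))^4$ over a local ring $R$ amounts to an elementary matrix computation, which we record for convenience. Write $e_+(t) = \left( \begin{array}{cc} 1 & t \\ 0 & 1 \end{array} \right)$ and $e_-(t) = \left( \begin{array}{cc} 1 & 0 \\ t & 1 \end{array} \right)$, and let $g = \left( \begin{array}{cc} a & b \\ c & d \end{array} \right) \in SL_2(R).$ Since $ad - bc = 1$, the entries $a$ and $c$ generate the unit ideal, so at least one of them is a unit because $R$ is local. If $c \in R^{\times},$ then a direct multiplication shows that
$$
g = e_+\!\left( \frac{a-1}{c} \right) e_-(c)\, e_+\!\left( \frac{d-1}{c} \right) \in U^+(R)\, U^-(R)\, U^+(R),
$$
while if $c \notin R^{\times},$ then $a \in R^{\times}$ and $e_-\big(a^{-1}(1-c)\big)\, g$ has lower-left entry $1$, so the previous case applies. In either case $g$ is a product of at most four elementaries and lies in $(U^+(R) U^-(R))^4$ (inserting trivial factors where needed).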
\section{Profinite and congruence topologies coincide on 1-parameter root subgroups}
\begin{prop}\label{P-3} Let $\Phi$ be a reduced irreducible root system of rank $\geq 2$, $G$ be the corresponding universal Chevalley-Demazure group scheme, and $E(R)$ be the elementary subgroup of the group $G(R)$ over a commutative ring $R$. Furthermore, suppose $N \subset E(R)$ is a normal subgroup of finite index. If $\Phi$ is not of type $C_n$ $(n \geq 2)$ or $G_2$, then there exists an ideal ${\mathfrak{a}} \subset R$ of finite index such that \begin{equation}\label{E-Ideal} e_{\alpha} ({\mathfrak{a}}) \subset N \cap U_{\alpha} (R) \end{equation} for all $\alpha \in \Phi$, where $e_{\alpha} ({\mathfrak{a}}) = \{ e_{\alpha} (t) \mid t \in {\mathfrak{a}} \}.$ The same conclusion holds for $\Phi$ of type $C_n$ $(n \geq 2)$ and $G_2$ if $2 \in R^{\times}$. Thus, in these cases, the profinite and the congruence topologies of $E(R)$ induce the same topology on $U_{\alpha} (R)$, for all $\alpha \in \Phi.$ \end{prop} \begin{proof} We begin with two preliminary remarks. First, for any root $\alpha \in \Phi$, $$ {\mathfrak{a}} (\alpha) := \{ t \in R \mid e_{\alpha} (t) \in N \} $$ is obviously a finite index subgroup of the additive group of $R$. What one needs to show is that either ${\mathfrak{a}}(\alpha)$ itself is an ideal of $R$, or that it at least contains an ideal of finite index. Second, if $\alpha_1, \alpha_2 \in \Phi$ are roots of the same length, then by (\cite{H1}, 10.4, Lemma C), there exists an element $\tilde{w}$ of the Weyl group $W(\Phi)$ such that $\alpha_2 = \tilde{w} \cdot \alpha_1$. Consequently, it follows from (\cite{St1}, 3.8, relation (R4)) that we can find $w \in E(R)$ such that $$ w e_{\alpha_1}(t) w^{-1} = e_{\alpha_2} (\varepsilon(w) t) $$ for all $t \in R$, where $\varepsilon(w) \in \{ \pm 1 \}$ is independent of $t.$ Since $N$ is a normal subgroup of $E(R)$, we conclude that \begin{equation}\label{E-Ideal1} {\mathfrak{a}} (\alpha_1) = {\mathfrak{a}} (\alpha_2). \end{equation} Thus, it is enough to find a finite index ideal ${\mathfrak{a}} \subset R$ such that (\ref{E-Ideal}) holds for a {\it single} root of each length.
Let us now prove our claim for $\Phi$ of type $A_2$ using explicit computations with commutator relations. We will use the standard realization of $\Phi$, described in \cite{Bour}, where the roots are of the form $\varepsilon_i - \varepsilon_j$, with $i,j \in \{1, 2, 3 \}, i \neq j.$ To simplify notation, we will write $e_{ij} (t)$ to denote $e_{\alpha} (t)$ for $\alpha = \varepsilon_i - \varepsilon_j.$ Set $\alpha_1 = \varepsilon_1 - \varepsilon_2.$ We will now show that ${\mathfrak{a}} (\alpha_1)$ is an ideal of $R$, and then it will follow from our previous remarks that ${\mathfrak{a}} := {\mathfrak{a}}(\alpha_1)$ is as required. Let $r \in {\mathfrak{a}} (\alpha_1)$ and $s \in R.$ Since $N \lhd E(R)$, the (well-known) relation $$ [e_{12} (r), e_{23} (s)] = e_{13} (rs), $$ where $[g,h] = gh g^{-1} h^{-1}$, shows that $rs \in {\mathfrak{a}} (\alpha_2)$ for $\alpha_2 = \varepsilon_1 - \varepsilon_3.$ But then (\ref{E-Ideal1}) yields $rs \in {\mathfrak{a}}(\alpha_1),$ completing the argument.
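In the standard matrix realization of the elementary subgroups for type $A_2$, where $e_{ij}(t) = 1 + tE_{ij}$ with $E_{ij}$ the matrix units in $SL_3$, the relation used above is checked directly: since $E_{12}E_{23} = E_{13}$, while all other products of the matrices $E_{12}, E_{23}, E_{13}$ vanish, we have
$$
e_{12}(r)\, e_{23}(s)\, e_{12}(-r)\, e_{23}(-s) = \big(1 + rE_{12} + sE_{23} + rsE_{13}\big)\big(1 - rE_{12} - sE_{23} + rsE_{13}\big) = 1 + rsE_{13} = e_{13}(rs).
$$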
Now let $\Phi$ be any root system of rank $\geq 2$ in which all roots have the same length. Then clearly $\Phi$ contains a subsystem $\Phi_0$ of type $A_2$, so our previous considerations show that there exists a~finite index ideal ${\mathfrak{a}} \subset R$ with the property that ${\mathfrak{a}} \subset {\mathfrak{a}}(\alpha)$ for all $\alpha \in \Phi_0.$ But then, by (\ref{E-Ideal1}), the same inclusion holds for all $\alpha \in \Phi.$
Next, we consider the case of $\Phi$ of type $B_n$ with $n \geq 3$. Note that since the system of type $F_4$ contains a subsystem of type $B_3$, this will automatically take care of the case when $\Phi$ is of type $F_4$ as well. We will use the standard realization of $\Phi$ of type $B_n$, where the roots are of the form $\pm \varepsilon_i$, $\pm \varepsilon_i \pm \varepsilon_j$ with $i, j \in \{ 1, \dots, n \}, i \neq j.$ The system $\Phi$ contains a subsystem $\Phi_0$ of type $A_{n-1}$, all of whose roots are long roots in $\Phi.$ Arguing as above, we see that there exists an ideal ${\mathfrak{a}} \subset R$ of finite index such that (\ref{E-Ideal}) holds for all $\alpha \in \Phi_0,$ and hence for all long roots $\alpha \in \Phi.$ To show that the same ideal also works for short roots, we will use the following relation, which is verified by direct computation: \begin{equation}\label{E-3} [e_{\varepsilon_1 + \varepsilon_2} (r), e_{-\varepsilon_2}(s)] = e_{\varepsilon_1} (rs) e_{\varepsilon_1 - \varepsilon_2} (-rs^2) \end{equation} for any $r, s \in R$. Now, if $r \in {\mathfrak{a}},$ then $e_{\varepsilon_1 + \varepsilon_2} (r), e_{\varepsilon_1 - \varepsilon_2} (-r) \in N.$ So, setting $s = 1$ in (\ref{E-3}) and noting that $[e_{\varepsilon_1 + \varepsilon_2} (r), e_{- \varepsilon_2} (1)] \in N$ as $N \lhd E(R),$ we obtain that $e_{\varepsilon_1} (r) \in N.$ Thus, (\ref{E-Ideal}) holds for $\alpha = \varepsilon_1$, and therefore for all short roots.
Next, we proceed to the case of $\Phi$ of type $B_2 = C_2$, where we assume that $2 \in R^{\times}.$ We will use the same realization of $\Phi$ as in the previous paragraph (for $n = 2$). Set ${\mathfrak{a}} = {\mathfrak{a}} (\varepsilon_1).$ Then for $r \in {\mathfrak{a}}$, $s \in R$, one can check by direct computation that \begin{equation}\label{E-2} [e_{\varepsilon_1} (r), e_{\varepsilon_2} (s/4)] = e_{\varepsilon_1 + \varepsilon_2} (rs/2) \in N. \end{equation} Next, using (\ref{E-3}), in conjunction with the fact that $e_{\varepsilon_1} (u)$ and $e_{\varepsilon_1 - \varepsilon_2} (v)$ commute for all $u, v \in R$, we obtain $$ [e_{\varepsilon_1 + \varepsilon_2} (rs/2), e_{- \varepsilon_2} (1)][e_{\varepsilon_1 + \varepsilon_2} (rs/2), e_{- \varepsilon_2} (-1)]^{-1} = e_{\varepsilon_1} (rs) \in N, $$ i.e. $rs \in {\mathfrak{a}}$, which shows that ${\mathfrak{a}}$ is an ideal. Furthermore, from (\ref{E-2}), we see that for any $r \in {\mathfrak{a}},$ we have $$ [e_{\varepsilon_1} (r), e_{\varepsilon_2} (1/2)] = e_{\varepsilon_1 + \varepsilon_2} (r) \in N. $$ Thus, $e_{\varepsilon_1 + \varepsilon_2} ({\mathfrak{a}}) \subset N,$ and therefore (\ref{E-Ideal}) holds for all $\alpha \in \Phi.$
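To spell out the derivation of the relation $e_{\varepsilon_1}(rs) \in N$ above: applying (\ref{E-3}) with $r$ replaced by $rs/2$ and $s = \pm 1$ gives
$$
[e_{\varepsilon_1 + \varepsilon_2} (rs/2), e_{-\varepsilon_2}(\pm 1)] = e_{\varepsilon_1} (\pm rs/2)\, e_{\varepsilon_1 - \varepsilon_2} (-rs/2),
$$
so that
$$
[e_{\varepsilon_1 + \varepsilon_2} (rs/2), e_{- \varepsilon_2} (1)]\,[e_{\varepsilon_1 + \varepsilon_2} (rs/2), e_{- \varepsilon_2} (-1)]^{-1} = e_{\varepsilon_1} (rs/2)\, e_{\varepsilon_1 - \varepsilon_2} (-rs/2)\, e_{\varepsilon_1 - \varepsilon_2} (rs/2)\, e_{\varepsilon_1} (rs/2) = e_{\varepsilon_1} (rs),
$$
the two middle factors cancelling (alternatively, one may first rearrange the factors using the commutativity of $e_{\varepsilon_1}(u)$ and $e_{\varepsilon_1 - \varepsilon_2}(v)$).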
Finally, suppose that $\Phi$ is of type $G_2$ and assume again that $2 \in R^{\times}.$ We will use the realization of $\Phi$ described in \cite{CK}: one picks a system of simple roots $\{k, c \}$ in $\Phi$, where $k$ is long and $c$ is short, and then the long roots of $\Phi$ are $$\pm k, \pm (3c + k), \pm (3c + 2k),$$ and the short roots are $$\pm c, \pm (c+k), \pm (2c + k).$$ Set ${\mathfrak{a}} = {\mathfrak{a}} (k).$ Since the long roots of $\Phi$ form a subsystem of type $A_2$, for which our claim has already been established, we conclude that ${\mathfrak{a}}$ is a finite index ideal in $R$ and that (\ref{E-Ideal}) holds for all long roots. To show that (\ref{E-Ideal}) is true for the short roots as well, we need to recall the following explicit forms of the Steinberg commutator relations that were established in (\cite{CK}, Theorem 1.1): \begin{equation}\label{E-4} [e_{k}(s), e_{c} (t)] = e_{c+k} (\varepsilon_1 st) e_{2c + k} (\varepsilon_2 st^2) e_{3c + k} (\varepsilon_3 st^3) e_{3c + 2k} (\varepsilon_4 s^2 t^3), \end{equation} \begin{equation}\label{E-5} [e_{c+k} (s), e_{2c+k}(t)] = e_{3c+2k} (3 \varepsilon_5 st), \end{equation}
where $\varepsilon_i = \pm 1.$ Using (\ref{E-4}), we obtain $$ [e_k(s), e_c(1)] [e_k (s), e_c(-1)] = $$ $$ =e_{c+k} (\varepsilon_1 s) e_{2c+k} (\varepsilon_2 s) e_{3c + k} (\varepsilon_3 s) e_{3c+2k} (\varepsilon_4 s^2) e_{c+k} (-\varepsilon_1 s) e_{2c+k} (\varepsilon_2 s) e_{3c + k} (-\varepsilon_3 s) e_{3c+2k} (-\varepsilon_4 s^2). $$ Since the terms $e_{3c+k} (-\varepsilon_3 s)$ and $e_{3c + 2k} (-\varepsilon_4 s^2)$ commute with all other terms, the last expression reduces to $$ e_{c+k} (\varepsilon_1 s) e_{2c +k} (\varepsilon_2 s) e_{c+k}(-\varepsilon_1 s) e_{2c + k} (\varepsilon_2 s), $$ which, using (\ref{E-5}), can be written in the form $$ e_{3c + 2k} (3 \varepsilon_5 \varepsilon_1 \varepsilon_2 s^2) e_{2c + k} (2 \varepsilon_2 s). $$ Hence if $s \in {\mathfrak{a}}$, we obtain that $$ [e_k(s/2), e_c(1)] [e_k (s/2), e_c(-1)] = e_{3c + 2k} (3 \varepsilon_5 \varepsilon_1 \varepsilon_2 s^2/4) e_{2c + k} (\varepsilon_2 s) \in N. $$ But $e_{3c + 2k} (3 \varepsilon_5 \varepsilon_1 \varepsilon_2 s^2/4) \in N,$ from which it follows that $e_{2c + k} (\mathfrak{a}) \subset N.$ This completes the proof. \end{proof}
\vskip1mm
\noindent {\bf Remark.} If $R$ is the ring of algebraic $S$-integers, then any subgroup of finite index of the additive group of $R$ contains an ideal of finite index, so the conclusion of Proposition \ref{P-3} holds for root systems of rank $>1$ of all types without any additional restrictions on $R$. On the other hand, if $R$ is the ring of $S$-integers in a global field of positive characteristic $>2$, then $2 \in R^{\times}$, and Proposition \ref{P-3} again applies to all root systems without any extra assumptions.
\section{Proof of the main theorem}
We return to the notations introduced in \S \ref{S:I}. In particular, we set $\Gamma = E(R)$, where $R$ is a commutative noetherian ring such that $2 \in R^{\times}$ if our root system $\Phi$ is of type $C_n$ ($n \geq 2$) or $G_2$, and let $\widehat{\Gamma}$ and $\overline{\Gamma}$ denote the profinite and congruence completions of $\Gamma$, respectively. Furthermore, we let $\pi \colon \widehat{\Gamma} \to \overline{\Gamma}$ denote the canonical continuous homomorphism, so that $C(\Gamma) := \ker \pi$ is the congruence kernel. For each root $\alpha \in \Phi$, we let $\widehat{U}_{\alpha}$ and $\overline{U}_{\alpha}$ denote the closures of the images of the natural homomorphisms $U_{\alpha} (R) \to \widehat{\Gamma}$ and $U_{\alpha} (R) \to \overline{\Gamma}.$ By Proposition \ref{P-3}, the profinite and congruence topologies of $\Gamma$ induce the same topology on each $U_{\alpha} (R)$, which implies that $\pi \vert_{\widehat{U}_{\alpha}} \colon \widehat{U}_{\alpha} \to \overline{U}_{\alpha}$ is a group isomorphism. From the definitions, it is clear that $\overline{U}_{\alpha}$ coincides with $\overline{e}_{\alpha} (\widehat{R})$, where $\overline{e}_{\alpha} \colon \widehat{R} \to G(\widehat{R}) = \overline{G(R)}$ is the 1-parameter subgroup associated with $\alpha$ over the ring $\widehat{R}.$ Set $$ \widehat{e}_{\alpha} = (\pi \vert_{\widehat{U}_{\alpha}})^{-1} \circ \overline{e}_{\alpha}. $$ Then $\widehat{e}_{\alpha} \colon \widehat{R} \to \widehat{U}_{\alpha}$ is an isomorphism of topological groups, and in particular, we have $$ \widehat{e}_{\alpha} (r+s) = \widehat{e}_{\alpha} (r) \widehat{e}_{\alpha} (s) $$ for all $r, s \in \widehat{R}$ and any $\alpha \in \Phi.$
Before establishing some further properties of the $\widehat{e}_{\alpha}$, let us recall that for any commutative ring $S$ and any $\alpha, \beta \in \Phi$, $\beta \neq -\alpha$, there is a relation in $G(S)$ of the form \begin{equation}\label{E-Steinberg} [e_{\alpha} (s), e_{\beta} (t)] = \prod e_{i \alpha + j \beta} (N_{\alpha, \beta}^{i,j} s^i t^j) \end{equation} for all $s,t \in S$, where the product is taken over all roots of the form $i \alpha + j \beta$ with $i, j \in {\mathbb Z}^+$, listed in an arbitrary (but {\it fixed}) order, and the $N^{i,j}_{\alpha, \beta}$ are integers depending only on $\alpha, \beta \in \Phi$ and the order of the factors in (\ref{E-Steinberg}), but not on $s, t \in S$. Furthermore, recall that the abstract group $\tilde{G}(S)$ with generators $x_{\alpha} (s)$ for all $s \in S$ and $\alpha \in \Phi$ subject to the relations \vskip1mm
(R1) $x_{\alpha}(s) x_{\alpha}(t) = x_{\alpha} (s+t)$,
\vskip1mm
(R2) \parbox[t]{15cm}{$[x_{\alpha} (s), x_{\beta} (t)] = \prod x_{i \alpha + j \beta} (N^{i,j}_{\alpha, \beta} s^i t^j)$, where $N_{\alpha, \beta}^{i,j}$ are the same integers, and the roots are listed in the same order, as in (\ref{E-Steinberg}),}
\vskip1mm \noindent is called the {\it Steinberg group}. It follows from (\ref{E-Steinberg}) that there exists a canonical homomorphism $\tilde{G}(S) \to G(S)$, defined by $x_{\alpha} (s) \mapsto e_{\alpha} (s)$, whose kernel is denoted by $K_2 (\Phi, S).$ \begin{lemma}\label{L-3} {\rm (1)} \parbox[t]{15cm}{For any $\alpha, \beta \in \Phi$, $\beta \neq -\alpha$, and $s, t \in \widehat{R}$, we have $[\widehat{e}_{\alpha} (s), \widehat{e}_{\beta} (t)] = \prod \widehat{e}_{i \alpha + j \beta} (N_{\alpha, \beta}^{i,j} s^i t^j).$}
\vskip2mm
\noindent Let $\widehat{R} = \prod_{{\mathfrak{m}} \in {\mathcal{M}}} R_{{\mathfrak{m}}}$ be the decomposition from Lemma \ref{L-1}(1), and for ${\mathfrak{m}} \in {\mathcal{M}}$, let $\widehat{\Gamma}_{{\mathfrak{m}}}$ (resp. $\widehat{\Gamma}_{{\mathfrak{m}}}'$) be the subgroup of $\widehat{\Gamma}$ (algebraically) generated by $\widehat{e}_{\alpha} (r)$ for all $r \in R_{{\mathfrak{m}}}$ (resp., $r \in R_{{\mathfrak{m}}}' := \prod_{{\mathfrak{n}} \neq {\mathfrak{m}}} R_{{\mathfrak{n}}}$) and all $\alpha \in \Phi$. Then
\noindent {\rm (2)} \parbox[t]{16cm}{There exists a surjective group homomorphism $\theta_{{\mathfrak{m}}} \colon \tilde{G}(R_{{\mathfrak{m}}}) \to \widehat{\Gamma}_{{\mathfrak{m}}}$ such that $x_{\alpha} (r) \mapsto \widehat{e}_{\alpha} (r)$ for all $r \in R_{{\mathfrak{m}}}$ and $\alpha \in \Phi.$}
\vskip1mm
\noindent {\rm (3)} \parbox[t]{16cm}{$\widehat{\Gamma}_{{\mathfrak{m}}}$ and $\widehat{\Gamma}_{{\mathfrak{m}}}'$ commute elementwise inside $\widehat{\Gamma}.$} \end{lemma} \begin{proof} (1) Define two continuous maps $$ \varphi \colon \widehat{R} \times \widehat{R} \to \widehat{\Gamma}, \ \ \ (s,t) \mapsto [\widehat{e}_{\alpha} (s), \widehat{e}_{\beta} (t)] $$ and $$ \psi \colon \widehat{R} \times \widehat{R} \to \widehat{\Gamma}, \ \ \ (s,t) \mapsto \prod \widehat{e}_{i \alpha + j \beta} (N_{\alpha, \beta}^{i,j} s^i t^j). $$ It follows from (\ref{E-Steinberg}) that these maps coincide on $R \times R.$ Since $R \times R$ is dense in $\widehat{R} \times \widehat{R},$ we have $\varphi \equiv \psi,$ yielding our claim.
(2) Since we have shown that the $\widehat{e}_{\alpha}(r)$, $r \in R_{{\mathfrak{m}}}$, $\alpha \in \Phi,$ satisfy the relations (R1) and (R2), the existence of the homomorphism $\theta_{{\mathfrak{m}}}$ follows.
(3) It suffices to show that for any $\alpha, \beta \in \Phi$ and any $r \in R_{{\mathfrak{m}}}, \ s \in R_{{\mathfrak{m}}}',$ the elements $\widehat{e}_{\alpha} (r), \widehat{e}_{\beta} (s) \in \widehat{\Gamma}$ commute. Since $r s= 0$ in $\widehat{R},$ this fact immediately follows from (1) if $\beta \neq -\alpha.$ To handle the remaining case $\beta = - \alpha,$ we observe that for any ring $S$ and the corresponding Steinberg group $\tilde{G}(S)$, we have \begin{equation}\label{E:St-1} \tilde{G}(S) = <x_{\gamma} (r) \mid \gamma \in \Phi \setminus \{ \alpha \}, r \in S>. \end{equation} Indeed, it is well-known that $\tilde{G}(S)$ is generated by the elements $x_{\pm \gamma} (r)$ for all $r \in S$ and all $\gamma$ in an arbitrarily chosen system $\Pi \subset \Phi$ of simple roots (this follows, for example, from the fact that the Weyl group of $\Phi$ is generated by the reflections corresponding to simple roots, and moreover, every root lies in the orbit of a simple root under the action of the Weyl group). On the other hand, since $\Phi$ is of rank $\geq 2$, for any $\alpha \in \Phi,$ one can find a system of simple roots $\Pi \subset \Phi$ containing neither $\alpha$ nor $-\alpha$, and (\ref{E:St-1}) follows. Using the homomorphism $\theta_{{\mathfrak{m}}}$ constructed in part (2), we conclude from (\ref{E:St-1}) that $\widehat{\Gamma}_{{\mathfrak{m}}} = \theta_{{\mathfrak{m}}} (\tilde{G}(R_{{\mathfrak{m}}}))$ is generated by $\widehat{e}_{\gamma}(r)$ for $r \in R_{{\mathfrak{m}}}$, $\gamma \in \Phi \setminus \{ \alpha \}$. So, since we already know that $\widehat{e}_{-\alpha} (s)$, with $s \in R_{{\mathfrak{m}}}'$, commutes with all of these elements, it also commutes with $\widehat{e}_{\alpha} (r),$ yielding our claim.
\end{proof}
The following lemma, which uses results of Stein \cite{St2} on the computation of $K_2$ over semi-local rings, is a key ingredient in the proof of the Main Theorem. \begin{lemma}\label{L-4} The kernel $\ker (\pi \vert_{\widehat{\Gamma}_{{\mathfrak{m}}}})$ of the restriction $\pi \vert_{\widehat{\Gamma}_{{\mathfrak{m}}}}$ lies in the center of $\widehat{\Gamma}_{{\mathfrak{m}}}$, for any ${\mathfrak{m}} \in {\mathcal{M}}.$ \end{lemma} \begin{proof} Stein has shown that if $\Phi$ has rank $\geq 2$ and $S$ is a semi-local ring which is generated by its units, then $K_2 (\Phi, S)$ lies in the center of $\tilde{G}(S)$ (cf. \cite{St2}, Theorem 2.13). Since $S = R_{{\mathfrak{m}}}$ is local, it is automatically generated by its units, hence $K_2 (\Phi, R_{{\mathfrak{m}}}) = \ker (\tilde{G}(R_{{\mathfrak{m}}}) \stackrel{\mu}{\longrightarrow} E(R_{{\mathfrak{m}}}))$ is central. On the other hand, $\mu$ admits the following factorization: $$ \tilde{G}(R_{{\mathfrak{m}}}) \stackrel{\theta_{{\mathfrak{m}}}}{\longrightarrow} \widehat{\Gamma}_{{\mathfrak{m}}} \stackrel{\pi \vert_{\widehat{\Gamma}_{{\mathfrak{m}}}}}{\longrightarrow} E(R_{{\mathfrak{m}}}). $$ Since $\theta_{{\mathfrak{m}}}$ is surjective, we conclude that $$ \ker (\pi \vert_{\widehat{\Gamma}_{{\mathfrak{m}}}}) = \theta_{{\mathfrak{m}}} (K_2 (\Phi, R_{{\mathfrak{m}}})) $$ is central in $\widehat{\Gamma}_{{\mathfrak{m}}}.$ \end{proof}
Now fix ${\mathfrak{m}} \in {\mathcal{M}}$ and let $\Delta_{{\mathfrak{m}}} = \widehat{\Gamma}_{{\mathfrak{m}}} \widehat{\Gamma}_{{\mathfrak{m}}}'$ be the subgroup of $\widehat{\Gamma}$ (algebraically) generated by $\widehat{\Gamma}_{{\mathfrak{m}}}$ and $\widehat{\Gamma}_{{\mathfrak{m}}}'.$ Let $c \in C(\Gamma) \cap \Delta_{{\mathfrak{m}}},$ and write $c = c_1 c_2,$ with $c_1 \in \widehat{\Gamma}_{{\mathfrak{m}}}, c_2 \in \widehat{\Gamma}_{{\mathfrak{m}}}'.$ We have $\overline{\Gamma} = \overline{\Gamma}_{{\mathfrak{m}}} \times \overline{\Gamma}_{{\mathfrak{m}}}'$, where $\overline{\Gamma}_{{\mathfrak{m}}} = E(R_{{\mathfrak{m}}})$ and $\overline{\Gamma}_{{\mathfrak{m}}}' = E(R_{{\mathfrak{m}}}').$ Since $\pi (c_1) \in \overline{\Gamma}_{{\mathfrak{m}}}$, $\pi(c_2) \in \overline{\Gamma}_{{\mathfrak{m}}}',$ we conclude from $$\pi(c) = e = \pi(c_1) \pi(c_2)$$ that $\pi(c_1) = e,$ i.e. $c_1 \in \ker (\pi \vert_{\widehat{\Gamma}_{{\mathfrak{m}}}}).$ Then by Lemma \ref{L-4}, $\widehat{\Gamma}_{{\mathfrak{m}}}$ centralizes $c_1.$ On the other hand, $\widehat{\Gamma}_{{\mathfrak{m}}}$ centralizes $c_2 \in \widehat{\Gamma}_{{\mathfrak{m}}}'$ by Lemma \ref{L-3}(3). So, $\widehat{\Gamma}_{{\mathfrak{m}}}$ centralizes $c.$ Thus, we have shown that $C \cap \Delta_{{\mathfrak{m}}}$ is centralized by $\widehat{\Gamma}_{{\mathfrak{m}}}.$ To prove that $\widehat{\Gamma}_{{\mathfrak{m}}}$ actually centralizes all of $C$, we need the following \begin{lemma}\label{L-5} Let $\varphi \colon \mathcal{G}_1 \to \mathcal{G}_2$ be a continuous homomorphism of topological groups, and let $\mathcal{F} = \ker \varphi.$ Suppose $\Theta \subset \mathcal{G}_1$ is a dense subgroup such that there exists a compact set $\Omega \subset \Theta$ whose image $\varphi(\Omega)$ is a neighborhood of the identity in $\mathcal{G}_2.$ Then $\mathcal{F} \cap \Theta$ is dense in $\mathcal{F}.$ \end{lemma} \begin{proof} Since $\varphi(\Omega)$ is a neighborhood of the identity in $\mathcal{G}_2$, we can find an open set $U \subset \mathcal{G}_1$ such that $$ \mathcal{F} \subset U \subset \varphi^{-1}(\varphi(\Omega)) = \Omega \mathcal{F}. $$ Now since $\Theta$ is dense in $\mathcal{G}_1$, we have $U \subset \overline{\Theta \cap U},$ where the bar denotes the closure in $\mathcal{G}_1.$ Thus, $$ \mathcal{F} \subset \overline{\Theta \cap U} \subset \overline{\Theta \cap \Omega \mathcal{F}}. $$ But $\Theta \cap \Omega \mathcal{F} = \Omega (\Theta \cap \mathcal{F}),$ and since $\Omega$ is compact, the product $\Omega \overline{(\Theta \cap \mathcal{F})}$ is closed. So $$ \mathcal{F} \subset \overline{\Theta \cap \Omega \mathcal{F}} \subset \Omega \overline{(\Theta \cap \mathcal{F})}. $$ Since $\mathcal{F}$ is closed, we have $\overline{\Theta \cap \mathcal{F}} \subset \mathcal{F},$ so $$ \mathcal{F} = (\Omega \cap \mathcal{F}) \overline{(\Theta \cap \mathcal{F})} \subset (\Theta \cap \mathcal{F}) \overline{(\Theta \cap \mathcal{F})} = \overline{\Theta \cap \mathcal{F}}, $$ as required. \end{proof} In order to apply Lemma \ref{L-5} in our situation, we noted the following simple fact \begin{lemma}\label{L-6} The subgroup $\Delta \subset \widehat{\Gamma}$ (algebraically) generated by the $\widehat{\Gamma}_{{\mathfrak{m}}}$ for all ${\mathfrak{m}} \in {\mathcal{M}}$ is dense. Consequently, for any ${\mathfrak{m}} \in {\mathcal{M}},$ the subgroup $\Delta_{{\mathfrak{m}}} = \widehat{\Gamma}_{{\mathfrak{m}}} \widehat{\Gamma}_{{\mathfrak{m}}}' \subset \widehat{\Gamma}$ is dense. 
\end{lemma} \begin{proof} Let $$ R_0 := \sum_{{\mathfrak{m}} \in {\mathcal{M}}} R_{{\mathfrak{m}}} \subset \widehat{R} = \prod_{{\mathfrak{m}} \in {\mathcal{M}}} R_{{\mathfrak{m}}}. $$ Clearly $R_0$ is a dense subring of $\widehat{R}.$ On the other hand, $\Delta$ obviously contains $\widehat{e}_{\alpha} (R_0)$ for any $\alpha \in \Phi.$ So, the closure $\overline{\Delta}$ contains $\widehat{e}_{\alpha} (R)$ for all $\alpha \in \Phi$, and therefore coincides with $\widehat{\Gamma},$ yielding our first assertion. Furthermore, for any ${\mathfrak{m}} \in {\mathcal{M}},$ the subgroup $\Delta_{{\mathfrak{m}}}$ contains $\widehat{\Gamma}_{{\mathfrak{n}}}$ for all ${\mathfrak{n}} \in {\mathcal{M}},$ so our second assertion follows. \end{proof}
\vskip1mm
\noindent {\it Conclusion of the proof of the Main Theorem}: Fix ${\mathfrak{m}} \in {\mathcal{M}}.$ We have already seen that $\widehat{\Gamma}_{{\mathfrak{m}}}$ centralizes $C \cap \Delta_{{\mathfrak{m}}}.$ We claim that $C \cap \Delta_{{\mathfrak{m}}}$ is dense in $C$, and hence $\widehat{\Gamma}_{{\mathfrak{m}}}$ centralizes $C$. Indeed, by Lemma~\ref{L-6}, $\Delta_{{\mathfrak{m}}}$ is dense in $\widehat{\Gamma}.$ On the other hand, it follows from Corollary \ref{C-1} that there exists a string of roots $(\alpha_1, \dots, \alpha_L)$ such that the map $$ \widehat{R}^L \to \overline{\Gamma}, \ \ \ \ \ (r_1, \dots, r_L) \mapsto \prod_{i=1}^L \overline{e}_{\alpha_i} (r_i) $$ is surjective. Then $$ \Omega := \widehat{e}_{\alpha_1} (\widehat{R}) \cdots \widehat{e}_{\alpha_L} (\widehat{R}) = \left( \widehat{e}_{\alpha_1} (R_{{\mathfrak{m}}}) \cdots \widehat{e}_{\alpha_L} (R_{{\mathfrak{m}}}) \right) \left( \widehat{e}_{\alpha_1} (R_{{\mathfrak{m}}}') \cdots \widehat{e}_{\alpha_L} (R_{{\mathfrak{m}}}') \right) $$ is a compact subset of $\widehat{\Gamma}$ that is contained in $\Delta_{{\mathfrak{m}}}$ and has the property that $\pi(\Omega) = \overline{\Gamma}.$ Invoking Lemma \ref{L-5}, we obtain that $C \cap \Delta_{{\mathfrak{m}}}$ is dense in $C$, as required.
We now see that $\widehat{\Gamma}_{{\mathfrak{m}}}$ centralizes $C$ for all ${\mathfrak{m}} \in {\mathcal{M}}.$ Since the subgroup $\Delta \subset \widehat{\Gamma}$ generated by the $\widehat{\Gamma}_{{\mathfrak{m}}}$ is dense in $\widehat{\Gamma}$ by Lemma \ref{L-6}, we obtain that $\widehat{\Gamma}$ centralizes $C$, completing the proof.
$\Box$
\vskip2mm
To put our proof of the Main Theorem into perspective, we recall the following criterion for the centrality of the congruence kernel in the context of the congruence subgroup problem for algebraic groups over global fields (see \cite{PR}, Theorem 4). Let $G$ be an absolutely almost simple simply connected algebraic group over a global field $K$, and $S$ be a set of places of $K$, which we assume to contain all archimedean places if $K$ is a number field, such that the corresponding $S$-arithmetic group $G(\mathcal{O}_S)$ is infinite (where $\mathcal{O}_S$ is the ring of $S$-integers in $K$). Then by the Strong Approximation Theorem, the $S$-congruence completion $\overline{G(K)}$ of the group $G(K)$ of $K$-rational points can be identified with the group of $S$-adeles $G(\mathbb{A}_S)$, and in particular the group $G(K_v)$, for $v \notin S$, can be viewed as a subgroup of $\overline{G(K)}.$ Assume furthermore that $S$ contains no nonarchimedean anisotropic places for $G$ and that $G/K$ satisfies the Margulis-Platonov conjecture. If for each $v \notin S$, there exists a subgroup $H_v$ of the $S$-arithmetic completion $\widehat{G(K)}$ such that
\vskip1mm
(1) \parbox[t]{16cm}{$\pi (H_v) = G(K_v)$ for all $v \notin S$, where $\pi \colon \widehat{G(K)} \to \overline{G(K)}$ is the canonical projection;}
\vskip1mm
(2) \parbox[t]{16cm}{$H_{v_1}$ and $H_{v_2}$ commute elementwise for $v_1 \neq v_2$;}
\vskip1mm
(3) \parbox[t]{16cm}{the $H_v$, for $v \notin S$, (algebraically) generate a dense subgroup of $\widehat{G(K)}$,}
\vskip1mm
\noindent then the congruence kernel $C^S(G) := \ker \pi$ is central. So, this criterion basically states that in the arithmetic situation, the mere existence of elementwise commuting lifts of ``local groups'' implies the centrality of the congruence kernel. In our situation, the existence of elementwise commuting lifts (which we denoted $\widehat{\Gamma}_{{\mathfrak{m}}}$ above) also plays a part in the proof of centrality (cf. Lemma \ref{L-3}(3)), but some additional considerations (such as the result of Stein and the bounded generation property for $E(\widehat{R}) = G(\widehat{R})$) are needed; the facilitating factor in the arithmetic situation is the action of the group $G(K)$ on the congruence kernel, which is not available over more general rings.
Finally, we will relate our result on the centrality of the congruence kernel $C(\Gamma)$ for $\Gamma = E(R)$ to the congruence subgroup problem for $G(R).$ We have the following commutative diagram induced by the natural embedding $\Gamma \hookrightarrow G(R)$: $$ \xymatrix{1 \ar[r] & C(\Gamma) \ar[r] \ar[d]_{\alpha} & \widehat{\Gamma} \ar[d]_{\beta} \ar[r]^{\pi^{\Gamma}} & \overline{\Gamma} \ar[d]_{\gamma} \ar[r] & 1 \\ 1 \ar[r] & C(G(R)) \ar[r] & \widehat{G(R)} \ar[r]^{\pi^{G(R)}} & \overline{G(R)} \ar[r] & 1} $$ We note that by Proposition \ref{P-2}, $\gamma$ is an isomorphism. So, $\alpha(C(\Gamma)) = C(G(R)) \cap \beta (\widehat{\Gamma})$, and $\beta (\widehat{\Gamma})$ coincides with the closure $\check{\Gamma}$ of $\Gamma$ in $\widehat{G(R)}$. Thus, our Main Theorem yields the following \begin{cor} $C(G(R)) \cap \check{\Gamma}$ is centralized by $\check{\Gamma}.$ \end{cor} The exact relationship between $C(G(R))$ and $C(G(R)) \cap \check{\Gamma}$ (or $C(\Gamma)$) remains unclear except in a few cases. Matsumoto \cite{M1} showed that $G(R) = E(R)$ for any ring $R$ of algebraic $S$-integers, which combined with our Main Theorem and the remark at the end of \S 3, yields the centrality of $C(E(R)) = C(G(R))$, established by Matsumoto himself. Furthermore, for $G = SL_n$ $(n \geq 3)$ and $R = {\mathbb Z}[x_1, \dots, x_k]$, by a result of Suslin \cite{Su}, we again have $G(R) = E(R),$ so $C(G(R)) = C(E(R))$ is central in $\widehat{E(R)} = \widehat{G(R)},$ which was established in \cite{KN}. On the other hand, there exist principal ideal domains $R$ for which $SL_n (R) \neq E(R)$ (cf. \cite{G}, \cite{I}), and then the analysis of $C(G(R))$ requires more effort. We only note that if $\Gamma = E(R)$ has finite index in $G(R)$, then the profinite topology on $\Gamma$ is induced by the profinite topology of $G(R)$, which implies that $\beta$ is injective, and therefore $C(\Gamma)$ is identified with a finite index subgroup of $C(G(R)).$
\vskip2mm
\noindent {\bf Acknowledgements.} The first-named author was partially supported by NSF grant DMS-0965758 and the Humboldt Foundation. The paper was finalized when both authors were visiting SFB 701 (Bielefeld), whose hospitality is gratefully acknowledged.
\end{document} |
\begin{document}
\title [Carath\'eodory functions on Riemann surfaces] {Carath\'eodory functions on Riemann surfaces and reproducing kernel spaces}
\author[D. Alpay]{Daniel Alpay} \address{(DA) Schmid College of Science and Technology, Chapman University, One University Drive Orange, California 92866, USA} \email{[email protected]}
\author[A. Pinhas]{Ariel Pinhas} \address{(AP) Department of mathematics, Ben-Gurion University of the Negev, P.O. Box 653, Beer-Sheva 84105, Israel} \email{[email protected]}
\author[V. Vinnikov]{Victor Vinnikov} \address{(VV) Department of mathematics, Ben-Gurion University of the Negev, P.O. Box 653, Beer-Sheva 84105, Israel} \email{[email protected]}
\date{}
\thanks{The first author thanks the Foster G. and Mary McGaw Professorship in Mathematical Sciences, which supported this research}
\begin{abstract} Carath\'eodory functions, i.e. functions analytic in the open upper half-plane and with a positive real part there, play an important role in operator theory, $1D$ system theory and in the study of de Branges-Rovnyak spaces. The Herglotz integral representation theorem associates to each Carath\'eodory function a positive measure on the real line and hence allows one to examine these subjects further. In this paper, we study these relations when the Riemann sphere is replaced by a real compact Riemann surface. The generalization of Herglotz's theorem to the compact real Riemann surface setting is presented. Furthermore, we study de Branges-Rovnyak spaces associated with functions with positive real part defined on compact Riemann surfaces. Their elements are no longer functions, but sections of a related line bundle. \end{abstract} \subjclass{46E22,30F15} \keywords{compact Riemann surface, de Branges-Rovnyak spaces, Carath\'eodory function}
\maketitle
\setcounter{tocdepth}{1} \tableofcontents
\section{Introduction and overview}
A Carath\'eodory function $\varphi(z)$, that is, a function analytic with positive real part in the open upper half-plane ${\mathbb C}_+$, admits an integral representation, a result also known as Herglotz's representation theorem (see e.g.\ \cite{MR48:904,RosenblumRovnyak}). More precisely, the Carath\'eodory function $\varphi(z)$ can be written as: \begin{equation} \label{27-octobre-2000} \varphi(z)=iA-iBz - i\int_{{\mathbb R}}\left(\frac{1}{t-z}-\frac{t}{t^2+1} \right)d\mu(t), \end{equation} where $A\in{\mathbb R}$, $B\geq 0$ and $d\mu(t)$ is a positive measure on the real line such that $$\int_{\mathbb R}\frac{d\mu(t)}{t^2+1}<\infty.$$ One of the two main objectives of this paper is to extend \eqref{27-octobre-2000} to the nonzero genus case; this is done in Theorem \ref{caraTmRS}. The second is to extend \eqref{4-juin-2000} to the nonzero genus case as well, and this result is presented in Theorem \ref{thm41}.
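For instance, taking $A = B = 0$ and $d\mu = \delta_0$, the unit point mass at the origin, \eqref{27-octobre-2000} gives
$$
\varphi(z) = -i\left( \frac{1}{0-z} - 0 \right) = \frac{i}{z}, \qquad \mathfrak{Re}\, \frac{i}{z} = \frac{\mathfrak{Im}\, z}{|z|^2} > 0 \quad (z \in \mathbb{C}_+),
$$
so that $\varphi(z) = i/z$ is indeed a Carath\'eodory function.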
The right-hand side of \eqref{27-octobre-2000} defines an analytic extension of $\varphi(z)$ to ${\mathbb C}\setminus{\mathbb R}$ such that $\overline{\varphi(\overline{z})}+\varphi(z)=0$.
Thus, for any $z,w \in{\mathbb C}\setminus {\mathbb R}$, we have \begin{equation} \label{4-juin-2000} \frac{\varphi(z)+\overline{\varphi(w)}}{-i(z-\overline{w})}= B+\int_{\mathbb R}\frac{d\mu(t)}{(t-z)(t-\overline{w})}= B+\innerProductTri{\frac{1}{t-z}}{\frac{1}{t-w}}{{\bf L}^2(d\mu)}, \end{equation} where ${\bf L}^2(d\mu)$ stands for the Lebesgue (Hilbert) space associated with the measure $d\mu$. In particular, the kernel $\frac{\varphi(z)+\overline{\varphi(w)}}{-i(z-\overline{w})}$ is positive in ${\mathbb C}\setminus{\mathbb R}$. When $B=0$, the associated reproducing kernel Hilbert space, denoted by $\mathcal{L}(\varphi)$, is described in the theorem below. \begin{Tm}[{\cite[Section 5]{MR0229011}}] \label{Thm21} The space $\mathcal{L}(\varphi)$ consists of the functions of the form \begin{equation} \label{Thm21A} F(z)=\int_{\mathbb R}\frac{f(t)d\mu(t)}{t-z} \end{equation} where $f\in{\bf L}^2(d\mu)$. Furthermore, $\mathcal{L}(\varphi)$ is invariant under the resolvent-like operators $R_\alpha$, which, for $\alpha \in \mathbb C \setminus \mathbb R$, are given by: \begin{equation}\label{liberte'} (R_\alpha F)(z)=\frac{F(z)-F(\alpha)}{z-\alpha}. \end{equation}
Finally, under the hypothesis $\int_{\mathbb R}d\mu(t)<\infty$, the elements of $\mathcal{L}(\varphi)$ satisfy: $$ \lim_{y\rightarrow\infty}F(iy)=0.$$ \end{Tm}
Moreover, the resolvent operator satisfies $R_\alpha = (M- \alpha I)^{-1}$, where $M$ is the multiplication operator defined by \[ M \, F(z) = z F(z) - \lim_{z \rightarrow \infty} z F(z). \] We note that $M$ corresponds, through \eqref{Thm21A}, to the operator of multiplication by $t$ in ${\bf L}^2(d\mu)$.
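A short computation makes the last statement explicit. Assuming, say, $\int_{\mathbb R}(1+|t|)\,|f(t)|\,d\mu(t)<\infty$, so that the limit below exists, we have
\[
zF(z) = \int_{\mathbb R}\frac{z\, f(t)\, d\mu(t)}{t-z} = -\int_{\mathbb R} f(t)\, d\mu(t) + \int_{\mathbb R}\frac{t\, f(t)\, d\mu(t)}{t-z},
\]
where the last integral tends to zero as $z = iy$ and $y\rightarrow\infty$. Hence
\[
(MF)(z) = z F(z) - \lim_{y\rightarrow\infty} iy\, F(iy) = \int_{\mathbb R}\frac{t\, f(t)\, d\mu(t)}{t-z},
\]
so that, on its natural domain, $M$ acts on the representing function $f$ as multiplication by $t$.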
We outline the proof here in order to motivate the analysis presented in the sequel in the compact real Riemann surface setting.
\begin{pf}[of Theorem \ref{Thm21}] Let $N\in{\mathbb N}$, $w_1,\ldots , w_N \in{\mathbb C}\setminus{\mathbb R}$ and $c_1,\ldots , c_N\in{\mathbb C}$. Then, \[ F(z) \overset{\text{def} } {=} \sum_{j=1}^{N} c_j \frac{\varphi(z)+\overline{\varphi(w_j)}}{-i(z-\overline{w_j})} = \int_{\mathbb R}\frac{d\mu (t)}{t-z}f(t) \] where \[ f(t)=\sum_{j=1}^{N}\frac{c_j}{t-\overline{w_j}}\in{\bf L}^2(d\mu). \] In view of \eqref{4-juin-2000}, we have \[
\|F\|^2_{\mathcal{L}(\varphi)} =
\|f\|^2_{{\bf L}^2(d\mu)} = \sum_{\ell,j}\overline{c_\ell} \frac{\varphi(w_\ell)+\overline{\varphi(w_j)}}{-i(w_\ell-\overline{w_j})}c_j. \] The first claim (Equation \ref{Thm21A}) follows from the fact that the linear span of the functions $t \mapsto \frac{1}{t-\overline{w}}$, $w\in{\mathbb C}\setminus{\mathbb R}$, is dense in ${\bf L}^2(d\mu)$.
Next, let $F(z)=\int_{\mathbb R}\frac{f(t)d\mu(t)}{t-z}\in\mathcal{L}(\varphi)$. Then, \begin{equation} \label{tatche} (R_\alpha F)(z)=\int_{\mathbb R}\frac{f(t)d\mu(t)}{(t-\alpha)(t-z)} \end{equation} belongs to $\mathcal{L}(\varphi)$ since $f(t)/(t-\alpha)\in{\bf L}^2( d\mu)$ for $\alpha$ outside the real line. \end{pf}
Furthermore, using \eqref{tatche}, the structure identity
\begin{equation} \label{strucId} [R_\alpha f, g] - [f, R_\beta g] - (\alpha - \overline{\beta}) [R_\alpha f, R_\beta g] = 0,\quad \alpha,\beta\in\mathbb C\setminus\mathbb R, \end{equation} holds in the $\mathcal{L}(\varphi)$ spaces. In fact, this is an ``if and only if'' relation: if $\mathcal L$ is a Hilbert space of functions analytic in $\mathbb C\setminus\mathbb R$ which is invariant under the operators $R_\alpha$ and in which the identity \eqref{strucId} holds, then $\mathcal{L}=\mathcal{L}(\varphi)$ for some Carath\'eodory function $\varphi(z)$ (see \cite[Theorem 6]{MR0229011}).
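In the model \eqref{Thm21A}, where an element of $\mathcal{L}(\varphi)$ is represented by $f \in {\bf L}^2(d\mu)$ and, by \eqref{tatche}, $R_\alpha$ acts on the representing function as multiplication by $(t-\alpha)^{-1}$, the identity \eqref{strucId} reduces to the elementary partial fraction identity
$$
\frac{1}{t-\alpha} - \frac{1}{t-\overline{\beta}} - \frac{\alpha - \overline{\beta}}{(t-\alpha)(t-\overline{\beta})} = 0, \qquad t \in \mathbb{R},
$$
integrated against $f(t)\overline{g(t)}\, d\mu(t)$.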
Using the observation that an ${\bf L}^2(d\mu)$ space is finite dimensional if and only if the measure $d\mu$ is purely atomic with a finite number of jumps, we may continue and mention the following result (see for instance \cite{dbbook}):
\begin{Tm} \label{finiteDimentionalLphi} Let $\varphi(z)$ be a Carath\'eodory function associated via \eqref{27-octobre-2000} to a positive measure $d \mu$ and let $\mathcal{L}(\varphi)$ be the corresponding reproducing kernel Hilbert space. Then the following are equivalent: \begin{enumerate} \item $\mathcal{L}(\varphi)$ is finite dimensional. \item ${\rm dim} \, {\bf L}^2(d \mu) < \infty$. \item $d \mu$ is a jump measure with a finite number of jumps. \item The Carath\'eodory function is of the form $$\varphi(z) = i A + i B z + \sum _{j=1}^N \frac{i c_j}{z-t_j},$$ where $c_j,B>0$, $A, t_j\in \mathbb R$ for all $1\leq j\leq N$. \end{enumerate} \end{Tm}
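For example, take $\varphi(z) = \frac{ic_1}{z-t_1}$ with $c_1 > 0$ and $t_1 \in \mathbb{R}$, which corresponds in \eqref{27-octobre-2000} to $B=0$, a suitable $A$, and the jump measure $d\mu = c_1\, \delta_{t_1}$. A direct computation gives
$$
\frac{\varphi(z)+\overline{\varphi(w)}}{-i(z-\overline{w})} = \frac{c_1}{(t_1-z)\,\overline{(t_1-w)}},
$$
so the reproducing kernel has rank one and $\mathcal{L}(\varphi)$ is the one-dimensional space spanned by $z \mapsto (t_1-z)^{-1}$, in accordance with the theorem.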
\begin{Rk} There are two different ways to obtain the positive measure $d\mu$ given in \eqref{27-octobre-2000}. \begin{enumerate} \item Using the Cauchy formula on the boundary and the Banach-Alaoglu Theorem. \item Using the spectral theorem for $R_0$ in the space $\mathcal{L}(\varphi)$. In this case, $R_0$ is self-adjoint and the measure $d\mu$ is given by $d\mu(t) = \innerProductReg{dE(t)u}{u}$ where $E$ is the spectral measure of $R_0$. \end{enumerate} In this paper we focus on the first approach, while in \cite{AVP3} we explore the second approach. \end{Rk}
We mention here that Carath\'eodory functions are the characteristic functions or transfer functions of selfadjoint vessels or impedance $2D$ systems, respectively. Furthermore, they are also related to de Branges-Rovnyak spaces $\mathcal{L}(\varphi)$ of sections of certain vector bundles defined on compact Riemann surfaces of nonzero genus. These subjects and interconnections are further studied by the authors in \cite{AVP3}.\\
{\bf Outline of the paper:} The paper consists of five sections besides the introduction. In Section \ref{secPrel}, we give a brief overview of compact real Riemann surfaces and the associated Cauchy kernels.
In Section \ref{secHerg}, we describe explicitly the Green function on $X$ in terms of the canonical homology basis. As a consequence, we present the Herglotz representation theorem for compact real Riemann surfaces. We utilize the integral representation of Carath\'eodory functions in order to study, in Section \ref{secdBLphi}, the de Branges space $\mathcal{L}(\varphi)$.
In Section \ref{secPhiSingleVal}, we examine the case where $\varphi(z)$ is a single-valued function which defines a contractive function $s(z)$ through the Cayley transformation. We may then determine the relation between the de Branges spaces $\mathcal{L}(\varphi)$ and the de Branges-Rovnyak space $\mathcal H (s)$ associated to $s$. Finally, in Section \ref{chSumm43}, we summarize some of the results by comparing the $\mathcal{L}(\varphi)$ theory in the genus zero case with that on real compact Riemann surfaces of genus $g>0$.
\section{Preliminaries} \label{secPrel}
In this section, we give a brief review of the basic properties and definitions of compact real Riemann surfaces. We replace the open upper half-plane (or, more precisely, its double, i.e.\ the Riemann sphere) by a compact real Riemann surface $X$ of genus $g>0$.
A survey of the main tools required in the present study (including the prime form and the Jacobian) can be found in \cite[Section 2]{av3}, and in particular the descriptions of the Jacobian variety of a real curve and of the real tori are given in \cite{vinnikov5}. For general background, we refer to \cite{fay1,GrHa,gunning2,mumford1} and \cite{mumford2}.
It is crucial to choose a canonical basis of the homology group $H_1(X,\mathbb Z)$ which is symmetric, in an appropriate sense, under the involution $\tau$
(for more details we refer to \cite{gross1981real}; here we use the conventions of \cite{av3,vinnikov5}). Let $X_{\mathbb R}$ be the set of points invariant under $\tau$, $X_{\mathbb R} = \{ p\in X | \tauBa{p} = p\}$, which is always assumed to be nonempty. Then $X_{\mathbb R}$ consists of $k$ connected components denoted by $X_j$ where $j=0,...,k-1$ (disjoint analytic simple closed curves). We choose for each component $X_j$ a point $p_j \in X_j$. Then, we set $A_{g+1-k+j} = X_j$ and $B_{g+1-k+j}=C_j - \tauBa{C_j}$, where $j=1,...,k-1$ and $C_j$ is a path from $p_0$ to $p_j$ which does not contain any other fixed point. We can extend these to a homology basis $A_1,...,A_g,B_1,...,B_g$ with respect to which the involution is given by $\begin{psmallmatrix}I & H\\0 & -I\end{psmallmatrix}$, where the matrix $H$ is given by \[ H= \left( \begin{smallmatrix} 0 & 1 \\ 1 & 0 \\ & & \ddots \\ & & & 0 & 1 \\ & & & 1 & 0 \\ & & & & & & 0 \\ & & & & & & & \ddots \\ & & & & & & & & 0 \\ \end{smallmatrix} \right) \quad {\rm and} \quad H= \left( \begin{smallmatrix} 1 & \\
& \ddots \\
& & 1 & \\
& & & 0 \\
& & & & \ddots \\
& & & & & 0 \\ \end{smallmatrix} \right) , \] for the dividing case and the non-dividing case, respectively. In both cases, $H$ has rank $g+1-k$. Then, we choose a normalized basis of holomorphic differentials on $X$ satisfying $\int _{A_i } \omega_j = \delta_{ij}$. The matrix $Z \in \mathbb C ^{g \times g}$, with entries $Z_{i,j} = \int _{B_i } \omega_j $, is symmetric, with positive real part, satisfies \[ Z^* = H - Z \] and is referred to as the period matrix of $X$ associated with the basis $\left( \omega_j \right)_{j=1} ^g$.
The Jacobian variety is defined by $J(X) = \mathbb C ^ g / \Gamma$, where $\Gamma = \mathbb Z ^g + Z \mathbb Z ^g$, and the Abel-Jacobi map from $X$ to the Jacobian variety is given by \[ \mu \colon p \mapsto \begin{pmatrix}\int_{p_0}^p \omega_1\\ \vdots\\ \int_{p_0}^p \omega_g\end{pmatrix}. \]
It is convenient to define \begin{equation}\label{eqZHY}Z= \frac{1}{2}H + i Y^{-1}.\end{equation}
We denote the universal covering of $X$ by $\pi:\widetilde{X}\rightarrow X$. The group of deck transformations of this covering, denoted by $\mathrm{Deck} (\widetilde{X} / X)$, consists of the homeomorphisms $\mathcal{T}: \widetilde{X} \rightarrow \widetilde{X}$ such that $\pi \circ \mathcal{T} = \pi$. It is well-known that the group of deck transformations of the universal covering is isomorphic to the fundamental group $\pi_1(X)$.
The analogue of the kernel $\frac{1}{-i(z-\overline{w})}$, is given by $\frac{K_{\zeta}(u,\tauBa{v})}{-i}$ where \[ K_{\zeta}(u,v) \overset{\text{def} } {=} \frac{ \vartheta [{\zeta}](v-u)} { \vartheta [ \zeta ](0)E(v,u)}. \] The analogue of the kernel $\frac{1-s(z)\overline{s(w)}}{-i(z-\overline{w})}$ is now given by the expression \begin{equation*}
K_{\tilde{\zeta},s}(u,v)=\frac{\vartheta [\tilde{\zeta} ](\tauBa{v}-u)} {i\vartheta [\tilde{\zeta} ](0)E(u,\tauBa{v})}- s(u) \frac{ \vartheta [{\zeta}](\tauBa{v}-u)} {i\vartheta [ \zeta ](0)E(u,\tauBa{v})} \overline{s(v)} , \end{equation*} where $u$ and $v$ are points on $X$ (see \cite{vinnikov4} and \cite{vinnikov5}). Furthermore, $\zeta$ and $\tilde{\zeta}$ are points on the Jacobian $J(X)$ (in fact $\zeta$ and $\tilde{\zeta}$ belong to the real tori $T_\nu$, see \cite{MR1634421}) of $X$ such that $\vartheta(\zeta)$ and $\vartheta(\tilde{\zeta})$ are nonzero and:
\begin{enumerate}
\item $\vartheta [\zeta ]$ denotes the theta function of $X$ with characteristic $\left[ \begin{array}{c} a \\ b \end{array} \right]$ where $\zeta=b+Za$ (with $a$ and $b$ in ${\mathbb R}^g$). \item $E(u,v)$ is the prime form on $X$, for more details see \cite{fay1,mumford2}. \item For fixed $v$, the map $u\mapsto K_{\widetilde{\zeta},s}(u,v)$ is a multiplicative half order differential (with multipliers corresponding to $\tilde{\zeta}$). \item $s$ is a map of line bundles on $X$ with multipliers corresponding to $\tilde{\zeta}-\zeta$ and satisfying $s(u)s(\tauBa{u})^*=1$. \end{enumerate}
The analogue of the operators \eqref{liberte'} is given now by \begin{equation*}
R_\alpha^{y}f(u)= \frac{f(u)}{y(u)-\alpha}-\sum_{j=1}^n \frac{1}{ d y(u^{(j)})} \frac{\vartheta[\zeta](u^{(j)}-u)}{\vartheta[\zeta](0) E(u^{(j)},u)} f(u^{(j)}), \end{equation*} where $y$ is a real meromorphic function of degree $n$ and $\alpha\in\mathbb C$ is such that there are $n$ distinct points $u^{(j)}$ in $X$ such that $y(u^{(j)})= \alpha$ and where $f$ is a section of $L_{\zeta}\otimes\Delta$ analytic at the points $u^{(j)}$. Furthermore, ({\cite[Lemma 4.3]{av3}}) the Cauchy kernels are eigenvectors of $R^y_\alpha$ with eigenvalues $\frac{1}{\overline{y(w)}-\alpha}$.
We conclude with the definition of the model operator, $M^{y}$ \cite[Equation 3-3]{MR1634421}, satisfying $(M^y - \alpha I ) ^{-1} = R_\alpha ^y$ for $\alpha$ large enough. It is defined on sections of the line bundle $L_{\zeta}\otimes \Delta$ analytic at the neighborhood of the poles of $y$ and is explicitly given by \begin{equation} M^{y}f(u) \label{m_y} = y(u)f(u) + \sum_{m=1}^{n}{c_m f(p^{(m)}) \frac {\vartheta[\zeta](p^{(m)}-u)} {\vartheta[\zeta] (0)E(p^{(m)},u)}}, \end{equation} where $y(u)$ is a meromorphic function on $X$ with $n$ distinct simple poles, $p^{(1)},...,p^{(n)}$.
\section{Herglotz theorem for compact real Riemann surfaces} \label{secHerg}
We first develop the analogue of Herglotz's formula for analytic functions with a positive real part in ${X}_+$, instead of $\mathbb C_+$. We consider the case of multi-valued functions but with purely imaginary period, i.e. multi-valued functions that satisfy \begin{equation*} \varphi(\mathcal{T}(\widetilde{p}))=\varphi(\widetilde{p})+\chi(\mathcal{T}). \end{equation*} Here $\mathcal{T}$ is an element in the group of deck transformations on the universal covering of ${X}$ and $$\chi:\,\,\pi_1(X)\rightarrow i{\mathbb R},$$ is a homomorphism of groups. We call such a mapping an {\it additive function}. Although in general it is not uniquely defined, the real part of $\varphi(p)$ is well-defined.
The involution $\tau$ extends to the universal covering of $X$. In particular, for $\widetilde{x} \in \widetilde{X}$, a preimage under $\pi$ of an element of $X_\mathbb R$, there exists $\mathcal{T}_{\widetilde{x}} \in \mathrm{Deck}(\widetilde{X} / X)$ such that $\tauBa{\widetilde{x}} = \mathcal{T} _{\widetilde{x}}(\widetilde{x})$. Hence, since $\mathrm{Deck}(\widetilde{X} / X)$ is isomorphic to $\pi_1(X)$, we write $\mathcal{T}_{\widetilde{x}}$ in the form $\mathcal{T}_{\widetilde{x}} = \sum_{j=1}^{g}{m_j A_j + n_j B_j}$, and we extensively use the notation \begin{align*} n( \cdot ) : & \widetilde{X} \longrightarrow \mathbb Z^g \\
& \widetilde{x} \longrightarrow (n_1 \cdots n_g)^{t}. \end{align*} We note that when $\widetilde{x} \in \widetilde{X}_0$ it follows that $n(\widetilde{x}) = 0$ and $n(\widetilde{x}) = e_{g+1-k+j}$ whenever $\widetilde{x} \in \widetilde{X}_j$ for $j=1,...,k-1$ (where the set $e_1,...,e_g$ forms the canonical basis of $\mathbb R ^g$). \begin{theorem} \label{harmonicIntRep} Let $X$ be a compact real Riemann surface and let $\psi(p)$ be a positive harmonic function defined on ${X}\setminus {X}_{\mathbb R}$. Then for every $p \in {X}\setminus {X}_{\mathbb R}$ there exists a positive measure $d \eta(p,x)$ on $X_{\mathbb R}$ such that \begin{align} \label{la-guerre-commence1} \psi(p)
= & \int_{X_{\mathbb R}} \psi(x) d \eta(p,x). \end{align} \end{theorem}
We start by presenting a preliminary lemma, revealing a useful property of the prime form.
\begin{lem} \label{primeFormA} Let $x$ be an element of $X_j$. Then the prime form satisfies the following relation \begin{align} \label{primeFormEqA} \overline{\frac{\partial}{\partial x}\ln E(\tauBa{p},x)} & = \frac{\partial}{\partial x}\ln E(p,x)-2\pi i \omega_{g-k-1+j}(x) \\ & = \frac{\partial}{\partial x}\ln E(p,x)-2\pi i \Big[ \omega_1(x) \cdots \omega_g(x) \Big] n(x). \nonumber \end{align} \end{lem} \begin{pf} Let $x\in{ X}_j$, where $j=1,2,\ldots , k-1$. Then, $\tau$ is lifted to the universal covering as follows \[ \widetilde{x} - \tauBa{\widetilde{x}} = \sum_{\ell=1}^g m_\ell A_\ell + n_\ell B_\ell, \] where $n_\ell = \delta (g-k-1+j - \ell )$ and where $\delta$ stands for the Kronecker delta. We also recall that the prime form (see \cite[Lemma 2.3]{av3}) satisfies \begin{align} \nonumber E(\widetilde{p},\widetilde{u}_1) = & E(\widetilde{p},\widetilde{u}_2) \exp \left( {-i \pi n^t \Gamma n + 2 \pi i (\widetilde{\mu}(\widetilde{p})-\widetilde{\mu}(\widetilde{u}_2))^t n} \right) \times \\&\times \exp \left( 2 \pi i (\beta^t_0 n - \alpha ^t _0 m ) \right) \label{eqPrimeFormConj} , \end{align} where $\widetilde{\mu}$ is the lifting of the Abel-Jacobi mapping to the universal covering and where $\zeta = \alpha_0 + \beta_0 \Gamma$. Thus, choosing $\tauBa{\widetilde{u}_2} = \widetilde{u}_1 = \widetilde{x}$, the relation in \eqref{eqPrimeFormConj} becomes \begin{align} \label{primeFormProp} \ln \, E(\widetilde{p},\tauBa{\widetilde{x}}) = & \ln \, E(\widetilde{p},\widetilde{x}) -i \pi \Gamma_{jj} + 2 \pi i (\widetilde{\mu}(\widetilde{p})-\widetilde{\mu}(\widetilde{x}))_j + \\ &+ 2 \pi i (\beta^t_0 n - \alpha ^t _0 m ) . \nonumber \end{align} We note that by using \cite[Lemma 2.4]{av3}, the prime form satisfies the identity $\overline{E(\tauBa{p},x)} = E(p,\tauBa{x})$. It remains to differentiate \eqref{primeFormProp} with respect to $x$ and \eqref{primeFormEqA} follows. \end{pf}
\begin{pf}[of Theorem \ref{harmonicIntRep}] We show that the expression \begin{align} \label{la-guerre-commence2} G(p,x) \overset{\text{def} } {=} & \pi \Big[ \omega_1(x) \cdots \omega_g(x) \Big] \cdot \left( \frac{n(x)}{2} + i (Yp) \right) - \\ & \nonumber -\frac{i}{2} \frac{\partial}{\partial x} \ln E(p,x) , \end{align} is the differential with respect to $x$ of the Green function, where $x\in X_\mathbb R$, $p \in X\setminus X_\mathbb R$ and where $\omega(x)$ is a section of the canonical bundle (denoted by $K_X$ and for an atlas $(V_j,z_j)$ defining the analytic structure of $X$, is given by cocycles $dz_j/dz_i$). Here and in the following pages, with an abuse of notation, $Yp$ denotes $Y \widetilde{\mu}(\widetilde{p})$. The existence of a Green function on a Riemann surface is a well-known result, see for instance \cite[Chapter V]{bergman} or \cite[Chapter X]{Tsuji}. Therefore, there exists a (unique) Green function, denoted by $g(p,x)$, with the differential $G(p,x)$ which contains singularities of the form $\frac{1}{x-p}$ along its diagonal. Hence, it is enough to show that the expression in \eqref{la-guerre-commence2} and the Green function satisfy the upcoming properties: \begin{enumerate} \item {\it The function $g(x,p)$ contains a logarithmic singularity while $G(x,p)$ has a simple pole at $p=x$.} It follows immediately by using the prime form properties and moving to local coordinates that the following relation holds (see for instance \cite[Section II]{fay1}): \[ \frac{i}{2} \frac{\partial}{\partial x} \ln E(p,x) = \frac{i}{2} \frac{\partial}{\partial v} \ln (t(u)-t(v)) = \frac{i}{2(t(u)-t(v))} . \]
\item {\it The real part of the differential $G(x,p)$ is single-valued:} Let $p$ and $p_1$ be two elements of $\widetilde{X}$ which are the pre-images of the same element in $X_j$, i.e. $\pi(p)=\pi(p_1) \in X_j $. It follows, using \eqref{eqZHY}, that \begin{equation} \label{eqPp1} \widetilde{\mu}(p) - \widetilde{\mu}(p_1) = n + \Gamma m = n + \left(\frac{1}{2} H + i Y^{-1}\right) m, \end{equation} for some $n,m \in \mathbb R^g$ and thus, using again \cite[Lemma 2.3]{av3} we have, modulo $2\pi i$: \begin{align*} \ln\left( E(p_1,x) \right) = & \ln \bigg( E(p,x) \exp \big( 2 \pi i (\mu(x) - \mu(p))^t m \big) \times \\ & \exp \left( 2 \pi i (\beta_0^t m - \alpha_0^t n) -i \pi m^t \Gamma m \right) \bigg) \\ = & \ln \, E(p,x) - \frac{i}{2} \pi m^t H m - \pi m^t Y^{-1} m + \\ & 2 \pi i (\widetilde{\mu}(x) - \widetilde{\mu}(p))^t m + 2 \pi i (\beta_0^t m - \alpha_0^t n). \end{align*} Then, the real part of a multiplier of ${\it ln}\left( E(p,x) \right)$ is: \begin{align} \label{eqReLnMult} \mathfrak{Re} ~ \big( \ln E(p,x) - &\ln E(p_1,x) \big) = \\ & \nonumber 2 \pi \left( \frac{1}{2} m^t Y^{-1} + \imagg { \mu(p) - \mu(x)} ^t \right) m. \end{align} Clearly, using \eqref{eqPp1}, we have that $$ m = Y \, \imagg { \widetilde{\mu}(p) - \widetilde{\mu}(p_1)}$$ and hence the derivative with respect to $x$ of \eqref{eqReLnMult}, is \begin{align*} \frac{\partial}{\partial x} \mathfrak{Re} ~ (\ln \, \left( E(p,x)\right) & - \ln \, \left( E(p_1,x) \right) ) \\ = & - 2 \pi \, \mathfrak{Im} ~ \Big[\omega_1(x) \cdots \omega_g(x) \Big] m \\ = & - 2 \pi \, \mathfrak{Im} ~ \Big[\omega_1(x) \cdots \omega_g(x) \Big] Y \widetilde{\mu} (p - p_1) . \end{align*} Hence, $G(x,p)$ has the appropriate singularity and has a single-valued real-part if it is of the following form: \[ \frac{\partial}{\partial x} \ln\left( E(p,x) \right)+ 2 \pi \Big[\omega_1(x) \cdots \omega_g(x) \Big] Y \widetilde{\mu} (p) + h(x), \] for some $h(x)$ with purely imaginary periods.
\item {\it The real part of the complex Green function vanishes on the boundary components:} Let $x \in X_j$ for some $0 \leq j \leq k-1$ and let $p \in X_l$ for some $0 \leq l \leq k-1$ such that $p \neq x$. Then, we integrate $G(x,p)$ with respect to $x$ and note that the integration of the vector $\Big[ \omega_1(x) \cdots \omega_g(x) \Big]$ is just the Abel-Jacobi mapping at $x$. Then, the Green function is: \[ g(x,p)= \left( \frac{n(x)}{2} + i (Yp) \right) \mu(x) - \frac{i}{2} \ln E(p,x). \] We use the equality, see \cite[Lemma 2.4]{av3}, $$ E(x,p) = \overline{E(\tauBa{x},\tauBa{p})} $$ to conclude that whenever $x$ and $p$ are both real, the relation \begin{align*} \overline{ \frac{\partial}{\partial x} \ln{(E(x,p))}} = & \frac{\partial}{\partial x} \ln{(E(\tauBa{x},\tauBa{p}))} \\ = & \frac{\partial}{\partial x} \ln{(E(x,p))} + 2\pi i \omega _{g-k+j-1}(x) \end{align*} holds. Hence, the real part of $g(x,p)$, using Equation \ref{primeFormEqA}, is equal to \begin{align*} \reall{g(x,p)} = & i (Yp) \mu(x) - \frac{i}{2} \ln E(p,x) + \overline{i (Yp) \mu(x) } - \\ & \overline{\frac{i}{2} \ln E(p,x)} + \reall{h(x)} \\ = & \mathfrak{Re} ~ \frac{i}{2} \left( \ln E(\tauBa{p},\tauBa{x}) - \ln E(p,x) \right) + \reall{h(x)} \\ = & \frac{\pi }{2} \omega(x) n(x) + \reall{h(x)} , \end{align*} and therefore, setting $\mathfrak{Re} ~ h(x) = - \frac{\pi}{2} w(x) n(x)$, the Green function vanishes on the real points. \end{enumerate} Thus, $G(x,p)$ is the differential of the complex Green function and so, for any $p \in X \setminus X_\mathbb R$ and for sufficient small $\varepsilon$, defines the solution to the Dirichlet problem, i.e. \[ \psi(p) = \int_{X_\mathbb R(\varepsilon)} \psi(x) G(x,p). \] Here, the integration contour is a collection of smooth simple closed curves located within a distance $\varepsilon$ approximating $X_\mathbb R$. We then consider a sequence $(\varepsilon_n)_{n \in \mathbb N}$ such that $\varepsilon_n \rightarrow 0$ as $n \rightarrow \infty$. Then, by the Banach-Alaoglu Theorem (see for instance, \cite[p. 223]{MR1681462}), there exists a subsequence $(\varepsilon_{n_k})_{k \in \mathbb N}$ such that the limit $$\lim_{k \rightarrow \infty} \int_{X_\mathbb R(\varepsilon_{n_k})} \psi(x) G(x,p)$$ exists. Thus, the weak-star limit defines a positive measure on $X_\mathbb R$ satisfying \eqref{la-guerre-commence1}. \end{pf}
Using the previous result, we may state the Herglotz theorem for real compact Riemann surfaces.
\begin{Tm} \label{caraTmRS} Let $X$ be a compact real Riemann surface of dividing type. Then an additive function $\varphi$, analytic in ${X}\setminus {X}_{\mathbb R}$, has positive real part in ${X}\setminus {X}_{\mathbb R}$ and, furthermore, satisfies \begin{equation*} \varphi(p)+\overline{\varphi(\tauBa{p})}=0,\quad p\in{ X}\setminus { X}_{\mathbb R}, \end{equation*} if and only if \begin{align} \nonumber \varphi(p)
= & \frac{\pi}{2} \int_{X_{\mathbb R}} \Big[\omega_1(x) \cdots \omega_g(x) \Big] n(\widetilde{x}) \, \frac{d \eta(x)}{\omega(x)} - \frac{i}{2}\int_{X_{\mathbb R}} \frac{\partial}{\partial x} \ln E(p,x) \, \frac{d \eta(x)}{\omega(x)} + \\ \label{la-guerre-commence} & \pi i\int_{X_{\mathbb R}} \Big[ \omega_1(x) \cdots \omega_g(x) \Big] (Yp) \, \frac{d \eta(x)}{\omega(x)} +iM. \end{align} Here, $M$ is a real number, $d \eta$ is a positive finite measure on $X _{\mathbb R}$, $\omega(x)$ is a section of the canonical line bundle which is positive with respect to the measure $d \eta$. \end{Tm} \begin{pf} We start with the "if" part as we compute $\overline{\varphi(\tauBa{p})}$: \begin{eqnarray*} \overline{\varphi(\tauBa{p})} &=& \frac{\pi}{2} \int_{{X}_{\mathbb R}} [\omega_1(x) \cdots \omega_g(x) ] n(\widetilde{x}) \frac{d \eta(x)}{\omega(x)} - \\ & & \pi i \int_{{X}_{\mathbb R}} [\omega_1(x) \cdots \omega_g(x)] (\overline{Y p})~ \frac{d \eta(x)}{\omega(x)} + \\ & & \frac{i}{2} \int_{{X}_{\mathbb R}} \frac{\partial}{\partial x} \ln \overline{E(\tauBa{p},x)} \frac{d \eta(x)}{\omega(x)} -iM . \end{eqnarray*} Thus, using Lemma \ref{primeFormA} and since $\omega$ is real (i.e. $\overline{\tauBa{ \omega_i}} = \omega_i$), we have: \begin{align} \nonumber \overline{\varphi(\tauBa{p})} =& \frac{\pi}{2} \int_{{X}_{\mathbb R}} [\omega_1(x) \, \cdots \, \omega_g(x)] n(\widetilde{x}) \frac{d\eta(x)}{\omega(x)} - \\ \nonumber & \pi i \int_{{ X}_{\mathbb R}} [\omega_1(x) \, \cdots \, \omega_g(x)] (Yp)~ \frac{d \eta(x)}{\omega(x)} + \\ \label{la-guerre-commence11} & \frac{i}{2} \int_{{X}_{\mathbb R}} \frac{\partial}{\partial x} \ln E(p,x) \frac{d \eta(x)}{\omega(x)} - \pi \sum_{j=1}^{k-1}\int_{{X}_j}\omega_j(x) \frac{d \eta(x)}{{\omega(x)}} -iM. \end{align} Summing up \eqref{la-guerre-commence} and \eqref{la-guerre-commence11}, leads to \begin{align*} \varphi(p)+\overline{\varphi(\tauBa{p})} = & \pi \int_{{X}_{\mathbb R}} [ \omega_1(x) \, \cdots \, \omega_g(x)] n (\widetilde{x}) \frac{d \eta(x)}{\omega(x)} - \\ & \pi \sum_{j=1}^{k-1}\int_{{X}_j} \omega_j(x) \frac{d \eta(x)}{\omega(x)} = 0. \end{align*} For the "only if" statement: The real part of $\varphi(p)$ is positive, harmonic and with a single-valued real part in $X \setminus X_{\mathbb R}$. Thus, by Theorem \ref{harmonicIntRep}, $\mathfrak{Re} ~{ \varphi(p)}$ has an integral representation as given in \eqref{la-guerre-commence1} for some positive measure $d \, \eta_{\varphi}$ on $X_\mathbb R$. Finally, it is well-known that two analytic functions defined on a connected domain with the same real part differ only by some imaginary constant. Hence we may summarize that \[ \varphi(p) = \int_{X_{\mathbb R}} G(p,x) d \nu_{\varphi}(x) + iM, \] for some $M \in \mathbb R$. \end{pf}
In the case where $X = \mathbb P^1$ coupled with the anti-holomorphic involution $z \rightarrow \overline{z}$, we set $\omega = \frac{d \, t}{t^2 + 1}$ and then \eqref{27-octobre-2000} can be extracted from \eqref{la-guerre-commence} by setting, \begin{align*} d \nu (t) & = \frac{1}{2} d \eta (t) (t^2 +1), \qquad B = \frac{1}{2} \eta (\infty), \\ A & = M - \frac{1}{2} \int_{I} t \, d \eta (t) + \frac{1}{2} \int_{\mathbb R \backslash I} \frac{d \eta (t)}{t}, \end{align*} where $I$ is any interval of $\mathbb R$ containing zero.
Similarly, in the case of the torus, one may deduce H. Villat's formula, see \cite{MR1629812}. (Akhiezer in \cite[Section 56]{MR1054205} presented a different but equivalent formula).
\section{de Branges \texorpdfstring{$\mathcal{L}(\varphi)$}{ $\mathcal{L}(\varphi)$ } spaces in the nonzero genus case} \label{secdBLphi}
In this section, we further study the reproducing kernel Hilbert space associated with an additive function defined on a compact real Riemann surface. To do so, we utilize the Herglotz integral representation proved in the previous section in order to examine $\mathcal{L}(\varphi)$ spaces and their properties. First, we introduce the analogue of formula \eqref{4-juin-2000}. \begin{Tm} \label{thm41} Let $X$ be a compact real Riemann surface of dividing type, $\zeta \in T_0$ and let $\varphi$ be analytic with positive real part in $X_+$. Then, the identity \begin{align} \nonumber \int_{{X}_{\mathbb R}} & \frac{\vartheta[\zeta](p-x)}{\vartheta[\zeta](0)E(x,p)} \frac{\vartheta[\zeta](x-{\tauBa{q}})}{\vartheta[\zeta](0)E(x,{\tauBa{q}})} \frac{d \eta(x)}{\omega(x)} = \frac{\vartheta[\zeta](p-{\tauBa{q}})}{\vartheta[\zeta](0)E({\tauBa{q}},p)} \times \\ \nonumber & \times\bigg[ \left({\varphi}(p)+\overline{{\varphi}(\tauBa{q})}\right) + \sum_{j=0}^{k-1} a_{jj}\frac{\partial}{\partial z_j} \ln\frac{\vartheta(\zeta)}{\vartheta(\zeta+p-{\tauBa{q}})} - \\ & -2\pi i \row_{i=0,\ldots,g} \left( \sum_{j=0}^{k-1} a_{ji} \right) Y(p-{\tauBa{q}}) \bigg]
, \label{jfk-le-26-octobre-2000} \end{align} \label{la-fin-du-sionisme?} holds where \begin{equation} \label{a_j} a_{ji} \overset{\text{def} } {=} \int_{{X}_{j}} \frac{\omega_i(x)}{\omega(x)}d \eta(x),\quad j=0,\ldots,k-1, \quad i=1,\ldots,g. \end{equation} \end{Tm} Before heading to prove Theorem \ref{thm41}, we make a number of remarks. The left hand side of \eqref{jfk-le-26-octobre-2000} may be written as $$ \innerProductTri{K_{\zeta}(p,x)}{K_{\zeta}(x,\tauBa{q})} {{\bf L}^2\left( X_{\mathbb R} , L_{\zeta} \otimes \Delta , \frac{d \eta(x)}{w(x)} \right) } $$ and hence, it is precise the counterpart of the right hand side of \eqref{4-juin-2000}. Furthermore, whenever we additionally assume zero cycles along the boundary components, that is, \begin{equation} \label{eqCycles} \int_{{ X}_j}\frac{\omega_i(x)}{\omega(x)}d \eta(x)=0, \quad j=0,\ldots,k-1, \end{equation} the right hand side of \eqref{jfk-le-26-octobre-2000} is the counterpart of the left hand side of \eqref{4-juin-2000}. Hence, we may summarize and present the following result. \begin{corollary} \label{kernelAj0} Let $X$ be a compact real Riemann surface of dividing type, let $\zeta \in T_0$ and let $\varphi$ be an additive function on $X$ such that \eqref{eqCycles} holds. Then, the identity \begin{align} \left({\varphi}(p)+\overline{{\varphi}(\tauBa{q})}\right) & \frac{\vartheta[\zeta](p-{\tauBa{q}})}{\vartheta[\zeta](0)E({\tauBa{q}},p)} \nonumber = \\ & \innerProductTri{K_{\zeta}(p,u)}{K_{\zeta}(u,\tauBa{q})} {{\bf L}^2\left( X_{\mathbb R} , L_{\zeta} \otimes \Delta , \frac{d \eta(x)}{w(x)} \right) } , \label{jfk-le-26-octobre-2000_SV} \end{align} holds. \end{corollary} \begin{pf}[of Theorem \ref{la-fin-du-sionisme?}] Using the Herglotz-type formula \eqref{la-guerre-commence}, we may write \begin{align} \nonumber (\varphi(p) + &\overline{\varphi(\tauBa{q})}) \frac{\vartheta[\zeta](p-{\tauBa{q}})}{\vartheta[\zeta](0)E({\tauBa{q}},p)} = \\ =& 2\pi i\left(\int_{X_{\mathbb R}} [ \omega_1(x) \cdots \omega_g(x) ] Y(p-\tauBa{q}) \frac{d\eta(x)}{\omega(x)}\right) \frac{\vartheta[\zeta](p-{\tauBa{q}})}{\vartheta[\zeta](0)E({\tauBa{q}},p)} \nonumber - \\ \label{Thm43A} & \frac{i}{2} \int_{X_{\mathbb R}} \left(\frac{\partial}{\partial x}\ln E(p,x)-\frac{\partial}{\partial x}\ln E({\tauBa{q}},x)\right) \frac{d\eta(x)}{\omega(x)} \frac{\vartheta[\zeta](p-{\tauBa{q}})}{\vartheta[\zeta](0)E({\tauBa{q}},p)}. \end{align} Furthermore, by \cite[Proposition 2.10, p. 25]{fay1}, we have \begin{align} \frac{\partial}{\partial x}\ln\frac{E(p,x)}{E({\tauBa{q}},x)} + & \sum_{j=1}^g\left(\frac{\partial}{\partial z_j}\ln \vartheta(\zeta+p-{\tauBa{q}}) -\frac{\partial}{\partial z_j}\ln \vartheta(\zeta)\right)\omega_j(x) = \nonumber \\ \label{eq123} & \frac{E({\tauBa{q}}, p)}{E(x,{\tauBa{q}})E(x,p)} \frac {\vartheta[\zeta](x-{\tauBa{q}})\vartheta[\zeta](p-x)} {\vartheta[\zeta](p-{\tauBa{q}})\vartheta[\zeta](0)}. 
\end{align} Thus, multiplying both sides of \eqref{eq123} by $\frac{\vartheta[\zeta](p-{\tauBa{q}})}{\vartheta[\zeta](0)E({\tauBa{q}},p)}$, leads to \begin{align} \nonumber & \frac{\vartheta[\zeta](p-{\tauBa{q}})}{\vartheta[\zeta](0)E({\tauBa{q}},p)} \frac{\partial}{\partial x}\ln\frac{E(p,x)}{E({\tauBa{q}},x)} = \frac{\vartheta[\zeta](x-{\tauBa{q}})}{ \vartheta[\zeta](0)E(x,{\tauBa{q}})} \frac{\vartheta[\zeta](p-x)}{ \vartheta[\zeta](0)E(x,p)}- \\ & \sum_{j=1}^g \frac{\partial}{\partial z_j} \left( \ln \vartheta(\zeta+p-{\tauBa{q}}) - \ln \vartheta(\zeta)\right)\omega_j(x) \frac{\vartheta[\zeta](p-{\tauBa{q}})}{ \vartheta[\zeta](0)E({\tauBa{q}},p)}. \label{Thm43B} \end{align} Finally, by substituting \eqref{Thm43B} into \eqref{Thm43A}, we conclude that the identity \begin{align*} (\varphi(p)-\overline{\varphi(\tauBa{q})}) & \frac{\vartheta[\zeta](p-{\tauBa{q}})}{\vartheta[\zeta](0)E({\tauBa{q}},p)} = \frac{\vartheta[\zeta](p-{\tauBa{q}})}{ \vartheta[\zeta](0)E({\tauBa{q}},p)} \times \\ & \bigg[ \frac{i}{2} \sum_{j=1}^{g} \int_{X_{j}} \frac{\omega_j(x) d \eta(x)}{\omega(x)} \frac{\partial}{\partial z_j} \left( \ln \vartheta(\zeta+p-{\tauBa{q}}) -\ln \vartheta(\zeta) \right) + \\ & 2\pi i\int_{X_{\mathbb R}} \frac{[\omega_1(x)\,\cdots \, \omega_g(x)]}{\omega(x)}Y(p-{\tauBa{q}})d \eta(x) \bigg] - \\ & \frac{i}{2} \int_{{X}_{\mathbb R}} \frac{\vartheta[\zeta](p-x)}{\vartheta[\zeta](0)E(x,p)} \frac{\vartheta[\zeta](x-{\tauBa{q}})}{ \vartheta[\zeta](0)E(x,{\tauBa{q}})}\frac{d \eta(x)}{\omega(x)} \end{align*} follows. Setting $a_j$ as in \eqref{a_j}, completes the proof. \end{pf}
From this point onward we assume that \eqref{eqCycles} holds.
\begin{definition} Let $\varphi(x)$ be analytic in ${X}\setminus {X}_{\mathbb R}$ with positive real part in ${X}\setminus {X}_{\mathbb R}$. The reproducing kernel Hilbert space of sections of the line bundle $L_\zeta \otimes \Delta$ with the reproducing kernel \[ K(p,q) = (\varphi(p) + \overline{\varphi(\tauBa{q})}) \frac{\vartheta[\zeta](p-{\tauBa{q}})}{\vartheta[\zeta](0)E({\tauBa{q}},p)}, \] is denoted by $\mathcal{L}(\varphi)$. \end{definition} The analogue of the first part of Theorem \ref{Thm21} is given below in Theorem \ref{phiIntPresentation}. However, we first present a preliminary lemma that is required during this section (see \cite[Ex. 6.3.2]{capb2}, in the unit-disk case). \begin{lem} \label{denseL2} Let $X$ be a compact real Riemann surface of dividing type. Then the linear span of Cauchy kernels {\allowbreak $\frac{\vartheta[\zeta](x-u)}{i \vartheta[\zeta](0)E(u,x)}$ } where $u$ varies in $ X \setminus X_{\mathbb R}$ is dense in {\allowbreak ${\bf L}^2\left( X_{\mathbb R} , L_{\zeta} \otimes \Delta , \frac{d \eta(x)}{w(x)} \right)$}. \end{lem} \begin{pf} Let us assume that a section $f$ of $L_{\zeta} \otimes \Delta$ satisfies \begin{equation} \label{eqCkDense} \int_{{X}_{\mathbb R}}K_{\zeta}(u, x)f(x)\frac{d \eta(x)}{\omega(x)} = 0, \end{equation} for all $u\in X \setminus X_\mathbb R$. We recall that by \cite{av2}, there exists an isometric isomorphism from ${\bf L}^2\left( X_{\mathbb R} , L_{\zeta} \otimes \Delta , \frac{d \eta(x)}{w(x)} \right)$ to $ ({\bf L}^2(\mathbb T))^n$ and therefore there exists an orthogonal decomposition, see \cite[Equation 4.14]{av2}, \begin{align*} {\bf L}^2\bigg( & X_{\mathbb R} , L_{\zeta} \otimes \Delta , \frac{d \eta(x)}{w(x)} \bigg) \\ = & {\bf H}^2\left( X_{+} , L_{\zeta} \otimes \Delta , \frac{d \eta(x)}{w(x)} \right) \oplus {\bf H}^2\left( X_{-} , L_{\zeta} \otimes \Delta , \frac{d \eta(x)}{w(x)} \right)
. \end{align*} Furthermore, Equation \ref{eqCkDense}, for $u \in X_+$ is just the projection from ${\bf L}^2 \bigg( X_{\mathbb R} , L_{\zeta} \otimes \Delta , \frac{d \eta(x)}{w(x)} \bigg)$ into ${\bf H}^2 \left( X_{+} , L_{\zeta} \otimes \Delta , \frac{d \eta(x)}{w(x)} \right)$. Thus, $P_+(f)(u)=0$ and, similarly, $P_-(f)(u)=0$ and we may conclude that $f=0$ and the claim follows. \end{pf} \begin{Tm} \label{phiIntPresentation} The elements of $\mathcal{L}(\varphi)$ are of the form \begin{equation} \label{l-phi} F(u) = \int_{{X}_{\mathbb R}}K_{\zeta}(u, x)f(x)\frac{d \eta(x)}{\omega(x)}, \end{equation} where $f(x)$ is a section of $L_\zeta\otimes\Delta$ which is square summable with respect to $\frac{d \eta (x)}{\omega(x)}$. \end{Tm} \begin{pf} Equation \ref{l-phi} follows by Corollary \ref{kernelAj0}. Let us set $N\in{\mathbb N}$, then for any choice of $w_1,\ldots,w_N \in{X}\setminus X_{\mathbb R}$ and $c_1 \cdots c_N \in {\mathbb C}$, the identity \begin{align} \label{lPhinorm} F(u) \overset{\text{def} } {=} & \sum_{j=1}^{n} c_j (\varphi(u) + \overline{\varphi(w_j)}) K_{\zeta}( u, w_j) \\ = & \int_{X_{\mathbb R}} K_{\zeta}(u, x) f(x) \frac{d \eta (x)}{w(x)} \nonumber \end{align} holds, where \[ f(u) = \sum_{j=1}^{n} c_j K_{\zeta}( w_j, u) \in {{\bf L}^2\left( X_{\mathbb R} , L_{\zeta} \otimes \Delta , \frac{d \eta(x)}{w(x)} \right)} . \] Due to Lemma \ref{denseL2}, the linear span of the kernels \eqref{jfk-le-26-octobre-2000_SV} is dense in ${\bf L}^2\left( X_{\mathbb R} , L_{\zeta} \otimes \Delta , \frac{d \eta(x)}{w(x)} \right)$ and hence \eqref{l-phi} follows. \end{pf} \begin{Tm} The norm of an element $F$ in $\mathcal{L}(\varphi)$ is given by \begin{equation*} \normTwo{ F }{\mathcal{L}(\varphi)} \overset{\text{def} } {=} \normTwo{f}{{\bf L}^2 \left( X_{\mathbb R} , L_{\zeta} \otimes \Delta , \frac{d \eta(t)}{w(t)} \right)}. \end{equation*} \end{Tm} \begin{pf} Since, by Lemma \ref{denseL2}, the linear span of the kernels \eqref{jfk-le-26-octobre-2000_SV} is dense in ${\bf L}^2(d \eta)$, it is enough to check the eqaulity of the norms for a linear combination of the Cauchy kernels. The norm of an element in the reproducing kernel Hilbert space $\mathcal{L}(\varphi)$ is given by \[ \norm{F}^2_{\mathcal{L}(\varphi)} = \sum_{\ell,j=1}^{n} \overline{c_\ell} (\varphi(u_\ell) + \overline{\varphi(u_j)}) K_{\zeta}( u_\ell, u_j)c_j. \] Then, by \eqref{lPhinorm} \[ \norm{F}^2_{\mathcal{L}(\varphi)} = \sum_{\ell,j=1}^{n} \overline{c_j} \innerProductTri {K_{\zeta}( u_\ell, w )} {K_{\zeta}( w, u_j)} {{\bf L}^2\left( X_{\mathbb R} , L_{\zeta} \otimes \Delta , \frac{d \eta(t)}{w(t)} \right)} c_\ell, \] which is exactly the norm of $f(w)$ in ${\bf L}^2\left( X_{\mathbb R} , L_{\zeta} \otimes \Delta , \frac{d \eta(t)}{w(t)} \right)$. \end{pf}
As an immediate consequence, whenever $y$ is a real function we may state an additional result below: in this case $M^y$ is simply the operator of multiplication by $y$ in ${\bf L}^2(d \eta)$.
\begin{Tm} Let $y$ be a meromorphic function with simple poles such that the poles of $y$ do not belong to the support of the measure $d \eta$. Then, the multiplication model operator $M^{y}$, defined on $\mathcal{L}(\varphi)$, satisfies the following properties: \begin{enumerate} \item $M^{y}$ is given explicitly by \begin{equation} \label{MyLPhi} \left(M^y F \right) (u) = \int_{{X}_{\mathbb R}} { K_{\zeta}( u, x )}f(x)y(x)\frac{d \eta(x)}{\omega(x)}, \end{equation} where $f$ is a section of $L_\zeta\otimes\Delta$, which is square summable with respect to $d \eta$. \item $\mathcal{L}(\varphi)$ is invariant under $M^{y}$. \item $M^{y}$ is bounded. \end{enumerate} \end{Tm} \begin{pf} Considering the model operator \eqref{m_y} together with Theorem \ref{phiIntPresentation}, we conclude the following: \begin{align*} (M^y F)(u) = & y(u)F(u) + \sum_{m=1}^{n}{c_m F(p_m) K_{\zeta}( u, p_m)} \\ = & y(u)F(u) + \sum_{m=1}^{n}{c_m \int_{X_{\mathbb R}} f(x) K_{\zeta} (p_m,x) \frac{d \eta(x)}{\omega(x)} K_{\zeta} ( u, p_m)} \\ = & y(u)F(u) + \int_{X_{\mathbb R}} f(x) \frac{d \eta(x)}{\omega(x)} \sum_{m=1}^{n}{c_m K_{\zeta}( p_m,x) K_{\zeta} ( u, p_m)}, \end{align*} where $p^{(1)},...,p^{(n)}$ are the distinct poles of $y$. Using the collection formula \cite[Proposition 3.1]{av2} and using again Theorem \ref{phiIntPresentation}, we have: \begin{align*} (M^y F)(u) = & y(u)F(u) + \int_{X_{\mathbb R}} f(x) \frac{d \eta(x)}{\omega(x)} K_{\zeta} (u,x) (y(x)-y(u)) \\ = & \int_{{X}_{\mathbb R}} { K_{\zeta}( u, x )}f(x)y(x)\frac{d \eta(x)}{\omega(x)}. \end{align*} We note that $f$ is a section of $L_\zeta\otimes\Delta$ and remains so after multiplication by a meromorphic function $y$. Furthermore, it is square summable with respect to the measure $d \eta$, since, by assumption, the poles of $y$ lie outside the support of $d \eta$. \end{pf}
\begin{corollary} Let $y$ be a real meromorphic function on $X$ such that the poles of $y$ do not belong to the support of the measure $d \eta$. Then, the multiplication model operator $M^{y}$ is selfadjoint. \end{corollary} \begin{pf} The model operator $M^y$ satisfies $\innerProductReg{M^y F}{G} = \innerProductReg{ F}{M^yG}$ as follows from \[ \innerProductReg{M^y F}{G} = \int_{{X}_{\mathbb R}} f(x)y(x) \overline{ g(x)} \frac{d \eta(x)}{\omega(x)} \] and from the assumption that $y$ is a real meromorphic function.
\end{pf}
Equation \ref{MyLPhi} immediately produces the $\mathcal{L}(\varphi)$ counterpart of \cite[Theorem 4.6]{av3}.
\begin{corollary}
Let $y_1$ and $y_2$ be two meromorphic functions of degree $n_1$ and $n_2$, respectively. Furthermore, we assume that the poles of $y_1$ and $y_2$ lie outside the support of $d \eta$ on $X _{\mathbb R}$. Then, $M^{y_1}$ and $M^{y_2}$ commute on $\mathcal{L}(\varphi)$, that is, for every $F(z) \in \mathcal{L}(\varphi)$, $M^{y_1} M^{y_2} F = M^{y_2} M^{y_1} F$ holds. \end{corollary}
Using the observation that $R^y_{\alpha}$ is just the operator $M^{\frac{1}{y(u)-\alpha}}$, we present the counterpart of \eqref{tatche}, that is, an integral representation of the resolvent operator at $\alpha$.
\begin{corollary} $\mathcal{L}(\varphi)$ is invariant under the resolvent operator $R^y_\alpha$ where $\alpha$ is a non-real complex number. Moreover, the resolvent operator has the integral representation: \begin{equation} \label{RyInLPhi} (R^y_\alpha F)(u) = \int_{X _ {\mathbb R}}K_{\zeta}(u,x)\frac{f(x)}{(y(x)-\alpha)} \frac{d \eta(x)}{\omega(x)} , \end{equation} where the poles of $y$ do not belong to the support of $d \eta$. \end{corollary}
As another immediate corollary, we mention that any pair of resolvent operators commutes.
\begin{corollary} Let $y_1$ and $y_2$ be two meromorphic functions of degree $n_1$ and $n_2$, respectively. Furthermore, assume the poles of $y_1$ and $y_2$ lie outside the support of $d \eta$ on $X _{\mathbb R}$ and let $\alpha$ and $\beta$ be two elements in $\mathbb C \setminus \mathbb R$. Then the resolvent operators $R^{y_1}_{\alpha}$ and $R^{y_2}_{\beta}$ commute, i.e. for any $F(z) \in \mathcal{L}(\varphi)$ the following holds $R^{y_1}_{\alpha} R^{y_2}_{\beta} F= R^{y_2}_{\beta} R^{y_1}_{\alpha} F$. \end{corollary} The counterpart of Theorem \ref{finiteDimentionalLphi} is given below.
\begin{Tm} Let $\varphi$ be analytic in $X \backslash X_{\mathbb R}$ and with positive real part. Then the following are equivalent: \begin{enumerate} \item \label{tfre1} The reproducing kernel space $\mathcal{L}(\varphi)$ is finite dimensional. \item \label{tfre2} $\varphi$ is meromorphic on ${X}$. \item \label{tfre3} ${\bf L}^2(d \eta)$ is finite dimensional. \end{enumerate} \end{Tm} \begin{pf} Since $\mathcal{L}(\varphi)$ is isomorphic to ${\bf L}^2(d \eta)$, $\mathcal{L}(\varphi)$ is finite dimensional if and only if ${\bf L}^2(d \eta)$ is finite dimensional.
If ${\bf L}^2(d \eta)$ is finite dimensional then $d \eta$ has a finite number of atoms and hence $\varphi$ is meromorphic. On the other hand assume that $\varphi$ is meromorphic on $X$. As in the classical case, the measure in the integral representation of $\varphi$ is obtained as the weak star limit of $\varphi(p) + \overline{\varphi(p)}$, and the poles of $\varphi$ correspond to the atoms of $d \eta$. \end{pf}
\begin{Tm} \label{thm432} Let $f,g \in \mathcal{L}(\varphi)$ and $\alpha, \beta \in \mathbb C$ with non-zero imaginary part. Then the following identity holds, \begin{equation} \label{strucIdPhi} \innerProductReg{R^y_\alpha f}{ g} - \innerProductReg{f}{ R^y_\beta g} - (\alpha - \overline{\beta}) \innerProductReg{R^y_\alpha f}{ R^y_\beta g} = 0. \end{equation} \end{Tm} \begin{pf} Using \eqref{RyInLPhi}, the left hand side of \eqref{strucIdPhi} can be written as \begin{align*} \innerProductReg{R^y_\alpha f}{ g} & - \innerProductReg{f}{ R^y_\beta g} - (\alpha - \overline{\beta}) \innerProductReg{R^y_\alpha f}{ R^y_\beta g} = \int_{X _ {\mathbb R}} f(u) g(u) K_{\zeta}(p,u) \times \\ & \left( \frac{1}{(y(u)-\alpha)} - \frac{1}{(y(u)-\overline{\beta})} - \frac{\alpha - \overline{\beta}}{(y(u)-\alpha)(y(u)-\overline{\beta})} \right) \frac{ d \eta(u)}{\omega(u)}. \end{align*}
One may note that \[ \frac{1}{(y(u)-\alpha)} - \frac{1}{(y(u)-\overline{\beta})} - \frac{\alpha - \overline{\beta}}{(y(u)-\alpha)(y(u)-\overline{\beta})} \] is identically zero, since \[ \frac{1}{(y(u)-\alpha)} - \frac{1}{(y(u)-\overline{\beta})} = \frac{\alpha - \overline{\beta}}{(y(u)-\alpha)(y(u)-\overline{\beta})} , \] and hence the result follows.
\end{pf}
In fact Theorem \ref{thm432} is an if and only if relation, and we refer the reader to \cite{AVP3} for the related de Branges structure theorems.
\section{The \texorpdfstring{$\mathcal{L}(\varphi)$}{ $\mathcal{L}(\varphi)$ } spaces in the single-valued case} \label{secPhiSingleVal}
Whenever an additive function $\varphi$ is single-valued, the formula \begin{equation*}
s(p)= \frac{1-\varphi(p)}{1+\varphi(p)} \end{equation*} makes sense and defines a single-valued function $s(p)$. Then, the reproducing kernel Hilbert space associated with $s(p)$, denoted by ${\mathcal H}(s)$, has reproducing kernel of the form \[ i(1-s(p)s(q)^*)K_{\zeta}( p,{\tauBa{q}}). \] These spaces were studied in \cite{av3} in the finite dimensional setting and in \cite{AVP1} in the infinite dimensional case.
We note that the multiplication operator $ u \mapsto \frac{(1+\varphi(p))}{{\sqrt{2}}} u $ maps, as in the zero genus case, ${\mathcal H}(s)$ onto $\mathcal{L}(\varphi)$ unitarily. Hence, we may pair any $u\in{\mathcal H}(s)$ to a function $f\in{\bf L}^2(d \eta)$ through the corresponding $\mathcal{L}(\varphi)$ space, such that \begin{equation*}
\frac{1}{\sqrt{2}}(1+\varphi(p))u(p) = \int_{{X}_{\mathbb R}} K_{\zeta}( p,x)f(x)\frac{d \eta(x)}{\omega(x)}. \end{equation*} We denote the mapping from ${\mathcal H}(s)$ onto ${\bf L}^2(d \eta)$ by $$\Lambda: u(p) \longrightarrow f(x).$$ We now turn to express the operator $M^y$ using the operator of multiplication by $y$ in ${\bf L}^2(d \eta)$.
\begin{Tm} Let $\varphi$ be a single-valued function with positive real part on a dividing-type compact Riemann surface $X$ and let $y$ be a meromorphic function of degree $n$ on $X$. Then, any $f\in {\bf L}^2(d \eta)$ satisfies \begin{align} \nonumber (\Lambda M^y \Lambda^*)f(x) = & y(x)f(x)+ \\ \label{jardin-des-plantes} & i \sum_{j=1}^{n} \frac{c_j}{1+\varphi(p^{(j)})}K_{\zeta}( x, p^{(j)}) \left(\int_{{X}_{\mathbb R}} K_{\zeta}( p^{(j)}, p)f(p) \frac{d \eta(p)}{\omega(p)}\right), \end{align} where $p^{(1)},...,p^{(n)}$ are the $n$ distinct poles of $y$. \end{Tm} \begin{pf} Let $u\in{\mathcal H}(s)$ and $f\in{\bf L}^2(d \eta)$ such that, $\Lambda u = f$, that is, they satisfy the relation \begin{equation} \label{phiInt} \frac{1+\varphi(p)}{{\sqrt{2}}}u(p)=\int_{{ X}_{\mathbb R}}K_{\zeta}(x,p)f(x)\frac{d \eta(x)}{\omega(x)}. \end{equation} Then, multiplying both sides of \eqref{phiInt} by $y(p)$, we obtain \begin{align} \nonumber y(p) \frac{1+\varphi(p)}{\sqrt{2}}u(p) = & \int_{{ X}_{\mathbb R}}y(p)K_{\zeta}( p, x)f(x)\frac{ d \eta(x)}{\omega(x)} \\ \nonumber =& \int_{{ X}_{\mathbb R}}y(x)K_{\zeta}( p, x)f(x)\frac{d \eta(x)}{\omega(x)}+ \\ & \int_{{ X}_{\mathbb R}}(y(p)-y(x))K_{\zeta}( p, x)f(x) \frac{d \eta(x)}{\omega(x)}. \label{phiInt1} \end{align} Then, using the collection-type formula (see \cite[Proposition 3.1, Eq. 3.5]{av2}), we have \begin{equation*} (y(p)-y(q))K_{\zeta}( p, q) = - \sum_{j=1}^{n} \frac{c_j}{dt_j(p^{(j)})}K_{\zeta}( p, p^{(j)})K_{\zeta}( p^{(j)},q). \end{equation*} Then \eqref{phiInt1} becomes: \begin{align} \nonumber \int_{{X}_{\mathbb R}}y(x)K_{\zeta}( p, x)f(x) \frac{d \eta(x)}{\omega(x)} = & \frac{(1+\varphi(p))}{\sqrt{2}} y(p)u(p) - \\ & \sum_{j=1}^{n} \frac{c_j K_{\zeta}( p, p^{(j)})}{dt_j(p^{(j)})} \int_{{X}_{\mathbb R}} K_{\zeta}( p^{(j)}, x)f(x)\frac{d \eta(x)}{\omega(x)}. \label{phiInt2} \end{align} On the other hand, using the equality \begin{equation} (1+\varphi(p))\displaystyle{\frac{1+s(p)}{2}} = 1. \label{phiInt3} \end{equation} Equation \ref{phiInt} becomes \begin{equation} \label{phiInt4} \int_{{X}_{\mathbb R}} K_{\zeta}(x,p^{(j)})f(x)\frac{d \eta(x)}{\omega(x)}= \displaystyle{\frac{\sqrt{2}}{1+s(p^{(j)})}}u(p^{(j)}). \end{equation} Now, substituting \eqref{phiInt3} and \eqref{phiInt4} in \eqref{phiInt2}, we obtain the following calculation: \begin{align} \frac{\sqrt{2}}{1+\varphi(p)} & \int_{{X}_{\mathbb R}} K_{\zeta} ( p, x) y(x)f(x) \frac{d \eta(x)}{\omega(x)} = \nonumber \\ = & y(p)u(p) - \frac{1+s(p)}{\sqrt{2}} \sum_{j=1}^{n} c_j K_{\zeta}( p, p^{(j)})\frac{\sqrt{2}u(p^{(j)})}{1+s(p^{(j)})} \nonumber \\ =& y(p)u(p) - \sum_{j=1}^{n} c_j\frac{1+s(p)}{1+s(p^{(j)})} K_{\zeta}( p , p^{(j)})u(p^{(j)}) \nonumber \\ \nonumber =& y(p)u(p) - \sum_{j=1}^{n} c_j K_{\zeta}( p , p^{(j)})u(p^{(j)}) \left(1 + \frac{s(p)-s(p^{(j)})}{1+s(p^{(j)})} \right) \\ = & (M^y u)(p) + \sum_{j=1}^{n} c_j \frac{s(p)-s(p^{(j)})}{\sqrt{2}} K_{\zeta}( p , p^{(j)}) u(p^{(j)}) . \label{phiInt5} \end{align} On the other hand, using \eqref{phiInt3}, we have \begin{align} \nonumber \varphi(p)-\varphi(p^{(j)}) =& \frac{2(s(p^{(j)})-s(p))}{(1+s(p))(1+s(p^{(j)}))} \\ =& (1+\varphi(p))(1+\varphi(p^{(j)})) \frac{s(p^{(j)})-s(p)}{2}. 
\label{phiInt6} \end{align} Thus, we take \eqref{phiInt6} and multiply it on the right by the Cauchy kernel $K_{\zeta}( p, p^{(j)})$ in both sides and use \eqref{jfk-le-26-octobre-2000_SV} to conclude \begin{align*} (1+\varphi(p)) \frac{s(p)-s(p^{(j)})}{2} & K_{\zeta}( p, p^{(j)}) = \frac{\varphi(p^{(j)})-\varphi(p)}{1+\varphi(p^{(j)})} K_{\zeta}( p, p^{(j)}) \\ =& \frac{i}{1+\varphi(p^{(j)})} \int_{{X}_{\mathbb R}} K_{\zeta}( p, x)K_{\zeta}( x,p^{(j)})\frac{d \eta(x)}{\omega(x)} . \end{align*} Thus, \eqref{phiInt5} becomes \begin{align*} \frac{1+\varphi(p)}{\sqrt{2}} & (M^yu)(p) = \int_{{X}_{\mathbb R}} K_{\zeta}( p, x)y(x)f(x) \frac{d \eta(x)}{\omega(x)} + i \sum_{j=1}^{n} \frac{c_j}{1+\varphi(p^{(j)})} \times \nonumber \\
& \times\left( \int_{{X}_{\mathbb R}}K_{\zeta}( p, x)K_{\zeta}( x,p^{(j)})\frac{d \eta(x)}{\omega(x)} \right) \left( \int_{{X}_{\mathbb R}} K_{\zeta}( p^{(j)}, s)f(s)\frac{d \eta(s)}{\omega(s)} \right), \end{align*} and by setting \begin{align*} {\bf \widehat{f}}(q) = y(q)f(q) +
i \sum_{j=1}^{n} \frac{c_jK_{\zeta}( q, p^{(j)})}{1+\varphi(p^{(j)})} \left( \int_{{X}_{\mathbb R}} K_{\zeta}( p^{(j)}, x)f(x) \frac{d \eta(x)}{\omega(x)} \right), \end{align*} the identity in \eqref{phiInt6} becomes \[ \frac{1+\varphi(p)}{\sqrt{2}}(M^yu)(p) = \int_{{X}_{\mathbb R}} K_{\zeta}( p, x){\bf \widehat{f}}(x)\frac{d \eta(x)}{\omega(x)} . \] \end{pf} \begin{Cy} Let $\varphi$ be a single-valued function with positive real part on a dividing-type compact Riemann surface $X$. Furthermore, let us assume that $y(p)$ is a real meromorphic function of degree $n$ such that $s(p^{(j)})=1$ for all $1\leq j \leq n$. Then the following identity holds: \begin{equation*}
(\Lambda (\mathfrak{Re} ~ M^y)\Lambda^*)f(p)=y(p)f(p). \end{equation*} \end{Cy} We note that, in fact, it suffices to assume that the values $s(p^{(j)})$, for all $1 \leq j \leq n$, are equal to a common constant of modulus one.
Furthermore, for an arbitrary $f\in{\bf L}^2(d \eta)$ we set \[ \Phi_{y}(f) \overset{\text{def} } {=} \col_{1\leq j \leq n}~\left( \frac{1}{1+\varphi(p^{(j)})}\int_{{X}_{\mathbb R}} K_{\zeta}( p^{(j)}, x)f(x)\frac{d \eta(x)}{\omega(x)}\right), \] and then, for an element $d= (d_1,...,d_n)^t \in {\mathbb C}^{n}$, the adjoint operator $\Phi_{y}^*:\mathbb C^n \rightarrow \mathcal{L}(\varphi)$ is given explicitly by \[ \Phi_{y}^*d = \sum_{j=1}^{n} \frac{1}{1+\overline{\varphi(p^{(j)})}}d_j K_{\zeta}( p, \tauBa{p^{(j)}}). \] Then, if we further use the notation $$\sigma_y \overset{\text{def} } {=} {\rm diag}~ c_j \, (1+\overline{\varphi(p^{(j)})}),$$ the formula in \eqref{jardin-des-plantes} may be rewritten and simplified as follows \begin{equation*} (\Lambda M^y \Lambda^*)f(p) = y(p)f(p)+\frac{i}{2}\Phi_{y}^*\sigma_y\Phi_{y} f, \end{equation*} while the real part of the operator $M^y$ has the form \begin{equation*} (\Lambda (\mathfrak{Re} ~ M^y)\Lambda^*)f(p)=y(p)f(p)+ \Phi_{y}^*{\rm diag}~({\rm Im}~\varphi(p^{(j)}))\Phi_{y} f. \end{equation*}
\begin{landscape}
\section{Summary} \label{chSumm43} \setcounter{equation}{0} The table below summarizes the comparison between the $\mathcal{L}(\varphi)$ spaces in the Riemann sphere case and in the compact real Riemann surface setting. \begin{center}
\begin{tabular}{|m{5cm}||M{6.0cm}|M{8.3cm}|}
\hline & {\bf The $g=0$ setting} & {\bf The $g>0$ setting} \\
\hline\hline The prime form & $z-w$ & $E(p,q)$ \\
\hline The Cauchy kernel & $\frac{1}{-i(z-\overline{w})}$ & $K_{\zeta}(p,\tauBa{q}) \overset{\text{def} } {=} \frac {\vartheta[\zeta](p-{\tauBa{q}})} {i\vartheta[\zeta](0)E(p,{\tauBa{q}})} $ \\
\hline The Hardy space $H^2$ & The reproducing kernel Hilbert space with kernel $\frac{1}{-i(z-\overline{w})}$ & The reproducing kernel Hilbert space with the kernel $\frac{1}{-i}K_{\zeta}(p, \tauBa{q})$ where $\zeta\in T_0$. \\ \hline The kernel of $\mathcal{L}(\varphi)$ & $\frac{\varphi(z)+\varphi(w)^*}{-i(z-\overline{w})} = \innerProductTri{\frac{1}{t-z}}{\frac{1}{t-w}}{{\bf L}^2(d \eta)},$ & $ \left({\varphi}(p)+{\varphi}(q)^*\right) \frac{\vartheta[\zeta](p-{\tauBa{q}})}{\vartheta[\zeta](0)E({\tauBa{q}},p)}$ \\
\hline Reproducing kernel in ${\bf L}^2( d \mu) $ & $\int_{\mathbb R}\frac{d \eta(t)}{(t-z)(t-\overline{w})}$ & \small $\begin{aligned}
\innerProductTri {K_{\zeta}(\tauBa{q},x)}{ K_{\zeta}( \tauBa{p}, x)}{{\bf L}^2\left(\frac{d \eta}{ \omega}\right)} \end{aligned}$ \\
\hline The elements of $\mathcal{L}(\varphi)$ & $F(z)=\int_{\mathbb R}\frac{f(t)d \eta(t)}{t-z}$ & \small $ \innerProductReg {f}{ K_{\zeta}( \tauBa{p}, x)} = \int_{{X}_{\mathbb R}}f(x)K_{\zeta}(x, p)f(x)\frac{d \eta(x)}{\omega(x)} $ \\ \hline {The Herglotz integral representation formula} & \small $\begin{aligned} \varphi(z)= & iA-iBz + \\ & i\int_{{\mathbb R}}\left(\frac{1}{t-z}-\frac{t}{t^2+1} \right)d \eta(t) \end{aligned}$ & \small $\setlength{\jot}{0pt}\begin{aligned} \varphi(z) = & \frac{\pi}{2} \int_{X_{\mathbb R}} \frac{[\omega_1(x) \cdots \omega_g(x)]}{\omega(x)} n(\widetilde{x}) \, d \eta(x) + \\ & \pi i\int_{X_{\mathbb R}} \frac{[\omega_1(x)\,\cdots \,\omega_g(x)]}{\omega(x)}(Yp)d \eta(x) - \\ & \frac{i}{2}\int_{X_{\mathbb R}}\frac{ \frac{\partial}{\partial x} \ln E(p,\widetilde{x})}{\omega(x)}d \eta(x) + iM \end{aligned}$ \\ \hline Integral representation of the model operator $M^y$ & $(M F)(z)=\int_{\mathbb R}{\frac{t f(t)d \eta(t)}{t-z}}$ & $ (M^y F)(z)= \int_{{X}_{\mathbb R}} K_{\zeta}( u, x) f(x)y(x)\frac{d \eta(x)}{\omega(x)}$ \\ \hline Integral representation of the resolvent operator $R_\alpha^y$ & $(R_\alpha F)(z)=\int_{\mathbb R}\frac{f(t)d \eta(t)}{(t-z)(t-\alpha)}$ & \small $\begin{aligned} (R_\alpha F)(p) & = \int_{ X_{\mathbb R}} K_{\zeta}( p,u) \frac{f(u)}{y(u) - \alpha} \frac{ d \mu(u)}{\omega(u)} \end{aligned}$ \\ \hline \end{tabular} \end{center}
\end{landscape} \normalsize
\end{document}
\begin{document}
\begin{abstract}
Many fundamental concepts in network-based epidemic modeling depend on the branching factor, which captures a sense of dispersion in the network connectivity and quantifies the rate of spreading across the network. Moreover, contact network information generally is available only up to some level of error. We study the propagation of such errors to the estimation of the branching factor. Specifically, we characterize the impact of network noise on the bias and variance of the observed branching factor for arbitrary true networks, with examples in sparse, dense, homogeneous and inhomogeneous networks. In addition, we propose a method-of-moments estimator for the true branching factor. We illustrate the practical performance of our estimator through simulation studies and with contact networks observed in British secondary schools and a French hospital.\\
Keywords: Branching factor; Noisy network; Method-of-moments. \end{abstract}
\section{Introduction}
Epidemic modeling, while not at all new, has taken on renewed importance this year due to the COVID-19 pandemic. Many key concepts in mathematical epidemiology depend on the branching factor -- for example, the basic reproduction number $R_0$. The latter is generally defined as the number of secondary infections expected in the early stages of an epidemic by a single infective in a population of susceptibles \citep{anderson1991infectious,diekmann2000mathematical}. The importance of $R_0$ in the study of epidemics arises from its role in so-called threshold theorems, which state under which conditions the presence of an infective individual in a population will lead to an epidemic \citep{whittle1955outcome}. In network-based susceptible-exposed-infectious-removed (SEIR) models, $R_0$ can be shown to equal $\theta(\kappa-1)/(\theta+\gamma)$. Here $\theta$ and $\gamma$ are infection and recovery rates, respectively (\cite{trapman2016inferring}), while the branching factor, $\kappa$, is a measure of the heterogeneity of a network. The branching factor captures a notion of the average degree of the vertex reached by following an edge from a vertex and, therefore, measures the rate of spreading across the network. It is evident that knowing the value of $\kappa$ is vital for effective control responses in the early stages of an epidemic. In addition, various thresholds in epidemiological and percolation theory rely on the branching factor. In the discussion section, we provide details on how knowledge of the branching factor informs these quantities.
Increasingly, contact networks are playing an important role in the study of epidemiology. Knowledge of the structure of the network allows models to take into account individual-level behavioral heterogeneities and shifts. Network-based approaches have been explored for investigating disease outbreaks in human (\cite{eubank2004modelling}), livestock (\cite{kao2006demographic}) and wildlife (\cite{craft2009distinguishing}) populations. Moreover, contact network information generally is available only up to some level of error -- also known as network noise. For example, there is often measurement error associated with network constructions, where, by `measurement error' we will mean true edges being observed as non-edges, and vice versa. Such edge noise occurs in self-reported contact networks where participants may not perceive and recall all contacts correctly (\cite{smieszek2012collecting}). It can also be found in sensor-based contact networks where automated proximity loggers are used to report frequency and duration of contacts (\cite{drewe2012performance}). Contact tracing, and the contact networks that result, currently is playing a central role in the fight to control COVID-19 globally (especially in conjunction with testing) (\cite{cevik2020sars}, \cite{juneau2020effective}, \cite{kretzschmar2020impact}). We investigate how network noise impacts on the observed value of $\kappa$ and, therefore, on our understanding of infectious diseases spreading.
Extensive work regarding uncertainty quantification has been done in the field of non-network epidemic modeling, where populations are assumed uniform and with homogeneous mixing. Given adequate data, estimates of model parameters, such as $\theta$ and $\gamma$, can be produced with accompanying standard errors. Methods for this purpose are reviewed in \citet[Chapter~9--12]{andersson2012stochastic} and \cite{becker1999statistical}. Many studies have explored the effects of uncertainty in parameter estimation on basic epidemic quantities. For instance, there have been efforts to quantify uncertainty in $R_0$ around recent high profile emergent events, including severe acute respiratory syndrome (SARS) (\cite{chowell2004model}), the new influenza A (H1N1) (\cite{white2009estimation}), and Ebola (\cite{chowell2004basic}). But, to our best knowledge, there has been little attention to date given towards uncertainty analysis of $\kappa$ and relevant quantities in network-based epidemic models. Exceptions include real-time estimation of $R_0$ at an early stage of an outbreak by considering the heterogeneity in contact networks (\cite{davoudi2012early}), and measurability of $R_0$ in highly detailed sociodemographic data with the clustered contact structure assumed of the population (\cite{liu2018measurability}).
As remarked above, there appears to be little in the way of a formal and general treatment of the error propagation problem in network-based epidemic models. However, there are several areas in which the probabilistic or statistical treatment of uncertainty enters prominently in network analysis. Model-based approaches include statistical methodology for predicting network topology or attributes with models that explicitly include a component for network noise (\cite{jiang2011network}, \citet{jiang2012latent}), the `denoising' of noisy networks (\cite{chatterjee2015matrix}), the adaptation of methods for vertex classification using networks observed with errors (\cite{priebe2015statistical}), and a general Bayesian framework for reconstructing networks from observational data (\cite{young2020robust}). The other common approach to network noise is based on a `signal plus noise' perspective. For example, \cite{balachandran2017propagation} introduced a simple model for noisy networks that, conditional on some true underlying network, assumed we observed a version of that network corrupted by an independent random noise that effectively flips the status of (non)edges. Later, \cite{chang2020estimation} developed method-of-moments estimators for the underlying rates of error when replicates of the observed network are available. In a somewhat different direction, uncertainty in network construction due to sampling has also been studied in some depth. See, for example, \citet[Chapter~5]{kolaczyk2009statistical} or \cite{ahmed2014network} for surveys of this area. However, in this setting, the uncertainty arises only from sampling---the subset of vertices and edges obtained through sampling are typically assumed to be observed without error.
Our contribution in this paper is to quantify how such errors propagate to the estimation of the branching factor, and to provide estimators for $\kappa$ when as few as three replicates of the observed network are available. Adopting the noise model proposed by \cite{balachandran2017propagation}, we characterize the impact of network noise on the bias and variance of the observed branching factor for arbitrary true networks, and we illustrate the asymptotic behaviors of these quantities on networks for varying densities and degree distributions. Our work shows that, in general, the bias in empirical branching factors can be expected to be nontrivial and is likely to dominate the variance. Accordingly, we propose a parametric estimator of the branching factor, motivated by \cite{chang2020estimation}, who recently developed method-of-moments estimators for network subgraph densities and the underlying rates of error when replicates of the observed network are available. Numerical simulation suggests that high accuracy is possible for estimating branching factors in networks of even modest size. We illustrate the practical use of our estimators in the context of contact networks in British secondary schools and a French hospital, where a small number of replicates are available.
The organization of this paper is as follows. In Section \ref{sec2} we provide background on the noise model and branching factor. In Section \ref{sec3} we then present results for the bias and variance of the observed branching factor in sparse, dense, homogeneous and inhomogeneous networks. Section \ref{sec5} proposes our method-of-moments estimator for the true branching factor. Numerical illustration is reported in Section \ref{sec6}. All proofs are relegated to supplementary materials.
\section{Background}\label{sec2}
In this section, we provide essential notation and background.
\subsection{Noise model}
We assume the observed graph is a noisy version of a true graph. Let $G=(V,E)$ be an undirected graph and $G^\text{obs}=(V,E^\text{obs})$ be the observed graph, where we implicitly assume that the vertex set $V$ is known. Denote the adjacency matrix of $G$ by $\bm A=(A_{i,j})_{N_v\times N_v}$ and that of $G^\text{obs}$ by $\tilde {\bm A}=(\tilde A_{i,j})_{N_v\times N_v}$. Hence $A_{i,j} = 1$ if there is a true edge between the $i$-th vertex and the $j$-th vertex, and 0 otherwise, while $\tilde A_{i,j} = 1$ if an edge is observed between the $i$-th vertex and the $j$-th vertex, and 0 otherwise. And denote the degree of the $i$-th vertex in $G$ and $G^\text{obs}$ by $d_i$ and $\tilde d_i$, respectively. We assume throughout that $G$ and $G^\text{obs}$ are simple.
We express the marginal distributions of the $\tilde A_{i,j}$ in the form (\cite{balachandran2017propagation}): \begin{equation}\label{eq2.1} \begin{aligned} \tilde A_{i,j}\sim \begin{cases} \text{Bernoulli}(\alpha_{i,j}), & \text{if } \{i,j\}\in E^c\\ \text{Bernoulli}(1-\beta_{i,j}), & \text{if } \{i,j\}\in E,\\ \end{cases} \end{aligned} \end{equation} where $E^c=\{\{i,j\} : i,j\in V; i< j\} \backslash E$. Drawing an analogy with network construction based on hypothesis testing, $\alpha_{i,j}$ can be interpreted as the probability of a Type-I error on the (non)edge status for vertex pair $\{i,j\}\in E^c$, while $\beta_{i,j}$ is interpreted as the probability of a Type-II error for vertex pair $\{i,j\}\in E$.
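For concreteness, this noise mechanism (with constant error rates and independent flips across vertex pairs, as formalized in the assumptions below) is straightforward to simulate. The following minimal Python sketch is purely illustrative; the function name and interface are ours and are not part of the original analysis.
\begin{verbatim}
import numpy as np

def observe_noisy_network(A, alpha, beta, seed=None):
    # One noisy observation of a simple graph with 0/1 adjacency matrix A:
    # true edges survive w.p. 1 - beta, non-edges flip to edges w.p. alpha.
    rng = np.random.default_rng(seed)
    U = rng.random(A.shape)
    obs = np.where(A == 1, U > beta, U < alpha).astype(int)
    obs = np.triu(obs, 1)        # keep a single draw per unordered pair
    return obs + obs.T           # symmetric, zero diagonal
\end{verbatim}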
Our interest is in characterizing the manner in which the uncertainty in the $\tilde A_{i,j}$ (as a noisy version of $A_{ij}$) propagates to the branching factor. Here we focus on a general formulation of the problem in which we make the following three assumptions.
\begin{assumption}[Constant marginal error probabilities]\label{a1}
Assume that \\ $\alpha_{i,j}=\alpha$ and $\beta_{i,j}=\beta$ for all $i< j$, so the marginal error probabilities are $\mathbb P(\tilde A_{i,j}=0|A_{i,j}=1)=\beta$ and $\mathbb P(\tilde A_{i,j}=1|A_{i,j}=0)=\alpha$. \end{assumption}
\begin{assumption}[Independent noise]\label{a2}
The random variables $\tilde A_{i,j}$, for all $i< j$, are conditionally independent given $A_{i,j}$. \end{assumption}
\begin{assumption}[Large Graphs]\label{a3}
$N_v\rightarrow\infty$. \end{assumption}
In Assumption \ref{a1}, we assume that both $\alpha$ and $\beta$ remain constant over different vertex pairs. Under Assumption \ref{a2}, the distribution of $\tilde d_i$ is \begin{equation*} \begin{aligned} \tilde d_i=\sum_{j=1}^{N_v} \tilde A_{j,i} \sim \text{Binomial}(N_v-1-d_i,\alpha ) + \text{Binomial}(d_i,1-\beta ). \end{aligned} \end{equation*} Assumption \ref{a2} is not strictly necessary. See Remark \ref{remark3} in Section \ref{sec5}. Assumption \ref{a3} reflects both the fact that the study of large graphs is a hallmark of modern applied work in complex networks and, accordingly, our desire to understand the asymptotic behavior of the branching factor and provide concise descriptions in terms of the bias and variance for large graphs.
\begin{remark}
Note that $\alpha$ and $\beta$ can be constants or approach 0 as $N_v\rightarrow \infty$. For example, under Assumption \ref{a4}, if $\beta$ is constant and $|E|$ is dominated by $|E^c|$ asymptotically, then $\alpha$ approaches 0 as $N_v\rightarrow \infty$. Thus, $\alpha$ and $\beta$ are actually $\alpha(N_v)$ and $\beta(N_v)$. For notational simplicity, we omit $N_v$. \end{remark}
In addition to the core Assumptions \ref{a1} -- \ref{a3}, we add a fourth assumption, which we will invoke periodically throughout the paper when we wish to illustrate our results in a special case.
\begin{assumption}[Edge Unbiasedness]\label{a4}
$\alpha |E^c|=\beta |E|$, so that the expected number of observed edges equals the actual number of edges. \end{assumption}
Our use of Assumption \ref{a4} reflects the understanding that a `good' observation $G^\text{obs}$ of the graph $G$ should at the very least have roughly the right number of edges.
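Note that, as a direct consequence of the noise model, the expected observed degree of vertex $i$ is
\begin{equation*}
\mathbb E \tilde d_i = (1-\beta) d_i + \alpha (N_v - 1 - d_i) = d_i + \alpha (N_v - 1 - d_i) - \beta d_i ,
\end{equation*}
so even under Assumption \ref{a4} the individual expected degrees are in general distorted, although the expected total number of edges is correct.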
\begin{remark}\label{r2}
Assumption \ref{a4} cannot guarantee the unbiasedness of higher-order subgraph counts. (\cite{balachandran2017propagation}) \end{remark}
\subsection{The branching factor in network-based epidemic models}
In general, the epidemic threshold of a network is the inverse of the largest eigenvalue of the adjacency matrix. Under some configuration models, the branching factor is often a good approximation of the largest eigenvalue (\cite{pastor2015epidemic}).
Let $G$ be a network graph describing the contact structure among $N_v$ elements in a population. If $G$ derives from a so-called configuration model, as is commonly assumed in the network-based epidemic modeling literature, then the branching factor takes the following form (\cite{buono2014epidemics}).
\begin{definition}
For graph $G$ with $N_v$ nodes, the branching factor is
\begin{align*}
\kappa=\begin{cases}
\displaystyle \frac{\sum_{i=1}^{N_v} d_i^2/N_v}{\sum_{i=1}^{N_v} d_i/N_v} & \text{if }\sum_{i=1}^{N_v} d_i>0\\
0& \text{if }\sum_{i=1}^{N_v} d_i=0,
\end{cases}
\end{align*}
where $d_i$ is the degree of node $i$. \end{definition}
Accordingly, we denote the branching factor in the noisy network by $\tilde \kappa$. Besides the basic reproduction number, $R_0$, described in the introduction, there are other quantities depending on the observed branching factor. These include the percolation threshold $1/(\tilde \kappa-1)$, the epidemic threshold $1/(\tilde \kappa-1)$, and the immunization threshold $1-1/(\lambda\tilde\kappa)$, where $\lambda$ is the spreading rate (\cite{pastor2015epidemic}).
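To fix ideas, the quantities above are simple functions of the degree sequence. The short Python sketch below is an illustration of ours; the degree sequence and the rates $\theta$, $\gamma$ and $\lambda$ are arbitrary toy values used only to show the arithmetic.
\begin{verbatim}
import numpy as np

def branching_factor(degrees):
    # kappa = mean(d^2) / mean(d), with kappa = 0 for an empty graph
    d = np.asarray(degrees, dtype=float)
    return d @ d / d.sum() if d.sum() > 0 else 0.0

d = [1, 2, 2, 3, 4]                      # toy degree sequence
kappa = branching_factor(d)              # (1+4+4+9+16)/12 = 34/12
epidemic_threshold = 1 / (kappa - 1)     # also the percolation threshold
theta, gamma, lam = 0.3, 0.1, 0.5        # illustrative rates
R0 = theta * (kappa - 1) / (theta + gamma)
immunization_threshold = 1 - 1 / (lam * kappa)
\end{verbatim}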
\section{Bias and variance of the observed branching factor}\label{sec3}
In this section, we first quantify the asymptotic bias and variance of the observed branching factor for four typical classes of networks: sparse and homogeneous, sparse and inhomogeneous, dense and homogeneous, and dense and inhomogeneous. We then provide numerical illustrations. In the supplementary material A and B, we present general results for the asymptotic bias and variance of the observed branching factor in arbitrary true networks. See supplementary material C - F for all proofs related to the observed branching factor.
\subsection{Bias of the observed branching factor}\label{sec3.2}
By making assumptions on the network density and degree distribution, we can obtain a nuanced understanding of the limiting behavior of the observed branching factor in terms of bias when the number of nodes tends towards infinity. Specifically, we consider the combinations of sparse versus dense and homogeneous versus inhomogeneous networks. By the term sparse we will mean a graph for which the average degree $\bar d$ is bounded both above and below by $\log N_v$ asymptotically, and by dense, $\bar d$ is bounded both above and below by $N_v^c$ asymptotically, where $0<c< 1$. By the term homogeneous we mean the degrees follow a Poisson distribution, and by inhomogeneous, the degrees follow a truncated Pareto distribution.
\begin{theorem}[Sparse and homogeneous, dense and homogeneous] \label{coro1}
We define $Y=\sum_{i=1}^{N_v} \tilde d_i$. In the sparse homogeneous graph and the dense homogeneous graph, under Assumptions \ref{a1} -- \ref{a4}, if $\mathbb EY>0$, $\mathbb EY$ is bounded below by $N_v$ asymptotically, and $\beta$ is bounded, then the bias of $\tilde \kappa$ is dominated by $\kappa$ asymptotically. \end{theorem}
\begin{theorem}[Sparse and inhomogeneous, dense and inhomogeneous] \label{coro2}
In the sparse inhomogeneous graph and dense inhomogeneous graph, under the assumptions in Theorem \ref{coro1}, we have
(i) if $0<\zeta\leq 2$, the bias of $\tilde \kappa$ is equal to $-\beta (2-\alpha-\beta)\kappa$ asymptotically,
(ii) if $\zeta> 2$, the bias of $\tilde \kappa$ is equal to $-\beta (2-\alpha-\beta)\dfrac{\kappa}{ (\zeta-1)^2}$ asymptotically,
where $\zeta$ is the shape parameter of the truncated Pareto distribution. \end{theorem}
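For instance, under edge-unbiasedness (Assumption \ref{a4}) in a sparse inhomogeneous graph we have $\alpha\approx 0$, so with $\beta=0.2$ part (i) gives an asymptotic relative bias of $-\beta(2-\alpha-\beta)\approx -0.2\times 1.8=-0.36$; that is, the observed branching factor underestimates $\kappa$ by roughly a third, while for a shape parameter $\zeta=3$ part (ii) reduces this to $-0.36\,\kappa/4=-0.09\,\kappa$.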
In summary, the observed branching factor is asymptotically unbiased in the homogeneous network setting, but asymptotically biased in the inhomogeneous network setting. The bias of the observed branching factor is negative, which reflects the fact that the observed graph is typically more homogeneous than the true graph in the inhomogeneous setting. The bias depends on $\alpha$, $\beta$, and $\zeta$, and when the shape $\zeta>2$, the bias decreases as $\zeta$ increases. The different results in the homogeneous and inhomogeneous network settings also reflect Remark \ref{r2}, since the branching factor is related to the second-order moment of the degrees.
\subsection{Variance of the observed branching factor}
Again, by making assumptions on the network density and degree distribution, we can describe the limiting behavior of the observed branching factor in terms of variance as the number of nodes tends to infinity.
\begin{theorem}[Sparse, dense, homogeneous, and inhomogeneous]
In the combinations of sparse versus dense and homogeneous versus inhomogeneous networks, under the assumptions in Theorem \ref{coro1}, we have that the variance of $\tilde \kappa$ is dominated by the bias of $\tilde \kappa$ asymptotically. \end{theorem}
Note that the orders of the variances are asymptotically dominated by the corresponding biases for all four cases. Therefore, in noisy contact networks, bias would appear to be the primary concern for the observed branching factor. In turn, our simulation results (below) suggest that in practice this empirical bias can be quite substantial.
\subsection{Simulation study}
We focus on two types of networks in the simulation study: random Erd\H{o}s-R\'{e}nyi networks and random scale-free networks using a preferential attachment mechanism. The first type has a Poisson degree distribution, and the second type has a power-law degree distribution. We construct networks of both types with 10,000 nodes and average degree around 50 or 100 and view them as true networks. Then we generate 10,000 noisy, observed networks according to \eqref{eq2.1}. We set $\beta=0.1,0.2,0.3$ and $\alpha=\beta |E|/|E^c|$ (i.e., to enforce edge-unbiasedness). For each observed network, we compute $\tilde \kappa$. We also run 1,000 bootstrap resamples to obtain 95\% confidence intervals for the biases and variances. Biases and variances are shown in Figure \ref{fig0}; error bars are 95\% confidence intervals.
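A scaled-down version of this simulation can be written in a few lines. The sketch below is ours and only illustrative: it uses far fewer nodes and replicates than the study above, and the \texttt{networkx} generators and all constants are simply convenient stand-ins for the Erd\H{o}s-R\'{e}nyi and preferential-attachment constructions.
\begin{verbatim}
import numpy as np
import networkx as nx

def branching_factor(degrees):
    d = np.asarray(degrees, dtype=float)
    return d @ d / d.sum() if d.sum() > 0 else 0.0

def observe_noisy_network(A, alpha, beta, rng):
    U = rng.random(A.shape)
    obs = np.triu(np.where(A == 1, U > beta, U < alpha).astype(int), 1)
    return obs + obs.T

rng = np.random.default_rng(1)
n, m = 1000, 25000                            # roughly average degree 50
G = nx.gnm_random_graph(n, m, seed=1)         # homogeneous case
# G = nx.barabasi_albert_graph(n, 25, seed=1) # inhomogeneous case
A = nx.to_numpy_array(G, dtype=int)
beta = 0.2
alpha = beta * m / (n * (n - 1) / 2 - m)      # edge-unbiased choice of alpha
true_kappa = branching_factor(A.sum(axis=0))
reps = [branching_factor(observe_noisy_network(A, alpha, beta, rng).sum(axis=0))
        for _ in range(200)]
bias, variance = np.mean(reps) - true_kappa, np.var(reps)
\end{verbatim}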
\begin{figure}
\caption{Biases and variances of observed branching factors in homogeneous and inhomogeneous networks with different average degrees. Error bars are 95\% confidence intervals (and often not visible, due to the scale of bias versus variance). }
\label{fig0}
\end{figure}
From the plots, we see that the noisy branching factor is unbiased in the homogeneous network setting, but biased in the inhomogeneous network setting. The bias of the observed branching factor is negative (i.e., the empirical branching factor generally underestimates the truth), and it grows as the error rates increase. When the average degree increases from 50 to 100, the value of the true branching factor decreases from 3579.76 to 3356.34 and the bias decreases, which is consistent with Theorem \ref{coro2}. Also, the variances are dominated by the corresponding biases in all cases.
\section{Estimator for the true branching factor}\label{sec5}
As we saw in Section \ref{sec3}, the observed branching factor is biased in the inhomogeneous network setting. Due to the presence of heterogeneity in the level of connectivity of contact neighborhoods for most real-world contact network data, it is important to have new estimators for bias reduction. Simultaneous estimation of Type I and II errors, $\alpha$ and $\beta$, as well as network quantities like $\kappa$, from a single noisy network is in general impossible \cite[Thm 1]{chang2020estimation}. We present a method-of-moments estimator, which needs a minimum of three replicates.
We adapt the method-of-moments estimators (MME) of subgraph density in \cite{chang2020estimation}, which require at least three replicates of the observed network. Let $C_{\mathcal V_1}$ and $C_{\mathcal V_2}$ denote the edge density and the two-stars density, respectively. Then \begin{align*}
C_{\mathcal V_1}=\frac{1}{|\mathcal V_1|}\sum_{\bm v=(i_1,i_1')\in\mathcal V_1} A_{i_1,i_1'} \end{align*} and \begin{align*}
C_{\mathcal V_2}=\frac{1}{|\mathcal V_2|}\sum_{\bm v=(i_1,i_1',i_2,i_2')\in\mathcal V_2} A_{i_1,i_1'} A_{i_2,i_2'}, \end{align*} where $\mathcal V_1=\{(i_1,i_1'):i_1<i_1'\}$ and $\mathcal V_2=\{ (i_1,i_1',i_2,i_2') :i_1'=i_2, i_1\neq i_2\neq i_2' \}$.
Next we define \begin{align*} \hat{\bar d}&=(N_v-1) \hat C_{\mathcal V_1},\\ \hat{\bar {d^2}}&= (N_v-1)(N_v-2) \hat C_{\mathcal V_2}+ \hat{\bar d},\\ \end{align*} where $\hat C_{\mathcal V_1}$ and $\hat C_{\mathcal V_2}$ are method-of-moments estimators of $C_{\mathcal V_1}$ and $C_{\mathcal V_2}$, which we will define later. Thus, our estimator of $\kappa$ is given by: \begin{align}\label{eq5.1} \hat \kappa= \frac{\hat{\bar {d^2}}}{\hat{\bar d}}= (N_v-2) \frac{\hat C_{\mathcal V_2}}{\hat C_{\mathcal V_1}}+1. \end{align}
\begin{theorem}\label{thm:MME}
Under Assumptions \ref{a1} and \ref{a2}, $\hat\kappa$ has asymptotic normal distribution with mean $\kappa$. \end{theorem}
See supplementary material G for the proof of Theorem \ref{thm:MME}. Note that $\hat \kappa$ is an asymptotically unbiased estimator for $\kappa$, where the asymptotics is in $N_v^2$, i.e., the square of the number of vertices in the network. To compute $\hat \kappa$, we first estimate $C_{\mathcal V_1}$ and $C_{\mathcal V_2}$ by the methods used in \cite{chang2020estimation}. Define the relevant quantities as follows: \begin{align*} u_1&= (1-\delta)\alpha+\delta(1-\beta),\\ u_2&= (1-\delta)\alpha(1-\alpha)+\delta\beta(1-\beta),\\ u_3&= (1-\delta)\alpha(1-\alpha)^2+\delta\beta^2(1-\beta), \end{align*} where $\delta$ is the edge density in the true network, $u_1$ is the expected edge density in one observed network, $u_2$ is the expected density of edge differences in two observed networks, and $u_3$ is the average probability of having an edge between two arbitrary nodes in one observed network but no edge between the same nodes in the other two observed networks. The method-of-moments estimators for $u_1$, $u_2$ and $u_3$ are \begin{equation}\label{eq5.3} \begin{aligned} \hat u_1&=\frac{2}{N_v(N_v-1)}\sum_{i<j}\tilde A_{i,j}, \\
\hat u_2&=\frac{1}{N_v(N_v-1)}\sum_{i<j}|\tilde A_{i,j,*}-\tilde A_{i,j}|,\\ \hat u_3&=\frac{2}{3N_v(N_v-1)}\sum_{i<j} I( \text{Exactly one of }\tilde A_{i,j,**}, \tilde A_{i,j,*}, \tilde A_{i,j} \text{ equals } 1) . \end{aligned} \end{equation} where $ \tilde {\bm A}_*=(\tilde A_{i,j,*})_{N_v\times N_v},\ \tilde {\bm A}_{**}=(\tilde A_{i,j,**})_{N_v\times N_v}$ are independent and identically distributed replicates of $\tilde {\bm A}$.
Calculation of the estimator $\hat\kappa$ in (\ref{eq5.1}) and the estimation of its asymptotic variance can be accomplished as detailed in Algorithm \ref{algo1} below and Algorithm 1 in the supplementary material H, respectively. The variance estimation is based on a nonstandard bootstrap.
\begin{algorithm}[!h]
\caption{Method-of-moments estimator $\hat \kappa$}
\hspace*{0.02in} {\bf Input:}
$\tilde {\bm A}=(\tilde A_{i,j})_{N_v\times N_v}, \ \tilde {\bm A}_*=(\tilde A_{i,j,*})_{N_v\times N_v},\ \tilde {\bm A}_{**}=(\tilde A_{i,j,**})_{N_v\times N_v},\ \alpha_0,\ \varepsilon$\\
\hspace*{0.02in} {\bf Output:}
$\hat\alpha$, $\hat\beta$, $\hat \kappa$
\begin{algorithmic}
\State Compute $\hat u_1,\ \hat u_2,\ \hat u_3$ defined in (\ref{eq5.3});
\State Initialize $\hat \alpha=\alpha_0$, $\alpha_0=\hat \alpha+10\varepsilon$;
\While {$|\hat \alpha-\alpha_0|>\varepsilon$}
\State $\alpha_0\gets\hat \alpha,\ \hat \beta\gets\frac{\hat u_2-\alpha_0+\hat u_1 \alpha_0}{\hat u_1-\alpha_0},\ \hat\delta\gets\frac{(\hat u_1-\alpha_0)^2}{\hat u_1-\hat u_2-2\hat u_1\alpha_0+\alpha_0^2},\ \hat\alpha\gets\frac{\hat u_3-\hat\delta\hat\beta^2(1-\hat\beta)}{(1-\hat\delta)(1-\alpha_0)^2}$;
\EndWhile
\State Compute $\hat k_3=1-\hat\alpha-\hat\beta,\ \hat C_{\mathcal V_1}=\frac{2}{\hat k_3N_v(N_v-1)}\sum_{i<j}(\tilde A_{i,j}-\hat\alpha)$,
\State \hspace{1.55cm} $\hat C_{\mathcal V_2}=\frac{1}{\hat k_3^2N_v(N_v-1)(N_v-2)}\sum_{i\neq j\neq l}(\tilde A_{i,j}-\hat\alpha)(\tilde A_{j,l}-\hat\alpha),\ \hat \kappa=(N_v-2) \frac{\hat C_{\mathcal V_2}}{\hat C_{\mathcal V_1}}+1$.
\end{algorithmic}
\label{algo1} \end{algorithm}
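For readers who prefer code, the following Python sketch mirrors Algorithm \ref{algo1}; it is a direct transcription of the displayed formulas for three symmetric $0/1$ replicates with zero diagonal, and the starting value, tolerance, and iteration cap are placeholders of our own choosing rather than recommended settings.
\begin{verbatim}
import numpy as np

def kappa_mme(A, A1, A2, alpha0=0.01, eps=1e-8, max_iter=1000):
    """Method-of-moments estimate of the branching factor from three
    noisy replicates A, A1, A2 (symmetric 0/1 matrices, zero diagonal)."""
    n = A.shape[0]
    iu = np.triu_indices(n, k=1)
    a, a1, a2 = A[iu], A1[iu], A2[iu]
    u1 = 2.0 * a.sum() / (n * (n - 1))
    u2 = np.abs(a1 - a).sum() / (n * (n - 1))
    u3 = 2.0 * ((a + a1 + a2) == 1).sum() / (3 * n * (n - 1))
    alpha_hat, alpha_prev = alpha0, alpha0 + 10 * eps
    for _ in range(max_iter):
        if abs(alpha_hat - alpha_prev) <= eps:
            break
        alpha_prev = alpha_hat
        beta_hat = (u2 - alpha_prev + u1 * alpha_prev) / (u1 - alpha_prev)
        delta_hat = (u1 - alpha_prev) ** 2 / (
            u1 - u2 - 2 * u1 * alpha_prev + alpha_prev ** 2)
        alpha_hat = (u3 - delta_hat * beta_hat ** 2 * (1 - beta_hat)) / (
            (1 - delta_hat) * (1 - alpha_prev) ** 2)
    k3 = 1 - alpha_hat - beta_hat
    B = A - alpha_hat                       # centred adjacency matrix
    np.fill_diagonal(B, 0.0)
    C1 = B.sum() / (k3 * n * (n - 1))
    two_stars = ((B.sum(axis=0)) ** 2 - (B ** 2).sum(axis=0)).sum()
    C2 = two_stars / (k3 ** 2 * n * (n - 1) * (n - 2))
    return alpha_hat, beta_hat, (n - 2) * C2 / C1 + 1
\end{verbatim}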
\begin{remark}\label{remark3}
Since our estimation of the unknown parameters is based on moment estimation, the independent noise dictated by Assumption \ref{a2} is not strictly necessary. As is shown in the proof of \cite{chang2020estimation}, the convergence rate for the moment estimation of the unknown parameters is determined by the convergence rates of $\hat u_1 -u_1$, $\hat u_2 -u_2$ and $\hat u_3 -u_3$. When some limited dependency among observed edges is present, the convergence rates of $\hat u_1 -u_1$, $\hat u_2 -u_2$ and $\hat u_3 -u_3$ still are bounded above by $1/N_v$ asymptotically. \end{remark}
\section{Numerical illustration}\label{sec6}
In this section, we conduct some simulations and experiments to illustrate the finite sample properties of the proposed estimation methods. We consider two types of contact networks. One is the self-reported British secondary school contact network, described in \cite{kucharski2018structure}. These data were collected from 460 unique participants across four rounds of data collection conducted between January and June 2015 in year 7 groups in four UK secondary schools, with 7,315 identifiable contacts reported in total. They used a process of peer nomination as a method for data collection: students were asked, via the research questionnaire, to list the six other students in year 7 at their school that they spend the most time with. For each pair of participants in a specific round of data collection, a single link was defined if either one of the participants reported a contact between the pair (i.e. there was at least one unidirectional link, in either direction). Our analysis focuses on the single link contact network.
The other contact network we used is a sensor-based contact network in a French Hospital, reported by \cite{vanhems2013estimating}. These data contain records of contacts among patients and various types of health care workers in the geriatric unit of a hospital in Lyon, France, in 2010, from 1pm on Monday, December 6 to 2pm on Friday, December 10. Each of the 75 people in this study consented to wear RFID sensors on small identification badges during this period, which made it possible to record when any two of them were in face-to-face contact with each other (i.e., within 1-1.5 m of each other) during a 20-second interval of time. A primary goal of this study was to gain insight into the pattern of contacts in such a hospital environment, particularly with an eye towards the manner in which infection might be transmitted. We define a link if duration of contacts in one day is greater than 5 minutes and construct networks for Tuesday, Wednesday and Thursday.
Each data set has at least three replicates. We consider two settings: a simulation setting where noise is added to a `true' network derived from the data, and an application setting where the three replicates are each treated as noisy versions of an unknown true network. The former results allow us to understand what finite-sample properties can be expected of our estimators, while the latter are reflective of what would be observed in practice with such data.
\subsection{Simulations}
For each data set, we artificially constructed a `true' adjacency matrix $\bm A$: if an edge occurs between a pair of vertices more than once in observed networks, we view that pair to have a true edge. The noisy, observed adjacency matrices $\tilde{\bm A}$, $\tilde{\bm A}_*$, $\tilde{\bm A}_{**}$ are generated according to (\ref{eq2.1}). We set $\alpha=0.005$ or 0.010, and $\beta=0.01,\ 0.15$, or 0.20. We assume that both $\alpha$ and $\beta$ are unknown.
We evaluate the method-of-moments estimate for $\kappa$ and 95\% confidence intervals. Figure \ref{fig2} shows the simulation results, in which each setting is replicated 500 times. The mean absolute errors (MAE) of the point estimates for the branching factor $\kappa$ and the relative frequency (RF) of coverage for the estimated 95\% confidence interval for $\kappa$ are shown in Figure \ref{fig2}. Note that $\text{MAE}(\hat\kappa)=\frac{1}{500}\sum_{i=1}^{500}|\hat\kappa_i-\kappa|$, where $\hat\kappa_1,\cdots,\hat\kappa_{500}$ denote the estimated values in the 500 replications of the simulation, and $\kappa$ denotes the true value.
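To be explicit about how these two summaries are obtained from the replicates, here is a minimal sketch, with hypothetical arrays standing in for the 500 simulated estimates and interval endpoints:
\begin{verbatim}
import numpy as np

kappa_true = 100.0                                  # placeholder truth
kappa_hat = np.array([98.0, 103.5, 99.2])           # replicate estimates
lo = np.array([90.0, 95.0, 92.0])                   # CI lower endpoints
hi = np.array([110.0, 112.0, 107.0])                # CI upper endpoints

mae = np.mean(np.abs(kappa_hat - kappa_true))
rf = np.mean((lo <= kappa_true) & (kappa_true <= hi))  # coverage frequency
avg_length = np.mean(hi - lo)
\end{verbatim}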
In the hospital and school networks, the estimation errors for $\kappa$ increase when $\alpha$ and $\beta$ increase, and the estimated coverage probabilities are indeed around 95\%. The average interval lengths in the French hospital are larger than those in the four schools due to the smaller sample size.
\begin{figure}
\caption{Mean absolute errors (MAE) of $\hat \kappa$, and 95\% confidence intervals for $\kappa$ in the simulation with 500 replications for noisy networks in the hospital and schools. Reported in the plots are the relative frequencies (RF) of the event that a confidence interval covers the corresponding true value, and also the average Length of the intervals.}
\label{fig2}
\end{figure}
\subsection{Application}
In the school data sets, the nodes are not all the same within a given school over the four rounds. So, we choose the nodes common over four rounds and their edges to obtain four replicates of the noisy networks. Since our estimation methods only need three replicates, we select rounds 1, 2, and 3 (analogous results hold for other choices). Similarly, for the hospital data set, we choose the nodes common over three days and their edges to obtain three replicates of the noisy networks.
We evaluate the method-of-moments estimates for $\kappa$, 95\% confidence intervals, and the observed branching factor $\tilde \kappa$. Point estimates and 95\% confidence intervals for $\alpha$ and $\beta$ are reported in Table \ref{table2}. Figure \ref{fig1} shows the point estimates for the branching factor $\kappa$ and the observed branching factor $\tilde \kappa$ in each round. The error bars are the estimated 95\% confidence intervals for $\kappa$.
Table \ref{table2} indicates that there is nontrivial noise in all networks. The estimate of $\alpha$ in the hospital network is one order of magnitude larger than that in the school networks. Figure \ref{fig1} shows that, in schools 2 and 3, the resulting method-of-moments estimates for $\kappa$ are lower than all of their observed values, indicating a nontrivial downward adjustment for network noise. Moreover, most of the observed branching factors are not in the estimated 95\% confidence intervals, which further reinforces the evidence that the true branching factor is less than those observed empirically. In schools 1 and 4, the resulting method-of-moments estimates for $\kappa$ are close to their observed values. In contrast, in the French hospital, the estimate for $\kappa$ is higher than all of the observed values, indicating a nontrivial upward adjustment.
Ultimately, we see that the ability to account for network noise appropriately in reporting the branching factor can lead to substantially different conclusions than use of the original, empirically observed branching factor. These differences can then in turn be translated to specific epidemic-related quantities of interest in a study.
\begin{table}
\caption{Point estimates and 95\% confidence intervals for $\alpha$ and $\beta$ in the hospital and four schools. }
\fbox{
\begin{tabular}{l@{\hskip 0.6cm} S[table-format = 1.1]
@{\hskip 0.8cm(\,\hskip -.3cm }S[table-format = -1.2]@{ \hskip .1cm,\,\hskip -.3cm}S[table-format = - 1.2]@{\,\hskip .2cm) \ }
@{\hskip 0.6cm} S[table-format = 1.1]
@{\hskip 0.6cm(\,\hskip -.1cm }S[table-format = -1.2]@{ \hskip .1cm,\,\hskip -.1cm}S[table-format = - 1.2]@{\,\hskip .2cm) \ }
}
\\[-.8em] & \multicolumn{3}{c}{$\alpha$ } & \multicolumn{3}{c}{$\beta$ } \\
\\[-.8em]
Networks & \multicolumn{1}{c@{\quad\space}}{Estimates} & \multicolumn{2}{c@{\hskip .8cm}}{CI} & \multicolumn{1}{c@{\hskip 0.9cm}}{Estimates} & \multicolumn{2}{c}{CI} \\
\hline
\\[-.8em]
Hospital & 0.116 & 0.080 &0.153 & 0.162 & -0.173& 0.499 \\
School 1 &0.005 & 0.004 & 0.007 & 0.207 & 0.140 & 0.275 \\
School 2 &0.013 & 0.012 & 0.015 & 0.141 & 0.092 & 0.191 \\
School 3 &0.013 & 0.012 & 0.015 & 0.000 & -0.057 & 0.057 \\
School 4 &0.020 & 0.014 & 0.025 & 0.123 & 0.025 & 0.222 \\
\end{tabular}}
\label{table2} \end{table}
\begin{figure}
\caption{The point estimates and 95\% confidence intervals for $\kappa$ in the hospital and four schools and the observed branching factor $\tilde \kappa$ in each round/day.}
\label{fig1}
\end{figure}
\begin{figure}
\caption{The point estimates and 95\% confidence intervals for $R_0$ in the hospital and four schools. }
\label{fig3}
\end{figure}
For example, recall that $R_0$ equals $\theta(\kappa-1)/(\theta+\gamma)$ in the network-based SEIR model, where $\theta$ and $\gamma$ are the infection and recovery rates. Therefore, if we are interested in characterizing the manner in which the uncertainty in the branching factor propagates to $R_0$, we can do so given knowledge or conjecture of values for these rates. Consider the context of COVID-19, for example, for which current best knowledge suggests parameter settings of $\theta=0.016$ or $0.026$ and $1/\gamma$ from 8 to 24.6 (\cite{luo2020modes,lauer2020incubation,linton2020incubation,wang2020clinical,wolfel2020virological,verity2020estimates}). Estimating infection and recovery rates is important in epidemic modeling, but we treat $\theta$ and $\gamma$ as constants here for illustration, and only consider the uncertainty in the branching factor.
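Since $R_0=\theta(\kappa-1)/(\theta+\gamma)$ is increasing in $\kappa$ for fixed positive rates, the confidence interval for $\kappa$ maps directly to an interval for $R_0$; a minimal Python sketch, with placeholder estimates and rates chosen within the cited ranges:
\begin{verbatim}
theta, gamma = 0.016, 1 / 14.0                       # illustrative rates
kappa_hat, kappa_lo, kappa_hi = 40.0, 30.0, 55.0     # placeholder estimates

def r0(kappa):
    return theta * (kappa - 1) / (theta + gamma)

print(r0(kappa_hat), (r0(kappa_lo), r0(kappa_hi)))
\end{verbatim}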
Figure \ref{fig3} shows the point estimates and 95\% confidence intervals for $R_0$ in the hospital and four schools. School 2 consistently has the highest estimated $\hat R_0$. The infection will be able to start spreading in a population when $R_0>1$, but not if $R_0<1$. For school networks, most of the 95\% confidence intervals include 1 or are below 1 when $\theta=0.016$, while some are higher when $\theta=0.026$. The 95\% confidence intervals include 1 in all cases for the French hospital.
\section{Discussion}
Here we have quantified the bias and variance of the observed branching factor in noisy networks and developed a general framework for estimation of the true branching factor in contexts wherein one has observations of noisy networks. Our approach requires as few as three replicates of network observations, and employs method-of-moments techniques to derive estimators and establish their asymptotic consistency and normality. Simulations demonstrate that substantial inferential accuracy by method-of-moments estimators is possible in networks of even modest size when nontrivial noise is present. And our application to contact networks in British secondary schools and a French hospital shows that the gains offered by our approach over presenting the observed branching factor can be pronounced.
We have pursued a frequentist approach to the problem of uncertainty quantification for the branching factor. If the replicates necessary for our approach are unavailable in a given setting, a Bayesian approach is a natural alternative. For example, posterior-predictive checks for goodness-of-fit based on examination of a handful of network summary measures are common practice (e.g., \cite{bloem2018random}). Note, however, that the Bayesian approach requires careful modeling of the generative process underlying $G$ and typically does not distinguish between signal and noise components. Our analysis is conditional on $G$, and hence does not require that $G$ be modeled. It is effectively a `signal plus noise' model, with the signal taken to be fixed but unknown. Related work has been done in the context of graphon modeling, with the goal of estimating network motif frequencies (e.g., \cite{latouche2016variational}). However, again, one typically does not distinguish between signal and noise components in this setting. Additionally, we note that the problem of practical graphon estimation itself is still a developing area of research.
Our work here sets the stage for extensions to various thresholds and statistics which depend on the branching factor. Recall that these include the percolation threshold $1/(\kappa-1)$, the epidemic threshold $1/(\kappa-1)$, and the immunization threshold $1-1/(\lambda\kappa)$, where $\lambda$ is the spreading rate (\cite{pastor2015epidemic}). Replacing $\kappa$ with $\hat\kappa$, we obtain asymptotically unbiased estimators for the corresponding thresholds. The asymptotic distributions can be derived from the delta method. In addition, the total branching factor of the network is important for epidemic spreading and immunization strategy in multiplex networks (e.g., \cite{buono2014epidemics}).
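As an illustration of the delta-method step for the epidemic (or percolation) threshold $g(\kappa)=1/(\kappa-1)$, assuming an estimate $\hat\kappa$ with estimated variance $\hat\sigma^2$ (both placeholders below):
\begin{verbatim}
kappa_hat, var_kappa = 40.0, 4.0        # placeholder estimate and variance
g = 1.0 / (kappa_hat - 1.0)             # plug-in threshold estimate
dg = -1.0 / (kappa_hat - 1.0) ** 2      # derivative g'(kappa)
se_g = abs(dg) * var_kappa ** 0.5       # delta-method standard error
lam = 0.1                               # illustrative spreading rate
immunization = 1.0 - 1.0 / (lam * kappa_hat)
\end{verbatim}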
Our choice to work with independent network noise is both natural and motivated by convenience. And our results of method-of-moments estimators still hold when there is some dependency across (non)edges. A precise characterization of the dependency is typically problem-specific and hence a topic for further investigation.
\section{Data accessibility}
No primary data are used in this paper. Secondary data sources are taken from \cite{kucharski2018structure} and \cite{vanhems2013estimating}. These data and the code necessary to reproduce the results in this paper are available at \url{https://github.com/KolaczykResearch/EstimNetReprodNumber}.
\end{document}
\begin{document}
\setcounter{page}{1}
\title[Local and 2-local derivations of locally finite simple Lie algebras]{Local and 2-local derivations of locally finite simple Lie algebras}
\author[ Ayupov Sh.A., Kudaybergenov K.K., Yusupov B.B. ]{Shavkat Ayupov$^{1,3}$, Karimbergen Kudaybergenov$^2$, Bakhtiyor Yusupov$^3$} \address{$^1$ V.I.Romanovskiy Institute of Mathematics\\
Uzbekistan Academy of Sciences, 81 \\ Mirzo Ulughbek street, 100170 \\
Tashkent, Uzbekistan}
\address{$^2$ Department of Mathematics, Karakalpak State University, 1, Academician Ch.~Abdirov street, 230113, Nukus, Uzbekistan} \address{$^3$ National University of Uzbekistan, 4, University street, 100174, Tashkent, Uzbekistan} \email{\textcolor[rgb]{0.00,0.00,0.84}{sh$_{-}[email protected], [email protected]}}
\email{\textcolor[rgb]{0.00,0.00,0.84}{[email protected]}} \email{\textcolor[rgb]{0.00,0.00,0.84}{baxtiyor\_yusupov\[email protected]}} \maketitle
\begin{abstract} In the present paper we study local and 2-local derivations of locally finite split simple Lie algebras. Namely, we show that every local derivation and every 2-local derivation on such a Lie algebra is a derivation.
\end{abstract} {\it Keywords:} Lie algebras, locally finite simple Lie algebras, derivation, local derivation, 2-local derivation. \\
{\it AMS Subject Classification:} 17B65, 17B20, 16W25.
\section{Introduction}
The notion of a local derivation was first introduced in 1990 by R.V.~Kadison \cite{Kadison} and by D.R.~Larson and A.R.~Sourour \cite{Larson}. A linear operator $\Delta$ on an algebra $\mathcal{A}$ is called a \textit{local derivation} if for any given $x\in\mathcal{A}$ there exists a derivation $D_x$ (depending on $x$) such that $\Delta(x)=D_x(x).$ The main problems concerning this notion are to find conditions under which local derivations become derivations and to present examples of algebras with local derivations that are not derivations. R.V.~Kadison proved that each continuous local derivation of a von Neumann algebra $M$ into a dual Banach $M$-bimodule is a derivation.
The investigation of local and 2-local derivations on finite-dimensional Lie algebras was initiated in the papers \cite{Ayupov7, AyuKudRak}. In \cite{Ayupov7} the first two authors proved that every local derivation on a semi-simple Lie algebra is a derivation and gave examples of nilpotent finite-dimensional Lie algebras with local derivations which are not derivations. In \cite{Ayupov6} local derivations of solvable Lie algebras are investigated and it is shown that any local derivation of a solvable Lie algebra with model nilradical is a derivation.
In 1997, P.\v{S}emrl \cite{Sem} introduced the notion of 2-local derivations and 2-local automorphisms on algebras. Namely, a map \(\nabla : \mathcal{A} \to \mathcal{A}\) (not necessarily linear) on an algebra \(\mathcal{A}\) is called a \textit{2-local derivation}, if for every pair of elements \(x,y \in \mathcal{A}\) there exists a derivation \(D_{x,y} : \mathcal{A} \to \mathcal{A}\) such that \(D_{x,y} (x) = \nabla(x)\) and \(D_{x,y}(y) = \nabla(y).\) The notion of 2-local automorphism is given in a similar way. For a given algebra \(\mathcal{A}\), the main problem concerning these notions is to prove that they automatically become a derivation (respectively, an automorphism) or to give examples of local and 2-local derivations or automorphisms of \(\mathcal{A},\) which are not derivations or automorphisms, respectively.
Solutions of such problems for finite-dimensional Lie algebras over an algebraically closed field of zero characteristic were obtained in \cite{AyuKud, AyuKudRak, ChenWang}. Namely, in \cite{AyuKudRak} it was proved that every 2-local derivation on a semi-simple Lie algebra \(\mathcal{L}\) is a derivation and that each finite-dimensional nilpotent Lie algebra of dimension larger than two admits a 2-local derivation which is not a derivation. Concerning 2-local automorphisms, Z.~Chen and D.~Wang in \cite{ChenWang} proved that if \(\mathcal{L}\) is a simple Lie algebra of type $A_{l},D_{l}$ or $E_{k}, (k = 6, 7, 8)$ over an algebraically closed field of characteristic zero then every 2-local automorphism of \(\mathcal{L}\) is an automorphism. Finally, in \cite{AyuKud} it was proved that every 2-local automorphism of a finite-dimensional semi-simple Lie algebra over an algebraically closed field of characteristic zero is an automorphism. Moreover, it was also shown there that every nilpotent Lie algebra of finite dimension larger than two admits a 2-local automorphism which is not an automorphism.
In \cite{Ayupov8, AyuYus} the authors studied 2-local derivations of infinite-dimensional Lie algebras over a field of characteristic zero and proved that all 2-local derivations of the Witt algebra as well as of the positive Witt algebra and the classical one-sided Witt algebra are (global) derivations and every 2-local derivation on Virasoro algebras is a derivation. In \cite{AyuKudYus} we have proved that every 2-local derivation on the generalized Witt algebra $W_n(\mathbb{F})$ over the vector space $\mathbb{F}^n$ is a derivation. In \cite{YangKai} Y.Chen, K.Zhao and Y.Zhao studied local derivations on generalized Witt algebras. They proved that every local derivation on Witt algebras is a derivation and that every local derivation on a centerless generalized Virasoro algebra of higher rank is a derivation.
In the present paper we study local and 2-local derivations of locally finite split simple Lie algebras.
\section{Preliminaries}
In this section we give some necessary definitions and preliminary results (for details see \cite{Neeb2005, Neeb2001}).
A Lie algebra $\mathfrak{g}$ over a field $\mathbb{F}$ is a vector space over $\mathbb{F}$ with a bilinear mapping $\mathfrak{g}\times\mathfrak{g}\rightarrow\mathfrak{g}$ denoted $(x,y)\mapsto[x,y]$ and called the bracket of $\mathfrak{g}$ and satisfying: $$ [x,x] =0 ,\ \ \ \ \forall x\in\mathfrak{g}, $$ $$ [[x,y],z]+[[y,z],x]+[[z,x],y]=0, \forall x,y,z\in\mathfrak{g}. $$
A Lie algebra $\mathfrak{g}$ is said to be \textit{solvable} if $\mathfrak{g}^{(k)}=\{0\}$ for some integer $k,$ where $\mathfrak{g}^{(0)}=\mathfrak{g},$ $\mathfrak{g}^{(k)}=\Big[\mathfrak{g}^{(k-1)}, \mathfrak{g}^{(k-1)}\Big],\, k\geq1.$ Any Lie algebra $\mathfrak{g}$ contains a unique maximal solvable ideal, called the radical of $\mathfrak{g}$ and denoted by $\mbox{Rad} \mathfrak{g}.$ A non-trivial Lie algebra $\mathfrak{g}$ is called \textit{semisimple} if $\mbox{Rad} \mathfrak{g}=0.$ This is equivalent to requiring that $\mathfrak{g}$ have no nonzero abelian ideals. A Lie algebra $\mathfrak{g}$ is simple if it has no non-trivial ideals and is not abelian.
We say that a Lie algebra $\mathfrak{g}$ has a \textit{root decomposition} with respect to an abelian subalgebra $\mathfrak{h},$ if $$ \mathfrak{g}=\mathfrak{h}\oplus\bigoplus\limits_{\alpha\in \mathfrak{R}}\mathfrak{g}_{\alpha}, $$ where $\mathfrak{g}_{\alpha}=\Big\{x\in\mathfrak{g}: [h,x]=\alpha(h)x,\,\, \forall h\in\mathfrak{h}\Big\}$ and $\mathfrak{R}=\Big\{\alpha\in\mathfrak{h}^*\backslash0:\mathfrak{g}_{\alpha}\neq\{0\}\Big\}$
is the corresponding root system and $\mathfrak{h}^*$ is the space of all linear functionals on $\mathfrak{h}.$ In this case, $\mathfrak{h}$ is called \textit{splitting Cartan subalgebra} of $\mathfrak{g},$ and $\mathfrak{g}$ respectively the pair $(\mathfrak{g},\mathfrak{h})$ is called \textit{split} Lie algebra.
Suppose that $\mathfrak{g}$ is a Lie algebra over $\mathbb{F}$ which is a directed union of finite-dimensional simple Lie algebras, that is, $\mathfrak{g}=\lim\limits_{\longrightarrow}\mathfrak{g}_\alpha$ is the direct limit of a family $(\mathfrak{g}_\alpha)_{\alpha\in A}$ of finite-dimensional simple Lie algebras $\mathfrak{g}_\alpha$ which are subalgebras of $\mathfrak{g}$ and the directed order $\leq$ on the index set $A$ is given by $\alpha\leq \beta$ if $\mathfrak{g}_\alpha\leq \mathfrak{g}_\beta.$ A Lie algebra $\mathfrak{g}$ of this form is said to be a locally finite simple Lie algebra.
Now following \cite{Neeb2005} we give a description of locally finite split simple Lie algebras.
For a set $\mathfrak{J}$ we denote by $M_\mathfrak{J}(\mathbb{F})=\mathbb{F}^{\mathfrak{J}\times\mathfrak{J}}$
the set of all
$\mathfrak{J}\times\mathfrak{J}$-matrices with entries in $\mathbb{F}.$
Let $M_\mathfrak{J}(\mathbb{F})_{rc-fin}\subseteq M_\mathfrak{J}(\mathbb{F})$ be the set of all $\mathfrak{J}\times\mathfrak{J}$-matrices
with at most finitely many non-zero entries in each row and each column, and let $\mathfrak{gl}_{\mathfrak{J}}(\mathbb{F})$ denote the subspace
consisting of all matrices with at most finitely many non-zero entries. The matrix product $xy$ is defined if at least one factor is in $\mathfrak{gl}_{\mathfrak{J}}(\mathbb{F})$ and the other is in $M_\mathfrak{J}(\mathbb{F}).$ In particular, $\mathfrak{gl}_{\mathfrak{J}}(\mathbb{F})$ thus inherits the structure of a locally finite Lie algebra via $[x,y]:=xy-yx$ and \begin{equation*}\begin{split} \mathfrak{sl}_{\mathfrak{J}}(\mathbb{F})=\left\{x\in \mathfrak{gl}_{\mathfrak{J}}(\mathbb{F}): tr(x)=0\right\} \end{split}\end{equation*} is a simple Lie algebra.
For $i,j\in \mathfrak{J}$ denote by $e_{i,j}$ a matrix unit defined as $$ e_{i,j}:\mathfrak{J}\times\mathfrak{J}\rightarrow\mathbb{F},\ \ (k,s)\longmapsto\delta_{ik}\delta_{sj}, $$ where $\delta_{i,j}$ is the Kronecker symbol.
Set $2\mathfrak{J}:=\mathfrak{J}\,\dot{\cup}-\mathfrak{J},$ where $-\mathfrak{J}$ denotes a copy of $\mathfrak{J}$ whose elements are denoted by $-i\,(i\in\mathfrak{J})$ and consider the $2\mathfrak{J}\times2\mathfrak{J}$-matrices \begin{equation*} q_1=\sum\limits_{i\in\mathfrak{J}}e_{i,-i}+e_{-i,i}\ \ \text{and} \ \ q_2=\sum\limits_{i\in\mathfrak{J}}e_{i,-i}-e_{-i,i}. \end{equation*} Set \begin{equation*} \mathfrak{o}_{\mathfrak{J},\mathfrak{J}}(\mathbb{F})=\left\{x\in\mathfrak{gl}_{2\mathfrak{J}}(\mathbb{F}):\ x^{\top} q_1+q_1x=0\right\} \end{equation*} and \begin{equation*} \mathfrak{sp}_{\mathfrak{J}}(\mathbb{F})=\left\{x\in\mathfrak{gl}_{2\mathfrak{J}}(\mathbb{F}):\ x^{\top} q_2+q_2x=0\right\}. \end{equation*}
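As a purely illustrative finite-$\mathfrak{J}$ check, not used anywhere in the proofs, the following Python sketch builds $q_1$ and $q_2$ for $|\mathfrak{J}|=3$ and verifies numerically that the sets cut out by $x^{\top}q_i+q_ix=0$ are closed under the commutator $[x,y]=xy-yx$.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
n = 3                                     # |J| = 3, matrices of size 2n x 2n
Z, I = np.zeros((n, n)), np.eye(n)
q1 = np.block([[Z, I], [I, Z]])           # sum of e_{i,-i} + e_{-i,i}
q2 = np.block([[Z, I], [-I, Z]])          # sum of e_{i,-i} - e_{-i,i}

def random_element(q):
    """A generic solution of x^T q + q x = 0, obtained by antisymmetrizing
    a random matrix with respect to the bilinear form defined by q."""
    y = rng.standard_normal((2 * n, 2 * n))
    return y - np.linalg.inv(q) @ y.T @ q

def defect(x, q):
    return np.abs(x.T @ q + q @ x).max()

for q in (q1, q2):
    x, y = random_element(q), random_element(q)
    print(defect(x, q), defect(x @ y - y @ x, q))   # both of order 1e-15
\end{verbatim}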
By \cite[Theorem IV.6]{Neeb2001} every infinite dimensional locally finite split simple Lie algebra is isomorphic to one of the Lie algebras $\mathfrak{sl}_{\mathfrak{J}}(\mathbb{F}), \mathfrak{o}_{\mathfrak{J},\mathfrak{J}}(\mathbb{F}), \mathfrak{sp}_{\mathfrak{J}}(\mathbb{F}),$ where $\mathfrak{J}$ is an infinite set with $\textrm{card}\mathfrak{J} = \dim\mathfrak{g}.$
In the next section we shall use the following description of the algebras of derivations of locally finite simple Lie algebras \cite[Theorem I.3]{Neeb2005}: \begin{equation*} \begin{split} der\left(\mathfrak{sl}_{\mathfrak{J}}(\mathbb{F})\right) & \cong M_\mathfrak{J}(\mathbb{F})_{rc-fin}/\mathbb{F}\mathbf{1}\\ der\left(\mathfrak{o}_{\mathfrak{J},\mathfrak{J}}(\mathbb{F})\right) & \cong \left\{x\in M_{\mathfrak{J}}(\mathbb{F})_{rc-fin}:x^\top q_1+q_1x=0\right\}\\ der\left(\mathfrak{sp}_\mathfrak{J}(\mathbb{F})\right) & \cong \left\{x\in M_{\mathfrak{J}}(\mathbb{F})_{rc-fin}:x^\top q_2+q_2x=0\right\}, \end{split} \end{equation*} where $\mathbf{1}=(\delta_{ij})$ is the identity matrix in $M_\mathfrak{J}(\mathbb{F}).$ In particular, any derivation $D$ on $\mathfrak{sl}_{\mathfrak{J}}(\mathbb{F})$ is represented as \begin{equation}\label{dersplit} D(x)=[a,x],\,\, x\in \mathfrak{sl}_{\mathfrak{J}}(\mathbb{F}), \end{equation} where $a\in M_\mathfrak{J}(\mathbb{F})_{rc-fin}.$ Further, in the cases of the algebras $\mathfrak{o}_{\mathfrak{J},\mathfrak{J}}(\mathbb{F})$ and $\mathfrak{sp}_{\mathfrak{J}}(\mathbb{F})$ the element $a\in M_\mathfrak{J}(\mathbb{F})_{rc-fin}$ needs to satisfy the conditions $a^\top q_1+q_1 a=0$ and $a^\top q_2+q_2 a=0,$ respectively.
\section{Main results}
\subsection{Local derivation on the locally finite split simple Lie algebras}
\
The main result of this subsection is given as follows. \begin{theorem}\label{th21} Let $\mathfrak{g}$ be a locally finite split simple Lie algebra over a field of characteristic zero. Then any local derivation on $\mathfrak{g}$ is a derivation. \end{theorem}
For the proof we need several lemmata; from now on $\mathfrak{g}$ is one of the algebras
$\mathfrak{sl}_{\mathfrak{J}}(\mathbb{F}), \mathfrak{o}_{\mathfrak{J},\mathfrak{J}}(\mathbb{F}), \mathfrak{sp}_{\mathfrak{J}}(\mathbb{F})$ (see the end of the previous Section).
Any $x\in M_\mathfrak{J}(\mathbb{F})_{rc-fin}$ can be uniquely represented as $$ x=\sum\limits_{i,j\in \mathfrak{J}} x_{i,j}e_{i,j}, $$ where $x_{i,j}\in \mathbb{F}$ for all $i,j\in \mathfrak{J}.$
For a subset $\mathfrak{I} \subset \mathfrak{J}$ we shall identify the algebra $M_\mathfrak{I}(\mathbb{F})_{rc-fin}$ with the subalgebra in $M_\mathfrak{J}(\mathbb{F})_{rc-fin},$ consisting of elements of the form $ x=\sum\limits_{i,j\in \mathfrak{I}} x_{i,j}e_{i,j}, $ where $x_{i,j}\in \mathbb{F}$ for all $i,j\in \mathfrak{I}.$ Further, for a finite subset $\mathfrak{I} \subset \mathfrak{J}$ we define a projection mapping $\pi_\mathfrak{I}:M_\mathfrak{J}(\mathbb{F})_{rc-fin}\rightarrow M_\mathfrak{I}(\mathbb{F})_{rc-fin}$ as follows \begin{equation*} \pi_\mathfrak{I}(x)=\sum\limits_{i,j\in \mathfrak{I}} x_{i,j}e_{i,j}, \end{equation*} where $x=\sum\limits_{i,j\in \mathfrak{J}} x_{i,j}e_{i,j}.$
\begin{lemma}\label{123} $$ \pi_\mathfrak{I}\left([x,y]\right)=\left[\pi_\mathfrak{I}(x), \pi_\mathfrak{I}(y)\right] $$ for all $x\in M_\mathfrak{J}(\mathbb{F})_{rc-fin}$ and $y\in M_\mathfrak{I}(\mathbb{F})_{rc-fin}.$ \end{lemma}
\begin{proof} Note that each matrix $x\in M_\mathfrak{J}(\mathbb{F})_{rc-fin}$ is represented as $x=x_{\mathfrak{I},\mathfrak{I}}+x_{\mathfrak{I},\mathfrak{K}}+x_{\mathfrak{K},\mathfrak{I}}+x_{\mathfrak{K}, \mathfrak{K}},$ where $x_{\mathfrak{I},\mathfrak{I}}=\sum\limits_{i,j\in \mathfrak{I}} x_{i,j}e_{i,j},$ $x_{\mathfrak{I},\mathfrak{K}}=\sum\limits_{i\in \mathfrak{I}, j\in \mathfrak{K}} x_{i,j}e_{i,j},$ $x_{\mathfrak{K},\mathfrak{I}}=\sum\limits_{i\in \mathfrak{K}, j\in \mathfrak{I}} x_{i,j}e_{i,j},$ $x_{\mathfrak{K},\mathfrak{K}}=\sum\limits_{i,j\in \mathfrak{K}} x_{i,j}e_{i,j}$ and $\mathfrak{K}=\mathfrak{J}\setminus\mathfrak{I}.$ Take the matrices $x=x_{\mathfrak{I},\mathfrak{I}}+x_{\mathfrak{I},\mathfrak{K}}+x_{\mathfrak{K},\mathfrak{I}}+x_{\mathfrak{K}, \mathfrak{K}}\in M_\mathfrak{J}(\mathbb{F})_{rc-fin}$ and $y=y_{\mathfrak{I},\mathfrak{I}}\in M_\mathfrak{I}(\mathbb{F})_{rc-fin}.$ Then
\begin{equation*}\begin{split} \pi_\mathfrak{I}\left([x,y]\right)&=\pi_\mathfrak{I}\left(\left[x_{\mathfrak{I},\mathfrak{I}}+x_{\mathfrak{I},\mathfrak{K}}+x_{\mathfrak{K},\mathfrak{I}}+x_{\mathfrak{K}, \mathfrak{K}}, y_{\mathfrak{I},\mathfrak{I}}\right]\right)\\ &= \pi_\mathfrak{I}\left([x_{\mathfrak{I},\mathfrak{I}}, y_{\mathfrak{I},\mathfrak{I}}]\right)+\pi_\mathfrak{I}\left(\left[x_{\mathfrak{I},\mathfrak{K}}+x_{\mathfrak{K},\mathfrak{I}}+x_{\mathfrak{K}, \mathfrak{K}}, y_{\mathfrak{I},\mathfrak{I}}\right]\right)\\ &=[x_{\mathfrak{I},\mathfrak{I}}, y_{\mathfrak{I},\mathfrak{I}}] = \left[\pi_\mathfrak{I}(x), \pi_\mathfrak{I}(y)\right].
\end{split}\end{equation*}
\end{proof}
For a subset $\mathfrak{I} \subset \mathfrak{J}$ denote by $\mathfrak{g}_\mathfrak{I}$ the subalgebra in $\mathfrak{g}$ consisting of elements of the form $ x=\sum\limits_{i,j\in \mathfrak{I}} x_{i,j}e_{i,j}\in \mathfrak{g}, $ where $x_{i,j}\in \mathbb{F}$ for all $i,j\in \mathfrak{I}.$
It is clear that the restriction $\pi_\mathfrak{I}|_{\mathfrak{g}}$ of $\pi_\mathfrak{I}$ on $\mathfrak{g}$ maps $\mathfrak{g}$ onto $\mathfrak{g}_\mathfrak{I}.$
\begin{lemma}\label{resder} Let $\Delta$ be a local derivation on $\mathfrak{g}.$ Then the mapping $\Delta_\mathfrak{I}$ on $\mathfrak{g}_\mathfrak{I}$ defined as $$ \Delta_\mathfrak{I}(x)=\pi_\mathfrak{I}(\Delta(x)),\ x\in\mathfrak{g}_\mathfrak{I} $$ is a local derivation for every finite subset $\mathfrak{I}$ of $\mathfrak{J}.$ \end{lemma}
\begin{proof} Let $x\in \mathfrak{g}_\mathfrak{I}$ be an arbitrary element. By \eqref{dersplit} there is an element $a_x\in M_\mathfrak{J}(\mathbb{F})_{rc-fin}$ such that $\left[a_x, \mathfrak{g}\right]\subseteq \mathfrak{g}$ and $ \Delta(x)=[a_x,x]. $ Then by Lemma \ref{123} \begin{equation*} \Delta_\mathfrak{I}(x)=\pi_\mathfrak{I}(\Delta(x))=\pi_\mathfrak{I}([a_x,x])=
[\pi_\mathfrak{I}(a_x),\pi_\mathfrak{I}(x)]=[\pi_\mathfrak{I}(a_x),x]. \end{equation*} Thus $\Delta_\mathfrak{I}$ is a local derivation. \end{proof}
{\it Proof of Theorem \ref{th21}.} Let $\Delta$ be a local derivation on $\mathfrak{g}$ and let $x, y\in \mathfrak{g}$ be arbitrary elements. Take a finite subset $\mathfrak{I}$ of $\mathfrak{J}$ such that $x, y, \Delta(x), \Delta(y), \Delta([x,y])\in\mathfrak{g}_\mathfrak{I}.$
By Lemma~\ref{resder}, $\Delta_\mathfrak{I}$ is a local derivation of $\mathfrak{g}_\mathfrak{I}.$ Since $\mathfrak{g}_\mathfrak{I}$ is a finite dimensional simple Lie algebra, by \cite[Theorem 3.1]{Ayupov7} $\Delta_\mathfrak{I}$ is a derivation. Hence, \begin{equation*}\begin{split} \Delta([x,y]) &=\Delta_\mathfrak{I}([x,y])=[\Delta_\mathfrak{I}(x),y]+[x,\Delta_\mathfrak{I}(y)]=[\Delta(x),y]+[x,\Delta(y)]. \end{split} \end{equation*} This means that $\Delta$ is a derivation.
\subsection{2-local derivations on the locally finite split simple Lie algebras}
\
In this subsection we study 2-local derivations on the locally finite split simple Lie algebras.
Recall that a bilinear form $\kappa$ on $\mathfrak{g}$ is said to be non-degenerate if $\kappa(x, y)=0$ for all $y\in \mathfrak{g}$ implies that $x=0.$
We shall use the following results from \cite{Neeb2005}.
\begin{proposition} There exists a nondegenerate invariant symmetric bilinear form $\kappa$ on $\mathfrak{g}.$ \end{proposition}
\begin{proposition}
Every invariant symmetric bilinear form $\kappa$ on $\mathfrak{g}$ is invariant under all derivations of $\mathfrak{g}.$ \end{proposition}
The main result of this subsection is the following.
\begin{theorem}\label{th32} Let $\mathfrak{g}$ be a locally finite split simple Lie algebra over a field of characteristic zero. Then any 2-local derivation on $\mathfrak{g}$ is a derivation. \end{theorem}
\begin{proof}
Let $\nabla$ be a 2-local derivation on $\mathfrak{g}.$ Let us first show that $\nabla$ is linear.
Let $x,y,z\in \mathfrak{g}$ be arbitrary elements. Taking into account the invariance of $\kappa$ we obtain \begin{equation*}\begin{split} \kappa(\nabla(x+y),z)&=\kappa(D_{x+y,z}(x+y),z)=-\kappa(x+y,D_{x+y,z}(z))\\ &=-\kappa(x+y,\nabla(z))=-\kappa(x,\nabla(z))-\kappa(y,\nabla(z))\\ &=-\kappa(x,D_{x,z}(z))-\kappa(y,D_{y,z}(z))=\kappa(D_{x,z}(x),z)\\ &+\kappa(D_{y,z}(y),z)=\kappa(\nabla(x),z)+\kappa(\nabla(y),z)\\ &=\kappa(\nabla(x)+\nabla(y),z), \end{split} \end{equation*} i.e. \begin{equation*} \kappa(\nabla(x+y),z)=\kappa(\nabla(x)+\nabla(y),z)\;. \end{equation*} Since $\kappa$ is non-degenerate, the last equality implies that
\begin{equation*} \nabla(x+y)=\nabla(x)+\nabla(y)\ \ \ \text{for}\ x,y\in\mathfrak{g}. \end{equation*} Further, \begin{equation*} \nabla(\lambda x)=D_{\lambda x,x}(\lambda x)= \lambda D_{\lambda x,x}(x)=\lambda\nabla(x). \end{equation*} Hence, $\nabla$ is linear, and therefore is a local derivation.
Finally, by Theorem \ref{th21} a local derivation $\nabla$ is a derivation. \end{proof}
\end{document}
\begin{document}
\title{\large\bf{Kramers-Fokker-Planck
operators with homogeneous potentials}} \begin{abstract} In this article we establish a global subelliptic estimate for Kramers-Fokker-Planck operators with homogeneous potentials $V(q)$ under some conditions, involving in particular the control of the eigenvalues of the Hessian matrix of the potential. Namely, this work presents a different approach from the one in \cite{Ben}, in which the case $V(q_1,q_2)=-q_1^2(q_1^2+q_2^2)^n$ was already treated only for $n=1.$ With this article, after the former one dealing with non-homogeneous polynomial potentials, we conclude the analysis of all the examples of degenerate ellipticity at infinity presented in the framework of the Witten Laplacian by Helffer and Nier in \cite{HeNi}. Like in \cite{Ben}, our subelliptic lower bounds are the optimal ones up to some logarithmic correction. \end{abstract}
\noindent\textbf{Key words:} subelliptic estimates, compact resolvent, Kramers-Fokker-Planck operator.\\ \noindent\textbf{MSC-2010:} 35Q84, 35H20, 35P05, 47A10, 14P10 \tableofcontents \section{Introduction and main results} In this work we study the Kramers-Fokker-Planck operator \begin{align} K_V=p.\partial_q-\partial_qV(q).\partial_p+\frac{1}{2}(-\Delta_p+p^2)~,\;\;\;\;\;(q,p)\in
\mathbb{R}^{2d} \,, \label{a.3eq1} \end{align} where $q$ denotes the space variable, $p$ denotes the velocity variable and the potential $V(q)$ is a real-valued function defined in the whole space $\mathbb{R}^d_q.$
Setting \[
O_p=\frac{1}{2}(D^2_p+p^2) \;,\quad\quad \text{and}\quad\quad X_V=p.\partial_q-\partial_qV(q).\partial_p~, \] the Kramers-Fokker-Planck operator $K_V$ defined in (\ref{a.3eq1}) reads $K_V=X_V+O_p.$ \\ We firstly list some notations used throughout the paper. We denote for an arbitrary function $V(q)$ in $\mathcal{C}^{\infty}(\mathbb{R}^d)$ \[
\begin{aligned}
\mathrm{Tr}_{+,V}(q) & = \sum\limits_{\substack{\nu\in \mathrm{Spec}(\mathrm{Hess}\; V)\\ \nu>0}} \nu(q)\,,
\\ \mathrm{Tr}_{-,V}(q) &=-\sum\limits_{\substack{\nu\in \mathrm{Spec}(\mathrm{Hess}\; V)\\ \nu\le 0}}\nu(q)\;.
\end{aligned} \] In particular for a polynomial $V$ of degree less than 3, $\mathrm{Tr}_{+,V}$ and $\mathrm{Tr}_{-,V}$ are two constants. In this case we define the constants $A_V$ and $B_V$ by \begin{align*}
A_V& = \max \{(1+\mathrm{Tr}_{+,V})^{2/3}, 1+\mathrm{Tr}_{-,V}\}\;,\\
B_V &= \max\{\min\limits_{q\in\mathbb{R}^d}\left|\nabla\;V(q)\right|^{4/3}, \frac{1+\mathrm{Tr}_{-,V}}{\log(2+\mathrm{Tr}_{-,V})^2}\}\;. \end{align*} This work is principally based on the publication by Ben Said, Nier, and Viola \cite{BNV}, which concerns the study of Kramers-Fokker-Planck operators with polynomials of degree less than three. In \cite{BNV} we proved the existence of a constant $c>0$, independent of $V$, such that the following global subelliptic estimate with remainder \begin{align}
\|K_Vu\|^2_{L^2(\mathbb{R}^{2d})}+A_V\|u\|^2_{L^2(\mathbb{R}^{2d})}\ge
{c} \Big(\|O_pu\|^2_{L^2(\mathbb{R}^{2d})}&+\|X_Vu\|^2_{L^2(\mathbb{R}^{2d})}\nonumber\\&+\|\langle\partial_q V(q)\rangle^{2/3}u\|^2_{L^2(\mathbb{R}^{2d})}+\|\langle D_q\rangle^{2/3}u\|^2_{L^2(\mathbb{R}^{2d})}\Big)\label{eq44} \end{align} holds for all $u\in \mathcal{C}_0^{\infty}(\mathbb{R}^{2d}).$ Furthermore, supposing
$\mathrm{Tr}_{-,V}+\min\limits_{q\in\mathbb{R}^d}\left|\nabla\;V(q)\right|\not=0$, there exists a constant $c>0$, independent of $V$, such that \begin{align}
\|K_Vu\|^2_{L^2(\mathbb{R}^{2d})}\ge c\,B_V\|u\|^2_{L^2(\mathbb{R}^{2d})}~,\label{1.5mm} \end{align} is valid for all $u\in \mathcal{C}_0^{\infty}(\mathbb{R}^{2d}).$ As a consequence collecting (\ref{1.5mm}) and (\ref{eq44}) together, there is a constant $c>0$, independent of $V$, so that the global subelliptic estimates without remainder \begin{align}
\|K_Vu\|^2_{L^2(\mathbb{R}^{2d})}\ge \frac{c}{1+\frac{A_V}{B_V}}\Big(\|O_pu\|^2_{L^2(\mathbb{R}^{2d})}&+\|X_Vu\|^2_{L^2(\mathbb{R}^{2d})}\nonumber\\&+\|\langle\partial_q V(q)\rangle^{2/3}u\|^2_{L^2(\mathbb{R}^{2d})}+\|\langle D_q\rangle^{2/3}u\|^2_{L^2(\mathbb{R}^{2d})}\Big) \label{eq55} \end{align} holds for all $u\in \mathcal{C}_0^{\infty}(\mathbb{R}^{2d}).$ Here and throughout the paper we use the notation \begin{align*}
\langle \cdot\rangle=\sqrt{1+|\cdot|^2}\;. \end{align*}
Moreover we recall that for an arbitrary potential $V\in\mathcal{C}^{\infty}(\mathbb{R}^d)$, the Kramers-Fokker-Planck operator $K_V$ is essentially maximally accretive when endowed with the domain $\mathcal{C}_0^{\infty}(\mathbb{R}^{2d})$ (see Proposition 5.5, page 44 in \cite{HeNi}). Thanks to this property we deduce that the domain of the closure of $K_V$ is given by \begin{align*} D(K_V)=\left\{u\in L^2(\mathbb{R}^{2d}),\; K_Vu\in L^2(\mathbb{R}^{2d})\right\}~. \end{align*} Consequently, by density of $\mathcal{C}_0^{\infty}(\mathbb{R}^{2d})$ in the domain $D(K_V)$, all estimates written in this article, which are verified for $C^\infty_0(\mathbb{R}^{2d})$ functions, can be extended to $D(K_V).$ By a relatively bounded perturbation
with bound less than $1$\,,
this result holds as well when $V\in
\mathcal{C}^{\infty}(\mathbb{R}^d\setminus\left\{0\right\})$ is an
homogeneous function of degree $r>1$.
Our results will require the following assumption after setting \begin{align}
\mathcal{S}=\left\lbrace q\in\mathbb{R}^{d},\;\;|q|=1 \right\rbrace\;.\end{align} \begin{assumption}\label{a.31.4} The potential $V(q)$ is an homogeneous function of degree $r> 2$ in\\ $\mathcal{C}^{\infty}(\mathbb{R}^d\setminus\left\{~0\right\})$ and satisfies: \begin{align} \forall\;\; q\in\mathcal{S}\;,\;\;\;\;\;\;\;\;\;\partial_qV(q)=0\Rightarrow \mathrm{Tr}_{-,V}(q)>0\;.\label{a31.4.} \end{align} \end{assumption} Our main result is the following. \begin{thm}\label{a.3thm1.1} If the potential $V(q)$ verifies Assumption \ref{a.31.4}, then there exists a strictly positive constant $C_{V}>1$ (which depends on $V$) such that \begin{align}
\|K_{V}u\|^2_{L^2}+C_{V}\|u\|^2_{L^2}\ge \frac{1}{C_V}\Big(\|L(O_p)u\|^2_{L^2}&+\|L(\langle\nabla V(q)\rangle^{\frac{2}{3}}) u\|^2_{L^2}\nonumber\\&+\|L(\langle\mathrm{Hess}\; V(q)\rangle^{\frac{1}{2}} ) u\|^2_{L^2}+\|L(\langle D_q\rangle^{\frac{2}{3}} ) u\|^2_{L^2}\Big)~,\label{a.31.6} \end{align} holds for all $u\in D(K_{V})$ where $L(s)=\frac{s+1}{\log(s+1)}$ for any $s\ge1.$ \end{thm} \begin{cor} \label{cor} The Kramers-Fokker-Planck operator $K_V$ with a potential $V(q)$ satisfying Assumption \ref{a.31.4} has a compact resolvent. \end{cor} \begin{proof}
Let $0<\delta<1.$ Define the functions $f_\delta:\mathbb{R}^d\to\mathbb{R}$ by $$f_\delta(q)= |\nabla V(q)|^{\frac{4}{3}(1-\delta)}+|\mathrm{Hess}\,V(q)|^{1-\delta}~.$$ As a result of (\ref{a.31.6}) in Theorem~\ref{a.3thm1.1} there is a constant $C_V>1$ such that \begin{align*}
\|K_Vu\|^2_{L^2}+C_V\|u\|^2_{L^2}\ge\frac{1}{C_V}\Big(\langle u,f_\delta u\rangle+ \|L(O_p)u\|^2_{L^2}+\|L(\langle D_q\rangle^{\frac{2}{3}} ) u\|^2_{L^2}\Big)~, \end{align*} holds for all $u\in\mathcal{C}_0^{\infty}(\mathbb{R}^{2d})$ and all $\delta\in (0,1).$ In order to show that the operator $K_V$ has a compact resolvent it is sufficient to prove that $\lim\limits_{q\to +\infty}f_\delta(q)=+\infty.$
It is a matter of how different derivatives scale. Consider the unit sphere $S=\{q\in\mathbb{R}^{d}:|q|=1\}$. By Assumption (\ref{a31.4.}), at every point on $S$ either $\nabla V\neq 0$ or $|\mathrm{Hess}\; V|\neq 0$. Then the function $f_\delta$ is always positive on $S$. By hypothesis, $f_\delta$ is continuous on $S$ and therefore it achieves a positive minimum there, call it $m_\delta>0$.
For any $y,|y|>1$ there exists $\lambda>1$ such that $y=\lambda q$ for some $q\in S$. By homogeneity, $$ V(y)=\lambda^rV\left(\frac{y}{\lambda}\right)=\lambda^rV(q) $$ and therefore, by the chain rule $$
|\nabla V(y)|=\lambda^{r-1}|\nabla V(q)| $$ and $$
|\mathrm{Hess}\;V(y)|=\lambda^{d(r-2)}|\mathrm{Hess}\;V(q)|. $$ Adding these up, $$
|\nabla V(y)|^{\frac{4}{3}(1-\delta)}+|\mathrm{Hess}\,V(y)|^{1-\delta}\ge \lambda^{(1-\delta)\min\{\frac{4}{3}(r-1),d(r-2)\}}f_\delta(q)\ge m_\delta\lambda^{(1-\delta)\min\{\frac{4}{3}(r-1),d(r-2)\}} $$
which goes to infinity as $|y|=\lambda\to\infty$, since by assumption $r>2$. \end{proof} \begin{RQ} The result of Corollary \ref{cor} does not hold in the case of a homogeneous polynomial of degree 2 with degenerate Hessian. Indeed, we already know that in this case the resolvent of the Kramers-Fokker-Planck operator $K_V$ is not compact, since it is not so for the Witten Laplacian (cf. Proposition 5.19 and Theorem 10.16 in \cite{HeNi}). \end{RQ} \begin{RQ} Our results are in agreement with the results of Wei-Xi-Li \cite{Li}\cite{Li2} and those of Helffer-Nier on the Witten Laplacian with homogeneous potential \cite{HeNi1}. \end{RQ} \section{Observations and first inequalities} \subsection{Dyadic partition of unity} In this paper, we make use of a locally finite dyadic partition of unity with respect to the position variable $q \in \mathbb{R}^d.$ Such a partition is described in the following Proposition. For a detailed proof, we refer to \cite{BCD} (see page 59). \begin{prop} Let $\mathcal{C}$ be the shell $\left\lbrace x\in\mathbb{R}^{d},\;\;
\frac{3}{4}< |x|<\frac{8}{3} \right\rbrace.$ There exist radial functions $\chi$ and $\phi$ valued in the interval $[0, 1],$ belonging respectively to $\mathcal{C}^{\infty}_{0}(B(0, \frac{4}{3}))$ and to $\mathcal{C}^{\infty}_{0}(\mathcal{C})$ such that \begin{align*} \forall x\in \mathbb{R}^d,\quad\quad\chi(x) +\sum_{j\ge0}\phi(2^{-j} x) = 1\;, \end{align*} \begin{align*} \forall x\in \mathbb{R}^d\setminus\left\{0\right\},\quad\quad\sum_{j\in\mathbb{Z}}\phi(2^{-j} x) = 1\;. \end{align*} \end{prop} Setting for all $q\in\mathbb{R}^d,$ \begin{align*}&\chi_{-1}(q)=\frac{\chi(2q)}{\Big(\chi^2(2q)+\sum\limits_{j'\ge0}\phi^2(2^{-j'} q)\Big)^{\frac{1}{2}}}=\frac{\chi(2q)}{\Big(\chi^2(2q)+\phi^2(q)\Big)^{\frac{1}{2}}}\;,\\& \chi_{j}(q)=\frac{\phi(2^{-j} q)}{\Big(\chi^2(2q)+\sum\limits_{j'\ge0}\phi^2(2^{-j'} q)\Big)^{\frac{1}{2}}}\stackrel{\text{if}~j\geq 2}{=}\frac{\phi(2^{-j} q)}{\Big(\sum\limits_{j-1\leq j'\leq j+1}\phi^2(2^{-j'} q)\Big)^{\frac{1}{2}}}\;, \end{align*} we get a locally finite dyadic partition of unity \begin{align} \sum_{j\geq-1}\chi_j^2(q)=
\tilde{\chi}_{-1}^{2}(2|q|) +\tilde{\chi}_{0}^{2}(2|q|)+\sum_{j\geq 0}\widetilde{\chi}^2(2^{-j}|q|)=1\label{a.32.1} \end{align} where for all $j\in\mathbb{N},$ the cutoff functions $ \widetilde{\chi}_{0},\tilde{\chi}$ and $\widetilde{\chi}_{-1}$ belong respectively to $\mathcal{C}_0^{\infty}(\left]
\frac{3}{4},\frac{8}{3}\right[)$, $\mathcal{C}_0^{\infty}(\left] \frac{3}{4},\frac{8}{3}\right[)$ and $\mathcal{C}_0^{\infty}(\left] 0,{\frac{4}{3}}\right[).$ \begin{lem}\label{a.3lem2.2} Let $V$ be in $\mathcal{C}^{\infty}(\mathbb{R}^{d}\setminus\left\{0\right\}).$ Consider the Kramers-Fokker-Planck operator $K_{V}$ defined as in (\ref{a.3eq1}). For a locally finite partition of unity $\sum\limits_{j\ge-1}\chi^2_j(q)=1$ one has \begin{align}
\|K_{V}u\|^2_{L^2(\mathbb{R}^{2d})}=\sum\limits_{j\ge-1}\|K_{V}(\chi_ju)\|^2_{L^2(\mathbb{R}^{2d})}-\|(p\partial_q\chi_j)u\|^2_{L^2(\mathbb{R}^{2d})}\label{a.32.4}~, \end{align} for all $u\in\mathcal{C}_0^{\infty}(\mathbb{R}^{2d}).$
In particular when the cutoff functions $\chi_j$ have the form (\ref{a.32.1}), there exists a uniform constant $c>0$ so that \begin{align}
(1+4c)\|K_{V}u\|^2_{L^2(\mathbb{R}^{2d})}+c\|u\|^2_{L^2(\mathbb{R}^{2d})}\ge\sum\limits_{j\ge-1}\|K_{V}(\chi_ju)\|^2_{L^2(\mathbb{R}^{2d})},\label{a.32.5} \end{align} holds for all $u\in\mathcal{C}_0^{\infty}(\mathbb{R}^{2d}).$ \end{lem} \begin{proof} The proof of the equality (\ref{a.32.4}) is detailed in \cite{Ben}. Now it remains to show the inequality (\ref{a.32.5}), after considering a locally finite dyadic partition of unity \begin{align} \sum_{j\geq-1}\chi_j^2(q)=1\;, \end{align}
where for all $j\in\mathbb{N},$ the cutoff functions $ \chi_{j}$ and $\chi_{-1}$ are respectively supported in the shell \\$\left\lbrace q\in\mathbb{R}^{d},\;\; 2^j\frac{3}{4}\le |q|\le 2^j\frac{8}{3} \right\rbrace$ and in the ball $B( 0,\frac{3}{4}).$
Since the partition is locally finite, for each index $j\ge-1$ there are finitely many $j'$ such that $(\partial_q\chi_j)\chi_{j'}$ is nonzero. Along these lines, there exists a uniform constant $c>0$ so that \begin{align}
\sum\limits_{j\geq-1}\|(p\partial_q\chi_j)u\|^2_{L^2}&=\sum\limits_{j\geq-1}\sum\limits_{j'\geq-1}\|(p\partial_q\chi_j)\chi_{j'}u\|^2_{L^2}\nonumber\\&\le c\sum\limits_{j\geq-1}\frac{1}{(2^j)^2}\|p\chi_ju\|^2_{L^2}~,\label{a.32.6} \end{align} holds for all $u\in\mathcal{C}_0^{\infty}(\mathbb{R}^{2d}).$
On the other hand, for every $u\in\mathcal{C}_0^{\infty}(\mathbb{R}^{2d}),$ \begin{align}
c\sum\limits_{j\geq-1}\frac{1}{(2^j)^2}\|p\chi_ju\|^2_{L^2}\le 4c\,\|pu\|^2_{L^2}\le8c\,\mathrm{Re}\;\langle u,K_Vu\rangle\le 4c\,(\|u\|^2_{L^2}+\|K_Vu\|^2_{L^2})\;.\label{a.32.7} \end{align} Collecting the estimates (\ref{a.32.4}), (\ref{a.32.6}) and (\ref{a.32.7}), we establish the desired inequality (\ref{a.32.5}). \end{proof} \subsection{Localisation in a fixed Shell}
\begin{lem}\label{a.3lem2.3} Let $V(q)$ be an homogeneous function in $\mathcal{C}^{\infty}(\mathbb{R}^{d}\setminus\left\{0\right\})$ of degree $r$ and assume $j\in\mathbb{Z}.$ Given $u_j\in\mathcal{C}_0^{\infty}(\mathbb{R}^{2d}),$ one has \begin{align*}
\|K_{V}u_j\|_{L^2(\mathbb{R}^{2d})}=\|K_{j,V}v_j\|_{L^2(\mathbb{R}^{2d})}\;, \end{align*} where the operator $K_{j,V}$ is defined by \begin{align}
K_{j,V}=\frac{1}{2^j}p\partial_q-(2^j)^{r-1}\partial_qV(q)\partial_p+O_p\;,\label{a.3222}\end{align} and $\;v_j(q,p)=2^{\frac{jd}{2}}u_j(2^jq,p).$
In particular when $u_j$ is supported in $\left\lbrace q\in\mathbb{R}^{d},\; 2^j\frac{3}{4}\le |q|\le 2^j\frac{8}{3}\right\rbrace,$ the support of $v_j$ is a fixed shell $\overline{\mathcal{C}}=\left\lbrace q\in\mathbb{R}^{d},\; \frac{3}{4}\le |q|\le \frac{8}{3}\right\rbrace\;.$ \end{lem} \begin{proof} Let $j\in\mathbb{Z}$ be an index. Assume $u_j\in\mathcal{C}_0^{\infty}(\mathbb{R}^{2d})$ and state \begin{align} v_j(q,p)=2^{\frac{jd}{2}}u_j(2^jq,p)\;.\label{a.3211} \end{align} On the grounds that the function $V$ is homogeneous of degree $r$ we deduce that respectively its gradient $\partial_qV(q)$ is homogeneous of degree $r-1.$ As follows, we can write \begin{align*} K_Vu_j(q,p)&=K_V\Big(2^{\frac{-jd}{2}}v_j(2^{-j}q,p)\Big)\\&=2^{\frac{-jd}{2}}\Big((2^{-j}p\partial_q-(2^j)^{r-1}\partial_qV(q)\partial_p+O_p)v_j\Big)(2^{-j}q,p)\;. \end{align*} Notice that if \begin{align*}
\mathrm{supp}\;u_j\subset\left\lbrace q\in\mathbb{R}^{d},\; 2^j\frac{3}{4}\le |q|\le 2^j\frac{8}{3}\right\rbrace\;, \end{align*} the cutoff functions $v_j,$ defined in (\ref{a.3211}), are all supported in the fixed shell \begin{align*}
\overline{\mathcal{C}}=\left\lbrace q\in\mathbb{R}^{d},\; \frac{3}{4}\le |q|\le \frac{8}{3}\right\rbrace\;. \end{align*} \end{proof} \begin{RQ} Assume $j\in\mathbb{N}.$ If we introduce a small parameter $h=2^{-2(r-1)j}$ then the operator $K_{j,V},$ defined in (\ref{a.3222}), can be rewritten as \begin{align*} K_{j,V}=\frac{1}{h}\Big(\sqrt{h}p(h^{\frac{1}{2}+\frac{1}{2(r-1)}}\partial_q)-\sqrt{h}\partial_qV(q)\partial_p+\frac{h}{2}(-\Delta_p+p^2)\Big)\;. \end{align*} Now owing to a dilation with respect to the velocity variable $p,$ which for $(\sqrt{h}p,\sqrt{h}\partial_p)$ associates $(p,h\partial_p),$ we deduce that the operator $K_{j,V}$ is unitary equivalent to \begin{align*} \widehat{K}_{j,V}=\frac{1}{h}\Big(p(h^{\frac{1}{2}+\frac{1}{2(r-1)}}\partial_q)-\partial_qV(q)h\partial_p+\frac{1}{2}(-h^2\Delta_p+p^2)\Big)\;. \end{align*} In particular, taking $r=2,$ \begin{align*} \widehat{K}_{j,V}=\frac{1}{h}\Big(p(h\partial_q)-\partial_qV(q)h\partial_p+\frac{1}{2}(-h^2\Delta_p+p^2)\Big)\;, \end{align*} is clearly a semiclassical operator with respect to the variables $q$ and $p$. However if $r>2$, the operator $\widehat{K}_{j,V}$ is semiclassical only with respect to the velocity variable $p$ (since $h^{\frac{1}{2}+\frac{1}{2(r-1)}}>h$). For a polynomial $V(q),$ the case $r=2$ corresponds to the quadratic situation. Extensive works have been done concerned with this case (see \cite{Hor}\cite{HiPr}\cite{Vio}\cite{Vio1}\cite{AlVi}\cite{BNV}). \end{RQ}
\section{Proof of the main result} In this section we present the proof of Theorem~\ref{a.3thm1.1}. \begin{proof} In the whole proof we denote \begin{align*}
\overline{\mathcal{C}}=\left\lbrace q\in\mathbb{R}^{d},\; \frac{3}{4}\le |q|\le \frac{8}{3}\right\rbrace\;. \end{align*} Assume $u\in\mathcal{C}_0^{\infty}(\mathbb{R}^{2d})$ and consider a locally finite dyadic partition of unity defined as in (\ref{a.32.1}). By Lemma \ref{a.3lem2.2} (see (\ref{a.32.5})), there is a uniform constant $c$ such that \begin{align}
(1+4c)\|K_{V}u\|^2_{L^2(\mathbb{R}^{2d})}+c\|u\|^2_{L^2(\mathbb{R}^{2d})}\ge\sum\limits_{j\ge-1}\|K_{V}u_j\|^2_{L^2(\mathbb{R}^{2d})}\,,\label{a.33111} \end{align} where we denote $u_j=\chi_ju.$ We obtain by Lemma \ref{a.3lem2.3} and the estimate (\ref{a.33111}) \begin{align}
(1+4c)\|K_{V}u\|^2_{L^2(\mathbb{R}^{2d})}+c\|u\|^2_{L^2(\mathbb{R}^{2d})}\ge\sum\limits_{j\ge-1}\|K_{j,V}v_j\|^2_{L^2(\mathbb{R}^{2d})}\;, \end{align} where the operator \begin{align*}
K_{j,V}=\frac{1}{2^j}p\partial_q-(2^j)^{r-1}\partial_qV(q)\partial_p+O_p\;,\end{align*} and $ v_j(q,p)=2^{\frac{jd}{2}}u_j(2^jq,p)\;.$ Setting $h=2^{-2(r-1)j},$ one has \begin{align*}
K_{j,V}=p(h^{\frac{1}{2(r-1)}}\partial_q)-h^{-\frac{1}{2}}\partial_qV(q)\partial_p+\frac{1}{2}(-\Delta_p+p^2)\;.\end{align*} Now, fix $\nu>0$ such that \begin{align}
\max(\frac{1}{6},\frac{1}{8}+\frac{3}{8(r-1)})<\nu<\frac{1}{4}+\frac{1}{4(r-1)}~. \label{a.32..9} \end{align} Such a choice is always possible: \begin{itemize} \item In the case $r\ge 10,$ $\max(\frac{1}{6},\frac{1}{8}+\frac{3}{8(r-1)})$ equals $\frac{1}{6}$ while $\frac{1}{4}+\frac{1}{4(r-1)}$ is always greater than $\frac{1}{4}.$ So we can choose a value $\nu$ independent of $r$ between $\frac{1}{6}$ and $\frac{1}{4}.$ \item in the case $2< r<10,$ $\max(\frac{1}{6},\frac{1}{8}+\frac{3}{8(r-1)})$ equals $\frac{1}{8}+\frac{3}{8(r-1)}<\frac{1}{4}+\frac{1}{4(r-1)}$\,. Hence, we can choose for example $\nu=\frac{3}{16}+\frac{5}{16(r-1)}.$ \end{itemize} Taking $\nu>0,$ satisfying (\ref{a.32..9}), we consider a locally finite partition of unity with respect to $q\in\mathbb{R}^d$ given by \begin{align*}
\sum\limits_{k\ge-1}(\theta_{k,h}(q))^2&=\sum\limits_{k\ge-1}\Big(\theta(\frac{1}{|\ln(h)|h^{\nu}}q-q_k)\Big)^2\nonumber\\&=\sum\limits_{k\ge-1}\Big(\theta(\frac{1}{|\ln(h)|h^{\nu}}(q-q_{k,h}))\Big)^2=1\;, \end{align*} where for any index $k$ \begin{align*}
q_{k,h}=|\ln(h)|h^{\nu}q_{k}\;,\;\;\;\;\;\mathrm{supp}\;\theta_{k,h}\subset B(q_{k,h},|\ln(h)|h^{\nu})\;,\;\;\;\;\;\theta_{k,h}\equiv1\;\;\text{in}\;\;B(q_{k,h},\frac{1}{2}|\ln(h)|h^{\nu})\;. \end{align*} Using this partition we get through Lemma \ref{a.3lem2.2} (see (\ref{a.32.4})), \begin{align}
\|K_{j,V}v_j\|^2_{L^2}\ge \sum\limits_{k\ge-1}\|K_{j,V}\theta_{k,h}v_j\|^2_{L^2}-|\ln(h)|^{-2}h^{\frac{1}{r-1}-2\nu}\|p\theta_{k,h}v_j\|^2_{L^2}\;.\label{a.3322} \end{align} To shorten the expressions, for the rest of the proof we write \begin{align*}w_{k,j}=\theta_{k,h}v_j\;.\end{align*} Taking into account (\ref{a.3322}), \begin{align}
\|K_{j,V}v_j\|^2_{L^2}&\ge \sum\limits_{k\ge-1}\|K_{j,V}w_{k,j}\|^2_{L^2}-|\ln(h)|^{-2}h^{\frac{1}{r-1}-2\nu}\|w_{k,j}\|_{L^2}\|K_{j,V}w_{k,j}\|_{L^2}\nonumber\\&\ge\sum\limits_{k\ge-1}{\frac{3}{4}}\|K_{j,V}w_{k,j}\|^2_{L^2}-2|\ln(h)|^{-2}h^{\frac{1}{r-1}-2\nu}\|w_{k,j}\|^2_{L^2}\;.\label{a.32.13} \end{align} Notice that in the two inequalities above we used, respectively, the fact that \begin{align*}
\|pw_{k,j}\|^2_{L^2}\leq 2\mathrm{Re}\langle w_{k,j},K_{j,V}w_{k,j}\rangle\le \|w_{k,j}\|_{L^2}\|K_{j,V}w_{k,j}\|_{L^2}\;, \end{align*} and the Cauchy inequality with epsilon ( $ab\le \epsilon a^2+\frac{1}{4\epsilon}b^2$).
From now on, set \begin{align*} K_0=\left\{q\in \overline{\mathcal{C}}\;,\;\;\;\partial_qV(q)=0\right\}\;. \end{align*} Clearly, by continuity of the map $q\mapsto\partial_qV(q)$ on the shell $\overline{\mathcal{C}}$ (which is a compact subset of $\mathbb{R}^d$), we deduce the compactness of $K_0.$
Since $q\mapsto\frac{\mathrm{Tr}_{-,V}(q)}{1+\mathrm{Tr}_{+,V}(q)}$ is uniformly continuous on any compact neighborhood of $K_{0}$\,, there exists $\epsilon_{1}>0$ such that \begin{align} d(q,K_0)\le \epsilon_1\Rightarrow \frac{\mathrm{Tr}_{-,V}(q)}{1+\mathrm{Tr}_{+,V}(q)}\ge \frac{\epsilon_0}{2}\;,\label{a.33.6.} \end{align} where $\epsilon_0:=\min\limits_{q\in K_0}\frac{\mathrm{Tr}_{-,V}(q)}{1+\mathrm{Tr}_{+,V}(q)}.$
On the other hand, in view of the definition of $K_0$ and by continuity of $q\mapsto \partial_qV(q)$ on $\overline{\mathcal{C}},$ there is a constant $\epsilon_2>0$ (that depends on $\epsilon_1$) such that \begin{align}
\forall\; q\in \overline{\mathcal{C}}\;,\;\;d(q,K_0)\ge \epsilon_1\Rightarrow |\partial_qV(q)|\ge \epsilon_2\;.\label{a.337} \end{align} Now let us introduce \begin{align*} \Sigma(\epsilon_1)=\left\{q\in \mathcal{C}\;,\;\;d(q,K_0)\ge\epsilon_1\right\}\;, \end{align*} \begin{align*} I(\epsilon_1)=\left\{k\in \mathbb{Z}\;,\;\;\mathrm{supp}\;\theta_{k,h}\subset\Sigma(\epsilon_1)\right\}\;. \end{align*} In order to establish a subelliptic estimate for $K_{j,V},$ we distinguish the two following cases. \begin{outerdesc}
\item[Case \textbf{1}] $k\not\in I(\epsilon_1).$ In this case the support of the cutoff function $\theta_{k,h}$ might intersect the set of zeros of the gradient of $V.$
\item[Case \textbf{2}] $k\in I(\epsilon_1).$ Here the gradient of $V$ does not vanish at any $q$ in the support of $\theta_{k,h}.$ \end{outerdesc}
The idea is to use, in each of these cases, either a quadratic or a linear approximating polynomial $\widetilde{V}$ near some $q'_{k,h}\in\mathrm{supp}\;\theta_{k,h}$ to write \begin{align*}
\sum\limits_{k\ge-1}\|K_{j,V}w_{k,j}\|^2_{L^2}\ge \frac{1}{2}\sum\limits_{k\ge-1}\|K_{j,\widetilde{V}}w_{k,j}\|^2_{L^2}-\|(K_{j,V}-K_{j,\widetilde{V}})w_{k,j}\|^2_{L^2}\;, \end{align*} or equivalently \begin{align}
\sum\limits_{k\ge-1}\|K_{j,V}w_{k,j}\|^2_{L^2}\ge \frac{1}{2}\sum\limits_{k\ge-1}\|K_{j,\widetilde{V}}w_{k,j}\|^2_{L^2}-\|\frac{1}{\sqrt{h}}(\partial_qV(q)- \partial_q\widetilde{V}(q))\partial_pw_{k,j}\|^2_{L^2}\;.\label{a.33.444} \end{align} Then based on the estimates written in \cite{BNV}, which are valid for the operator $K_{\widetilde{V}},$ we deduce a subelliptic estimate for $K_{j,V},$ after a careful control of the errors which appear in (\ref{a.32.13}) and (\ref{a.33.444}).
\noindent\textbf{Case 1.} In this situation, we use the quadratic approximation near some element \\$q'_{k,h}\in\mathrm{supp}\;\theta_{k,h}\cap(\mathbb{R}^d\setminus \Sigma(\epsilon_1)),$ \begin{align*}
V^2_{k,h}(q)&=\sum\limits_{|\alpha|\le2}\frac{\partial_q^{\alpha}V(q'_{k,h})}{\alpha!}(q-q'_{k,h})^{\alpha}\;. \end{align*} Notice that one has for all $q\in\mathbb{R}^d,$ \begin{align}
|V(q)- V^2_{k,h}(q)|=\mathcal{O}(|q-q'_{k,h}|^3)\;. \end{align} Accordingly, for every $q$ in the support of $w_{k,j},$
\begin{align}
|\partial_qV(q)- \partial_qV^2_{k,h}(q)|&=\mathcal{O}(|q-q'_{k,h}|^2)\nonumber\\&=\mathcal{O}(|\ln(h)|^2h^{2\nu})\;.\label{a.33.66} \end{align} Combining (\ref{a.33.444}) and (\ref{a.33.66}), there is a constant $c > 0$ such that
\begin{align}
\sum\limits_{k\ge-1}\|K_{j,V}w_{k,j}\|^2_{L^2}&\ge \frac{1}{2}\sum\limits_{k\ge-1}\|K_{j,V^2_{k,h}}w_{k,j}\|^2_{L^2}-c\,\frac{(|\ln(h)|^2h^{2\nu})^2}{h}\|\partial_pw_{k,j}\|^2_{L^2}\nonumber\\&\ge \frac{1}{2}\sum\limits_{k\ge-1}\|K_{j,V^2_{k,h}}w_{k,j}\|^2_{L^2}-c\,\frac{(|\ln(h)|^2h^{2\nu})^2}{h}\|w_{k,j}\|_{L^2}\|K_{j,V^2_{k,h}}w_{k,j}\|_{L^2}\nonumber\\&\ge \frac{3}{16}\sum\limits_{k\ge-1}\|K_{j,V^2_{k,h}}w_{k,j}\|^2_{L^2}-2c\,\frac{(|\ln(h)|^2h^{2\nu})^2}{h}\|w_{k,j}\|^2_{L^2}\;.\label{a.32.77} \end{align} Putting (\ref{a.32.13}) and (\ref{a.32.77}) together, \begin{align}
\|K_{j,V}v_j\|^2\ge \frac{9}{64}\sum\limits_{k\ge-1}\|K_{j,V^2_{k,h}}w_{k,j}\|^2-\frac{3}{2}\,c\,\frac{(|\ln(h)|^2h^{2\nu})^2}{h}\|w_{k,j}\|^2-2|\ln(h)|^{-2}h^{\frac{1}{r-1}-2\nu}\|w_{k,j}\|^2\;.\label{a.3312} \end{align} On the other hand, owing to a change of variables $q''=qh^{\frac{1}{2(r-1)}},$ one can write \begin{align}
\|K_{j,V^2_{k,h}}w_{k,j}\|_{L^2}=\|\widetilde{K}_{j,V^2_{k,h}}\widetilde{w}_{k,j}\|_{L^2}\label{a.33.88}\;, \end{align} where the operator $\widetilde{K}_{j,V^2_{k,h}}$ reads \begin{align*} \widetilde{K}_{j,V^2_{k,h}}&=p\partial_q-h^{-\frac{1}{2}}\partial_qV^2_{k,h}(h^{\frac{1}{2(r-1)}}q)\partial_p+\frac{1}{2}(-\Delta_p+p^2) \\ & = p\partial_q-\underbrace{h^{-\frac{1}{2}+\frac{1}{2(r-1)}}}_{=:H}\partial_qV^2_{k,h}(q)\partial_p+\frac{1}{2}(-\Delta_p+p^2) \;, \end{align*} and \begin{align*} w_{k,j}(q,p)=\frac{1}{h^{\frac{d}{4(r-1)}}}\widetilde{w}(\frac{q}{h^{\frac{1}{2(r-1)}}},p)\;. \end{align*} In the rest of the proof we denote \begin{align*} H=h^{-\frac{1}{2}}h^{\frac{1}{2(r-1)}}\;. \end{align*} From now on assume $j\in\mathbb{N}.$ In view of (\ref{a.33.6.}), $\mathrm{Tr}_{-,V^2_{k,h}}=\mathrm{Tr}_{-,V}(q'_{k,h})\not=0.$ Hence by (\ref{1.5mm}), \begin{align}
\|\widetilde{K}_{j,V^2_{k,h}}\widetilde{w}_{k,j}\|^2_{L^2}\ge c\,\frac{1+H\mathrm{Tr}_{-,V^2_{k,h}}}{\log(2+H\mathrm{Tr}_{-,V^2_{k,h}})^2}\| \widetilde{w}_{k,j}\|^2_{L^2}\;. \end{align} Equivalently, \begin{align}
\|\widetilde{K}_{j,V^2_{k,h}}\widetilde{w}_{k,j}\|^2_{L^2}\ge c\,\frac{1+H\mathrm{Tr}_{-,V}(q'_{k,h})}{\log(2+H\mathrm{Tr}_{-,V}(q'_{k,h}))^2}\| \widetilde{w}_{k,j}\|^2_{L^2}\;.\label{a.3316} \end{align} Using once more (\ref{a.33.6.}), \begin{align} \mathrm{Tr}_{-,V}(q'_{k,h})\ge \frac{\epsilon_0}{2}(1+\mathrm{Tr}_{+,V}(q'_{k,h}))\;, \end{align} where we remind that $\epsilon_0=\min\limits_{q\in K_0}\frac{\mathrm{Tr}_{-,V}(q)}{1+\mathrm{Tr}_{+,V}(q)}.$ Consequently \begin{align}
|\mathrm{Hess}\;V(q'_{k,h})|\ge \mathrm{Tr}_{-,V}(q'_{k,h})\ge \frac{\epsilon_0}{2}\;, \end{align} and \begin{align}
\mathrm{Tr}_{-,V}(q'_{k,h})&\ge \frac{1}{2}\mathrm{Tr}_{-,V}(q'_{k,h})+\frac{\epsilon_0}{4}(1+\mathrm{Tr}_{+,V}(q'_{k,h}))\nonumber\\&\ge \frac{1}{2}\min(1,\frac{\epsilon_0}{2})(\mathrm{Tr}_{-,V}(q'_{k,h})+\mathrm{Tr}_{+,V}(q'_{k,h}))\nonumber\\&\ge \frac{1}{2}\min(1,\frac{\epsilon_0}{2})|\mathrm{Hess}\;V(q'_{k,h})|\;.\label{a.3318} \end{align} Furthermore by continuity of the map $q\mapsto \mathrm{Tr}_{-,V}(q)$ on the compact set $ \overline{\mathcal{C}},$ there exists a constant $\epsilon_3 > 0$ such that $\mathrm{Tr}_{-,V}(q)\le \epsilon_3$ for all $q\in\overline{\mathcal{C}}.$ Hence \begin{align}\frac{\epsilon_0}{2}\le \mathrm{Tr}_{-,V}(q'_{k,h})\le \epsilon_3\;.\label{a.3317} \end{align} From (\ref{a.3316}), (\ref{a.3318}) and (\ref{a.3317}), \begin{align*}
\|\widetilde{K}_{j,V^2_{k,h}}\widetilde{w}_{k,j}\|^2_{L^2}\ge c\,\frac{H}{\log(H)^2}\| \widetilde{w}_{k,j}\|^2_{L^2}\;. \end{align*} It follows from the above inequality and (\ref{a.33.88}), \begin{align}
\|K_{j,V^2_{k,h}}w_{k,j}\|^2_{L^2}\ge c\,\frac{H}{\log(H)^2}\| w_{k,j}\|^2_{L^2}\;.\label{a.33.1333} \end{align} Now using the estimate (\ref{a.33.1333}), we should control the errors coming from the partition of unity and the quadratic approximation. For this reason, notice that our choice of exponent $\nu$ in \eqref{a.32..9} implies \begin{align*} \left\{ \begin{array}{l}
\frac{(|\ln(h)|^2h^{2\nu})^2}{h}\ll \frac{H}{\log(H)^2}\\
|\ln(h)|^{-2}h^{\frac{1}{r-1}-2\nu}\ll \frac{H}{\log(H)^2}\;. \end{array} \right. \end{align*} As a result, collecting the estimates (\ref{a.3312}) and (\ref{a.33.1333}), we deduce the existence of a constant $c>0$ such that \begin{align}
\|K_{j,V}v_j\|^2_{L^2}\ge c\sum\limits_{k\ge-1}\|K_{j,V^2_{k,h}}w_{k,j}\|^2_{L^2}\;.\label{a.33...15} \end{align} Via (\ref{eq44}), there is a constant $c>0$ so that \begin{align}
\|\widetilde{K} _{j,V^2_{k,h}}\widetilde{w}_{k,j}\|^2+(1+10c)H|\mathrm{Hess}\;V(q'_{k,h})|\|\widetilde{w}_{k,j}\|^2\ge c\Big(\|O_p\widetilde{w}_{k,j}\|^2&+\|\langle D_q \rangle^{\frac{2}{3}} \widetilde{w}_{k,j}\|^2\nonumber\\&+H|\mathrm{Hess}\;V(q'_{k,h})|\|\widetilde{w}_{k,j}\|^2\Big)\;. \end{align} Hence using the reverse change of variables $q''=\frac{q}{h^{\frac{1}{2(r-1)}}}\;,$ we obtain in view of the above estimate and (\ref{a.33.88}), \begin{align}
\|K_{j,V^2_{k,h}}w_{k,j}\|^2+(1+10c)H|\mathrm{Hess}\;V(q'_{k,h})|\|w_{k,j}\|^2\ge c\Big(\|O_pw_{k,j}\|^2&+\|\langle h^{\frac{1}{2(r-1)}} D_q \rangle^{\frac{2}{3}} w_{k,j}\|^2\nonumber\\&+H|\mathrm{Hess}\;V(q'_{k,h})|\|w_{k,j}\|^2\Big)\;.\label{a.33.16} \end{align} Moreover, by (\ref{a.3318}) and (\ref{a.3317}), \begin{align}
\frac{\epsilon_0}{2}\le|\mathrm{Hess}\;V(q'_{k,h})|\le \frac{2\epsilon_3}{\min(1,\frac{\epsilon_0}{2})}\;.\label{a.33.19}\end{align} Putting (\ref{a.33.16}) and (\ref{a.33.19}) together, there is a constant $c>0$ so that \begin{align}
\|K_{j,V^2_{k,h}}w_{k,j}\|^2+H\|w_{k,j}\|^2\ge c\Big(\|O_pw_{k,j}\|^2&+\|\langle h^{\frac{1}{2(r-1)}} D_q \rangle^{\frac{2}{3}}w_{k,j}\|^2\nonumber\\&+H\|w_{k,j}\|^2+\|\langle H|\mathrm{Hess}\;V(q'_{k,h})|\rangle^{\frac{1}{2}}w_{k,j}\|^2\Big)\;.\label{a 33.1888} \end{align}
On the other hand, for all $q\in\mathrm{supp}\;w_{k,j},$ \begin{align}|\mathrm{Hess}\;V(q)-\mathrm{Hess}\;V(q'_{k,h})|=\mathcal{O}(|q-q'_{k,h}|)=\mathcal{O}(|\ln(h)|h^{\nu})\;.\label{a.33266}\end{align} Therefore, by (\ref{a.33.19}) and (\ref{a.33266}), we obtain, for every $q\in\mathrm{supp}\;w_{k,j}$ and all $j$ sufficiently large, \begin{align}
\frac{1}{2}|\mathrm{Hess}\;V(q'_{k,h})|\le|\mathrm{Hess}\;V(q)|\le \frac{3}{2}|\mathrm{Hess}\;V(q'_{k,h})|\;.\label{a.3326} \end{align} From (\ref{a 33.1888}) and (\ref{a.3326}), there exists a constant $c>0$ so that \begin{align}
\|K_{j,V^2_{k,h}}w_{k,j}\|^2+H\|w_{k,j}\|^2\ge c\Big(\|O_pw_{k,j}\|^2&+\|\langle h^{\frac{1}{2(r-1)}} D_q \rangle^{\frac{2}{3}} w_{k,j}\|^2\nonumber\\&+H\|w_{k,j}\|^2+\|\langle H|\mathrm{Hess}\;V(q)|\rangle^{\frac{1}{2}}w_{k,j}\|^2\Big)\;,\label{a 3328} \end{align} is valid for all $j$ large enough.
Furthermore, by continuity of the map $q\mapsto|\partial_qV(q)|^{\frac{4}{3}}$ on the fixed shell $\overline{\mathcal{C}}$, for all $q\in\mathrm{supp}\; w_{k,j}$ \begin{align}
\frac{1}{4}H\ge c\,|h^{-\frac{1}{2}}\partial_qV(q)|^{\frac{4}{3}}\;,\label{a 33.199} \end{align} holds for all $j$ sufficiently large.
Combining (\ref{a 3328}) and (\ref{a 33.199}), \begin{align}
\|K_{j,V^2_{k,h}}w_{k,j}\|^2+(2+H)\|w_{k,j}\|^2\ge c\Big(\|&O_pw_{k,j}\|^2+\|\langle h^{\frac{1}{2(r-1)}} D_q \rangle^{\frac{2}{3}} w_{k,j}\|^2+(2+H)\|w_{k,j}\|^2\nonumber\\&+\|(H|\mathrm{Hess}\;V(q)|)^{\frac{1}{2}}w_{k,j}\|^2+\|\langle h^{-\frac{1}{2}}|\partial_qV(q)|\rangle^{\frac{2}{3}}w_{k,j}\|^2\Big)\;.\label{a.33.20000} \end{align} Putting (\ref{a.33.1333}) and (\ref{a.33.20000}) together, \begin{align}
\|K_{j,V^2_{k,h}}w_{k,j}\|^2\ge c\Big(\|\frac{O_p}{\log(2+H)}&w_{k,j}\|^2+\|\frac{\langle h^{\frac{1}{2(r-1)}} D_q \rangle^{\frac{2}{3}}}{\log(2+H)}w_{k,j}\|^2+\|\frac{(2+H)^{\frac{1}{2}}}{\log(2+H)}w_{k,j}\|^2\nonumber\\&+\|\frac{\langle H|\mathrm{Hess}\;V(q)|\rangle^{\frac{1}{2}}}{\log(2+H)}w_{k,j}\|^2+\|\frac{\langle h^{-\frac{1}{2}}|\partial_qV(q)|\rangle^{\frac{2}{3}}}{\log(2+H)}w_{k,j}\|^2\Big)\;,\label{a.33.201} \end{align} holds for all $j\ge j_0,$ for some $j_0\ge 1$ large enough.
Now let us collect the finitely many remaining terms for $-1\le j\le j_0.$ After recalling $h=2^{-2(r-1)j}$ and $H=h^{-\frac{1}{2}+\frac{1}{2(r-1)}}$ we define \begin{multline*}
c_V^{(1)}=\max_{-1\le j\le j_0}
\left[A_{V_{k,h}^{2}}+\sup_{q\in \mathrm{supp}\,(\chi_{j}\theta_{k,h})}\left(\langle H|\mathrm{Hess}~V(q)|\rangle+\langle h^{-\frac{1}{2}}\left|\partial_{q}V(q)\right|\rangle^{4/3}\right) \right. \\ \left.
+\frac{(2+H)}{\log(2+H)^2}+\frac{3}{2}\,c\,\frac{(|\ln(h)|^2h^{2\nu})^2}{h}+2|\ln(h)|^{-2}h^{\frac{1}{r-1}-2\nu}\right]\,. \end{multline*} From the lower bound \eqref{eq44}, we deduce the existence of a constant $c>0$ so that \begin{align}
\frac{9}{64}\|K_{j,V_{k,h}^{2}}w_{k,j}\|^2&+(c_V^{(1)}-\frac{3}{2}\,c\,\frac{(|\ln(h)|^2h^{2\nu})^2}{h}-2|\ln(h)|^{-2}h^{\frac{1}{r-1}-2\nu})\|w_{k,j}\|^2\nonumber\\&\geq c\Big(\|O_pw_{k,j}\|^{2}+\|\langle
h^{\frac{1}{2(r-1)}} D_{q}\rangle^{2/3}w_{k,j}\|^{2}
+\|\langle h^{-\frac{1}{2}}|\partial_{q}V(q)|\rangle^{2/3}w_{k,j}\|^{2}
\nonumber\\&\hspace{4cm}+\|\langle
H|\mathrm{Hess}~V(q)|\rangle^{1/2}w_{k,j}\|^{2}
+\|\frac{(2+H)^{\frac{1}{2}}}{\log(2+H)}w_{k,j}\|^{2}\Big)\;,\label{a.33334} \end{align} holds for all $-1\le j\le j_0.$
Finally, collecting (\ref{a.33...15}), (\ref{a.33.201}) and (\ref{a.33334}), \begin{align}
\|K_{j,V}v_j\|^2+c_V^{(2)}\|v_j\|^2\ge c\sum\limits_{k\not\in I(\epsilon_1)}\Big(\|&\frac{O_p}{\log(2+H)}w_{k,j}\|^2+\|\frac{\langle h^{\frac{1}{2(r-1)}} D_q \rangle^{\frac{2}{3}}}{\log(2+H)}w_{k,j}\|^2+\|\frac{(2+H)^{\frac{1}{2}}}{\log(2+H)}w_{k,j}\|^2\nonumber\\&+\|\frac{\langle H|\mathrm{Hess}\;V(q)|\rangle^{\frac{1}{2}}}{\log(2+H)}w_{k,j}\|^2+\|\frac{\langle h^{-\frac{1}{2}}|\partial_qV(q)|\rangle^{\frac{2}{3}}}{\log(2+H)}w_{k,j}\|^2\Big)\;,\label{a.33.2000} \end{align} is valid for every $j\ge -1.$
\noindent\textbf{Case 2.} We consider in this case the linear approximating polynomial \begin{align*}
V^1_{k,h}(q)&=\sum\limits_{|\alpha|=1}\frac{\partial_q^{\alpha}V(q_{k,h})}{\alpha!}(q-q_{k,h})^{\alpha}\;. \end{align*} Note that for any $q\in\mathbb{R}^d,$ \begin{align}
|V(q)- V^1_{k,h}(q)|=\mathcal{O}(|q-q_{k,h}|^2)\;, \end{align} and for every $q\in\mathrm{supp}\;w_{k,j},$
\begin{align}
|\partial_qV(q)- \partial_qV^1_{k,h}(q)|&=\mathcal{O}(|q-q_{k,h}|)\nonumber\\&=\mathcal{O}(|\ln(h)|h^{\nu})\;.\label{a.32.24} \end{align} Due to (\ref{a.33.444}) and (\ref{a.32.24}),
\begin{align}
\sum\limits_{k\ge-1}\|K_{j,V}w_{k,j}\|^2&
\ge \frac{1}{2}\sum\limits_{k\ge-1}\|K_{j,V^1_{k,h}}w_{k,j}\|^2-c\,\frac{(|\ln(h)|h^{\nu})^2}{h}\|\partial_pw_{k,j}\|^2\nonumber\\&\ge \frac{1}{2}\sum\limits_{k\ge-1}\|K_{j,V^1_{k,h}}w_{k,j}\|^2-c\,\frac{(|\ln(h)|h^{\nu})^2}{h}\|w_{k,j}\|\|K_{j,V^1_{k,h}}w_{k,j}\|\nonumber\\&\ge \frac{3}{16}\sum\limits_{k\ge-1}\|K_{j,V^1_{k,h}}w_{k,j}\|^2-2c\,\frac{(|\ln(h)|h^{\nu})^2}{h}\|w_{k,j}\|^2\;.\label{a.32.255} \end{align} Assembling (\ref{a.32.13}) and (\ref{a.32.255}), \begin{align}
\|K_{j,V}v_j\|^2\ge \frac{9}{64}\sum\limits_{k\ge-1}\|K_{j,V^1_{k,h}}w_{k,j}\|^2-\frac{3}{2}\,c\,\frac{(|\ln(h)|^2h^{2\nu})^2}{h}\|w_{k,j}\|^2-2|\ln(h)|^{-2}h^{\frac{1}{r-1}-2\nu}\|w_{k,j}\|^2\;.\label{a.3336} \end{align} Additionally, one has \begin{align}
\|K_{j,V^1_{k,h}}w_{k,j}\|_{L^2}=\|\widetilde{K}_{j,V^1_{k,h}}\widetilde{w}_{k,j}\|_{L^2}\;,\label{a.32.666} \end{align} where the operator $\widetilde{K}_{j,V^1_{k,h}}$ is given by \begin{align} \widetilde{K}_{j,V^1_{k,h}}&=p\partial_q-h^{-\frac{1}{2}}\partial_qV^1_{k,h}(h^{\frac{1}{2(r-1)}}q)\partial_p+\frac{1}{2}(-\Delta_p+p^2)\nonumber\\&=p\partial_q-h^{-\frac{1}{2}}\partial_qV(q_{k,h})\partial_p+\frac{1}{2}(-\Delta_p+p^2)\;, \end{align} and \begin{align} w_{k,j}(q,p)=\frac{1}{h^{\frac{d}{4(r-1)}}}\widetilde{w}_{k,j}(\frac{q}{h^{\frac{1}{2(r-1)}}},p)\;. \end{align} Now, in order to absorb the errors in (\ref{a.3336}) we need the following estimate, shown in \cite{BNV} (see (\ref{1.5mm})), \begin{align}
\|\widetilde{K} _{j,V^1_{k,h}}\widetilde{w}_{k,j}\|^2_{L^2}\ge c\| (h^{-\frac{1}{2}}|\partial_qV(q_{k,h})|)^{\frac{2}{3}}\widetilde{w}_{k,j}\|^2_{L^2}\;.\label{a.32.30} \end{align} From now on assume $j\in\mathbb{N}.$ Taking into account (\ref{a.337}) and (\ref{a.32.30}), \begin{align}
\|\widetilde{K}_{j,V^1_{k,h}}\widetilde{w}_{k,j}\|^2_{L^2}\ge c\| (h^{-\frac{1}{2}})^{\frac{2}{3}}\widetilde{w}_{k,j}\|^2_{L^2}\;.\label{a.3341} \end{align} Owing to (\ref{a.32.666}) and (\ref{a.3341}), \begin{align}
\|K_{j,V^1_{k,h}}w_{k,j}\|^2\ge c\| (h^{-\frac{1}{2}})^{\frac{2}{3}}w_{k,j}\|^2\;.\label{a.33.311} \end{align} Note that, thanks to the choice of $\nu$ in \eqref{a.32..9}, one has $\frac{(|\ln(h)|^2h^{2\nu})^2}{h}\ll h^{-\frac{2}{3}}$ and $|\ln(h)|^{-2}h^{\frac{1}{r-1}-2\nu}\ll h^{-\frac{2}{3}}$.
Therefore, combining (\ref{a.3336}) and (\ref{a.33.311}), there is a constant $c>0$ so that \begin{align}
\|K_{j,V}v_j\|^2\ge c\sum\limits_{k\ge-1}\|K_{j,V^1_{k,h}}w_{k,j}\|^2\;.\label{a.33.3222} \end{align} Using once more \cite{BNV} (see (\ref{eq44})), there is a constant $c>0$ such that \begin{align}
\|\widetilde{K}_{j,V^1_{k,h}}\widetilde{w}_{k,j}\|^2\ge c\Big(\|O_p\widetilde{w}_{k,j}\|^2+\|\langle D_q \rangle^{\frac{2}{3}} \widetilde{w}_{k,j}\|^2+\|\langle h^{-\frac{1}{2}}|\partial_qV(q_{k,h})|\rangle ^{\frac{2}{3}}\widetilde{w}_{k,j}\|^2\Big)\;.\label{a.33.344} \end{align} As a consequence of (\ref{a.32.666}) and (\ref{a.33.344}), \begin{align}
\|K_{j,V^1_{k,h}}w_{k,j}\|^2\ge c\Big(\|O_pw_{k,j}\|^2+\|\langle h^{\frac{1}{2(r-1)}} D_q \rangle^{\frac{2}{3}} w_{k,j}\|^2+\|\langle h^{-\frac{1}{2}}|\partial_qV(q_{k,h})|\rangle ^{\frac{2}{3}}w_{k,j}\|^2\Big)\;.\label{a.3346} \end{align} By (\ref{a.337}) and (\ref{a.32.24}), \begin{align}
\frac{1}{2}|\partial_qV(q)|\le|\partial_qV(q_{k,h})|\le \frac{3}{2}|\partial_qV(q)|\;,\label{a.3326bis} \end{align} holds for all $q\in\mathrm{supp}\;w_{k,j}$ and any $j$ large. Then, it follows from (\ref{a.3326bis}) and (\ref{a.3346}), \begin{align}
\|K_{j,V^1_{k,h}}w_{k,j}\|^2\ge c\Big(\|O_pw_{k,j}\|^2+\|\langle h^{\frac{1}{2(r-1)}} D_q \rangle^{\frac{2}{3}} w_{k,j}\|^2+\|\langle h^{-\frac{1}{2}}|\partial_qV(q)|\rangle ^{\frac{2}{3}}w_{k,j}\|^2\Big)\;. \end{align}
Now in this case, in view of (\ref{a.337}), one has $|\partial_qV(q)|\ge \epsilon_2$ for all $q\in \mathrm{supp}\; w_{k,j}.$ Hence the above inequality yields \begin{align}
\|K_{j,V^1_{k,h}}w_{k,j}\|^2\ge c\Big(\|O_pw_{k,j}\|^2&+\|\langle h^{\frac{1}{2(r-1)}} D_q \rangle^{\frac{2}{3}} w_{k,j}\|^2+\|(h^{-\frac{1}{2}})^{\frac{2}{3}}w_{k,j}\|^2+\|\langle h^{-\frac{1}{2}}|\partial_qV(q)|\rangle ^{\frac{2}{3}}w_{k,j}\|^2\Big)\;.\label{a.3335} \end{align}
Furthermore, by continuity of $q\mapsto|\mathrm{Hess}\;V(q)|$ on the compact set $\overline{\mathcal{C}},$ one has for all \\$q\in\mathrm{supp}\; w_{k,j}$ and any $j$ large \begin{align}
\frac{1}{4}(h^{-\frac{1}{2}})^{\frac{4}{3 }}\ge c\, H|\mathrm{Hess}\;V(q)|\;. \end{align} Then by the above inequality and (\ref{a.3335}), we get \begin{align}
\|K_{j,V^1_{k,h}}w_{k,j}\|^2\ge c\Big(\|O_pw_{k,j}\|^2&+\|\langle h^{\frac{1}{2(r-1)}} D_q \rangle^{\frac{2}{3}} w_{k,j}\|^2+\|(2+H)^{\frac{1}{2}}w_{k,j}\|^2\nonumber\\&+\|\langle H|\mathrm{Hess}\;V(q)|\rangle^{\frac{1}{2}}w_{k,j}\|^2+\|\langle h^{-\frac{1}{2}}|\partial_qV(q)|\rangle^{\frac{2}{3}}w_{k,j}\|^2\Big)\;,\label{a.33.7.7} \end{align} for every $j\ge j_1$ for some $j_1\ge 1$ large. Now set \begin{multline*}
c_V^{(3)}=\max_{-1\le j\le j_1}
\left[\sup_{q\in \mathrm{supp}\,(\chi_{j}\theta_{k,h})}\left(\langle H|\mathrm{Hess}~V(q)|\rangle+\langle h^{-\frac{1}{2}}\left|\partial_{q}V(q)\right|\rangle^{4/3}\right) \right. \\ \left.
+\frac{(2+H)}{\log(2+H)^2}+\frac{3}{2}\,c\,\frac{(|\ln(h)|^2h^{2\nu})^2}{h}+2|\ln(h)|^{-2}h^{\frac{1}{r-1}-2\nu}\right]\,. \end{multline*} In view of \eqref{eq44}, we deduce the existence of a constant $c>0$ so that \begin{align}
\frac{9}{64}\|K_{j,V_{k,h}^{1}}w_{k,j}\|^2&+(c_V^{(3)}-\frac{3}{2}\,c\,\frac{(|\ln(h)|^2h^{2\nu})^2}{h}-2|\ln(h)|^{-2}h^{\frac{1}{r-1}-2\nu})\|w_{k,j}\|^2\nonumber\\&\geq c(\|O_pw_{k,j}\|^{2}+\|\langle
h^{\frac{1}{2(r-1)}} D_{q}\rangle^{2/3}w_{k,j}\|^{2}
+\|\langle h^{-\frac{1}{2}}|\partial_{q}V(q)|\rangle^{2/3}w_{k,j}\|^{2}
\nonumber\\&\hspace{4cm}+\|\langle
H|\mathrm{Hess}~V(q)|\rangle^{1/2}w_{k,j}\|^{2}
+\|\frac{(2+H)^{\frac{1}{2}}}{\log(2+H)}w_{k,j}\|^{2})\;,\label{a.333334} \end{align} holds for all $-1\le j\le j_1.$
Thus, combining the estimates (\ref{a.33.3222}), (\ref{a.33.7.7}) and (\ref{a.333334}) \begin{align}
\|K_{j,V}v_j\|^2+c_V^{(4)}\|v_j\|^2\ge c\sum\limits_{k\in I(\epsilon_1)}\Big(\|&O_pw_{k,j}\|^2+\|\langle h^{\frac{1}{2(r-1)}} D_q \rangle^{\frac{2}{3}}w_{k,j}\|^2+\|\frac{(2+H)^{\frac{1}{2}}}{\log(2+H)}w_{k,j}\|^{2}\nonumber\\&+\|\langle H|\mathrm{Hess}\;V(q)|\rangle ^{\frac{1}{2}}w_{k,j}\|^2+\|\langle h^{-\frac{1}{2}}|\partial_qV(q)|\rangle ^{\frac{2}{3}}w_{k,j}\|^2\Big)\;,\label{a.33.200} \end{align} holds for all $j\ge-1.$
In conclusion, in view of (\ref{a.33.2000}) and (\ref{a.33.200}), there is a constant $c>0$ such that \begin{align}
\|K_{j,V}v_j\|^2+c_V^{(5)}\|v_j\|^2\ge c\sum\limits_{k\ge-1}\Big(\|&\frac{O_p}{\log(2+H)}w_{k,j}\|^2+\|\frac{\langle h^{\frac{1}{2(r-1)}} D_q \rangle^{\frac{2}{3}}}{\log(2+H)}w_{k,j}\|^2+\|\frac{(2+H)^{\frac{1}{2}}}{\log(2+H)}w_{k,j}\|^2\nonumber\\&+\|\frac{\langle H|\mathrm{Hess}\;V(q)|\rangle ^{\frac{1}{2}}}{\log(2+H)}w_{k,j}\|^2+\|\frac{\langle h^{-\frac{1}{2}}|\partial_qV(q)|\rangle ^{\frac{2}{3}}}{\log(2+H)}w_{k,j}\|^2\Big)\;,\label{a.3333555} \end{align} holds for all $j\ge -1.$
Finally, setting $L(s) =\frac{s+1}{\log(s+1)}$ for all $s \ge 1,$ notice that there is a constant $c > 0$ such that for all $x \ge 1,$ \begin{align} \inf_{t\ge2}\Big(\frac{x}{\log(t)}+ t\Big) \ge \frac{1}{c}L(x)\;.\label{a.33555} \end{align} After setting the quantities \begin{align*}
\Lambda_{1,j}=\frac{O_p}{\log(2+H)}~,&\quad\Lambda_{2,j}=\frac{\langle H|\mathrm{Hess}\;V(q)|\rangle^{1/2}}{\log(2+H)}~,\quad\Lambda_{3,j}=\frac{\langle h^{-\frac{1}{2}}|\partial_q V(q)|\rangle^{\frac23}}{\log(2+H)}~,\\&\quad\Lambda_{4,j}=\frac{2+H}{\log(2+H)^2}\;,\quad\Lambda_{5,j}=\frac{\langle h^{\frac{1}{2(r-1)}}D_q \rangle^{\frac{2}{3}}}{\log(2+H)}~, \end{align*} we get, through the estimate (\ref{a.33555}), for every $j,k\ge -1$, \begin{align*}
\|\Lambda_{1,j}w_{k,j}\|^2_{L^2}+\frac{1}{4}\|\Lambda_{4,j}w_{k,j}\|^2_{L^2}\ge c_1\|L(O_p)w_{k,j}\|^2_{L^2}\;, \end{align*} \begin{align*}
\|\Lambda_{5,j}w_{k,j}\|^2_{L^2}+\frac{1}{4}\|\Lambda_{4,j}w_{k,j}\|^2_{L^2}\ge c_2\|L(\langle h^{\frac{1}{2(r-1)}} D_q \rangle^{\frac{2}{3}})w_{k,j}\|^2_{L^2}\;, \end{align*} \begin{align*}
\|\Lambda_{2,j}w_{k,j}\|^2_{L^2}+\frac{1}{4}\|\Lambda_{4,j}w_{k,j}\|^2_{L^2}\ge c_3\|L(\langle H|\mathrm{Hess}\;V(q)|\rangle^{\frac{1}{2}})w_{k,j}\|^2_{L^2}\;, \end{align*} \begin{align*}
\|\Lambda_{3,j}w_{k,j}\|^2+\frac{1}{4}\|\Lambda_{4,j}w_{k,j}\|^2_{L^2}\ge c_4\|L(\langle h^{-\frac{1}{2}}|\partial_qV(q)|\rangle^{\frac{2}{3}})w_{k,j}\|^2_{L^2}\;. \end{align*} From the above estimates and (\ref{a.3333555}), \begin{align}
\|K_{j,V}v_j\|^2+c_V^{(6)}\|v_j\|^2\ge\; c\sum\limits_{k\ge-1}&\Big(\|L(O_p)w_{k,j}\|^2+\|L(\langle h^{\frac{1}{2(r-1)}} D_q \rangle^{\frac{2}{3}})w_{k,j}\|^2\nonumber\\&+\|L(\langle H|\mathrm{Hess}\;V(q)|\rangle^{\frac{1}{2}})w_{k,j}\|^2+\|L(\langle h^{-\frac{1}{2}}|\partial_qV(q)|\rangle^{\frac{2}{3}})w_{k,j}\|^2\Big)\;. \end{align} Therefore in view of Lemma 2.5 in \cite{Ben} conjugated by the unitary transformation of the change of scale, \begin{align}
\|K_{j,V}v_j\|^2+c_V^{(7)}\|v_j\|^2\ge\; c\Big(\|L(O_p&)v_j\|^2+\|L(\langle h^{\frac{1}{2(r-1)}} D_q \rangle^{\frac{2}{3}})v_j\|^2\nonumber\\&+\|L(\langle H|\mathrm{Hess}\;V(q)|\rangle^{\frac{1}{2}} )v_j\|^2+\|L(\langle h^{-\frac{1}{2}}|\partial_qV(q)|\rangle^{\frac{2}{3}} )v_j\|^2\Big)\;, \end{align} or equivalently \begin{align}
\|K_{V}u_j\|^2+c_V^{(7)}\|u_j\|^2\ge c\Big(\|L(O_p)&u_j\|^2+\|L(\langle D_q \rangle^{\frac{2}{3}})u_j\|^2\nonumber\\&+\|L(\langle \mathrm{Hess}\;V(q)\rangle^{\frac{1}{2}})u_j\|^2+\|L(\langle \partial_qV(q)\rangle^{\frac{2}{3}})u_j\|^2\Big)\;, \end{align} for every $j\ge -1.$
Therefore, combining the last estimate and (\ref{a.33111}), there is a constant $C_V>1$ so that \begin{align}
\|K_{V}u\|^2_{L^2(\mathbb{R}^{2d})}+C_V\|u\|^2_{L^2(\mathbb{R}^{2d})}\ge \frac{1}{C_V}\Big(\|&L(O_p)u\|^2+\|L(\langle D_q \rangle^{\frac{2}{3}})u\|^2\nonumber\\&+\|L(\langle\mathrm{Hess}\;V(q)\rangle^{\frac{1}{2}})u\|^2+\|L(\langle \partial_qV(q)\rangle^{\frac{2}{3}})u\|^2\Big) \end{align} holds for all $u\in\mathcal{C}_0^{\infty}(\mathbb{R}^{2d}).$ \end{proof} \textbf{Acknowledgement} I would like to thank my supervisor Francis Nier for his support and guidance throughout this work.
\end{document}
\begin{document}
\title{Virtually Fibering Right-Angled Coxeter Groups}
\author{Kasia Jankiewicz} \author{Sergey Norin} \author[D.~T.~Wise]{Daniel T. Wise}
\address{Dept. of Math. \& Stats.\\
McGill University \\
Montreal, Quebec, Canada H3A 0B9}
\email{[email protected]}
\email{[email protected]}
\email{[email protected]}
\subjclass[2010]{20F55} \keywords{Coxeter groups, Morse theory, Coherent groups} \date{\today} \thanks{Research supported by NSERC}
\begin{abstract}
We show that certain right-angled Coxeter groups have finite index subgroups that quotient to $\ensuremath{\field{Z}}$
with finitely generated kernels.
The proof uses Bestvina-Brady Morse theory facilitated by combinatorial arguments.
We describe a variety of examples where the plan succeeds or fails. Among the successful examples are the right-angled reflection groups in $\ensuremath{\field{H}}^4$ with fundamental domain the $120$-cell or the $24$-cell.
\end{abstract}
\maketitle
{\small \tableofcontents }
\section{Introduction} A group $G$ \emph{virtually algebraically fibers} if there is a finite index subgroup $G'$ admitting an epimorphism $G'\to\ensuremath{\field{Z}}$ with finitely generated kernel. We do not require any other finiteness properties of the kernel beyond finite generation. It is an interesting observation of Stallings \cite{Stallings62} that when $G$ is the fundamental group of a $3$-manifold $M$ and $G$ virtually algebraically fibers then the kernel is the fundamental group of a surface $S$, and the corresponding finite cover of $M$ is an $S$-bundle over a circle. In fact, with the exception of a limited class of closed graph manifolds, every compact irreducible 3-manifold $M$ with $\chi(M)=0$ does virtually fiber \cite{WiseIsraelHierarchy, AgolGrovesManning2012, LiuGraphManifolds, PrzytyckiWiseMixed}.
The goal of this paper is to obtain virtual algebraic fibering of a right-angled Coxeter group $G$. The group $G$ acts properly and cocompactly on a CAT(0) cube complex $\widetilde X$. The method of this paper is to use a combinatorial argument to choose a finite index torsion-free subgroup $G'$, so that there is a function $X\rightarrow S^1$ on the compact nonpositively curved cube complex $X= G' \backslash \widetilde X$ that lifts to a Bestvina-Brady Morse function $\widetilde X\to \ensuremath{\field{R}}.$
Although the situation can become quite complicated, our method enables us to produce Morse functions even when $G$ is associated to a simplicial graph $\Gamma$ that is quite large.
The initial motivation was to examine whether the Bestvina-Brady theory can be applied successfully for a hyperbolic 3-manifold in the simple setting of a right-angled hyperbolic reflection group. In the uniform case, we find that it can be applied for an infinite family of dual L\"{o}bell graphs (see Lemma~\ref{lem:LobellGraphsWin}); in the non-uniform case, we find that there are examples where the Bestvina-Brady theory cannot be applied to \emph{any} finite index subgroup quotienting $\widetilde X$ (see Example~\ref{exmp:tutte}). This last realization is established by connecting the problem to an old conjecture of Tait about Hamiltonian cycles in graphs, to which a famous counterexample was provided by Tutte.
Overall, our results provide some evidence that Bestvina-Brady Morse theory can be applied towards virtual fibering as an alternate route to Agol's criterion \cite{Agol08}. However, Tutte's example demonstrates that alternate cube complexes would have to be utilized, even for certain cusped hyperbolic manifolds.
In two other highly noteworthy cases, we successfully apply the theory to a uniform and a non-uniform 4-dimensional hyperbolic reflection group: namely, the reflection groups whose fundamental domain is the right-angled hyperbolic 120-cell, and the right-angled hyperbolic 24-cell. These yield the first examples of higher-dimensional hyperbolic manifolds whose fundamental groups virtually algebraically fiber.
Most of the paper is dedicated to examples which begin to substantiate the larger goal of obtaining virtual algebraic fibering of virtually special groups in a broader context than for 3-manifold groups.
{\bf Acknowledgement:} We are grateful to the referee for helpful comments and corrections.
\subsection{Contextualization} Bestvina and Brady \cite{BestvinaBrady97} introduced their highly influential theory partially in response to a flawed attempt by Gromov to produce non-hyperbolic groups that do not contain $\ensuremath{\field{Z}}^2$. Their theory generalizes earlier work of Stallings \cite{Stallings63} and Bieri \cite{BieriBook81} and led to many interesting examples of subgroups with exotic finiteness properties. Partially realizing Gromov's goal, Brady \cite{Brady99} applied this theory to obtain a remarkable example of a word-hyperbolic group with a finitely presented subgroup that is not word-hyperbolic. It remains an open problem whether there can be such a subgroup with a finite $K(\pi,1)$. We suggest a possible approach towards this in Section~\ref{sec:7}. Our approach creates a platform which enables the Bestvina-Brady theory to reach complicated examples. It seems that some exotic examples necessarily arise in a fairly involved setting.
Virtual algebraic fibering was investigated for Bourdon buildings in \cite{WiseRandomMorse} and for Coxeter groups having higher bounded exponents in \cite{JankiewiczWise2015}. In those cases the Bestvina-Brady Morse function was instead produced using the probabilistic method.
\subsection{Guide to reading this text:}
The ``combinatorial game'' that we are playing here is a self-contained problem about graphs described in Section~\ref{sec:legal systems} and illustrated there by two simple examples. Its vocabulary, ``systems of moves'', ``legal states'' and so forth is used heavily throughout the text.
We review the right-angled Coxeter group $G(\Gamma)$ associated to the simplicial graph $\Gamma$ in Section~\ref{sub:racg}. We also describe there the CAT(0) cube complex $\widetilde X$ that $G$ acts on cocompactly, which is the Davis complex of $G$.
We describe a conjecture about algebraic virtual fibering and Euler characteristic for some of these right-angled Coxeter groups.
A brief review of the part of Bestvina-Brady Morse theory that we utilize is described in Section~\ref{sub:bestvina brady}. In Section~\ref{sub:legal system fibers}, we present the focal point of the paper, Theorem~\ref{thm:win fiber}, which explains that a legal system for $\Gamma$ leads to virtual algebraic fibering of $G(\Gamma)$.
Section~\ref{sec:succeeds} exhibits a variety of examples where our method successfully provides algebraic virtual fibering.
Section~\ref{sec:fails} describes examples where our method fails, and indeed any attempt to virtually apply the Bestvina-Brady theory for these examples must fail.
Section~\ref{sec:7} poses a problem aiming to produce exotic subgroups of hyperbolic groups following Brady as discussed above.
In Section~\ref{sec:8} we investigate the applicability of the method to random right-angled Coxeter groups associated to Erd\H{o}s-R\'enyi random graphs.
\section{Systems of moves and legal states}\label{sec:legal systems}
Let $\Gamma=\Gamma(V,E)$ be a simplicial graph with vertices $V$ and edges $E$.
A \emph{state} of $\Gamma$ is a subset $S\subset V$. The state is \emph{legal} if the subgraphs induced by $S$ and by the complement $V-S$ of $S$ in $V$ are both nonempty and connected. A \emph{move at} $v\in V$ is an element $m_v\in 2^V$ with the following properties: \begin{enumerate} \item\label{move prop:1} $v\in m_v$. \item\label{move prop:2} $u\not\in m_v$ if $\{u,v\}\in E$. \end{enumerate}
A \emph{system} of moves is a choice $m_v$ of a move for each $v\in V$. We do not assume that $m_v\neq m_u$ for $v\neq u$.
We identify $\ensuremath{\field{Z}}_2^V$ with $2^V$ in the obvious way where $\emptyset$ is the identity element and multiplication is the symmetric difference.
Accordingly, each state and each move is identified with an element of $\ensuremath{\field{Z}}_2^V$. The set of moves generates a subgroup $M$ of $\ensuremath{\field{Z}}_2^V$.
The system is \emph{legal} if there is an $M$-orbit all of whose elements are legal states. We refer to such an orbit as a \emph{legal orbit}.
A state $S$ is \emph{strongly legal} if $S$ is legal, and moreover, each vertex of $S$ is adjacent to a vertex of $V-S$ and vice-versa. It follows from the definitions that every state in a legal orbit is actually strongly legal.
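These notions are easy to check by computer for the small graphs considered later in the paper. The following Python sketch is purely illustrative and not part of the formal development: the function names are ours, and a graph is assumed to be encoded as a dictionary sending each vertex to its set of neighbours. It tests whether a state is legal and whether the orbit of a state under a system of moves consists entirely of legal states.
\begin{verbatim}
def is_connected(graph, verts):
    # True if the subgraph induced by `verts` is nonempty and connected.
    verts = set(verts)
    if not verts:
        return False
    start = next(iter(verts))
    seen, stack = {start}, [start]
    while stack:
        for u in graph[stack.pop()]:
            if u in verts and u not in seen:
                seen.add(u)
                stack.append(u)
    return seen == verts

def is_legal(graph, state):
    # Legal: the state and its complement induce nonempty connected subgraphs.
    return is_connected(graph, state) and is_connected(graph, set(graph) - set(state))

def orbit(state, moves):
    # Orbit of `state` under the group generated by `moves`,
    # acting by symmetric difference.
    start = frozenset(state)
    seen, stack = {start}, [start]
    while stack:
        s = stack.pop()
        for m in moves:
            t = s.symmetric_difference(m)
            if t not in seen:
                seen.add(t)
                stack.append(t)
    return seen

def has_legal_orbit(graph, state, moves):
    return all(is_legal(graph, s) for s in orbit(state, moves))
\end{verbatim}
For instance, for the four-vertex graph of the second example below, with the two non-adjacent vertices labelled $1$ and $4$ (a labelling of our choosing), one may take
\begin{verbatim}
graph = {1: {2, 3}, 2: {1, 3, 4}, 3: {1, 2, 4}, 4: {2, 3}}
moves = [frozenset({1, 4}), frozenset({2}), frozenset({3})]
has_legal_orbit(graph, {1}, moves)   # True
\end{verbatim}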
\begin{exmp} In many cases, our system of moves arises as follows: There is a partition $V=\sqcup_i V_i$ where each $V_i$ has the property that no two of its vertices are adjacent. The move associated to $v$ is the element $V_i$ containing $v$. We refer to such a system of moves as a \emph{colored system}. \end{exmp}
\begin{exmp} Consider the following graph $\Gamma$ on four vertices and the system of moves given by the partition of $\Gamma$ into three parts:
\begin{align*} &&m_1 = \bwstyle \begin{tikzpicture}[baseline=2.5ex, scale=.75] \draw[white, fill = white,minimum width=4pt] {
(0,0) node {}
(1,1) node {} }; \draw[black,minimum width=2pt] {
(0,1) node {} -- (1,1) node {} -- (1,0) node {} -- (0,0) node {} --(0,1) -- (1,0) }; \draw[black, fill = white,minimum width=4pt] {
(0,0) node {}
(1,1) node {} }; \end{tikzpicture} &&m_2 = \bwstyle\begin{tikzpicture}[baseline=2.5ex, scale=.75] \draw[white, fill = white,minimum width=4pt] {
(0,0) node {}
(1,1) node {} }; \draw[black,minimum width=2pt] {
(0,1) node {} -- (1,1) node {} -- (1,0) node {} -- (0,0) node {} --(0,1) -- (1,0) }; \draw[black, fill = white,minimum width=4pt] {
(0,1) node {} }; \end{tikzpicture} &&m_3 = \bwstyle\begin{tikzpicture}[baseline=2.5ex, scale=.75] \draw[white, fill = white,minimum width=4pt] {
(0,0) node {}
(1,1) node {} }; \draw[black,minimum width=2pt] {
(0,1) node {} -- (1,1) node {} -- (1,0) node {} -- (0,0) node {} --(0,1) -- (1,0) }; \draw[black, fill = white,minimum width=4pt] {
(1,0) node {} }; \end{tikzpicture}&& \end{align*} This is a legal system since there is a legal orbit:
\begin{align*} \bwstyle S = \begin{tikzpicture}[baseline=2.5ex, scale=.75] \draw[black] {
(1,0) to (0,0) to (0,1) }; \draw[WildStrawberry, thick, fill = WildStrawberry, minimum width=3pt] {
(0,0) node {} }; \draw[LimeGreen, thick, minimum width=3pt] {
(1,0) node {} -- (0,1) node {} -- (1,1) node {} -- (1,0) }; \end{tikzpicture}&& \bwstyle m_1S = \begin{tikzpicture}[baseline=2.5ex, scale=.75] \draw[black] {
(1,0) to (1,1) to (0,1) }; \draw[WildStrawberry, thick, fill = WildStrawberry, minimum width=3pt] {
(1,1) node {} }; \draw[LimeGreen, thick, minimum width=3pt] {
(1,0) node {} -- (0,1) node {} -- (0,0) node {} -- (1,0) }; \end{tikzpicture} &&\bwstyle m_2S = \begin{tikzpicture}[baseline=2.5ex, scale=.75] \draw[black] {
(0,0) to (1,0) to (0,1) to (1,1) }; \draw[WildStrawberry, thick, fill = WildStrawberry, minimum width=3pt] {
(0,0) node {} -- (0,1) node {} }; \draw[LimeGreen, thick, minimum width=3pt] {
(1,1) node {} -- (1,0) node {} }; \end{tikzpicture} &&\bwstyle m_3S = \begin{tikzpicture}[baseline=2.5ex, scale=.75] \draw[black] {
(0,0) to (0,1) to (1,0) to (1,1) }; \draw[WildStrawberry, thick, fill = WildStrawberry, minimum width=3pt] {
(0,0) node {} -- (1,0) node {} }; \draw[LimeGreen, thick, minimum width=3pt] {
(1,1) node {} -- (0,1) node {} }; \end{tikzpicture}& \\ \bwstyle m_2m_3S = \begin{tikzpicture}[baseline=2.5ex, scale=.75] \draw[black] {
(1,0) to (1,1) to (0,1) }; \draw[WildStrawberry, thick, minimum width=3pt] {
(0,0) node {} -- (1,0) node {} -- (0,1) node {} -- (0,0) }; \draw[LimeGreen, thick, minimum width=3pt] {
(1,1) node {} }; \end{tikzpicture} &&\bwstyle m_1m_2S = \begin{tikzpicture}[baseline=2.5ex, scale=.75] \draw[black] {
(0,0) to (0,1) to (1,0) to (1,1) }; \draw[WildStrawberry, thick, minimum width=3pt] {
(0,1) node {} -- (1,1) node {} }; \draw[LimeGreen, thick, minimum width=3pt] {
(1,0) node {} -- (0,0) node {} }; \end{tikzpicture} &&\bwstyle m_1m_3S = \begin{tikzpicture}[baseline=2.5ex, scale=.75] \draw[black] {
(0,0) to (1,0) to (0,1) to (1,1) }; \draw[LimeGreen, thick, minimum width=3pt] {
(0,0) node {} -- (0,1) node {} }; \draw[WildStrawberry, thick, minimum width=3pt] {
(1,1) node {} -- (1,0) node {} }; \end{tikzpicture} &&\bwstyle m_1m_2m_3S = \begin{tikzpicture}[baseline=2.5ex, scale=.75] \draw[black] {
(1,0) to (0,0) to (0,1) }; \draw[LimeGreen, thick, minimum width=3pt] {
(0,0) node {} }; \draw[WildStrawberry, thick, minimum width=3pt] {
(1,0) node {} -- (0,1) node {} -- (1,1) node {} -- (1,0) }; \end{tikzpicture} \end{align*} Note that the orbit of $\bwstyle S' = \begin{tikzpicture}[baseline=2.5ex, scale=.75] \draw[black] {
(1,1) to (1,0) to (0,0)
(1,0) to (0,1) }; \draw[WildStrawberry, thick, minimum width=3pt] {
(0,0) node {} -- (0,1) node {} -- (1,1) node {} }; \draw[LimeGreen, thick, minimum width=3pt] {
(1,0) node {} }; \end{tikzpicture}$ contains the non-legal state $m_2 S' = \tikzstyle{every node}=[circle, draw, fill,
inner sep=0pt, ] \begin{tikzpicture}[baseline=2.5ex, scale=.75] \draw[black] {
(1,1) to (1,0) to (0,0) to (0,1) to (1,1) }; \draw[WildStrawberry, thick, minimum width=3pt] {
(0,0) node {}
(1,1) node {} }; \draw[LimeGreen, thick, minimum width=3pt] {
(1,0) node {} -- (0,1) node {} }; \end{tikzpicture}$. \end{exmp}
\section{Coxeter Groups, Curvature, and a Guess} \newcommand{\NotTOCsubsection}[1] {\subsection{#1}} \NotTOCsubsection{Right-angled Coxeter Groups}\label{sub:racg} Let $G(\Gamma)$ be the right-angled Coxeter group associated to $\Gamma$. Let $\widetilde X$ denote the associated CAT(0) cube complex that $G$ acts on properly and cocompactly, which is known as the Davis complex of $G$. We recall that the $1$-skeleton of $\widetilde X$ is isomorphic to the Cayley graph of $G$ after identifying each bigon to an edge, and $n$-cubes are equivariantly added to the $1$-skeleton for each collection of $n$ pairwise commuting generators (see for instance \cite[Chap. 7]{DavisCoxeterBook2008}). \begin{com} Davis defines $X$ in section 1.2 of his book as the cubical complex $P_L$ associated to the simplicial complex $L$. Then he discussed $\tilde P_L$ and its CAT(0) structure. \end{com} Let $\alpha:G\rightarrow \ensuremath{\field{Z}}_2^V$ denote the homomorphism induced by $s\mapsto\{s\}$, so $\alpha$ is merely the abelianization homomorphism. Let $G'=\ker(\alpha)$. Let $X = G'\backslash \widetilde X$.
\NotTOCsubsection{The Charney-Davis Curvature} \begin{defn}[Curvature of $\Gamma$]
For a finite simplicial graph $\Gamma$ we define its \emph{$n$-curvature} $\kappa_n(\Gamma) = \sum_{i=-1}^n \left(-\frac{1}{2}\right)^{i+1}|K_i|$ where $K_i$ is the set of $i$-cliques.
Note that $|K_{-1}|=1$ as there is a unique $(-1)$-simplex.
We are specifically interested in $\kappa_2(\Gamma)=1-\frac{|V|}{2} +\frac{|E|}{4}$. \end{defn} Let $\kappa = \kappa_{\infty}$. By distributing $\frac{1}{2^d}$ of the Euler characteristic $(-1)^d$ concentrated at an open $d$-cube among each of its $2^d$ vertices, we obtain the following tautological formula for a compact cube complex $Y$: \begin{equation}\label{eq:euler curvature} \chi(Y)=\sum_{y\in Y^0} {\ensuremath{\kappa}}(\link(y)) \end{equation}
For $X$ and $\Gamma$ as in Section~\ref{sub:racg} we have:
$\chi(X)= 2^{|V|}{\ensuremath{\kappa}}(\Gamma)$.
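For the graphs below, ${\ensuremath{\kappa}}(\Gamma)$ is easy to evaluate by brute force. The following sketch (ours, in the same dictionary-of-neighbour-sets format as the sketch of Section~\ref{sec:legal systems}; it is exponential in $|V|$ and only intended for small examples) enumerates the cliques of $\Gamma$ and forms the alternating sum.
\begin{verbatim}
from itertools import combinations

def kappa(graph):
    # Sum of (-1/2)^{|C|} over all cliques C of the graph, including the
    # empty clique; for a triangle-free graph this is 1 - |V|/2 + |E|/4.
    verts = sorted(graph)
    total = 1.0  # the empty clique
    for size in range(1, len(verts) + 1):
        for cand in combinations(verts, size):
            if all(v in graph[u] for u, v in combinations(cand, 2)):
                total += (-0.5) ** size
    return total
\end{verbatim}
For the $1$-skeleton of the $3$-cube and for the Wagner graph of Section~\ref{sub:small examples}, both triangle-free with $8$ vertices and $12$ edges, this returns $0$, matching the computations given there.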
\NotTOCsubsection{A Guess} As mentioned in the introduction, a primary aspiration of this paper is to test the possibility of understanding virtual algebraic fibering of special groups by applying Bestvina-Brady theory to finite covers of nonpositively curved cube complexes. This was the original intended approach towards the virtual fibering problem by one of the authors. It was side-stepped by Agol who gave a criterion for virtual fibering that employed Gabai's sutured-manifold technology \cite{Agol08}. Other forays that tested this approach using the probabilistic method were given in \cite{WiseRandomMorse,JankiewiczWise2015}.
We are guided by the following optimistic guess. Although our approach confirms this guess in many cases, we show the approach is not always applicable in Section~\ref{sub:duals}. \begin{guess}\label{guess:naive} Let $G(\Gamma)$ be a finitely generated right-angled Coxeter group. Suppose \begin{com} There might be a way of restating this in terms of properties of $\Gamma$. \end{com} $\kappa_2(\Gamma)\geq 0$. Then either: \begin{enumerate} \item $G$ is virtually abelian; \item $G$ splits over a virtually abelian subgroup; \item $G$ has a non virtually abelian sub-Coxeter group $G'$ that virtually algebraically fibers. \end{enumerate} \end{guess} We are grateful to Mike Davis for drawing attention to the connection with the conjecture in \cite{DavisOkun2001} that the orbifold associated to a right-angled Coxeter group $G(\Gamma)$ has a finite index cover fibering over a circle when the flag complex of $\Gamma$ is an even dimensional sphere.
\section{Legal Systems Provide Virtual Fiberings}
\subsection{Bestvina-Brady Morse Theory}\label{sub:bestvina brady} A \emph{diagonal map} $[0,1]^d\rightarrow S^1$ on a $d$-cube is the restriction of the composition $\ensuremath{\field{R}}^d \rightarrow \ensuremath{\field{R}} \rightarrow S^1$ where the first map is the linear map $(x_1,\dots, x_d)\mapsto \sum_i \pm x_i$ and the second map is the quotient $\ensuremath{\field{R}} / \ensuremath{\field{Z}} =S^1$.
A \emph{diagonal map} on a cube complex is a map $X\rightarrow S^1$ whose restriction to each cube of $X$
is a diagonal map.
Consider a diagonal map $\phi:X\to S^1$, and let $\tilde \phi:\widetilde X \to \ensuremath{\field{R}}$ be a lift of $\phi$.
A $(d-1)$-simplex of $\link(x)$ is \emph{ascending} (resp. \emph{descending}) if the restriction of $\tilde \phi$ to the corresponding $d$-cube has a minimum (resp. maximum) at $x$. The \emph{ascending link} $\link_{\uparrow}(x)$ (resp. \emph{descending link} $\link_{\downarrow}(x)$) is the subcomplex of $\link(x)$ consisting of all ascending (resp. descending) vertices and edges. Bestvina and Brady proved the following in \cite{BestvinaBrady97}: \begin{thm} If each ascending and descending link is connected then the kernel of $\phi_*:\pi_1X\rightarrow \ensuremath{\field{Z}}$ is finitely generated. \end{thm}
\begin{rem}
A diagonal map is determined by (and equivalent to) directing the $1$-cubes of $X^1$ so that opposite $1$-cubes of each $2$-cube agree. From this viewpoint, the ascending and descending links correspond to the induced subcomplexes of outgoing and incoming 1-cubes.
Since connectedness of a simplicial complex is determined by the connectedness of its 1-skeleton we will focus entirely on the 1-skeleton when discussing the ascending and descending links. \end{rem}
\subsection{Virtually Algebraically Fibering $G(\Gamma)$} \label{sub:legal system fibers} \begin{thm}\label{thm:win fiber} Let $\Gamma$ be a finite graph. Suppose there is a system $m:V\rightarrow 2^V$ with a legal orbit. Then there is a diagonal map $\phi:X\rightarrow S^1$ whose ascending and descending links are nonempty and connected. \end{thm} \begin{cor}\label{cor:win f.g.} $G$ has a finite index subgroup $G'$ such that there is an epimorphism $G'\rightarrow \ensuremath{\field{Z}}$ with finitely generated kernel. \end{cor}
\begin{proof}[Proof of Theorem~\ref{thm:win fiber}] Let $S$ be a state such that each element of $\langle m_v : v\in V\rangle S$ is legal. Consider a base 0-cube $\hat x\in X^0$. For each $z\in \ensuremath{\field{Z}}_2^V$ we associate $z\hat x$ with $zS$. This determines a way to direct the 1-cubes at $z\hat x$. Specifically: a 1-cube is outgoing if the corresponding vertex $v$ of $\link(z\hat x)$ is in $zS$ and it is incoming if $v\not\in zS$.
Let $z_1\hat x$ and $z_2\hat x$ be the endpoints of a 1-cube $c$ corresponding to a generator $v$ of $G$, and note that $z_1= m_vz_2$. Consequently, the directions of $c$ induced by $z_1S$ and $z_2S$ agree by Move Property~\eqref{move prop:1}.
Move Property~\eqref{move prop:2} implies that the opposite sides of each 2-cube are directed consistently, so these directions determine a diagonal map $\phi:X\rightarrow S^1$. Finally, the ascending (resp.\ descending) link at $z\hat x$ is the subcomplex of $\link(z\hat x)$ induced by $zS$ (resp.\ by $V-zS$), which is nonempty and connected since the state $zS$ is legal.
\end{proof}
\begin{rem} We have restricted ourselves to Bestvina-Brady Morse functions associated to diagonal maps. These correspond to those for which the minimum and maximum vertices of each square are antipodal. In fact, a slight generalization of a legal system is equivalent to the existence of a diagonal Bestvina-Brady Morse function on a finite cover. Diagonal maps are associated to a consistent direction for edges that are parallel along each ``hyperplane''. There are other possible Morse functions, but they are less natural to investigate in terms of $\Gamma$. \end{rem}
\section{Examples where the method succeeds}\label{sec:succeeds}
\subsection{Some favorite small examples}\label{sub:small examples} \subsubsection{Cube}\label{ex:cube}
Let $\Gamma$ be the $1$-skeleton of the $3$-cube. This is a bipartite graph, with $\girth(\Gamma) =4$ and $\kappa(\Gamma) = 1-\frac{8}{2}+\frac{12}{4}=0$. The system of moves corresponding to the bipartition is legal:
\[ \bwstyle\begin{tikzpicture}[scale=0.75] \draw {
(0,0,0) to (0,1,0) to (1,1,0) to (1,0,0) to (0,0,0) to (0,0,1) to (1,0,1) to (1,1,1) to (0,1,1) to (0,0,1)
(0,1,0) to (0,1,1)
(1,1,0) to (1,1,1)
(1,0,0) to (1,0,1) }; \draw[black, fill = white,minimum width=4pt] {
(0,0,0) node {}
(1,1,0) node {}
(0,1,1) node {}
(1,0,1) node {} }; \draw[black,minimum width=2pt] {
(0,1,0) node {}
(1,0,0) node {}
(0,0,1) node {}
(1,1,1) node {} }; \end{tikzpicture} \hspace{7pt} \begin{tikzpicture}[scale=0.75] \draw {
(0,0,0) to (0,1,0) to (1,1,0) to (1,0,0) to (0,0,0) to (0,0,1) to (1,0,1) to (1,1,1) to (0,1,1) to (0,0,1)
(0,1,0) to (0,1,1)
(1,1,0) to (1,1,1)
(1,0,0) to (1,0,1) }; \draw[black, fill = white,minimum width=4pt] {
(0,1,0) node {}
(1,0,0) node {}
(0,0,1) node {}
(1,1,1) node {} }; \draw[black,minimum width=2pt] {
(0,0,0) node {}
(1,1,0) node {}
(0,1,1) node {}
(1,0,1) node {} }; \end{tikzpicture} \] This is an example of a legal orbit:
\[ \tikzstyle{every node}=[circle, draw, fill,
inner sep=0pt, minimum width=3pt] \begin{tikzpicture}[scale = 0.75] \draw[WildStrawberry,thick] {
(0,0,0) node {} -- (0,1,0) node {} -- (1,1,0) node {} -- (1,1,1) node {} }; \draw[LimeGreen,thick] {
(1,0,0) node {} -- (1,0,1) node {} -- (0,0,1) node {} -- (0, 1,1) node {} }; \draw[gray] {
(0,0,1) -- (0,0,0) -- (1,0,0) -- (1,1,0)
(0,1,0) -- (0,1,1) -- (1,1,1) -- (1,0,1) }; \end{tikzpicture} \hspace{7pt} \begin{tikzpicture}[scale = 0.75] \draw[WildStrawberry,thick] {
(0,1,0) node {} -- (0,1,1) node {} -- (1,1,1) node {} -- (1,0,1) node {} }; \draw[LimeGreen,thick] {
(0,0,1) node {} -- (0,0,0) node {} -- (1,0,0) node {} -- (1, 1,0) node {} }; \draw[gray] {
(0,0,0) -- (0,1,0) -- (1,1,0) -- (1,1,1)
(1,0,0) -- (1,0,1) -- (0,0,1) -- (0,1,1) }; \end{tikzpicture} \hspace{7pt} \begin{tikzpicture}[scale = 0.75] \draw[LimeGreen,thick] {
(0,1,0) node {} -- (0,1,1) node {} -- (1,1,1) node {} -- (1,0,1) node {} }; \draw[WildStrawberry,thick] {
(0,0,1) node {} -- (0,0,0) node {} -- (1,0,0) node {} -- (1, 1,0) node {}
(0,0,0) -- (0,1,0) -- (1,1,0) -- (1,1,1)
(1,0,0) -- (1,0,1) -- (0,0,1) -- (0,1,1) }; \end{tikzpicture} \hspace{7pt} \begin{tikzpicture}[scale = 0.75] \draw[LimeGreen,thick] {
(0,0,0) node {} -- (0,1,0) node {} -- (1,1,0) node {} -- (1,1,1) node {} }; \draw[WildStrawberry,thick] {
(1,0,0) node {} -- (1,0,1) node {} -- (0,0,1) node {} -- (0,1,1) node {} }; \draw[gray] {
(0,0,1) -- (0,0,0) -- (1,0,0) -- (1,1,0)
(0,1,0) -- (0,1,1) -- (1,1,1) -- (1,0,1) }; \end{tikzpicture} \]
\subsubsection{Wagner graph} The Wagner graph $\Gamma$ is the following $3$-valent graph on $8$ vertices:
\[\tikzstyle{every node}=[circle, draw, fill,
inner sep=1pt, ] \begin{tikzpicture}[scale = 0.75] \draw[black] {
(180-22.5:1) -- (135-22.5:1) -- (90-22.5:1) -- (45-22.5:1) -- (0-22.5:1) -- (315-22.5:1) -- (270-22.5:1) -- (225-22.5:1) -- (180-22.5:1)
(180-22.5:1) -- (0-22.5:1)
(90-22.5:1) -- (270-22.5:1) }; \draw[white, fill = white, minimum width = 4pt, inner sep = 0pt] {
(0-22.5:0) node {} }; \draw[black] {
(45-22.5:1) -- (225-22.5:1) }; \draw[white, fill = white, minimum width = 2pt, inner sep = 0pt] {
(0-22.5:0) node {} }; \draw[black] {
(135-22.5:1) -- (315-22.5:1) }; \draw[black] {
(180-22.5:1) node [label=left:$1$] {}
(135-22.5:1) node [label=left:$2$] {}
(90-22.5:1) node [label=right:$3$] {}
(45-22.5:1) node [label=right:$4$] {}
(0-22.5:1) node [label=right:$5$] {}
(315-22.5:1) node [label=right:$6$] {}
(270-22.5:1) node [label=left:$7$] {}
(225-22.5:1) node [label=left:$8$] {} }; \end{tikzpicture}\] The girth of the Wagner graph is $4$ and the curvature $\kappa(\Gamma) = 1 - \frac{8}{2} + \frac{12}{4} = 0$. The graph $\Gamma$ is noteworthy for having a legal system but not having any legal colored system. The following system of moves is legal:
\begin{align*} \bwstyle m_1 =m_4 = \begin{tikzpicture}[baseline = 0, scale = 0.75] \draw[black] {
(180-22.5:1) -- (135-22.5:1) -- (90-22.5:1) -- (45-22.5:1) -- (0-22.5:1) -- (315-22.5:1) -- (270-22.5:1) -- (225-22.5:1) -- (180-22.5:1)
(180-22.5:1) -- (0-22.5:1)
(90-22.5:1) -- (270-22.5:1) }; \draw[white, fill = white, minimum width = 4pt, inner sep = 0pt] {
(0-22.5:0) node {} }; \draw[black] {
(45-22.5:1) -- (225-22.5:1) }; \draw[white, fill = white, minimum width = 2pt, inner sep = 0pt] {
(0-22.5:0) node {} }; \draw[black] {
(135-22.5:1) -- (315-22.5:1) }; \draw[black, fill = white, minimum width = 4pt] {
(180-22.5:1) node {}
(45-22.5:1) node {}
(315-22.5:1) node {}
(270-22.5:1) node {} }; \end{tikzpicture} && \bwstyle m_5 = m_8 = \begin{tikzpicture}[baseline=0, scale = 0.75] \draw[black] {
(180-22.5:1) -- (135-22.5:1) -- (90-22.5:1) -- (45-22.5:1) -- (0-22.5:1) -- (315-22.5:1) -- (270-22.5:1) -- (225-22.5:1) -- (180-22.5:1)
(180-22.5:1) -- (0-22.5:1)
(90-22.5:1) -- (270-22.5:1) }; \draw[white, fill = white, minimum width = 4pt, inner sep = 0pt] {
(0-22.5:0) node {} }; \draw[black] {
(45-22.5:1) -- (225-22.5:1) }; \draw[white, fill = white, minimum width = 2pt, inner sep = 0pt] {
(0-22.5:0) node {} }; \draw[black] {
(135-22.5:1) -- (315-22.5:1) }; \draw[black, fill = white, minimum width = 4pt] {
(135-22.5:1) node {}
(90-22.5:1) node {}
(0-22.5:1) node {}
(225-22.5:1) node {} }; \end{tikzpicture} && \bwstyle m_2 = m_7 = \begin{tikzpicture}[baseline=0, scale = 0.75] \draw[black] {
(180-22.5:1) -- (135-22.5:1) -- (90-22.5:1) -- (45-22.5:1) -- (0-22.5:1) -- (315-22.5:1) -- (270-22.5:1) -- (225-22.5:1) -- (180-22.5:1)
(180-22.5:1) -- (0-22.5:1)
(90-22.5:1) -- (270-22.5:1) }; \draw[white, fill = white, minimum width = 4pt, inner sep = 0pt] {
(0-22.5:0) node {} }; \draw[black] {
(45-22.5:1) -- (225-22.5:1) }; \draw[white, fill = white, minimum width = 2pt, inner sep = 0pt] {
(0-22.5:0) node {} }; \draw[black] {
(135-22.5:1) -- (315-22.5:1) }; \draw[black, fill = white, minimum width = 4pt] {
(135-22.5:1) node {}
(45-22.5:1) node {}
(0-22.5:1) node {}
(270-22.5:1) node {} }; \end{tikzpicture} && \bwstyle m_3 = m_6 = \begin{tikzpicture}[baseline=0, scale = 0.75] \draw[black] {
(180-22.5:1) -- (135-22.5:1) -- (90-22.5:1) -- (45-22.5:1) -- (0-22.5:1) -- (315-22.5:1) -- (270-22.5:1) -- (225-22.5:1) -- (180-22.5:1)
(180-22.5:1) -- (0-22.5:1)
(90-22.5:1) -- (270-22.5:1) }; \draw[white, fill = white, minimum width = 4pt, inner sep = 0pt] {
(0-22.5:0) node {} }; \draw[black] {
(45-22.5:1) -- (225-22.5:1) }; \draw[white, fill = white, minimum width = 2pt, inner sep = 0pt] {
(0-22.5:0) node {} }; \draw[black] {
(135-22.5:1) -- (315-22.5:1) }; \draw[black, fill = white, minimum width = 4pt] {
(90-22.5:1) node {}
(180-22.5:1) node {}
(225-22.5:1) node {}
(315-22.5:1) node {} }; \end{tikzpicture} \end{align*} The following figure indicates a legal orbit. \begin{align*} \bwstyle \begin{tikzpicture}[baseline = 0, scale=0.75] \draw[black] {
(135-22.5:1) -- (90-22.5:1) -- (45-22.5:1) -- (0-22.5:1)
(270-22.5:1) -- (225-22.5:1)
(180-22.5:1) -- (0-22.5:1) }; \draw[WildStrawberry, thick, minimum width = 4pt,] {
(135-22.5:1) node {} -- (180-22.5:1) node {} -- (225-22.5:1) node {}
(45-22.5:1) node {} }; \draw[LimeGreen, thick, minimum width = 4pt] {
(90-22.5:1) node {} -- (270-22.5:1) node {} -- (315-22.5:1) node {} -- (0-22.5:1) node {} }; \draw[white, fill = white, minimum width = 4pt, inner sep = 0pt] {
(0-22.5:0) node {} }; \draw[WildStrawberry] {
(45-22.5:1) -- (225-22.5:1) }; \draw[white, fill = white, minimum width = 2pt, inner sep = 0pt] {
(0-22.5:0) node {} }; \draw[black] {
(135-22.5:1) -- (315-22.5:1) }; \end{tikzpicture} && \bwstyle \begin{tikzpicture}[baseline = 0, scale=0.75] \draw[black] {
(180-22.5:1) -- (135-22.5:1) -- (90-22.5:1)
(0-22.5:1) -- (315-22.5:1)
(225-22.5:1) -- (180-22.5:1)
(90-22.5:1) -- (270-22.5:1) }; \draw[WildStrawberry, thick, minimum width = 4pt,] {
(225-22.5:1) node {} -- (270-22.5:1) node {} -- (315-22.5:1) node {}
(135-22.5:1) node {} }; \draw[LimeGreen, thick, minimum width = 4pt] {
(180-22.5:1) node {} -- (0-22.5:1) node {} -- (45-22.5:1) node {} -- (90-22.5:1) node {} }; \draw[white, fill = white, minimum width = 4pt, inner sep = 0pt] {
(0-22.5:0) node {} }; \draw[black] {
(45-22.5:1) -- (225-22.5:1) }; \draw[white, fill = white, minimum width = 2pt, inner sep = 0pt] {
(0-22.5:0) node {} }; \draw[WildStrawberry] {
(135-22.5:1) -- (315-22.5:1) }; \end{tikzpicture} && \bwstyle \begin{tikzpicture}[baseline = 0, scale=0.75] \draw[black] {
(180-22.5:1) -- (135-22.5:1) -- (90-22.5:1)
(0-22.5:1) -- (315-22.5:1)
(225-22.5:1) -- (180-22.5:1)
(90-22.5:1) -- (270-22.5:1) }; \draw[LimeGreen, thick, minimum width = 4pt,] {
(225-22.5:1) node {} -- (270-22.5:1) node {} -- (315-22.5:1) node {}
(135-22.5:1) node {} }; \draw[WildStrawberry, thick, minimum width = 4pt] {
(180-22.5:1) node {} -- (0-22.5:1) node {} -- (45-22.5:1) node {} -- (90-22.5:1) node {} }; \draw[white, fill = white, minimum width = 4pt, inner sep = 0pt] {
(0-22.5:0) node {} }; \draw[black] {
(45-22.5:1) -- (225-22.5:1) }; \draw[white, fill = white, minimum width = 2pt, inner sep = 0pt] {
(0-22.5:0) node {} }; \draw[LimeGreen] {
(135-22.5:1) -- (315-22.5:1) }; \end{tikzpicture} && \bwstyle \begin{tikzpicture}[baseline = 0, scale=0.75] \draw[black] {
(180-22.5:1) -- (135-22.5:1)
(45-22.5:1) --
(0-22.5:1) -- (315-22.5:1) --
(270-22.5:1)
(90-22.5:1) -- (270-22.5:1) }; \draw[WildStrawberry, thick, minimum width = 4pt,] {
(0-22.5:1) node {} -- (180-22.5:1) node {} -- (225-22.5:1) node {} -- (270-22.5:1) node {} }; \draw[LimeGreen, thick, minimum width = 4pt] {
(45-22.5:1) node {} -- (90-22.5:1) node {} -- (135-22.5:1) node {}
(315-22.5:1) node {} }; \draw[white, fill = white, minimum width = 4pt, inner sep = 0pt] {
(0-22.5:0) node {} }; \draw[black] {
(45-22.5:1) -- (225-22.5:1) }; \draw[white, fill = white, minimum width = 2pt, inner sep = 0pt] {
(0-22.5:0) node {} }; \draw[LimeGreen] {
(135-22.5:1) -- (315-22.5:1) }; \end{tikzpicture} && \bwstyle \begin{tikzpicture}[baseline = 0, scale=0.75] \draw[black] {
(180-22.5:1) -- (135-22.5:1)
(45-22.5:1) --
(0-22.5:1) -- (315-22.5:1) --
(270-22.5:1)
(90-22.5:1) -- (270-22.5:1) }; \draw[LimeGreen, thick, minimum width = 4pt,] {
(0-22.5:1) node {} -- (180-22.5:1) node {} -- (225-22.5:1) node {} -- (270-22.5:1) node {} }; \draw[WildStrawberry, thick, minimum width = 4pt] {
(45-22.5:1) node {} -- (90-22.5:1) node {} -- (135-22.5:1) node {}
(315-22.5:1) node {} }; \draw[white, fill = white, minimum width = 4pt, inner sep = 0pt] {
(0-22.5:0) node {} }; \draw[black] {
(45-22.5:1) -- (225-22.5:1) }; \draw[white, fill = white, minimum width = 2pt, inner sep = 0pt] {
(0-22.5:0) node {} }; \draw[WildStrawberry] {
(135-22.5:1) -- (315-22.5:1) }; \end{tikzpicture} && \bwstyle \begin{tikzpicture}[baseline = 0, scale=0.75] \draw[black] {
(135-22.5:1) -- (90-22.5:1) -- (45-22.5:1) -- (0-22.5:1)
(270-22.5:1) -- (225-22.5:1)
(180-22.5:1) -- (0-22.5:1) }; \draw[LimeGreen, thick, minimum width = 4pt,] {
(135-22.5:1) node {} -- (180-22.5:1) node {} -- (225-22.5:1) node {}
(45-22.5:1) node {} }; \draw[WildStrawberry, thick, minimum width = 4pt] {
(90-22.5:1) node {} -- (270-22.5:1) node {} -- (315-22.5:1) node {} -- (0-22.5:1) node {} }; \draw[white, fill = white, minimum width = 4pt, inner sep = 0pt] {
(0-22.5:0) node {} }; \draw[LimeGreen] {
(45-22.5:1) -- (225-22.5:1) }; \draw[white, fill = white, minimum width = 2pt, inner sep = 0pt] {
(0-22.5:0) node {} }; \draw[black] {
(135-22.5:1) -- (315-22.5:1) }; \end{tikzpicture} && \bwstyle \begin{tikzpicture}[baseline = 0, scale=0.75] \draw[black] {
(90-22.5:1) -- (45-22.5:1)
(315-22.5:1) --
(270-22.5:1) -- (225-22.5:1)
-- (180-22.5:1)
(180-22.5:1) -- (0-22.5:1) }; \draw[WildStrawberry, thick, minimum width = 4pt,] {
(315-22.5:1) node {} -- (0-22.5:1) node {} -- (45-22.5:1) node {}
(225-22.5:1) node {} }; \draw[LimeGreen, thick, minimum width = 4pt] {
(270-22.5:1) node {} -- (90-22.5:1) node {} -- (135-22.5:1) node {} -- (180-22.5:1) node {} }; \draw[white, fill = white, minimum width = 4pt, inner sep = 0pt] {
(0-22.5:0) node {} }; \draw[WildStrawberry] {
(45-22.5:1) -- (225-22.5:1) }; \draw[white, fill = white, minimum width = 2pt, inner sep = 0pt] {
(0-22.5:0) node {} }; \draw[black] {
(135-22.5:1) -- (315-22.5:1) }; \end{tikzpicture} && \bwstyle \begin{tikzpicture}[baseline = 0, scale=0.75] \draw[black] {
(90-22.5:1) -- (45-22.5:1) (315-22.5:1) --
(270-22.5:1) -- (225-22.5:1)
-- (180-22.5:1)
(180-22.5:1) -- (0-22.5:1) }; \draw[LimeGreen, thick, minimum width = 4pt,] {
(315-22.5:1) node {} -- (0-22.5:1) node {} -- (45-22.5:1) node {}
(225-22.5:1) node {} }; \draw[WildStrawberry, thick, minimum width = 4pt] {
(270-22.5:1) node {} -- (90-22.5:1) node {} -- (135-22.5:1) node {} -- (180-22.5:1) node {} }; \draw[white, fill = white, minimum width = 4pt, inner sep = 0pt] {
(0-22.5:0) node {} }; \draw[LimeGreen] {
(45-22.5:1) -- (225-22.5:1) }; \draw[white, fill = white, minimum width = 2pt, inner sep = 0pt] {
(0-22.5:0) node {} }; \draw[black] {
(135-22.5:1) -- (315-22.5:1) }; \end{tikzpicture} \end{align*} \begin{com} Possibly unify conventions for generators and states in pictures... What do others think? \end{com}
\subsection{Some high density bipartite examples} Consider a bipartite graph $\Gamma(V,E)$ with bipartite structure $V=A\sqcup B$. In many cases where there are sufficiently many edges, there exists a legal system all of whose moves are either $A$ or $B$.
The following easy criterion often applies in high density situations; for instance, it applies to the graph $\Gamma$ in Figure~\ref{fig:TBWS}.
\begin{figure}
\caption{A bipartite graph $\Gamma$ with a legal system illustrating
Lemma~\ref{lem:TBWS}.}
\label{fig:TBWS}
\end{figure}
\begin{lem}\label{lem:TBWS} Consider a bipartite graph $\Gamma$ whose vertex set has the bipartite structure $A\sqcup B$ where $A = A_1\sqcup A_2$ and $B = B_1\sqcup B_2$. If the following conditions hold, then $A_i\sqcup B_j$ is a legal state for each $i,j$, and so $\{A,B\}$ is a legal system.
\begin{enumerate} \item\label{TBWS:1} For each $i,j$, and for each pair of vertices $a,a'\in A_i$, there is a path from $a$ to $a'$ in the subgraph induced by $A_i\sqcup B_j$. \item\label{TBWS:2} For each $i,j$ each element of $B_j$ is adjacent to an element of $A_i$.
\end{enumerate}
\end{lem} \begin{proof} The orbit of $A_1 \cup B_1$ is $\{A_i\cup B_j \ : \ i,j \in \{1,2\} \ \}$. For each state $A_i\cup B_j$, the vertices of $A_i$ lie in a single component by~\eqref{TBWS:1} and each vertex of $B_j$ is joined to this component by~\eqref{TBWS:2}. \end{proof}
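The two conditions of Lemma~\ref{lem:TBWS} are easy to check by computer for a candidate splitting. The following Python sketch (using the networkx package; the splitting of the $3$-cube below is our own illustrative choice, not the graph of Figure~\ref{fig:TBWS}) verifies Conditions~\eqref{TBWS:1} and~\eqref{TBWS:2} for a proposed partition $A=A_1\sqcup A_2$, $B=B_1\sqcup B_2$.
\begin{verbatim}
import itertools
import networkx as nx

def tbws_conditions_hold(G, A1, A2, B1, B2):
    """Check Conditions (1) and (2) of Lemma TBWS for the splitting
    A = A1 u A2, B = B1 u B2 of a bipartite graph G."""
    for Ai, Bj in itertools.product((A1, A2), (B1, B2)):
        H = G.subgraph(set(Ai) | set(Bj))
        # Condition (1): all of Ai lies in one component of the
        # subgraph induced by Ai u Bj.
        if not any(set(Ai) <= comp for comp in nx.connected_components(H)):
            return False
        # Condition (2): every vertex of Bj has a neighbour in Ai.
        if not all(any(a in G[b] for a in Ai) for b in Bj):
            return False
    return True

# Illustrative splitting of the 3-cube (our own choice).
G = nx.hypercube_graph(3)                  # vertices are 0/1-triples
A1, A2 = [(0,0,0), (0,1,1)], [(1,0,1), (1,1,0)]
B1, B2 = [(0,0,1), (1,0,0)], [(0,1,0), (1,1,1)]
print(tbws_conditions_hold(G, A1, A2, B1, B2))   # True
\end{verbatim}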
\begin{prob}\label{prob:4connected wins or subwins} Is there a 4-connected finite graph $\Gamma$ with $\girth(\Gamma)\geq 4$ and ${\ensuremath{\kappa}}(\Gamma)\geq 0$ but no 4-connected (or even 3-connected) subgraph $\Gamma'$ with a legal system?
\end{prob} Figure~\ref{fig:Tutte} depicts Tutte's graph, which is 3-connected but contains no 3-connected subgraph with a legal system. We are unable to make more than a vague connection to $n$-connectivity here, but refer to Remark~\ref{rem:connectivity}, Theorem~\ref{thm:DWtoHam}, and Conjecture~\ref{conj:reformed barnette}.
\subsection{$24$-cell}
The $24$-cell is one of six convex regular 4-polytopes. Its boundary is composed of $24$ octahedra with $6$ meeting at each vertex, and $3$ at each edge. It can be realized as a right-angled ideal hyperbolic polytope. Let $G$ be the Coxeter group of reflections in the 3-dimensional faces of the $24$-cell. Since the $24$-cell is self-dual $G = G(\Gamma)$ where $\Gamma$ is the $1$-skeleton of the $24$-cell.
\subsubsection{The $1$-skeleton of the $24$-cell} The graph $\Gamma$ is obtained from the $1$-skeleton of the $4$-cube as follows: for each of the $3$-cubes in the $4$-cube we add an \emph{extra} vertex and join it to all vertices of its ``surrounding'' $3$-cube.
See the left graph in Figure~\ref{fig:24graph}. The resulting graph has $24$ vertices: $16$ of the $4$-cube and $8$ extra vertices; and has $96$ edges: $32$ in the $4$-cube and $8$ edges for each extra vertex. \subsubsection{Legal system in the $24$-cell}
Consider the system of moves for $\Gamma$ corresponding to the $3$-coloring of $\Gamma$ where the $4$-cube is colored using a bipartite structure and
all extra vertices are given a third color. Note that extra vertices are pairwise at distance $\geq 2$.
The state $S_b$ of the $4$-cube graph, presented on the right in Figure~\ref{fig:24graph}, has the property that its restriction to any $3$-cube is a legal state of the $3$-cube as in Example~\ref{ex:cube}. All states obtained from $S_b$ by the moves corresponding to the bipartite structure of the $4$-cube graph also have this property and remain legal. Thus the bipartition of the $4$-cube graph is a legal system (A similar argument works for an $n$-cube). We extend $S_b$ to a state $S_o$ of $\Gamma$. Since each extra vertex $v$ in $\Gamma$ is adjacent to all vertices of a $3$-cube, for each state $S$ in the orbit of $S_b$, the vertex $v$ is joined to both a vertex in $S$ and a vertex in $V-S$. Let $S_o$ be the union of $S_b$ and any subset of the set of extra vertices. Then $S_o$ provides a state for $\Gamma$ whose orbit is legal. \begin{figure}
\caption{The $1$-skeleton of the $24$-cell has $8$ green vertices corresponding to the $3$-dimensional faces of the $4$-cube. We illustrate only one such vertex here. On the right there is a legal state for the $1$-skeleton of the $4$-cube. All elements of its orbit are legal, where the system of moves corresponds to the bipartition of the $1$-skeleton of the $4$-cube.}
\label{fig:24graph}
\end{figure}
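The vertex and edge counts above are easily confirmed by computer. The following Python sketch (using networkx; the labelling of the extra vertices is our own bookkeeping) builds the graph from the $1$-skeleton of the $4$-cube exactly as described and reports $24$ vertices and $96$ edges.
\begin{verbatim}
import itertools
import networkx as nx

G = nx.hypercube_graph(4)      # 1-skeleton of the 4-cube: 16 vertices, 32 edges

# Add an extra vertex for each 3-cube of the 4-cube (fix one coordinate
# to 0 or 1) and join it to the 8 vertices of that 3-cube.
for axis in range(4):
    for value in (0, 1):
        extra = ('extra', axis, value)
        for v in itertools.product((0, 1), repeat=4):
            if v[axis] == value:
                G.add_edge(extra, v)

print(G.number_of_nodes(), G.number_of_edges())   # 24 96
\end{verbatim}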
\subsection{Icosahedron}\label{sub:icosahedron} Consider the $6$-coloring of the icosahedron on the left in Figure~\ref{fig:icosahedron}.
For each vertex $v\in V$ there exists exactly one color $k$ such that no neighbor of $v$ has it. In particular, the set of vertices with color $k$ or with the color of $v$ is a move at $v$; e.g.\ the pink and purple vertices form a move at the top pink vertex. We will show that the system $M$ of moves of this form is legal. This system generates an index~$2$ subgroup of the group $M'$ generated by all single-color sets. Consider the collection $\mathcal S$ of states having exactly one vertex of each color. The collection $\mathcal S$ is a single orbit under the $M'$-action. Not all such states are legal, but all nonlegal states of this form lie in a single orbit under the $M$-action. To see this, note that for any nonlegal $S\in\mathcal S$ the graph induced by either $S$ or $V-S$ consists of two disjoint triangles as in the picture below. Indeed, a connected component of this graph cannot be a single vertex or a pair of vertices connected by an edge, since every vertex and every such pair has two neighbors of the same color. Any two nonlegal states differ on an even number of colors.
\begin{figure}
\caption{The move at $v$ consists of the two vertices with the same color as $v$ together with the two vertices whose color does not appear among the neighbors of $v$.
Let $S_o$ be a state consisting of exactly one vertex of each color. There are two orbits of such states. One orbit is legal and one of its states is illustrated in the middle. On the right there is a non-legal state. We use a different convention here than in previous examples: vertices of the same color correspond to moves, and circled vertices form a state.}
\label{fig:icosahedron}
\end{figure}
\subsection{$600$-cell} The $120$-cell is a notable convex regular $4$-polytope that admits an embedding in $\ensuremath{\field{H}}^4$ as a right-angled polytope. Identifying opposite $3$-dimensional faces of the $120$-cell gives a compact hyperbolic $4$-manifold \cite{Davis85}. The dual of the $120$-cell is the $600$-cell whose boundary is a flag complex that is built of $600$ tetrahedra and is characterized by the property that the link of each vertex is an icosahedron. The reflection group associated to the $120$-cell generated by reflections along its $3$-dimensional faces is the Coxeter group whose defining graph is the $1$-skeleton of the $600$-cell. We refer to \cite{wiki:600cell} for a description of the 600-cell, and note that the construction below was motivated by the discussion there.
\begin{figure}
\caption{The $1$-skeleton of the $600$-cell is on the left. Red and blue vertices are even and odd respectively. One even hovering vertex and two consecutive odd hovering vertices are illustrated. On the right there is a legal system. Moves correspond to all vertices labelled with the same number. There are also moves corresponding to the hovering vertices which are not illustrated in the figure.}
\label{fig:600graph}
\end{figure}
\subsubsection{The 1-skeleton of the $600$-cell} We refer to Figure~\ref{fig:600graph}. Begin with a torus represented by a $10\times 10$ grid. Note that it has a bipartite structure and we refer to its two classes of vertices as \emph{even} and \emph{odd}. (We regard the northwest most vertex in the grid as even.)
We now add the following edges which join odd-to-odd and even-to-even: For each of the 100 squares of the torus, we add both diagonals as edges. For each even vertex we add an edge joining it to the two vertices that are two edges north and south of it in its column. For each odd vertex we add an edge joining it to the two vertices that are two edges east and west of it in its row.
We use the terms \emph{row-cycle} and \emph{column-cycle} to refer to the corresponding cycles within the torus.
We now add 20 further \emph{hovering} vertices. Ten of these are \emph{odd} and ten are \emph{even}. Each even hovering vertex corresponds to a consecutive pair of column-cycles and we attach it to all ten even vertices in those column-cycles. We add an edge between consecutive even hovering vertices.
Similarly, each odd hovering vertex corresponds to a consecutive pair of row-cycles and we attach it to all odd vertices in those row-cycles. We add an edge between consecutive odd hovering vertices.
The resulting graph $\Gamma$ has 120 vertices, and 720 edges.
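These counts can be confirmed by computer. The following Python sketch (using networkx; the encoding of vertices is our own, and the parity convention makes the northwest vertex $(0,0)$ even) assembles $\Gamma$ exactly as described and reports $120$ vertices and $720$ edges.
\begin{verbatim}
import networkx as nx

N = 10
G = nx.Graph()

def even(r, c):                    # (0, 0) is the northwest vertex and is even
    return (r + c) % 2 == 0

for r in range(N):
    for c in range(N):
        G.add_edge((r, c), ((r + 1) % N, c))             # torus grid edges
        G.add_edge((r, c), (r, (c + 1) % N))
        G.add_edge((r, c), ((r + 1) % N, (c + 1) % N))   # both diagonals of each square
        G.add_edge(((r + 1) % N, c), (r, (c + 1) % N))
        # adding the "two steps north" edge from every vertex yields both of its skip edges
        if even(r, c):
            G.add_edge((r, c), ((r + 2) % N, c))         # even: skip edges in its column
        else:
            G.add_edge((r, c), (r, (c + 2) % N))         # odd: skip edges in its row

for i in range(N):                                       # 20 hovering vertices
    eh, oh = ('even-hover', i), ('odd-hover', i)
    G.add_edge(eh, ('even-hover', (i + 1) % N))          # consecutive hovering vertices
    G.add_edge(oh, ('odd-hover', (i + 1) % N))
    for j in range(N):
        for s in (i, (i + 1) % N):
            if even(j, s):
                G.add_edge(eh, (j, s))                   # even vertices of column-cycles i, i+1
            if not even(s, j):
                G.add_edge(oh, (s, j))                   # odd vertices of row-cycles i, i+1

print(G.number_of_nodes(), G.number_of_edges())          # 120 720
\end{verbatim}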
Observe that the automorphism group of $\Gamma$ acts transitively on the non-hovering vertices and also on the hovering vertices. It therefore suffices to examine a vertex $v$ in each case and confirm that the graph spanned by the vertices adjacent to $v$ is an icosahedral graph as illustrated in Figure~\ref{fig:link in 600}. A blue hovering vertex of $\Gamma$ is adjacent to two horizontal row-cycles of blue vertices as well as two blue hovering vertices. Its link corresponds to an icosahedron obtained by subdividing a 5-sided antiprism: Its horizontal row-cycles yield 5-cycles in the link that are connected by an alternating band of triangles, and each hovering vertex in the link is adjacent to all the vertices of one of these 5-cycles. A blue non-hovering vertex is adjacent to four red non-hovering vertices, six blue non-hovering vertices, and two blue hovering vertices. Its link corresponds to an icosahedron obtained by subdividing a cuboctahedron.
\begin{figure}
\caption{Links of a blue hovering vertex and non-hovering vertex.}
\label{fig:link in 600}
\end{figure}
\subsubsection{A legal system for the 600-cell 1-skeleton}
We color the grid-vertices of $\Gamma$ as on the right in Figure~\ref{fig:600graph}: the vertices in the westmost column are colored $0,1,\ldots, 9$ starting from the top of the grid. The subsequent columns are colored so that even numbers decrease traveling southeast, and odd numbers increase traveling northeast.
The cycle of even hovering vertices is colored $0',2', 4',6', 8', 0',2', 4',6',8'$. The cycle of odd hovering vertices is colored $1', 3', 5', 7', 9', 1', 3', 5', 7', 9'$.
The start state $S_o$ consists of all vertices in alternate row-cycles of the torus, together with a choice of alternate vertices within each hovering cycle.
Note that there are exactly 20 moves, corresponding to the distinct colors.
\subsubsection{Proof that it is a legal system}
Consider a state $S$ in the orbit of $S_o$.
As the reader can verify, a feature of the colored moves is that for consecutive column-cycles, their odd vertices in $S$ are \emph{complementary} in the sense that for each color, exactly one of the two vertices with that color lies in $S$. The analogous statement holds for even vertices and row-cycles.
There is a \emph{degenerate} odd case where alternate column-cycles have all their odd vertices in $S$.
Similarly there is a \emph{degenerate} even case where alternate row-cycles have all their even vertices in $S$.
There are now four cases to consider according to whether odd or even are degenerate.
We first observe that in a doubly-degenerate case, (e.g. $S_o$), the grid-edges connect each row-cycle to each column-cycle, and so $S$ is connected since in addition, each hovering vertex in $S$ is automatically connected at one of its two sides.
We now examine a non-degenerate case where
some but not all odd vertices in a column-cycle are in $S$. We will show that the odd vertices in $S$ are connected in this case: the consecutive complementary color property shows that every odd vertex of $S$ is connected to a ``traversing-cycle'' in the odd part of $S$ which contains a vertex in each column-cycle. Indeed, consider a northmost odd vertex of $S$ within an $S$-component of a column-cycle; as the odd vertex north of it is not in $S$, the vertex southeast of it is in $S$, and it is thus connected to an odd vertex in the next column eastward. Iterating, it is connected to a traversing-cycle intersecting each column-cycle.
Each odd hovering $S$-vertex is connected to each traversing-cycle so we are done.
An analogous argument shows that for a non-degenerate even case, where some but not all even vertices in a row-cycle are in $S$, there is a traversing-cycle consisting of even vertices of $S$ that contains a vertex from each row-cycle, and contains a vertex from each column-cycle.
If the odd vertices of $S$ are degenerate but the even vertices are not, then the traversing-cycle in the even part of $S$ is connected to the odd part of $S$. The case where the even part is degenerate and the odd part is not is handled analogously.
We now consider the case where neither odd nor even is degenerate: Consider a column-cycle with a maximal number of odd vertices in $S$. If it has four then these vertices connect to all even vertices and so the odd part is connected to the even part. If it has three then these must be consecutive, since otherwise separate groups of two and one will be adjacent to all even vertices in that column. The even and odd parts are thus connected unless there is only one even vertex of $S$ within that column. But then the complementary property assures that there are four even vertices in $S$ within the next column, and these are connected to all odd vertices within that column.
\begin{rem} The group $G(\Gamma)$ is a cocompact right-angled reflection group in $\ensuremath{\field{H}}^4$ that was studied by Bowditch and Mess \cite{BowditchMess94}, who showed that $G(\Gamma)$ is \emph{incoherent} in the sense that it has a f.g. subgroup that is not f.p. We refer also to the work of M.~Kapovich, Potyagailo, and Vinberg \cite{KapovichPotyagailoVinberg2008}, as well as \cite{Kapovich2013}. \end{rem}
\subsection{Brinkmann graph}
The Brinkmann graph is a very symmetric graph with $21$ vertices and $42$ edges, and with $\girth(\Gamma)=5$. Thus ${\ensuremath{\kappa}}(\Gamma)=1$. We describe a legal system for $\Gamma$ associated to a coloring in Figure~\ref{fig:brinkmann}.
The Brinkmann graph obviously has subgraphs $\Gamma'$ with ${\ensuremath{\kappa}}(\Gamma')=0$, but we have not checked if any such subgraph has a legal system.
\begin{figure}
\caption{The legal system of Brinkmann graph has $4$ moves each corresponding to vertices in one of $4$ colors in the figure. An orbit of a state under these moves consists of $16$ states. In the figure we illustrate $8$ states of a legal orbit, the $8$ other states are ``complementary'' to the illustrated states, i.e. they are obtained from the illustrated states by exchanging circled and uncircled vertices. Thus to ensure that the orbit is legal it suffices to check only $8$ illustrated states.}
\label{fig:brinkmann}
\label{fig:Brinkmann}
\end{figure}
\subsection{Blowup of a cube}
Let $\bar \Gamma$ be the 1-skeleton of a $d$-cube. We will produce a graph $\Gamma$ and a map $\rho: \Gamma\rightarrow \bar \Gamma$ with the following properties: Fix a number $n$; it will later be convenient to assume that $n$ is a large prime. For each $\bar v\in \bar \Gamma^0$, its preimage $\rho^{-1}(\bar v)$ consists of a set of vertices $\{v_i : 1\leq i \leq n\}$.
We distinguish one of the $d$ parallelism classes of edges of $\bar \Gamma$.
Let $\bar e$ be a distinguished edge of $\bar \Gamma$ whose endpoints are $\bar u, \bar v$. Its preimage in $\Gamma$ consists of a set of $2n-1$ edges each of which maps to $\bar e$, such that their union with $u_1,\ldots, u_n, v_1,\ldots, v_n$ is a tree. (In practice $\rho^{-1}(\bar e)$ is obtained from a $2n$-cycle by removing an edge.)
Let $\bar e$ be a non-distinguished edge of $\bar \Gamma$ whose endpoints are $\bar u,\bar v$. Its preimage consists of $n$ edges mapping to $\bar e$, such that the correspondence between their endpoints yields a bijection $\{u_1,\ldots, u_n\} \leftrightarrow \{v_1,\ldots, v_n\}$.
We now explain that for strategic choices above we have $\girth(\Gamma)\geq 6$. First observe that $\Gamma$ is bipartite, and also simplicial by construction, and so it suffices to exclude $4$-cycles. A cycle in $\Gamma$ projects to a combinatorial path in $\bar \Gamma$. Observe that a $4$-cycle cannot project to a single edge, since the preimage of a distinguished edge is a tree, and the preimage of a non-distinguished edge is a disjoint union of edges. Likewise, the $4$-cycle cannot project to the union of two edges, since at least one of these is non-distinguished, and so consecutive edges of the $4$-cycle mapping to this non-distinguished edge would not share an endpoint. The remaining possibility is that the $4$-cycle maps to a $4$-cycle of $\bar \Gamma$. It thus suffices to exclude 4-cycles over the 4-cycles of $\bar \Gamma$.
When $n$ is large, one can choose the trees and the bijections at random for each $\bar e$, and the conclusion almost always holds. Moreover, $\girth(\Gamma)$ can be ensured to be arbitrarily large for large $n$ in this case.
Alternatively, for each distinguished edge $\bar e$ with vertices $\bar u,\bar v$ we use a ``linear tree'' $u_1 - v_1 - u_2 - v_2 - \cdots - v_{n-1} - u_n - v_n$. We associate a natural number $ k_e < \frac{n}{2}$ to each non-distinguished edge $\bar e$, and use the bijection $u_i \mapsto v_{i+k_e}$ with subscripts taken modulo $n$. We then require that for edges $\bar e, \bar e'$ that are parallel in a square of $\bar \Gamma$, the associated numbers $k_e, k_{e'}$ satisfy $|k_e- k_{e'}|\geq 3$.
We note that when $d>3$, the graph $\Gamma$ has ${\ensuremath{\kappa}}(\Gamma)>0$. However, when $d=3$, we have ${\ensuremath{\kappa}}(\Gamma)=0$. Indeed, $\Gamma$ then has $8n$ vertices, and $8n+4(2n-1)$ edges. Thus ${\ensuremath{\kappa}}(\Gamma)= 1- \frac{8n}{2}+\frac{8n+4(2n-1)}{4} = 1-4n + 2n+ (2n-1) =0$.
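The count above is easy to confirm by computer. The following Python sketch (using networkx; the shifts are chosen arbitrarily here, since they do not affect the counts, whereas arranging $\girth(\Gamma)\geq 6$ requires the constraints described above) builds the blowup for $d=3$ and reports $8n$ vertices, $8n+4(2n-1)$ edges, and $1-\frac{V}{2}+\frac{E}{4}=0$.
\begin{verbatim}
import networkx as nx

def blowup_3cube(n, shift):
    """Blow up the 1-skeleton of the 3-cube; edges changing coordinate 0
    form the distinguished parallelism class."""
    cube = [(a, b, c) for a in (0, 1) for b in (0, 1) for c in (0, 1)]
    G = nx.Graph()
    for u in cube:
        for axis in range(3):
            v = list(u); v[axis] ^= 1; v = tuple(v)
            if u >= v:
                continue                       # treat each cube edge once
            if axis == 0:                      # distinguished: linear tree with 2n-1 edges
                for i in range(n):
                    G.add_edge((u, i), (v, i))
                    if i + 1 < n:
                        G.add_edge((v, i), (u, i + 1))
            else:                              # non-distinguished: bijection u_i -> v_{i+k}
                k = shift(u, v)
                for i in range(n):
                    G.add_edge((u, i), (v, (i + k) % n))
    return G

n = 11
G = blowup_3cube(n, shift=lambda u, v: 0)      # arbitrary shifts: counting only
V, E = G.number_of_nodes(), G.number_of_edges()
print(V, E, 1 - V / 2 + E / 4)                 # 88 172 0.0
\end{verbatim}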
A legal system for $\Gamma$ is the ``preimage'' of a legal system for $\bar \Gamma$ as described in Example~\ref{ex:cube}: \begin{com} this works for arbitrary cubes - and even cubes minus some edges...\end{com} Its moves correspond to a 2-coloring of $\Gamma$ and its states correspond to preimages of states in the legal system for $\bar \Gamma$. To see that each such state $S$ is connected, let $\bar S$ be the corresponding state in $\bar \Gamma$.
For each vertex $v_i$ in $S$ there is a path in $\bar S$ from $\bar v$ to a distinguished edge $\bar e$ contained in $\bar S$.
This path lifts to a path in $S$ starting at $v_i$ and ending in the preimage of $\bar e$, which is a connected tree lying in $S$. Since $\bar S$ is connected, any two such preimages of distinguished edges are likewise joined by lifted paths, and hence $S$ is connected.
\begin{rem} It appears that the key point to generalizing this ``blow-up'' construction of a graph $\bar \Gamma$ with a legal system, is that there is a distinguished edge in each state. \end{rem}
\subsection{L\"{o}bell graphs and right-angled 3-dimensional hyperbolic reflection groups}
\begin{defn}[L\"{o}bell graphs] The \emph{$n$-antiprism} is the polyhedron whose boundary consists of two disjoint $n$-gon faces joined by an annulus that is subdivided into $2n$ triangles, so that there is one $n$-gon and three triangles at each vertex. The \emph{dual L\"{o}bell graph} of degree $n\geq 4$ is the 1-skeleton of the polyhedron obtained from an $n$-antiprism by centrally subdividing each $n$-gon into $n$~triangles. For example, for $n=5$ we obtain an icosahedron. The L\"{o}bell graphs themselves are formed from pentagons and $n$-gons, and early examples beyond the icosahedron occurred in L\"{o}bells construction of certain hyperbolic 3-manifolds. They were further studied by Vesnin in \cite{Vesnin87}. \end{defn}
\begin{lem}\label{lem:LobellGraphsWin} Every dual L\"{o}bell graph of degree $n \geq 5$ has a legal system. \end{lem} \begin{proof} If $n=5$ the dual L\"obell graph is the icosahedron and has a legal system described in Section~\ref{sub:icosahedron}. Suppose $n \geq 6$. Consider the following coloring of the dual L\"obell graph with $n+1$ colors having two vertices of each color: \begin{itemize} \item the vertices $\{v_*,w_*\}$ that are dual to the $n$-gonal faces have color $c_*$, \item let $v_1,\dots, v_n$ and $w_1,\dots , w_n$ denote consecutively the vertices adjacent to $v_*$ and $w_*$. And ``shift'' the subscripts so that $w_i$ is adjacent to $v_{i-2}$ and $v_{i-1}$ for each $i$ (mod $n$). Each pair $\{v_i,w_i\}$ has color $c_i$. See Figure~\ref{fig:lobell coloring}. \end{itemize} There is a move corresponding to $c_*$. There is a move corresponding to $c_i$ for each $i \notin \{1, \lceil \frac{n}{2} \rceil\}$. There is also a move corresponding to the four vertices colored by $c_1$ or $c_{\lceil\frac{n}{2}\rceil}$. This last move is valid as $c_1$-vertices are not adjacent to $c_{\lceil\frac{n}{2}\rceil}$-vertices for $n\geq 6$ because of our subscript shift. Let $S$ be a state containing exactly one $c_*$-vertex, exactly one $c_i$-vertex for each $i\notin\{1, {\lceil\frac{n}{2}\rceil}\}$, and also containing either $v_{1}$ and $w_{\lceil\frac{n}{2}\rceil}$ or $v_{\lceil\frac{n}{2}\rceil}$ and $w_{1}$.
We now verify that $S$ and the complement $\bar S$ of $S$ are connected. Let $V=\{v_1,\dots, v_n\}$ and $W=\{w_1,\dots , w_n\}$. It suffices to show that each vertex $v\in V\cap S$ lies in the same connected component as some $w\in W\cap S$, and that each $w\in W\cap S$ lies in the same connected component as some $v\in V\cap S$. Indeed, since one $c_*$-vertex is in $S$, either all the vertices in $V\cap S$ or all the vertices in $W\cap S$ are adjacent to it, and it follows that $S$ is connected. Consider any maximal string $\underline v = \{v_i,v_{i+1}, \dots, v_k\}\subset V$ (with indices mod $n$) that is in $S$. We have $\underline v\subsetneq V$, since only one of $v_1,v_{\lceil\frac n 2 \rceil}$ is in $S$. Note that $w_{k+1}$ is in $S$, since $v_{k+1}\in \bar S$ by maximality of $\underline v$. Since $w_{k+1}$ is adjacent to $v_k$ we conclude that each vertex of $\underline v$ lies in the connected component of some vertex in $W\cap S$. Similarly consider a maximal string $\underline w = \{w_i,\dots w_k\}\subset W$ that is in $S$ and now $v_{i-1}$ is in $S$ by maximality of $\underline w$. Since $v_{i-1}$ is adjacent to $w_{i}$, we conclude that each vertex of $W\cap S$ lies in a connected component of some vertex in $V\cap S$. We verify that $\bar S$ is connected in the exact same way. Let $S_0=\{v_*, w_1, v_2, v_3,\ldots, v_n\}$. Then each element of the orbit of $S_0$ using the above moves is legal and has legal complement. \end{proof}
\begin{figure}
\caption{L\"{o}bell graphs of degree $6$,$7$,$8$ and $9$. In each graph there is one vertex ``at infinity'' not illustrated in the figure.}
\label{fig:lobell}
\end{figure}
\begin{figure}
\caption{At left is the coloring of the degree $6$ dual L\"{o}bell graph from the proof of Lemma~\ref{lem:LobellGraphsWin}. The vertex ``at infinity'' is paired with the central vertex. At right is an example of a legal state whose orbit is legal.}
\label{fig:lobell coloring}
\label{fig:lobell system}
\end{figure}
We recall the following result of Pogorelov characterizing 3-dimensional compact right-angled hyperbolic polyhedra \cite{Pogorelov67}, which we restate in terms of Coxeter groups:
\begin{thm}\label{thm:pogorelov} For a finite simplicial graph $\Gamma$, the group $G(\Gamma)$ is a cocompact 3-dimensional right-angled hyperbolic reflection group if and only if $\Gamma$ embeds in $S^2$ so that: \begin{enumerate} \item each region is a triangle; \item each cycle of length $\leq4$ either bounds a triangle or two triangles meeting at an edge; \item $\Gamma$ is not a triangle or a 4-clique. \end{enumerate} \end{thm}
It follows that each vertex has valence $\geq 5$ since a low valence vertex would lead to a contradiction. For instance, a valence~$4$ vertex would be surrounded by a 4-cycle, which would then have to bound a pair of triangles meeting along an edge, in which case there would be a triangle not bounding a region.
We refer to graphs satisfying the conditions in Theorem~\ref{thm:pogorelov} as \emph{Pogorelov graphs}. Inoue gave a recursive construction of all (duals of) Pogorelov graphs \cite{Inoue08}. Expressing his result in terms of duals, the base case consists of the dual L\"{o}bell graphs. His structural inductions are of two types: \begin{enumerate} \item
Combine two Pogorelov graphs by removing a valence~$n$ vertex from each and then gluing together along an $n$-antiprism as on the top of Figure~\ref{fig:Inoue}.
\item
Expand a vertex of a Pogorelov graph to an edge whose vertices each have valence $\geq 5$ as in the bottom of Figure~\ref{fig:Inoue}. \end{enumerate}
\begin{figure}
\caption{The two Inoue structural induction moves.}
\label{fig:Inoue}
\end{figure}
It is tempting to try to apply our method to every cocompact right-angled reflection group in
$\ensuremath{\field{H}}^3$ by affirmatively solving the following: \begin{prob}\label{prob:Pogorelov Duals Win} Does every Pogorelov graph have a legal system? \end{prob}
As we have verified that the dual L\"{o}bell graphs have legal systems in Lemma~\ref{lem:LobellGraphsWin}, in view of Inoue's result, it is conceivable that one might be able to approach Problem~\ref{prob:Pogorelov Duals Win} by structural induction. However, we caution that the corresponding problem for finite covolume reflection groups has a negative solution, as indicated by Theorem~\ref{thm:DWtoHam} and the examples discussed immediately afterwards.
\subsection{Characterizing finite volume 3-dimensional right-angled hyperbolic reflection groups}
Let $\Gamma\subset S^2$ be a graph embedded in the sphere. We use the term \emph{quad} for a full 4-cycle in $\Gamma$ that bounds a region of $S^2$.
\begin{prop}[Cusped 3-dimensional hyperbolic reflection groups]\label{prop:right angled reflection} Let $\Gamma\subset S^2$ be a simplicial connected graph embedded in the sphere. Let $G=G(\Gamma)$ be the associated right-angled Coxeter group. Then $G$ is isomorphic to a finite co-volume cusped hyperbolic reflection group precisely if the following conditions hold: \begin{enumerate} \item\label{cuspitem:1} Each region is bounded by a quad or triangle (the quads generate the cusps). \item\label{cuspitem:2} The intersection of distinct quads is either empty, a vertex, or an edge. \item\label{cuspitem:3} Any cycle of length~$\leq 4$ bounds a region or the union of two triangular regions. \item\label{cuspitem:4} $\Gamma$ is not a triangle, $4$-clique, $4$-cycle, or cone on a $4$-cycle. \end{enumerate} \end{prop}
A simple example is where $\Gamma$ is the 1-skeleton of a cube as discussed in Example~\ref{ex:cube}. It is dual to a right-angled hyperbolic ideal octahedron, which is the fundamental domain of the reflection group. Another simple example is the $1$-skeleton of a triangular prism. \begin{com}it has an easy legal system with three moves: each move has one vertex on each triangle. a start state is a triangle. one notes that the union of two vertices of different colors contains the third vertex in its 1-nbhood, and that if they aren't connected then this third vertex connects them... \end{com}
Note that $G$ is virtually abelian in each of the cases listed in Condition~\eqref{cuspitem:4}. In the presence of Conditions~\eqref{cuspitem:1}-\eqref{cuspitem:3}, Condition~\eqref{cuspitem:4} is equivalent to requiring that $\Gamma$ has at least 6 vertices. To establish this equivalence the reader can consider all 32 connected simplicial graphs with at most $5$ vertices. \begin{com} Most cases are excluded by having a region with at least 5 sides or 3 triangles that share an edge. \end{com}
Note that the final two conditions together imply that no vertex is surrounded by four or fewer triangles.
This is a highly simplified version of known results, and we are grateful to Igor Rivin for tracing these references for us. A classical reference is Andreev's paper \cite{Andreev70}, which was corrected by Hodgson \cite{Hodgson90} using \cite{RivinPhD}. Another proof was given by Roeder--Hubbard--Dunbar \cite{RoederHubbardDunbar2007} along Andreev's lines. Finally, there is an orbifold proof by Thurston in \cite{ThurstonNotes}. We instead give an explanation in the context of geometric group theory that also depends on hyperbolization:
\begin{proof} We now show that if $\Gamma$ satisfies Conditions~\eqref{cuspitem:1} - \eqref{cuspitem:4} then $G$ is hyperbolic relative to its quads. Suppose $\Gamma$ contained a full subgraph of the form $B\star C = \{b_1,b_2\}\star \{c_1,c_2,c_3\}$ where $c_1,c_3$ and $b_1,b_2$ are nonadjacent. Then $b_1c_1b_2c_3$ is a full 4-cycle and is therefore a quad by Condition~\eqref{cuspitem:3}. Hence neither
$b_1c_1b_2c_2$ nor $b_1c_2b_2c_3$ is a quad by Condition~\eqref{cuspitem:2}. Hence $c_1,c_2$ and $c_2,c_3$ are adjacent, and hence $\Gamma$ is the $c_2$-cone on the quad $b_1c_1b_2c_3$, which violates Condition~\eqref{cuspitem:4}.
Observe that each full 4-cycle is a quad by Condition~\eqref{cuspitem:3}. It follows from Corollary~\ref{cor:rel hyp fin vol} that $G$ is hyperbolic relative to its quads.
We now verify that $G$ is a finite volume hyperbolic reflection group. Regard $G$ as acting by reflections on a simply-connected 3-manifold $\widetilde M$ with fundamental domain a ball whose boundary is the cell structure on $S^2$ that is dual to $\Gamma$. The stabilizer of each vertex is the right-angled reflection group associated to a region of $\Gamma$ and note that by Condition~\eqref{cuspitem:1} this is either $(\ensuremath{\field{Z}}_2*\ensuremath{\field{Z}}_2)^2$ or $\ensuremath{\field{Z}}_2^3$. Let $G'$ be a torsion-free finite index subgroup. The quotient $G'\backslash \widetilde M$ has the property that the link of each quad vertex is a torus, and we can thus remove a finite neighborhood from each such vertex to obtain a manifold $\bar M$ whose boundary is a union of tori. Note that $G$ does not split along a finite group since $\Gamma$ is connected and has no internal $3$-cycles by Condition~\eqref{cuspitem:3}. \begin{com} If it split along a finite group, then it would have a finite index subgroup that split as a nontrivial free product. There would then be a sphere or compression disk corresponding to the splitting, and this would contradict that the 3-manifold is aspherical or that the boundary of a core is incompressible.\end{com} Note that $\bar M$ is not a ball or a thickened torus by the excluded degenerate possibilities for $\Gamma$. Our peripheral structure consists entirely of the conjugates of subgroups commensurable with the $G(Q)$ where $Q$ varies over the quads, and thus the JSJ decomposition for $\bar M$ cannot have a Seifert-fibered piece, and must consist of a single piece for otherwise $\pi_1\bar M$ would split over $\ensuremath{\field{Z}}^2$. We conclude that $M$ is a finite-volume hyperbolic manifold by Thurston's geometrization.
For the converse, assume that $G$ is a finite volume hyperbolic reflection group. Condition~\eqref{cuspitem:1} holds since if there were some region with more than 4 sides,
then $\chi(G')<0$ where $G'$ is a torsion-free finite index subgroup. Condition~\eqref{cuspitem:2} holds since otherwise there would be maximal virtually $\ensuremath{\field{Z}}^2$ subgroups with infinite intersection. Condition~\eqref{cuspitem:3} holds since otherwise $G$ would split over a virtually $\ensuremath{\field{Z}}^2$ or a finite group associated to a full 4-cycle or 3-cycle that does not bound a region. \begin{com}reference: finite volume hyperbolic reflection group does not split along a virtually $\ensuremath{\field{Z}}^2$ or a finite group \end{com}
Condition~\eqref{cuspitem:4} holds since $G$ contains a rank two free subgroup but each of the degenerate cases is virtually abelian.
\end{proof}
The \emph{join} $A\star B$ of two nonempty graphs is the graph consisting of $A\sqcup B$ together with an edge from each vertex of $A$ to each vertex of $B$. A \emph{clique} is a complete graph $K(n)$ where $n\geq 0$.
The following is a special case of a characterization given by Caprace \cite[Thm~A${}'$]{Caprace09,Caprace13}: \begin{thm}\label{thm:caprace} Let $\mathcal T$ be a collection of subgraphs of $\Gamma$. Then $G(\Gamma)$ is hyperbolic relative to $\{ G( K ) \ : \ K \in \mathcal T \}$ if and only if the following conditions are satisfied: \begin{enumerate}[(a)] \item\label{caprace:RH1} For all non-clique subgraphs $J_1,J_2$ whose join $J_1 \star J_2$ is a full subgraph of $\Gamma$ there exists $K\in\mathcal T$ such that $J_1\star J_2\subset K$. \item\label{caprace:RH2} For all $K_1,K_2\in\mathcal T$ with $K_1\neq K_2$, the intersection $K_1\cap K_2$ is a clique. \item\label{caprace:erratum} For all $K\in \mathcal T$ and nonadjacent vertices $v_1,v_2\in K$,
if $v$ is adjacent to both $v_1$ and $v_2$ then $v\in K$. \end{enumerate} \end{thm} \begin{proof} The three conditions of Theorem~\ref{thm:caprace} are straightforward simplifications of the corresponding three conditions in \cite[Thm~A']{Caprace13} except that Condition~\eqref{caprace:RH1} is stated with a weaker assumption that $J_1,J_2$ are neither joins nor cliques. Thus Theorem~\ref{thm:caprace}
is implied by Caprace's original statement.
To see that our variant implies Caprace's original version we argue as follows: First note that if $J_1$ is a join, then it can be expressed as a join $A_1\star B_1$
where $A_1$ is not a join or a clique. If $J_2$ is not a join then we let $A_2=J_2$,
and otherwise we let $J_2=A_2\star B_2$ where $A_2$ is not a join or a clique.
Condition~\eqref{caprace:RH1} implies that $A_1\star A_2$ lies in some $K\in \mathcal T$.
Let $x_i,y_i$ be non-adjacent vertices of $A_i$. For each vertex $v_i$ of $B_i$,
Condition~\eqref{caprace:erratum} applied to $v_i$ with $x_i,y_i\in K$ shows that $v_i\in K$. Hence $J_1\star J_2\subset K$, as required. \end{proof}
\begin{cor}\label{cor:rel hyp fin vol} Let $\Gamma$ be a simplicial graph.
The right-angled Coxeter group $G=G(\Gamma)$ is hyperbolic relative to
$\{G(Q)\ : \ Q \text{ is a full 4-cycle}\}$
if and only if $\Gamma$ has no full subgraph $B\star C$ where $|V(B)|=2$ and $|V(C)|=3$ and $B,C$ are not cliques.
\end{cor}
\begin{proof} Suppose $\Gamma$ has a full subgraph $B\star C$ as above. Let $b_1,b_2$ be the vertices of $B$. Let $c_1,c_2,c_3$ be the vertices of $C$ and assume $c_1,c_3$ are nonadjacent. Then $\{b_1,c_1,b_2,c_3\}$ are the vertices of a full 4-cycle $Q$. Thus $c_2\in Q$ by Condition~\eqref{caprace:erratum} of Theorem~\ref{thm:caprace}, which is impossible.
Let $\mathcal T$ consist of all full $4$-cycles.
We now verify Condition~\eqref{caprace:RH1}. Suppose $J_1,J_2$ are non-clique subgraphs whose join is a full subgraph of $\Gamma$. Since $J_1,J_2$ are not cliques, they each have at least two vertices, and neither consists of a single edge. If they both have two vertices then $J_1\star J_2$ is a full 4-cycle. Otherwise, one contains a full subgraph $A$ consisting of two non adjacent vertices,
and the other contains a full subgraph $B$ consisting of three vertices that is not a clique.
Then $A\star B$ is a full subgraph of $\Gamma$ which contradicts our hypothesis.
If one of $K_1,K_2$ in Condition~\eqref{caprace:RH2} is a pair of non adjacent vertices then $|V(K_1\cap K_2)|\leq1$. Suppose $K_1,K_2$ are both $4$-cycles.
We will show that $K_1\cap K_2$ does not contain a pair of nonadjacent vertices, and so $K_1\cap K_2$ consists of a single edge or vertex, and is thus a clique.
Suppose $K_1\cap K_2$ contains a pair $u,v$ of nonadjacent vertices. Let $B$ be the full graph spanned by $u,v$, and let $C$ be the graph spanned by the vertices of $K_1,K_2$ not in $B$. Note that $|V(C)|\geq 3$ since otherwise $K_1=K_2$. Since $C$ contains a pair of opposite vertices of each $K_i$ we see that $C$ is not a clique.
Since $u,v$ are adjacent to each vertex of $C$ we see that $B\star C$ is a full subgraph of $\Gamma$ which contradicts our hypothesis.
We now verify Condition~\eqref{caprace:erratum}. Suppose that $v$ is adjacent to vertices $v_1,v_2$, and that $v_1,v_2$ are in some $K\in \mathcal T$ and $v_1,v_2$ are not adjacent. Then either $v\in K$, or letting $v_3,v_4$ be the other vertices of $K$, and letting $B,C$ be the full subgraphs whose vertices are $\{v_1,v_2\}$ and $\{v,v_3,v_4\}$ we see that $B\star C$ is a full subgraph of $\Gamma$, which is impossible.
\end{proof}
\section{Examples where the method fails}\label{sec:fails} In Sections~\ref{sub:coning off} and \ref{sub:duals} we describe examples of graphs $\Gamma$ such that ${\ensuremath{\kappa}}(\Gamma)\geq 0$ but such that there is no legal system, because $\Gamma$ does not have a single legal state. We emphasize that not only does our method fail for these examples, but Bestvina-Brady Morse theory cannot be successfully applied to any finite cover of $X$.
The first class of examples are easy bipartite graphs described in Section~\ref{sub:coning off}. In Section~\ref{sub:duals} we describe a second class of examples that are more complicated but have the advantage that they are planar. In Section~\ref{sub:negative euler} we show that if $\Gamma$ has a legal system then ${\ensuremath{\kappa}}_2(\Gamma) =1-v/2+e/4 \geq 0$.
\subsection{Bipartite Cones}\label{sub:coning off} Consider the bipartite graph $\Lambda_{(m,k)}$ whose vertices are $N_m\sqcup C(m,k)$ where $N_m = \{1,\ldots, m\}$ and $C(m,k)$ consists of all $k$-element subsets of $N_m$. An edge joins a vertex $r\in N_m$ to each element of $C(m,k)$ containing $r$.
\begin{prop} For $m\geq 2k-1$ and $k\geq 2$ the graph $\Lambda_{(m,k)}$ does not have a legal state. \end{prop} \begin{proof} Consider a state $S$. Since $m\geq 2k-1$, by the pigeon-hole principle either $S$ or $N_m-S$ contains at least $k$ elements of $N_m$. Without loss of generality assume the former, and fix $k$ such elements. Let $v$ be the vertex of $C(m,k)$ that is joined to these $k$ elements of $N_m$. Either $S=V-\{v\}$ or $m_vS=V-\{v\}$, since otherwise either $S$ or $m_vS$ is not legal, as $v$ is separated from the rest by its neighbors. Without loss of generality assume the former. For each $u\in C(m,k)-\{v\}$ we have $m_u = \{u,v\}$, since otherwise $m_uS$ is not legal as $u,v$ lie in different components. Finally, note that for any two distinct $u_1,u_2\in C(m,k)-\{v\}$ the state $m_{u_1}m_{u_2}S = \{u_1,u_2,v\}$ is not legal. \end{proof}
We note that $\Lambda_{(m,k)}$ is connected for $m\geq k$ and $\girth(\Lambda_{(m,k)})=4$ for $m\geq 4$.
We have ${\ensuremath{\kappa}}(\Lambda_{(m,k)})>0$ for $k\geq 3$ and $m\geq 2k-1$ since
${\ensuremath{\kappa}}(\Lambda_{(m,k)})= 1- (m + {m \choose k})/2 + (k{m \choose k})/4 = 1-m/2 + (k/4-1/2){m \choose k}$.
The graph $\Lambda_{(4,3)}$ is the 1-skeleton of the 3-cube, and ${\ensuremath{\kappa}}(\Lambda_{(4,3)})=0$. However, as above, $\Lambda_{(5,3)}$ provides a very small example of a graph with no legal state but with ${\ensuremath{\kappa}}>0$.
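These values are easily reproduced by computer. The following Python sketch (using networkx; the encoding of vertices is our own) builds $\Lambda_{(m,k)}$ and evaluates $1-\frac{V}{2}+\frac{E}{4}$, recovering the values $0$ and $1$ for $\Lambda_{(4,3)}$ and $\Lambda_{(5,3)}$ quoted above.
\begin{verbatim}
from itertools import combinations
import networkx as nx

def Lambda(m, k):
    """The bipartite cone Lambda_{(m,k)}."""
    G = nx.Graph()
    G.add_nodes_from(range(1, m + 1))               # N_m
    for R in combinations(range(1, m + 1), k):      # C(m,k)
        for r in R:
            G.add_edge(r, R)
    return G

for (m, k) in [(4, 3), (5, 3), (7, 4)]:
    G = Lambda(m, k)
    V, E = G.number_of_nodes(), G.number_of_edges()
    print((m, k), V, E, 1 - V / 2 + E / 4)
# (4, 3) 8 12 0.0     -- the 3-cube
# (5, 3) 15 30 1.0
# (7, 4) 42 140 15.0
\end{verbatim}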
\begin{exmp} We will show that the bipartition of $\Lambda_{(m,k)}$ provides a legal system when $3<k<m<2k-1$.
We need to find sets $S_N\subset N_m$ and $S_C\subset C(m,k)$ such that $S_N\cup S_C$ is a state whose orbit is legal. Let $S_N= \{1,\ldots, \lceil{m/2}\rceil \} \subset N_m$. Note that $|N_m- S_N|< k$ so every $k$-subset of $N_m$ contains at least one element of $S_N$, i.e. every vertex in $C(m,k)$ is joined to a vertex in $S_N$. Since $k> m/2$ and $k< m$ there are at least two distinct $k$-subsets of $N_m$ containing $S_N$, i.e. two vertices $v,v'\in C(m,k)$ both joined to all vertices of $S_N$. Similarly, there are vertices $u,u'\in C(m,k)$ both joined to all vertices of $N_m-S_N$. Clearly $v,v',u,u'$ are all distinct, since no vertex of $C(m,k)$ is joined to all vertices on $N_m$. Letting $S_C = \{v,u\}$ we obtain a state $S_N\cup S_C$ whose orbit is legal. \end{exmp}
\subsection{Vertex-face-incidence-graphs and hamiltonian cycles}\label{sub:duals}
Let $\Theta$ be a finite graph. A \emph{cycle} $C$ in $\Theta$ is a subgraph homeomorphic to a circle. The cycle $C$ is \emph{hamiltonian} if each vertex of $\Theta$ lies in $C$.
Given an embedding $\Theta\subset S^2$ in the sphere,
we let $F$ denote the set of the \emph{faces} which are components of $S^2-\Theta$.
The \emph{vertex-face-incidence graph} $\mathcal{VF}(\Theta)$ is a bipartite graph whose vertices are partitioned as
$V\sqcup F$ and where $v \in V$ is joined to $f \in F$ by an edge if and only if $v\in \boundary f$.
\begin{defn}[$k$-connected] $\Theta$ is \emph{$k$-connected} if there does not exist a subset $U\subset V=\Theta^0$ of at most $(k-1)$ vertices such that the subgraph induced by $V-U$ is disconnected.
For instance, if $\Theta$ is $3$-connected then an embedding $\Theta\subset S^2$ is essentially unique by Steinitz's theorem. \end{defn}
The following is easy to verify:
\begin{prop} Let $\Theta$ be a $2$-connected planar simplicial graph. Then $\mathcal{VF}(\Theta)$ has a planar embedding such that each face of $\mathcal{VF}(\Theta)$ is a quadrangle, corresponding to an edge of $\Theta$. Moreover, $\mathcal{VF}(\Theta)$ is $3$-connected if and only if $\Theta$ is $3$-connected. \end{prop}
The main result of this section is:
\begin{thm}\label{thm:DWtoHam} Let $\Theta$ be a $2$-connected $3$-valent planar graph. Then the following are equivalent: \begin{description} \item [(a)] $\Theta$ has a Hamilton cycle, \item [(b)] $\mathcal{VF}(\Theta)$ has a strongly legal state, \item [(c)] $\mathcal{VF}(\Theta)$ has a legal system. \end{description} \end{thm}
Figure~\ref{f:K4vsCube} illustrates the correspondence between a Hamilton cycle in $\Theta$ and a strongly legal state of $\mathcal{VF}(\Theta)$ in the case when $\Theta$ is a ``house''.
\begin{figure}
\caption{ $\Theta$ is on the left and $\mathcal{VF}(\Theta)$ on the right.
The hamiltonian cycle on the left corresponds to the strongly legal state on the right.}
\label{f:K4vsCube}
\end{figure}
\begin{proof}[Proof of (c) $\Rightarrow$ (b)] This follows from the definitions. \end{proof}
\begin{proof}[Proof of (b) $\Rightarrow$ (a)] Let $A$ be a strongly legal state and let $B \ = \ (\mathcal{VF}(\Theta))^0-A$.
Let $E \subseteq E(\Theta)$ be the set of edges $e$ of $\Theta$ for which the two faces meeting along
$e$ belong to different parts of $A\sqcup B$. By definition of a strongly legal state, every vertex of $\Theta$ is incident with faces in both $A$ and $B$. Thus, as $\Theta$ is $3$-valent, we find that every vertex of $\Theta$ is incident to exactly two edges of $E$. Thus $E$ is the edge set of a collection of disjoint cycles $\{C_i\}$ in $\Theta$. We will show that this collection consists of a single cycle that is a hamiltonian cycle of $\Theta$.
Suppose this were not the case. Let $R$ be the set of regions of $S^2- \bigcup_i C_i$.
Note that any two faces of $\Theta$ that lie in the same region belong to the same part of $A\sqcup B$. If there were two or more cycles then there would be three or more regions and hence two whose faces are in the same part, say $A$. A path in $A$ from one to the other would have to pass through a region whose faces are in $B$. \end{proof}
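The edge set $E$ in the proof of (b)~$\Rightarrow$~(a) is easy to compute in examples. The following Python sketch (using networkx; the graph $\Theta$ is the $1$-skeleton of the $3$-cube and the face bipartition is our own illustrative choice playing the role of the face part of the state $A\sqcup B$, not the ``house'' of Figure~\ref{f:K4vsCube}) computes $E$ and checks that it is a single hamiltonian cycle.
\begin{verbatim}
import networkx as nx

# Theta = 1-skeleton of the 3-cube, with its six faces given as boundary cycles.
faces = {'bottom': (0, 1, 2, 3), 'top': (4, 5, 6, 7),
         'front': (0, 1, 5, 4), 'right': (1, 2, 6, 5),
         'back': (2, 3, 7, 6), 'left': (3, 0, 4, 7)}
A = {'bottom', 'top', 'front'}       # face part of the state; B is the complement
B = set(faces) - A

boundary = {name: {frozenset((f[i], f[(i + 1) % 4])) for i in range(4)}
            for name, f in faces.items()}
edges = set().union(*boundary.values())
vertices = set().union(*faces.values())

# Every vertex of Theta is incident with faces in both A and B.
assert all(any(x in faces[a] for a in A) and any(x in faces[b] for b in B)
           for x in vertices)

# E = edges whose two incident faces lie in different parts.
E = [e for e in edges if len({a for a in A if e in boundary[a]}) == 1]

H = nx.Graph()
H.add_edges_from(tuple(e) for e in E)
cycle = nx.find_cycle(H)
print(len(E), len(cycle), H.number_of_nodes())   # 8 8 8: a single hamiltonian cycle
\end{verbatim}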
\begin{proof}[Proof of (a) $\Rightarrow$ (c)]
Let $C$ be a Hamiltonian cycle of $\Theta$.
Since $\Theta$ is 3-valent, each vertex lies on exactly one edge of $\Theta-C$. Choosing an arbitrary way of directing the edges of $\Theta-C$, each vertex of $\Theta$ is either the initial or the terminal vertex of the edge of $\Theta-C$ containing it. We thus have a partition $V(\Theta)=V_1\sqcup V_2$ into initial and terminal vertices.
We will apply Lemma~\ref{lem:TBWS} where $A=F$ and $B=V$ denote the face-vertices and vertex-vertices of $\mathcal{VF}(\Theta)$. Let $F_1,F_2$ consist of the faces on opposite sides of $C\subset S^2$, and let $V_1,V_2$ be the sets of initial and terminal vertices defined above.
Condition~\eqref{TBWS:1} is satisfied as follows: Consider the disk diagram $D_i$ consisting of a hemisphere bounded by $C$, so the faces of $D_i$ are the elements of $F_i$. For each pair of faces in $F_i$, there is a gallery joining them in $D_i$ that consists of a sequence of faces meeting along common edges. Since each such edge is in the interior of $D_i$, it is an edge of $\Theta-C$. Therefore it has one vertex in $V_1$ and the other vertex in $V_2$. Thus this gallery provides the desired path.
Condition~\eqref{TBWS:2} is satisfied since every vertex lies on $C$ and thus lies on a face in each hemisphere. \end{proof}
Theorem~\ref{thm:DWtoHam} implies that characterizing the planar quadrangulations (planar graphs in which every face has four edges) having legal systems is a difficult task. In particular a classical conjecture of Barnette states that every bipartite $3$-connected $3$-regular planar graph has a Hamilton cycle (see \cite{Gould2014}). Using Theorem~\ref{thm:DWtoHam} we can awkwardly restate this conjecture in the language of legal systems.
\begin{conj}[Barnette]\label{conj:reformed barnette} Let $Q$ be a $3$-connected planar quadrangulation, and let $(A,B)$ be the bipartition of the vertices of $Q$. Suppose that every vertex of $A$ has valence three, and that $A$ admits a partition $(A_1,A_2)$ such that every face of $Q$ is incident with a vertex in $A_1$ and with a vertex in $A_2$. Then $Q$ has a legal system. \end{conj}
\begin{figure}
\caption{Tutte's Graph is a famous planar graph with no hamiltonian cycle.}
\label{fig:Tutte}
\end{figure}
\begin{exmp}[Tutte]\label{exmp:tutte} Tait had conjectured in 1884 that every $3$-valent $3$-connected planar graph has a hamiltonian cycle. In 1946, Tutte produced a counterexample to this conjecture:
the graph $\Theta$ depicted in Figure~\ref{fig:Tutte}. Its vertex-face incidence graph $\mathcal{VF}(\Theta)$ has the property that any proper connected subgraph with ${\ensuremath{\kappa}}\geq 0$ is a square or a chain of length $\leq 2$. \begin{com} Indeed, a subgraph with $\kappa\geq 0$ would have all regions squares. So it suffices to check that $\mathcal{VF}(\Theta)$ does not contain a $4$-cycle bounding a region. This would correspond to a pair of regions in $\Theta$ with disconnected intersection. \end{com} Combining this with Theorem~\ref{thm:DWtoHam}, we deduce that there exist 3-connected planar quadrangulations not admitting a legal state, and hence Guess~\ref{guess:naive} is false for planar graphs, as there is not even a single legal state. Consequently, Bestvina-Brady Morse theory cannot be applied to any cover of the cube complex $X$ associated to a torsion-free finite index subgroup of $G$.
The Coxeter group $G=G(\mathcal{VF}(\Theta))$ is a finite co-volume cusped 3-dimensional hyperbolic right-angled reflection group. Indeed, there is no internal 3-cycle in $\mathcal{VF}(\Theta)$ since it is bipartite, and no internal 4-cycle in $\mathcal{VF}(\Theta)$ since any two regions of $\Theta$ have connected intersection, as revealed by an inspection of Figure~\ref{fig:Tutte}. The intersection of two quads is either empty, a vertex, or an edge, since otherwise either $\Theta$ is not simplicial, or two regions of $\Theta$ overlap along more than one edge. Consequently, $\mathcal{VF}(\Theta)$ satisfies the conditions of Proposition~\ref{prop:right angled reflection}. \end{exmp}
\begin{com} Does the pyramidal triangulation of a quadrangulation of the 2-sphere always have a legal system? Always have a legal state?
Does there exist a flag triangulation of $S^2$ that has no legal state? Any such should arise as a vertex link of some cubulated hyperbolic 3-manifold. \end{com}
\begin{rem}\label{rem:connectivity} Tutte proved that $4$-connected planar graphs are hamiltonian,
and so perhaps $\mathcal{VF}(\Theta)$ has a legal state when $\Theta$ has stronger connectivity properties:
e.g. in the spirit of Conjecture~\ref{conj:reformed barnette}.
Finally, we note that
Grinberg's formula \cite{Grinberg68} provides a host of other counterexamples to Tait's conjecture, and these provide a rich family of examples of 3-dimensional right-angled hyperbolic reflection groups where Bestvina-Brady Morse theory cannot possibly show virtual fibering for any finite cover of the associated cube complex.
The above examples show that, using the standard affine structure on the cubes, Bestvina-Brady theory cannot be applied to the cube complex which is the dual spine
to the hyperbolic tiling by ideal polyhedra that is formed from the reflection walls of certain hyperbolic Coxeter groups. In Example~\ref{exmp:barycentric} we show that the method of this text cannot be applied to certain right-angled Coxeter groups that are commensurable with closed (non-hyperbolic) 3-manifold fundamental groups.
\end{rem}
\begin{rem} Consider an embedding $\Theta\subset S^2$. As $\girth(\mathcal{VF}(\Theta)) = 4$ we have
$$\kappa(\mathcal{VF}(\Theta)) = 1 - \frac{|V(\mathcal{VF}(\Theta))|}{2} + \frac{|E(\mathcal{VF}(\Theta))|}{4}= 1 - \frac{|V(\Theta)| + |F(\Theta)|}{2} + \frac{2|E(\Theta)|}{4} = 0$$ where the final equality holds by Euler's formula for $\Theta\subset S^2$. \end{rem}
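The computation in the preceding remark can be checked on small examples. The following Python sketch (the face lists for the tetrahedron and the $3$-cube are our own hard-coded choices of $\Theta$) builds $\mathcal{VF}(\Theta)$ from a list of face boundaries and evaluates $1-\frac{|V|}{2}+\frac{|E|}{4}$, obtaining $0$ in both cases.
\begin{verbatim}
import networkx as nx

def vertex_face_incidence(faces):
    """VF(Theta) from an embedding given by its list of face boundaries."""
    G = nx.Graph()
    for i, face in enumerate(faces):
        for x in face:
            G.add_edge(('v', x), ('f', i))
    return G

tetrahedron = [(1, 2, 3), (1, 2, 4), (1, 3, 4), (2, 3, 4)]
cube = [(0, 1, 2, 3), (4, 5, 6, 7), (0, 1, 5, 4),
        (1, 2, 6, 5), (2, 3, 7, 6), (3, 0, 4, 7)]

for faces in (tetrahedron, cube):
    G = vertex_face_incidence(faces)
    V, E = G.number_of_nodes(), G.number_of_edges()
    print(V, E, 1 - V / 2 + E / 4)   # 8 12 0.0  and  14 24 0.0
\end{verbatim}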
\begin{lem}\label{lem:conesquare}
Let $C$ be a cycle of length four in a graph $\Gamma'$, and let $\Gamma$ be the graph obtained from $\Gamma'$ by adding a vertex adjacent to the vertices of $C$ and to no other vertices. If $\Gamma$ has a legal system then so does $\Gamma'$. \end{lem}
\begin{rem} The analogous statement holds more generally with $C$ replaced by the cocktail party graph $K(2,2,\ldots, 2)$. More generally, consider the amalgam $\Gamma = \Gamma'\cup_C \Gamma''$ of the graphs $\Gamma',\Gamma''$ along such $C$: If $\Gamma$ has a legal system, then so do $\Gamma'$ and $\Gamma''$. This specializes to the above result when
$\Gamma''$ is the cone on a cocktail party graph $C$. \end{rem} \begin{proof} Let $a,b,c,d$ be the vertices of $C$ in order, and let $v$ be the new vertex. Let $M$ be the group associated to the legal system on $\Gamma$, let $O$ be a legal $M$-orbit, let $V$ denote the vertex set of $\Gamma$, and let $V'=V-\{v\}$ be the vertex set of $\Gamma'$.
We claim that the restriction of $M$ and $O$ to $\Gamma'$ (i.e. ignore $v$) provides a legal system.
Suppose not. Then w.l.o.g. there exists a state $S \in O$ such that $S \cap V'$ does not induce a connected subgraph of $\Gamma'$. As $S$ induces a connected subgraph in $\Gamma$, w.l.o.g. we have $a,c,v \in S$ and $b,d \not \in S$. However, there exists $m \in M$ such that $a,c \in m$ but $v,b,d \not \in m$. Indeed, either $m_a$ or $m_c$ or $m_a+m_c$ has this property. The state $mS$ is not strongly legal since $v \in mS$, but $a,b,c,d \not \in mS$. \end{proof}
\begin{exmp}\label{exmp:barycentric} Let $\Gamma$ be the 1-skeleton of the first barycentric subdivision of the cell structure for $S^2$ whose 1-skeleton is Tutte's graph. Then the clique complex of $\Gamma$ is $S^2$ but $\Gamma$ has no legal system. Indeed, by starting with $\Gamma$ and applying Lemma~\ref{lem:conesquare} sixty-nine times (once for each edge of $\Theta$) we arrive at $\mathcal{VF}(\Theta)$. Hence $\Gamma$ has no legal system since $\mathcal{VF}(\Theta)$ has no legal system by Example~\ref{exmp:tutte}. \end{exmp}
\subsection{Failure With Negative Euler characteristic}\label{sub:negative euler} \begin{rem}When $G$ is a locally quasiconvex hyperbolic group it cannot have an infinite index f.g. infinite normal subgroup \cite[Prop~3.9]{ABC91}. Likewise, a non virtually abelian Kleinian group that is locally geometrically finite cannot have an infinite index nontrivial f.g. normal subgroup. There are thus a variety of examples of right-angled Coxeter groups $G(\Gamma)$ where $\Gamma$ has no legal system.
A prominent such family arises from planar graphs $\Gamma\subset S^2$ such that some region has more than five sides but where each cycle of length~$3$~or~$4$ bounds a region. In this case, $G(\Gamma)$ has an index~$\leq 16$ subgroup that is the fundamental group of a hyperbolic 3-manifold $M$ where $\boundary M$ contains a surface of genus~$>1$ that corresponds to the large region. Thus $\pi_1M$ is locally geometrically finite by a result of Thurston~\cite{Canary96}, and hence cannot have a nontrivial finitely generated normal subgroup of infinite index.
The previous class of examples has $\chi(G)<0$. More generally, other examples of such Coxeter groups are virtually 2-dimensional coherent groups where $\chi(G)\neq 0$. \begin{com}There are many such examples in \ref{SectionalpqrITHINK}.\end{com} For them the f.g. kernel of Corollary~\ref{cor:win f.g.} would be free and thus $\chi(G)=0$, which is impossible. Indeed, if $N\subset G$ is a nontrivial f.p. infinite index normal subgroup with $\cd(G)=2$ then $N$ is free \cite{Bieri76}.
\end{rem}
The above discussion suggests that $\beta_0(G)-\beta_1(G)+\beta_2(G) \geq 0$ when there is a legal system and $\cd(G)=2$, and indeed, we have the following simple and precise count that holds in general: \begin{thm} Let $\Gamma$ be a finite graph. Suppose there is a legal system for $\Gamma$. Then $\kappa_2(\Gamma) = 1-\frac{V}{2}+\frac{E}{4} \geq 0$.
Moreover, if $\kappa_2(\Gamma) = 0$ then all states in a legal system are trees. \end{thm} \begin{proof} Let $K$ be a $d$-clique in $\Gamma$. Observe that $K$ occurs in exactly $\frac1{2^d}$ of the states in the legal orbit. Indeed, letting $m_1,\ldots, m_d$ denote the moves at the $d$ vertices of $K$, we see that the orbit is partitioned into equivalence classes of cardinality $2^d$ according to the action of $\langle m_1,\ldots, m_d\rangle$, and $K$ appears in precisely one element within each class.
As each state $S$ is connected we have $0\leq 1-v(S)+e(S)$. Thus letting $n$ denote the cardinality of the legal orbit we have: $$0\leq \sum_S (1-v(S)+e(S))
= \sum_S 1 - \sum_S v(S) + \sum_S e(S) = n - n \frac12 V + n\frac14 E =n(1-\frac{V}{2}+\frac{E}{4})$$ Moreover, the above inequality is an equality precisely if each state is a tree. \end{proof} The above proof works for any inequality uniformly satisfied by numbers of cells for each state. In particular, it is of interest in the case where we assume that each state is acyclic, and we have the following which is also a consequence of a cohomology computation: \begin{cor}\label{cor:contractible states} Suppose $\Gamma$ has a legal system with the property that the flag complex $Q(S)$ is contractible for each state $S$. Then the generalized form of the above inequality provides that ${\ensuremath{\kappa}}(\Gamma)=0$ and hence $\chi(G)=0$. (Here, $\chi(G)=\frac{1}{[G:G']}\chi(G')$ where $G'$ is a finite index torsion-free subgroup.) \end{cor}
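The count in the proof of the theorem above can be checked numerically. The following Python sketch (using networkx) runs over the four states $A_i\cup B_j$ of the $3$-cube splitting from the sketch following Lemma~\ref{lem:TBWS}, confirms that each induces a tree, and verifies that $\sum_S (1-v(S)+e(S)) = n(1-\frac{V}{2}+\frac{E}{4}) = 0$.
\begin{verbatim}
import itertools
import networkx as nx

G = nx.hypercube_graph(3)
A1, A2 = [(0,0,0), (0,1,1)], [(1,0,1), (1,1,0)]
B1, B2 = [(0,0,1), (1,0,0)], [(0,1,0), (1,1,1)]

total = 0
for Ai, Bj in itertools.product((A1, A2), (B1, B2)):
    S = G.subgraph(set(Ai) | set(Bj))
    assert nx.is_tree(S)                       # kappa_2 = 0 forces every state to be a tree
    total += 1 - S.number_of_nodes() + S.number_of_edges()

V, E = G.number_of_nodes(), G.number_of_edges()
print(total, 4 * (1 - V / 2 + E / 4))          # 0 0.0
\end{verbatim}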
\section{In pursuit of an exotic subgroup of a hyperbolic group}\label{sec:7} In this section we pose a problem aiming to use Theorem~\ref{thm:win fiber} to provide a hyperbolic group $G(\Gamma)$ such that the kernel $K$ of $G'\to \ensuremath{\field{Z}}$ has finite $K(\pi,1)$ but is not hyperbolic. \begin{prob} Find $\Gamma$ having a legal system whose states have contractible clique-complexes, such that the clique-complex $Q(\Gamma)$ is flag-no-square but does not contain a 2-sphere, and such that $\vcd(G(\Gamma))\geq 3$. \begin{com}I do not yet know of a quick way to see that $\cd(G)>2$ by looking at $\Gamma$. I presume it suffices to see that $\ensuremath{{\sf{H}}}_2(Q(\Gamma))\neq 0$ or perhaps that $Q(\Gamma)$ does not deformation retract to a graph.\end{com} \end{prob}
We do not know how to make a flag-no-square complex that is a closed pseudo-manifold of dimension $\geq3$ for which there is a legal system. For that matter we do not know how to make a flag-no-square complex (closed pseudo-manifold) with $\cd(Q(\Gamma))\geq 3$ and ${\ensuremath{\kappa}}(\Gamma)=0$. Note that if $\Gamma$ has a legal system and the clique complex of each state is contractible then ${\ensuremath{\kappa}}(\Gamma)=0$ by Corollary~\ref{cor:contractible states}. In this connection we note that \cite[Conj~6.1]{LutzNevo14} implies that there does not exist a flag-no-square simplicial structure on $S^3$ with ${\ensuremath{\kappa}}(\Gamma)=0$. \begin{com}And there is no flag-no-square structure on $S^n$ for $n>3$ or $n>4$; reference in Przytycki-Swiatkowski or Davis-Jankiewicz?\end{com}
\begin{prob} Find a finite simplicial graph $\Gamma$ with the property that: \begin{enumerate} \item ${\ensuremath{\kappa}}(\Gamma)\leq 0$ (we are primarily interested in ${\ensuremath{\kappa}}(\Gamma)=0$). \item The clique-complex of $\Gamma$ is flag-no-square (i.e.\ every length-$4$ cycle has a chord and hence bounds two triangles). \item $\Gamma$ is not planar. \item Every edge of $\Gamma$ lies on a triangle (preferably on at least two). \end{enumerate} \end{prob}
\begin{prob} Is there $\Gamma$ as above with the more general property that no two $4$-cycles (without triangles) share non-adjacent vertices, and such that there is no 2-sphere in the space obtained by filling in all 3-cycles and 4-cycles?
It might be possible to find a legal system for such $\Gamma$ and use that to create a word-hyperbolic group with a non-hyperbolic normal subgroup with strong finiteness properties. \end{prob}
\section{Random graphs}\label{sub:random}\label{sec:8}
In this section we show that Erd\H{o}s-R\'enyi random graphs for a wide range of densities almost surely contain legal systems. Recall that the model $\mathcal{G}(n,p)$ consists of graphs with vertex set $V=\{1,2,\ldots,n\}$ in which edges are chosen independently with probability $p$. We are interested in the asymptotic behaviour of the model and so assume $n \to \infty$ throughout.
We will show that if $p$ is reasonably far away from both zero and one, then a graph in $\mathcal{G}(n,p)$ almost surely has a legal system. While our bounds on $p$ are not the best possible, they in particular imply that almost every labelled graph on $n$ vertices has a legal system, as the model $\mathcal{G}(n, 1/2)$ corresponds to the uniform distribution on such graphs.
The \emph{complement} of a graph $\Gamma(V,E)$ is the graph with vertex set $V$ and where two distinct vertices $v,w$ are joined by an edge if and only if there is no edge between them in $\Gamma$.
A \emph{matching} in a graph $\Gamma(V,E)$ is a set $M\subseteq E$ such that no vertex in $V$ is incident to more than one edge of $M$. A \emph{perfect matching} in $\Gamma(V,E)$ is a matching $M$ such that every vertex of $V$ is incident to an edge of $M$.
We will need the following classical result about the existence of perfect matchings in random graphs.
\begin{thm}[Erd\H{o}s-R\'enyi~\cite{ERMatching66}]\label{thm:Gnp1}
Let $p =(\log{n}+\omega(n))/n$ where $\omega(n) \to \infty$. If $n$ is even then a graph in $\mathcal{G}(n,p)$ almost surely contains a perfect matching. \end{thm}
Further, we will need a result on connectivity of random bipartite graphs. Let $\mathcal{G}(n_1,n_2,p)$ be a random graph model consisting of bipartite graphs with bipartition $(A,B)$ for a pair of disjoint sets $A$ and $B$, with $|A|=n_1$ and $|B|=n_2$, where the edges joining $A$ and $B$ are chosen independently with probability $p$.
\begin{thm}[Pal{\'a}sti~\cite{Palasti64}]\label{thm:Gnp2} Let $0\leq n_1 \leq n_2$, and let $p =(\log{n_2}+\omega(n_2))/n_1$ where $\omega(n) \to \infty$. Then a graph in $\mathcal{G}(n_1,n_2,p)$ is almost surely connected. \end{thm}
\begin{thm}[Bollob{\'a}s~\cite{Bollobas80}]\label{thm:maxdegree} The maximum valence of a graph in $\mathcal G(n,p)$ is almost surely $O(pn)$. \end{thm}
Let $\Gamma=(V,E)$ be a graph. A \emph{perfect antimatching} is a partition $\mathcal{M} = \{S_1,\ldots,S_k\}$ of $V$ such that each $S_i$ consists of precisely two nonadjacent vertices.
A set $T \subseteq V$ is an \emph{$\mathcal{M}$-transversal} if $|T \cap S_i|=1$ for each $i$.
A perfect antimatching $\mathcal{M}$ is \emph{lawful} if every $\mathcal{M}$-transversal is a legal state. Note that the set of all $\mathcal{M}$-transversals forms an orbit of the subgroup of $2^{V}$ generated by $\mathcal{M}$. Thus if $\mathcal{M}$ is lawful then it is a legal system.
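These notions are easy to experiment with computationally. The following short Python sketch (ours, purely illustrative, and not used in any of the proofs) greedily constructs a perfect antimatching of a small graph and tests lawfulness by brute force over all transversals; here we model a legal state simply as a state whose induced subgraph is nonempty and connected, which is the property verified in the connectivity arguments below.
\begin{verbatim}
from itertools import product

def is_connected(vertices, adj):
    # check that `vertices` induces a nonempty connected subgraph
    verts = set(vertices)
    if not verts:
        return False
    seen, stack = set(), [next(iter(verts))]
    while stack:
        v = stack.pop()
        if v not in seen:
            seen.add(v)
            stack.extend(adj[v] & verts)
    return seen == verts

def greedy_perfect_antimatching(vertices, adj):
    # pair up the vertices into nonadjacent pairs; None if the greedy pass fails
    remaining, pairs = set(vertices), []
    while remaining:
        v = remaining.pop()
        w = next((u for u in remaining if u not in adj[v]), None)
        if w is None:
            return None
        remaining.remove(w)
        pairs.append((v, w))
    return pairs

def is_lawful(pairs, adj):
    # every transversal (one vertex per pair) must induce a connected subgraph
    return all(is_connected(T, adj) for T in product(*pairs))

# toy example: the 4-cycle 1-2-3-4-1, whose unique perfect antimatching is {13, 24}
adj = {1: {2, 4}, 2: {1, 3}, 3: {2, 4}, 4: {1, 3}}
pairs = greedy_perfect_antimatching(adj.keys(), adj)
print(pairs, pairs is not None and is_lawful(pairs, adj))
\end{verbatim}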
\begin{thm}\label{thm:random} Suppose \begin{equation}\label{inequalityp1} \frac{(2\log{n})^{1/2}+\omega(n)}{n^{1/2}} \leq p \leq 1-\frac{1}{n^{1.99}} \end{equation} for some $\omega(n)$ with $\omega(n) \to \infty$. Then a graph in $\mathcal{G}(n,p)$ almost surely contains a legal system. \end{thm}
Theorem~\ref{thm:random} is proven using colored legal systems with at most two vertices of each color. We hoped that by considering colored legal systems with color classes of higher cardinality one could decrease the lower bound on $p$, and indeed this turned out to be the case; we refer the reader to Theorem~\ref{PGK} below.
\begin{proof} The proof is broken into two cases that employ various types of legal systems. The case where $p \leq 1-\frac{2\log{n}+\omega(n)}{n}$ is treated in Lemma~\ref{lem:intermediate}. The case where $ p \geq 1 -\frac{2\log{n}+\omega(n)}{n}$ is treated in Lemma~\ref{lem:high} under the assumption that $\Gamma$ is not a complete graph. Note that $\Gamma$ is almost surely not complete if $p\leq 1-\frac{1}{n^{1.99}}$ since the probability that $\Gamma$ is a complete graph equals $p^{\frac{n(n-1)}{2}}\leq e^{-(1-p)\frac{n(n-1)}{2}}\to 0$ as $n\to \infty$. \end{proof}
\begin{lem}[Intermediate probability]\label{lem:intermediate} Suppose $p$ satisfies the following inequality \begin{equation}\label{inequalityp2}\frac{(2\log{n})^{1/2}+\omega(n)}{n^{1/2}} \leq p \leq 1-\frac{2\log{n}+\omega(n)}{n} \end{equation} for some $\omega(n)$ with $\omega(n) \to \infty$. Then a graph $\Gamma$ in $\mathcal{G}(n,p)$ almost surely contains a legal system. \end{lem} \begin{proof} We will first work under the assumption that $n$ is even, in which case we actually show that under Condition~\eqref{inequalityp2}, a random graph in $\mathcal{G}(n,p)$ almost surely contains a lawful perfect antimatching. We will adjust the argument to handle the case when $n$ is odd at the end of the proof.
We start by partitioning $V=\{1,2,\ldots,n\}$ into two parts $A$ and $B$ of even cardinality, each of size at most $n/2+1$. Since a perfect antimatching corresponds to a perfect matching of the complement, by the right inequality of Equation~\eqref{inequalityp2} and Theorem~\ref{thm:Gnp1} the subgraphs of $\Gamma$ induced by $A$ and by $B$ almost surely contain perfect antimatchings. We assume that such antimatchings exist and denote them by $\mathcal{M}_A$ and $\mathcal{M}_B$.
We will show that $\mathcal{M} = \mathcal{M}_A \cup \mathcal{M}_B$ is almost surely lawful. Let $E_1$ be the set of edges with one end in $A$ and the other in $B$. Note that each edge in $E_1$ is chosen independently with probability $p$.
We construct an auxiliary bipartite graph $\Lambda'_A$ with bipartition $(A,\mathcal{M}_B)$ by joining $u \in A$ and $S \in \mathcal{M}_B$ by an edge if $u$ is adjacent to both elements of $S$. Then $\Lambda'_A$ is a random bipartite graph in $\mathcal{G}(|B|/2,|A|,p^2)$ and by Theorem~\ref{thm:Gnp2} it is almost surely connected. Define $\Lambda_B'$ in $\mathcal{G}(|A|/2,|B|,p^2)$ analogously.
We will show that if $\Lambda'_A$ and $\Lambda_B'$ are both connected then $\mathcal{M}$ is lawful. Indeed, let $T$ be an $\mathcal{M}$-transversal and let $\Gamma_T$ be the subgraph of $\Gamma$ induced by $T$. Consider $u,v \in T \cap A$, and let $P$ be a path in $\Lambda'_A$ with ends $u$ and $v$. Replacing the vertices of $P$ in $\mathcal{M}_B$ by corresponding elements of $T$, we obtain a path in $\Gamma_T$ from $u$ to $v$. It follows that $T \cap A$ lies in one component of $\Gamma_T$. Similarly, $T \cap B$ lies in one component of $\Gamma_T$. Moreover, by construction of $\Lambda'_A$ there certainly exists an edge of $\Gamma$ from a vertex in $T \cap A$ to a vertex in $T \cap B$. Thus $\Gamma_T$ is connected, as desired.
Finally, if $n$ is odd, we apply the above argument to the subgraph $\Gamma'$ of the random graph $\Gamma$ induced by $V - \{n\}$. It implies that $\Gamma'$ almost surely contains a lawful perfect antimatching $\mathcal{M}$. Then $\mathcal{M} \cup \{\{n\}\}$ is almost surely a legal system on $\Gamma$, as $n$ is almost surely joined to both vertices of some $S\in\mathcal M$. \end{proof}
We are grateful to Roman Glebov and Gonzalo Fiz Pontiveros for describing arguments that explain how to handle random graphs at high density. In particular, the following is due to Roman Glebov: \begin{lem}\label{lem:high} Let $q=1-p$. Suppose $$q = O(\frac{\log n}{n}).$$ Then a graph $\Gamma$ in $\mathcal G(n,p)$ almost surely either is a complete graph or contains a legal system. \end{lem} \begin{proof}
Let $H$ be the complement of $\Gamma$. Note that $H$ is in $\mathcal G(n,q)$. We consider two cases.
\par\noindent\textbf{Case 1: $q = o(n^{-3/2})$.}\\ The probability that a given triple of vertices spans at least two edges of $H$ equals $3q^2-2q^3 \leq 3q^2$. The expected number of such triples is at most ${n\choose 3}3q^2 \leq \frac12 n^3q^2\to 0$ as $n\to\infty$. Thus the probability that there is a pair of edges in $H$ with a common vertex tends to $0$ as $n\to\infty$. We now describe a legal system when $\Gamma\in \mathcal G(n,p)$ is not a complete graph. Choose an edge in $H$ joining vertices $v,w$. Consider the system of moves consisting of $\{v,w\}$ and the singletons of all other vertices. Let the initial state be $S = \{v\}$. Since every other vertex in $\Gamma$ is joined by an edge to both $v$ and $w$, the orbit of $S$ is legal.
\par\noindent\textbf{Case 2: $q\neq o(n^{-3/2})$.}\\ Let $k$ be the number of edges in a maximal matching $M$ in $H$.
Denote by $D$ the set of all vertices that are not incident to any edge in $M$. Note that $|D| = n-2k$ and the subgraph $\Gamma(D)$ of $\Gamma$ is a clique. Consider the system of moves consisting of $\{v,w\}$ for all $(v,w)\in M$ and $\{v\}$ for all $v\in D$. Let $S$ be a state that has exactly one vertex in each $\{v,w\}$ for $(v,w)\in M$.
We first prove that $\Gamma(S)$ is connected. Suppose to the contrary that $\Gamma(S)$ can be decomposed
into two subgraphs $\Gamma(S_1), \Gamma(S_2)$ not having adjacent vertices.
Then the subgraph $H(S)$ induced by $S$ in $H$ contains the complete bipartite graph on $S_1,S_2$ as a subgraph.
Thus there is a vertex in $H(S)$ of valence at least $\lceil\frac k 2\rceil$.
Let $d_H$ denote the maximum valence of $H$, and note that $d_H=O(qn)$ by Theorem~\ref{thm:maxdegree}. By hypothesis $q = O(\frac{\log n}{n})$, and so $d_H=O(\log n)$. Combining with the previous conclusion about a vertex of $H(S)$, we find that $k=O(\log n)$.
The set $D$ of vertices not incident to any edge of $M$ has $n-2k$ vertices and, by the maximality of $M$, the subgraph $H(D)$ has no edges. We will use the bound $k=O(\log n)$ to show below that such a large edgeless set almost surely does not exist in $H$, contradicting the assumption that $\Gamma(S)$ is disconnected.
The probability that a set of $n-2k$ vertices has no edges equals $(1-q)^{\binom{n-2k}{2}}$,
and hence the probability that there exists a subset of size $n-2k$ with no edges is bounded by $$\binom{n}{2k}(1-q)^{\binom{n-2k}{2}} \leq n^{2k}e^{-q{\binom{n-2k}{2}}}.$$ As $k=O(\log n)$, there is a constant $c>0$ with $k\leq c\log n$, so for all sufficiently large $n$ we have:
$$n^{2k}e^{-q{\binom{n-2k}{2}}} \leq e^{2c\log^2n - q\binom{n-2c\log n }{2}}. $$ Hence, since $q\geq b n^{-3/2}$ with $b>0$ for sufficiently large $n$, we have $$2c\log^2n - q\binom{n-2c\log n}{2}\leq 2c\log^2n - b n^{-3/2}\binom{n-2c\log n}{2}\to -\infty,$$ and so the above probability goes to $0$ as claimed.
Having proven that $\Gamma(S)$ is connected whenever $S$ is a state containing exactly one vertex from each edge in $M$, it remains to prove that $\Gamma(S\cup D')$ is connected for any $D'\subseteq D$. The probability that there is $d\in D$ that is not adjacent to any vertex in $S$ is at most $(n-2k)q^k \leq (n-2k)\left(c'\frac{\log n}{n}\right)^k$, where $c'$ is the constant implicit in the assumption $q=O\left(\frac{\log n}{n}\right)$. This last term tends to $0$ as $n\to \infty$ since $k\to \infty$. Thus almost surely every $d\in D$ is adjacent to a vertex of $S$, and since $\Gamma(S)$ is connected, $\Gamma(S\cup D')$ is connected for every $D'\subseteq D$. \end{proof}
In response to Theorem~\ref{thm:random}, Fiz Pontiveros, Glebov and Karpas have proven the following result in \cite{FPGK}, which definitively explains when a random graph has a legal system: \begin{thm}[Fiz Pontiveros-Glebov-Karpas]\label{PGK} Suppose that \[\frac{\log n+\log\log n +\omega(1)}{n}\leq p \leq 1-\frac{\omega(1)}{n^2}.\] Then a random graph in $\mathcal{G}(n,p)$ almost surely has a legal system. \end{thm} As they explained, a random graph $\Gamma\in\mathcal{G}(n,p)$ that is not complete almost surely has a legal system precisely when $\Gamma$ almost surely has the property that each of its vertices has valence at least $2$.
For a slightly different setting of random regular graphs it would be interesting to resolve the following question.
\begin{prob}
Does there exist a constant $d \in \mathbb{N}$ such that a random $d$-regular graph on $n$ vertices almost surely has a legal system? \end{prob}
We now describe a simpler bipartite version of Theorem~\ref{thm:random}. \begin{prop} Suppose $$p \geq \frac{2\log n + \omega(n)}{n}$$ for some $\omega(n)$ with $\omega(n) \to \infty$. Then a graph in $\mathcal{G}(n,n,p)$ almost surely contains a legal system. \end{prop} \begin{proof}
First suppose $n$ is even. Let $V = A\sqcup B$ be the bipartite structure of a graph $\Gamma$ in $\mathcal G(n,n,p)$. Partition $A$ into $A_1\sqcup A_2$ where $|A_i|= \frac{n}2$ for $i= 1,2$. Similarly partition $B$. Each of the graphs induced by $A_i \cup B_j$ for $i,j \in \{1,2\}$ is a random bipartite graph in $\mathcal G(\frac n 2, \frac n 2, p)$. By Theorem~\ref{thm:Gnp2} they are all almost surely connected. Thus the system of moves corresponding to the bipartition, namely the two moves $A$ and $B$, together with the orbit consisting of the states $A_i\cup B_j$, is a legal system.
If $n$ is odd, we use the same argument for the subgraph $\Gamma'$ of $\Gamma$ induced by $n-1$ vertices on each side of the bipartition. The remaining vertex $a$ on the $A$-side is almost surely joined to a vertex in $B_1$ and to a vertex in $B_2$, and the remaining vertex $b$ on the $B$-side is almost surely joined to a vertex in $A_1$ and to a vertex in $A_2$. Thus the orbit of the state $A_1\cup B_1\cup \{a,b\}$ is legal. \end{proof}
We close with a few further questions about the existence of legal systems for certain families of graphs:
\begin{prob}Does a generic 3-connected finite planar graph $\Gamma$ with $\girth(\Gamma)=4$ and ${\ensuremath{\kappa}}(\Gamma)=0$ have the property that there is no legal state? \end{prob}
\begin{prob} Is there a sensible statement (positive or negative) that can be made for generic bipartite graphs (or girth~$4$ graphs) with a fixed number of vertices and ${\ensuremath{\kappa}}(\Gamma)=1-v/2+e/4 =0$?
How about if $\Gamma$ is planar? \end{prob}
\end{document} |
\begin{document}
\title{\bf Structural Controllability on Graphs for\\ Drifted Bilinear Systems over Lie Groups}
\date{}
\author{Xing Wang\thanks{Key Laboratory of Mathematics Mechanization, Institute of Systems Science, Academy of Mathematics and Systems Science, Chinese Academy of Sciences, Beijing 100190, China; School of Mathematical Sciences, University of Chinese Academy of Sciences, Beijing 100049, China. ([email protected])}, Bo Li\thanks{Key Laboratory of Mathematics Mechanization, Academy of Mathematics and Systems Science, Chinese Academy of Sciences, Beijing 100190, China. ([email protected])}, Jr-Shin Li\thanks{Department of Electrical and Systems Engineering, Washington University, St. Louis, MO 63130, USA. ([email protected])},
Ian R. Petersen\thanks{Research School of Engineering, Australian National University, Canberra, ACT 0200, Australia. ([email protected])}, Guodong Shi\thanks{Australian Center for Field Robotics, School of Aerospace, Mechanical and Mechatronic Engineering, The University of Sydney, NSW 2006, Australia. ([email protected])}}
\maketitle
\begin{abstract} In this paper, we study graphical conditions for structural controllability and accessibility of drifted bilinear systems over Lie groups. We consider a bilinear control system with drift and controlled terms that evolves over the special orthogonal group, the general linear group, and the special unitary group. Zero patterns are prescribed for the drift and controlled dynamics with respect to a set of base elements in the corresponding Lie algebra. The drift dynamics must respect a rigid zero-pattern in the sense that the drift takes values as a linear combination of base elements with strictly non-zero coefficients; the controlled dynamics are allowed to follow a free zero pattern, with potentially zero coefficients in the linear combination of the controlled base elements. First of all, for such bilinear systems over the special orthogonal group or the general linear group, the zero patterns are shown to be associated with two undirected or directed graphs whose connectivity and connected components ensure structural controllability/accessibility. Next, for bilinear systems over the special unitary group, we introduce two edge-colored graphs associated with the drift and controlled zero patterns, and prove structural controllability conditions related to connectivity and the number of edges of a particular color. \end{abstract}
\section{Introduction} The past decade has witnessed an important line of research for multi-agent systems where {\em agents}, representing subsystems with distributed sensing and control units, are dynamically interconnected over an underlying network in order to achieve collective goals such as consensus, formation, coverage, and controllability \cite{jad03,mesbahi}. The links of such a network might indicate physical interactions, or non-physical information exchanges, which define a {\em topology} of the overall multi-agent system. The interface of classical control theory and graph theoretic methods has led to fundamental insights regarding how the network topology enables convergence of distributed algorithms, stabilizability of distributed controllers, or controllability \cite{Murray04,Martinez07,Nedic10,C2015,B2007,Rahmani09} of the network dynamical states, for both linear and nonlinear network dynamics.
Perhaps one central problem in networked systems is the ability to control the system dynamics with control inputs scattered amongst a subset of agents (nodes), leading to a natural network controllability problem. Indeed classical control theories have established celebrated results on conditions for controllability of general dynamical systems. In the networked system era, graphical conditions that link notions of {\em connectivity} for the network topology to the controllability of the system are desired. Sparked by such a vision, a series of important understandings on graph-theoretic controllability were established for networked systems with linear dynamics \cite{Rahmani09,ji2009,Parlangeli12,Gharesifard17,Chen15}. Recently such developments have been pushed further to structural controllability \cite{Morse2019,Trentelman2020,Trentelman2021}, where instead of focusing on a specific system setup, controllability is defined as a structural property for generic configurations of the system parameters. The seminal work of Lin established that structural controllability is fully determined by the system {\em zero-pattern}, which specifies the locations of the potentially non-zero entries in the parameter space. It turns out that the structural controllability of linear networked systems can also be established from a graph-theoretic point of view, marking significant progress in the study of network controllability \cite{Morse2019,Trentelman2020,Trentelman2021}.
Controllability analysis from graph theory is a challenging question for multi-agent systems with general nonlinear dynamics. For systems with bilinear dynamics, however, graph-theoretic insights have been shown to be promising towards understanding controllability conditions \cite{structural,li2017}. In bilinear systems, the dynamical evolution of the system state is governed by the product of the system state and the control inputs. Therefore, bilinear systems are nonlinear systems with special geometric constraints and the bilinear property: the system state evolves in a Lie group, and the vector field is a bilinear function taking values in the corresponding Lie algebra with respect to the state and the input \cite{Elliott2009}. Despite their simplicity, bilinear systems have a wide range of applications in engineering, economics, and even quantum systems \cite{Khapalov1996,altafini2002,albertini2003,dir08,alb02,qi2020}; the study of controllability for bilinear systems over Lie groups led to the discovery of the Lie algebra rank condition \cite{Elliott1971,Brockett1973,Aoki1975,H1972,J1975,W1975} as an important chapter in nonlinear system theory.
The Lie algebra rank condition has been shown to be related to the graphical perspective as well. In \cite{structural}, a framework for the structural controllability of bilinear control systems was proposed, where it was shown that the connectivity of the underlying interaction graph determines the structural controllability of several classes of bilinear control systems over matrix groups. In \cite{li2017}, a graphical notion of permutation cycles was introduced to bilinear systems for the characterization of controllability over the special orthogonal group. Although the result in \cite{structural} considered a special class of drifted systems, the results derived in \cite{structural,li2017} mostly focused on driftless systems. Later in \cite{wang2020,arxiv}, controllability and accessibility conditions for bilinear systems were extended to drifted dynamics, where the drift dynamics and controlled dynamics define two interaction graphs, respectively, and their joint connectivity was shown to be critical for ensuring controllability or accessibility of systems over different matrix Lie groups. It is worth emphasizing that for linear networked systems, the system graphs are motivated from agent interconnections often with a physical implication \cite{Rahmani09,ji2009,Parlangeli12,Gharesifard17,Chen15}, while for bilinear systems the system graphs are artificially introduced in order to facilitate a graphical analysis.
In this paper, we study graph-theoretic conditions for the structural controllability and accessibility of drifted bilinear systems over Lie groups. For a drifted bilinear system over the special orthogonal group, the general linear group, or the special unitary group, zero patterns are introduced for the drift dynamics and the controlled dynamics, respectively. The zero pattern associated with the drift is assumed to be rigid, as a linear combination with non-zero coefficients of several base elements in a finite subset of the corresponding Lie algebra. As a result, the parameters of the drift dynamics are possibly dependent, similar to the setup in \cite{Morse2019} for linear networked systems. The zero pattern associated with the controlled terms is assumed to be free, where coefficients might take zero values for the linear combination of the base elements. First of all, for systems over the special orthogonal group and the general linear group, two (undirected or directed) graphs are introduced, respectively, corresponding to the drift zero pattern and the controlled zero pattern; structural controllability/accessibility conditions are derived based on connectivity and sizes of connected components. Next, for systems over the special unitary group, we introduce two edge-colored graphs corresponding to the zero patterns of the drift and controlled dynamics; and then conditions based on connectivity and the number of edges of a particular color are established for the structural controllability of the system.
Along with the theoretical results, various examples are presented to illustrate the intuition behind them. The technical proofs for all statements are given in the Appendix. The remainder of the paper is organized as follows. In Section \ref{sec:pre}, we present some preliminary material in graph theory. In Section \ref{sec:problem}, we define the problem of interest. In Section \ref{sec:results}, the main results of the work are presented, followed by some concluding remarks in Section \ref{sec:conc}.
\section{Graph Theory Preliminaries}\label{sec:pre} \subsection{Undirected Graph} An undirected graph $\mathrm {G}=(\mathrm {V}, \mathrm {E})$ consists of a finite set $\mathrm{V}$ of nodes and an edge set $\mathrm {E}$, where $\mathrm {E}$ is a set of unordered pairs of elements in ${\rm V}$. If there is an edge $\{v_i,v_j\}\in\mathrm {E}$, we say that $v_i$ and $v_j$ are {\it adjacent} or {\it neighbours}. The number of neighbours of node $v$ is called the {\it degree} of $v$, denoted by ${\rm deg}(v)$. A graph $\mathrm{G}$ is a {\it bi-graph} if there is a partition of the node set into $\mathrm{V}=\mathrm{V}_{1}\bigcup \mathrm{V}_{2}$ with $\mathrm{V}_{1}$ and $\mathrm{V}_{2}$ being nonempty and mutually disjoint, where all edges are between $\mathrm{V}_{1}$ and $\mathrm{V}_{2}$.
A {\it path} between two nodes $v_1$ and $v_k$ in $\mathrm{G}$ is a sequence of distinct nodes $v_1v_2\dots v_{k}$ such that for any $m=1,\dots,k-1$, there is an edge between $v_m$ and $v_{m+1}$. A pair of distinct nodes $v_i$ and $v_j$
is said to be {\it reachable} from each other if there is a path between them. A node is always assumed to be reachable from itself. We call a graph $\mathrm{G}$ {\it connected} if every pair of distinct nodes in $\mathrm{V}$ is reachable from each other. The subgraph of $\mathrm{G}$ derived from the node set $\mathrm{V}^\ast \subseteq \mathrm{V}$, denoted by $\mathrm{G}|_{\mathrm{V}^\ast}$,
is the graph $(\mathrm{V}^\ast, \mathrm{E}^\ast)$, where $\{v_i,v_j\}\in \mathrm{E}^\ast$ if and only if $\{v_i,v_j\}\in \mathrm{E}$ for $v_i,v_j\in \mathrm{V}^\ast$. A {\it connected component} (or just component) of $\mathrm {G}$ is a connected subgraph induced by some $\mathrm{V}^\ast \subseteq \mathrm{V}$ from which no node in $\mathrm {V}\setminus \mathrm{V}^\ast$ is reachable. Let $\mathrm{G}_1=(\mathrm {V}_1, \mathrm {E}_1)$, $\mathrm{G}_2=(\mathrm {V}_2, \mathrm {E}_2)$. The union graph $\mathrm{G}_1\mcup\mathrm{G}_2$ is defined by $\mathrm{G}_1\mcup\mathrm{G}_2=(\mathrm{V}_1\mcup\mathrm{V}_2,\mathrm{E}_1\mcup\mathrm{E}_2)$.
\subsection{Directed Graph} A {\it directed} graph (digraph) $\mathcal {G}=(\mathrm {V}, \mathcal {E})$ consists of a finite set $\mathrm {V}$ of nodes and an arc set $\mathcal {E}\subseteq \mathrm{V}\times\mathrm{V}$, where $(v_i,v_j)\in\mathcal {E}$ denotes an {\it arc} from node $v_i\in \mathrm{V}$ to node $v_j\in\mathrm{V}$. For $(v_i,v_j)\in\mathcal {E}$, we say that $v_i$ is an {\it in-neighbor} of $v_j$ and $v_j$ is an {\it out-neighbor} of $v_i$. The number of in-neighbors and out-neighbors of $v$ is called its {\it in-degree} and {\it out-degree}, denoted by ${\rm deg}^{+}(v)$ and ${\rm deg}^{-}(v)$, respectively.
A {\it self-loop} in a digraph is an arc starting from and pointing to the same node. A digraph $\mathcal{G}$ is {\it simple} if it has no self-loops. $\mathcal{G}$ is {\it simple complete} if $\mathcal {E}=\mathrm{V}\times\mathrm{V}\setminus \{(v_i,v_i): v_i\in \mathrm{V}\}$. The digraph obtained by removing the self-loops of $\mathcal{G}$ is called the simple digraph corresponding to $\mathcal{G}$. A {\it directed path} from node $v_1\in \mathrm{V}$ to $v_k\in \mathrm{V}$ is a sequence of distinct nodes $v_1v_2\dots v_{k}$ such that for any $m=1,\dots,k-1$, $(v_m, v_{m+1})$ is a directed arc in $\mathcal{E}$.
We say that node $v_j$ is {\it reachable} from node $v_i$ if there is a directed path from $v_i$ to $v_j$. A digraph $\mathcal{G}$ is {\it strongly connected} if every two nodes are mutually reachable. A {\it weakly connected component} of a digraph $\mathcal{G}$ is a component of $\mathcal{G}$ when the directions of links are ignored. Let $\mathcal{G}_1=(\mathrm {V}_1, \mathcal {E}_1)$, $\mathcal{G}_2=(\mathrm {V}_2, \mathcal {E}_2)$. The union graph $\mathcal{G}_1\mcup\mathcal{G}_2$ is defined by $\mathcal{G}_1\mcup\mathcal{G}_2=(\mathrm{V}_1\mcup\mathrm{V}_2,\mathcal{E}_1\mcup\mathcal{E}_2)$. \subsection{Edge-colored Multigraph} Let $\textbf{C}=\{1,2,\dots,k\}$ be a set with cardinality $k$, whose elements are called colors. An {\it edge-colored multigraph} $\mathscr{G}=(\mathrm {V}, \mathscr {E})$ consists of a finite set $\mathrm{V}$ of nodes and an edge set $\mathscr {E}$, where an element $\mathbbm{e}=\big\{{i,j};c\big\}\in\mathscr {E}$ denotes an edge with color $c\in \textbf{C}$ between nodes $i\in{\rm V}$ and $j\in{\rm V}$ \cite{Groos2006}. Each edge has a set of one or two nodes associated to it, which are called its endpoints. A {\it self-loop} $\{i,i;c\}\in\mathscr {E}$ is an edge that joins a single endpoint to itself. A {\it multi-edge} is a collection of two or more edges having identical endpoints. The edges in a multi-edge are distinguished by different colors. We call an edge-colored multigraph {\it simple} if it has neither self-loops nor multi-edges. A {\it complete} edge-colored multigraph is a simple edge-colored multigraph such that every pair of nodes is joined by an edge. We say that an edge-colored multigraph $\mathscr{G}$ is {\it empty} if $\mathscr{E}$ is empty.
In an edge-colored multigraph $\mathscr{G}$, a {\it walk} $W=\langle v_0,\mathbbm{e}_1,v_1,\mathbbm{e}_2,\dots,v_{k-1},\mathbbm{e}_k,v_k\rangle$ from node $v_0$ to node $v_k$ is an alternating sequence of nodes and edges, such that the endpoints of $ \mathbbm{e}_i$ are equal to $\{v_{i-1},v_i\}$, for $i = 1,\dots,k$. A {\it closed walk} (or cycle) is a walk that begins and ends at the same node. Node $v_j$ is {\it reachable} from node $v_i$ if there is a walk from $v_i$ to $v_j$. An edge-colored multigraph is {\it connected} if for every pair of nodes $v_i $ and $v_j$, there is a walk from $v_i$ to $v_j$. A {\it subgraph} of $\mathscr{G}$ is an edge-colored multigraph $\mathscr{H}$ whose nodes and edges are all in $\mathscr{G}$. A maximal connected subgraph of $\mathscr{G}$ is called a {\it connected component} or simply a component of $\mathscr{G}$. Let $\mathscr{G}_1=(\mathrm {V}_1, \mathscr {E}_1)$, $\mathscr{G}_2=(\mathrm {V}_2, \mathscr {E}_2)$. The union graph $\mathscr{G}_1\mcup\mathscr{G}_2$ is defined by $\mathscr{G}_1\mcup\mathscr{G}_2=(\mathrm{V}_1\mcup\mathrm{V}_2,\mathscr{E}_1\mcup\mathscr{E}_2)$.
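For example, if nodes $i$ and $j$ are joined by an edge of color $1$ and an edge of color $2$, these two edges form a multi-edge, and $\langle i,\{i,j;1\},j,\{i,j;2\},i\rangle$ is a closed walk (cycle) of length two.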
\section{Problem Definition}\label{sec:problem} \subsection{Bilinear Control Systems over Lie Groups} Let $\mathbf{G}$ be a connected Lie group and $\mathfrak{g}$ be its corresponding Lie algebra. We consider the following bilinear control system over $\mathbf{G}$: \begin{align}\label{bilinear}
\dot{X}(t)= \mathsf{B}_0 X(t)+\Big(\sum_{i=1}^m u_i(t) \mathsf{B}_i\Big) X(t), \ \ X(0)=X_{0}, \end{align} where $X(t)\in \mathbf{G}, \mathsf{B}_i\in \mathfrak{g}$ for $i=0,\dots,m$, and $u_i(t) \in \mathbb{R}$ are piecewise continuous control signals for $i=1,\dots,m$. For $T\geq 0$, the set $\mathcal{R}_{T}(X_{0})$ consists of the points in $\mathbf{G}$ that are {\it attainable} from $X_{0}$ at time $T$; i.e., all terminal points $X(T)$ of solutions of system \eqref{bilinear} originating at $X(0)=X_{0}$ under all possible $u_i(t), i=1,\dots,m,t\in[0,T]$. The {\it attainable set} $\mathcal{R}(X_{0})$ then is defined as the union of such sets $\mathcal{R}_{T}(X_{0})$ for all $T\geq0$; i.e., $\mathcal{R}(X_{0}):=\bigcup_{T\geq0}\mathcal{R}_{T}(X_{0})\subset\mathbf{G}.$ Let $I$ be the identity of $\mathbf{G}$.
\begin{definition}\label{def1}{\rm(\cite{dir08})}
The system \eqref{bilinear} is called accessible if $\mathcal{R}(I)$ has an interior point in $\mathbf{G}$; and controllable, if $\mathcal{R}(I)=\mathbf{G}$. \end{definition}
The system Lie algebra of $\eqref{bilinear}$ is given as $\{\mathsf{B}_0,\mathsf{B}_1,\dots,\mathsf{B}_m\}_{\rm LA}$, where $\{\mathsf{B}_0,\mathsf{B}_1,\dots,\mathsf{B}_m\}_{\rm LA}$ is the Lie subalgebra generated by $\mathsf{B}_0,\mathsf{B}_1,\dots,\mathsf{B}_m$. The algebraic criteria developed in \cite{H1972, J1975, dir08} can be used to verify the accessibility and controllability of the system $\eqref{bilinear}$ by exploiting the algebraic structure of the system Lie algebra. That is, the system $\eqref{bilinear}$ is accessible on the Lie group $\mathbf{G}$ if and only if the system Lie algebra satisfies $\{\mathsf{B}_0,\mathsf{B}_1,\dots,\mathsf{B}_m\}_{\rm LA}=\mathfrak{g}$; if $\mathsf{B}_0=0$ or the Lie group $\mathbf{G}$ is compact, then the system $\eqref{bilinear}$ is controllable on the Lie group $\mathbf{G}$ if and only if it is accessible on the Lie group $\mathbf{G}$.
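As a side note, the rank condition lends itself to a direct numerical check. The following short Python sketch (ours, purely illustrative; the function names and the tolerance are arbitrary choices, not part of the cited references) estimates $\dim \{\mathsf{B}_0,\mathsf{B}_1,\dots,\mathsf{B}_m\}_{\rm LA}$ by repeatedly forming matrix commutators and tracking the dimension of the real span of the matrices obtained so far.
\begin{verbatim}
import numpy as np

def _vec(M):
    # flatten a (possibly complex) matrix into one real vector
    M = np.asarray(M)
    return np.concatenate([M.real.ravel(), M.imag.ravel()])

def generated_dimension(generators, tol=1e-9):
    # dimension over R of the Lie algebra generated by the given square matrices
    basis_mats = []   # spanning matrices found so far
    basis_vecs = []   # an orthonormal basis (as real vectors) of their span

    def try_add(M):
        v = _vec(M)
        for b in basis_vecs:          # project away the part already in the span
            v = v - (v @ b) * b
        norm = np.linalg.norm(v)
        if norm <= tol:
            return False
        basis_mats.append(M)
        basis_vecs.append(v / norm)
        return True

    queue = [np.asarray(G, dtype=complex) for G in generators]
    while queue:
        M = queue.pop()
        if try_add(M):
            # bracket the newly added element with every element of the current basis
            queue.extend([M @ B - B @ M for B in basis_mats])
    return len(basis_mats)

# quick check: B_12 and B_13 generate so(3), which is 3-dimensional
B12 = np.array([[0, 1, 0], [-1, 0, 0], [0, 0, 0]])
B13 = np.array([[0, 0, 1], [0, 0, 0], [-1, 0, 0]])
print(generated_dimension([B12, B13]))   # expected output: 3
\end{verbatim}
In the same spirit, the graphical conditions derived below can be cross-checked on small instances by feeding the base matrices of a zero pattern to this routine.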
\subsection{Zero Patterns} Let us assume that $\mathfrak{g}$ is a finite dimensional vector space over $\mathbb{R}$. Let $\mathfrak{g}_{a}:=\{\textrm{g}_{a_1},\dots,\textrm{g}_{a_k}\}$ be a subset of $\mathfrak{g}$, i.e., $\textrm{g}_{a_s}\in \mathfrak{g}$ for all $s=1,\dots,k$ with $k\geq 1$. We introduce the following definition. \begin{definition}\label{def2} (i) The free zero pattern generated by $\mathfrak{g}_{a}$, denoted by $\Sigma_{\rm f}(\mathfrak{g}_{a})$, is defined as the generated linear subspace of $\mathfrak{g}_{a}$, i.e., $$ \Sigma_{\rm f}(\mathfrak{g}_{a}):=\Big\{ \sum_{s=1}^kl_s{\rm g}_{a_s}: l_s \in \mathbb{R},s=1,\dots,k\Big\}. $$ (ii) The rigid zero pattern generated by $\mathfrak{g}_{a}$, denoted by $\Sigma_{\rm r}(\mathfrak{g}_{a})$, is defined as the set of linear combinations with nonzero coefficients of the elements in $\mathfrak{g}_{a}$, i.e., $$ \Sigma_{\rm r}(\mathfrak{g}_{a}):=\Big\{ \sum_{s=1}^kl_s{\rm g}_{a_s}: l_s\neq 0 \in \mathbb{R},s=1,\dots,k\Big\}. $$ \end{definition}
\noindent{\bf Example 1.} The general linear group, ${\rm GL}(n)$, is the Lie group formed by all $\mathbb{R}^{n\times n}$ invertible matrices. It has two components separated by the set of singular matrices. The connected component of ${\rm GL}(n)$ containing $I_{n}$ is a Lie subgroup of ${\rm GL}(n)$, denoted by ${\rm GL^{+}}(n)$. The Lie algebra associated with Lie group ${\rm GL^{+}}(n)$ is $\mathfrak{gl}(n)$, the set of all $n\times n$ real matrices. Let $E_{ij}\in\mathbb{R}^{n\times n}$ be the matrix with $(i,j)$-th entry being $1$ and others being $0$. Let $\mathfrak{g}$ be $\mathfrak{gl}(n)$.
\begin{figure}
\caption{The form of the matrix in $\Sigma_{\rm r}(\mathfrak{g}_{a})$ (left) and the form of the matrix in $\Sigma_{\rm r}(\mathfrak{g}_{b})$ (right). The dashed lines indicate linear relations. }
\label{fig1}
\end{figure}
(i) Let $\mathfrak{g}_{a}:=\{\textrm{g}_{a_1}, \textrm{g}_{a_2}, \textrm{g}_{a_3}\}$ with $\textrm{g}_{a_1}=E_{12}$, $\textrm{g}_{a_2}=E_{13}$, and $\textrm{g}_{a_3}=E_{31}$. Then an element $G\in \Sigma_{\rm f}(\mathfrak{g}_{a})$ may take the value $E_{12}$ or $E_{13}$; an element $G\in \Sigma_{\rm r}(\mathfrak{g}_{a})$ can be neither $E_{12}$ nor $E_{13}$.
(ii) Let $\mathfrak{g}_{b}:=\{\textrm{g}_{b_1}\}$ with $\textrm{g}_{b_1}=E_{12}+2E_{13}+3E_{31}$. Then an element $G\in \Sigma_{\rm f}(\mathfrak{g}_{b})$ or $G\in \Sigma_{\rm r}(\mathfrak{g}_{b})$ always satisfies $G_{13}=2G_{12}$ and $G_{31}=3G_{12}$; i.e., the zero pattern specified by $\mathfrak{g}_{b}$ imposes linear equality constraints on the entries of the matrices therein.
$\square$
\subsection{Structural Controllability}
Let $\mathfrak{g}_{\alpha}:=\{\textrm{g}_{\alpha_1},\dots,\textrm{g}_{\alpha_d}\}$ and $\mathfrak{g}_{\beta}:=\{\textrm{g}_{\beta_1},\dots,\textrm{g}_{\beta_e}\}$ be two subsets of the Lie algebra $\mathfrak{g}$. We introduce the following definition on the structural controllability and accessibility of the system \eqref{bilinear}.
\begin{definition}\label{def3} The system \eqref{bilinear} is called structurally controllable (accessible) on the Lie group $\mathbf{G}$ with respect to the pair of zero patterns $\Sigma_{\rm zero}:=(\Sigma_{\rm r}(\mathfrak{g}_{\alpha}),\Sigma_{\rm f}(\mathfrak{g}_{\beta}))$ if there exist an integer $m\geq 1$, $\mathsf{B}_0\in \Sigma_{\rm r}(\mathfrak{g}_{\alpha})$ and $\mathsf{B}_i\in \Sigma_{\rm f}(\mathfrak{g}_{\beta}), i=1,\dots,m$, such that the system \eqref{bilinear} is controllable (accessible) on the Lie group $\mathbf{G}$. \end{definition}
\section{Main Results}\label{sec:results} \subsection{Structural Controllability over $\SO(n)$ } The special orthogonal group, $\SO(n)$, is the Lie group formed by all $\mathbb{R}^{n\times n}$ orthogonal matrices whose determinant is equal to one. The Lie algebra of $\SO(n)$, $\so(n)$, consists of $n\times n$ real skew-symmetric matrices, which has the dimension $n(n-1)/2$. Define $B_{ij}=E_{ij}-E_{ji}$. Then the set $\mathpzc{B}=\{B_{ij}: 1\leq i < j \leq n\}$ forms a standard basis of $\so(n)$. Let $A_1,\dots,A_d$ be $d$ matrices in $\so(n)$. We introduce the pair of zero patterns $\Sigma_{\rm zero}:=\big(\Sigma_{\rm r}(\so(n)_{\alpha}),\Sigma_{\rm f}(\so(n)_{\beta})\big)$ by $$ \so(n)_{\alpha}:=\{A_{1},\dots,A_{d}\}, \quad \so(n)_{\beta}:=\{B_{i_1j_1},\dots,B_{i_ej_e}\}, $$ where $B_{i_kj_k}\in\mathpzc{B}$ for $k=1,\dots,e$.
By the compactness of the Lie group $\SO(n)$, the system \eqref{bilinear} is controllable if and only if it is accessible. Let $\mathrm{V}=\{1,2,\dots,n\}$. We first introduce the following graph representations of the zero patterns. \begin{definition}\label{def4}
$(i)$ The drift graph associated with $\Sigma_{\rm r}(\so(n)_{\alpha})$, denoted by ${\mathrm{G}}^{{}^\alpha}_{{\rm drift}}$, is defined as the undirected graph $\mathrm{G}_{\rm drift}^{{}^\alpha}=\big(\mathrm{V},\mathrm{E}_{\rm drift}^{{}^\alpha}\big)$, where $\{i,j\}\in\mathrm{E}_{\rm drift}^{{}^\alpha}$ if and only if there exists $s=1,\dots,d$ such that $[A_s]_{ij}=-[A_s]_{ji}\neq 0$. \\
$(ii)$ The controlled graph associated with $\Sigma_{\rm f}(\so(n)_{\beta})$, denoted by $\mathrm{G}_{\rm contr}^{{}^\beta}$, is defined as the undirected graph $\mathrm{G}_{\rm contr}^{{}^\beta}=\big(\mathrm{V},\mathrm{E}_{\rm contr}^{{}^\beta}\big)$ with $\mathrm{E}_{\rm contr}^{{}^\beta}=\big\{\{i_1,j_1\},\dots, \{i_e,j_e\}\big\}$. \end{definition}
A necessary condition for structural controllability of system \eqref{bilinear} is that the union graph $\mathrm{G}_{\rm drift}^{{}^\alpha} \mcup \mathrm{G}_{\rm contr}^{{}^\beta}$ must be connected \cite{wang2020}. However, this condition is not sufficient. The following theorem gives a sufficient condition for system \eqref{bilinear} to be structurally controllable on the Lie group $\SO(n)$ with respect to the pair of zero patterns $\Sigma_{\rm zero}=\big(\Sigma_{\rm r}(\so(n)_{\alpha}),\Sigma_{\rm f}(\so(n)_{\beta})\big)$.
\begin{theorem}\label{thm1} The system \eqref{bilinear} is structurally controllable on the Lie group $\SO(n)$ with respect to the pair of zero patterns $\Sigma_{\rm zero}=\big(\Sigma_{\rm r}(\so(n)_{\alpha}),\Sigma_{\rm f}(\so(n)_{\beta})\big)$ if each connected component of $\mathrm{G}_{\rm contr}^{{}^\beta}$ contains at least three nodes and the union graph $\mathrm{G}_{\rm drift}^{{}^\alpha} \mcup \mathrm{G}_{\rm contr}^{{}^\beta}$ is connected. \end{theorem}
\noindent{\bf Example 2.} Consider the system \eqref{bilinear} evolving on $\SO(6)$. Let $\so(6)_\alpha=\{A_1,A_2,A_3\}$ with $A_1=2B_{14}+B_{25}$, $A_2=B_{12}-B_{15}$, and $A_3=3B_{15}+2B_{25}$. Let $\so(6)_\beta=\{B_{12},B_{23},B_{13},B_{45},B_{56},B_{46}\}$. The drift graph associated with $\Sigma_{\rm r}(\so(6)_{\alpha})$ and the controlled graph associated with $\Sigma_{\rm f}(\so(6)_{\beta})$ are shown, respectively, in Figure \ref{fig2}.
\begin{figure}
\centering
\caption{The drift graph $\mathrm{G}_{\rm drift}^{{}^\alpha}$ associated with $\Sigma_{\rm r}(\so(6)_{\alpha})$ and the controlled graph $\mathrm{G}_{\rm contr}^{{}^\beta}$ associated with $\Sigma_{\rm f}(\so(6)_{\beta})$ in Example 2.}
\label{fig2}
\end{figure} It is clear that each connected component of $\mathrm{G}_{\rm contr}^{{}^\beta}$ contains at least three nodes, and the union graph $\mathrm{G}_{\rm drift}^{{}^\alpha} \mcup \mathrm{G}_{\rm contr}^{{}^\beta}$ is connected. As a result, the graphical condition of Theorem \ref{thm1} has been met. Choose $A=A_1+3A_2+A_3\in\Sigma_{\rm r}(\so(6)_{\alpha})$, and by direct computation one can verify \begin{equation}\label{ex.2} \{A,B_{12},B_{23},B_{45},B_{56}\}_{\rm LA}=\so(6). \end{equation} This implies that the system \eqref{bilinear} is structurally controllable on the Lie group $\SO(6)$ with respect to the pair of zero patterns $\Sigma_{\rm zero}=\big(\Sigma_{\rm r}(\so(6)_{\alpha}),\Sigma_{\rm f}(\so(6)_{\beta})\big)$, providing a validation of Theorem \ref{thm1}.
$\square$
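As an aside, the identity \eqref{ex.2} can also be checked numerically with the sketch given after the rank condition in Section~\ref{sec:problem}; the snippet below (illustrative only) merely restates the matrices of Example 2 and, if \eqref{ex.2} holds, reports the dimension $\dim\so(6)=15$.
\begin{verbatim}
import numpy as np

def E(i, j, n=6):
    M = np.zeros((n, n))
    M[i - 1, j - 1] = 1.0
    return M

def B(i, j):
    return E(i, j) - E(j, i)

A1 = 2 * B(1, 4) + B(2, 5)
A2 = B(1, 2) - B(1, 5)
A3 = 3 * B(1, 5) + 2 * B(2, 5)
A = A1 + 3 * A2 + A3                 # the drift element chosen in Example 2
gens = [A, B(1, 2), B(2, 3), B(4, 5), B(5, 6)]
# generated_dimension as defined in the earlier sketch
print(generated_dimension(gens))     # expected: 15 = dim so(6)
\end{verbatim}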
\subsection{Structural Accessibility over ${\rm GL}^{+}(n)$ } Recall that the Lie algebra of ${\rm GL^{+}}(n)$ is $\gl(n)$, the set of all $n\times n$ real matrices. The set $\mathpzc{E}=\{E_{ij}: 1\leq i,j\leq n\}$ forms a basis of $\gl(n)$. Let $A_1,\dots,A_d$ be $d$ matrices in $\gl(n)$. A pair of zero patterns $\Sigma_{\rm zero}:=\big(\Sigma_{\rm r}(\gl(n)_{\alpha}),\Sigma_{\rm f}(\gl(n)_{\beta})\big)$ is given by $$ \gl(n)_{\alpha}:=\{A_{1},\dots,A_{d}\}, \quad \gl(n)_{\beta}:=\{E_{i_1j_1},\dots,E_{i_ej_e}\}. $$ We similarly introduce the following graph representations of these zero patterns, which now have to be directed graphs. \begin{definition}\label{def5}
$(i)$ The drift graph associated with $\Sigma_{\rm r}(\gl(n)_{\alpha})$, denoted by $\mathcal{G}_{\rm drift}^{{}^\alpha}$, is defined as the directed graph $\mathcal{G}_{\rm drift}^{{}^\alpha}=\big(\mathrm{V},\mathcal{E}_{\rm drift}^{{}^\alpha}\big)$, where $(i,j)\in \mathcal{E}_{\rm drift}^{{}^\alpha}$ if and only if there exists $s=1,\dots,d$ such that $[A_s]_{ij}\neq 0$. \\
$(ii)$ The controlled graph associated with $\Sigma_{\rm f}(\gl(n)_{\beta})$, denoted by $\mathcal{G}_{\rm contr}^{{}^\beta}$, is defined as the directed graph $\mathcal{G}_{\rm contr}^{{}^\beta}=\big(\mathrm{V},\mathcal{E}_{\rm contr}^{{}^\beta}\big)$ with $\mathcal{E}_{\rm contr}^{{}^\beta}=\big\{(i_1,j_1),\dots, (i_e,j_e)\big\}$. \end{definition} Note that the Lie group ${\rm GL^{+}}(n)$ is connected but not compact. A necessary condition for the system \eqref{bilinear} to be controllable is that it is accessible. In the presence of the drift term $\mathsf{B}_0$, accessibility (plus the connectedness of ${\rm GL^{+}}(n)$) is not sufficient for controllability. In this case we give some sufficient conditions and a necessary condition for the system \eqref{bilinear} to be structurally accessible on the Lie group ${\rm GL^{+}}(n)$ with respect to the pair of zero patterns $\Sigma_{\rm zero}=\big(\Sigma_{\rm r}(\gl(n)_{\alpha}),\Sigma_{\rm f}(\gl(n)_{\beta})\big)$.
When the union graph $\mathcal{G}_{\rm drift}^{{}^\alpha} \mcup \mathcal{G}_{\rm contr}^{{}^\beta}$ is not strongly connected, the system \eqref{bilinear} cannot be structurally accessible on the Lie group ${\rm GL^{+}}(n)$ with respect to the pair of zero patterns $\Sigma_{\rm zero}=\big(\Sigma_{\rm r}(\gl(n)_{\alpha}),\Sigma_{\rm f}(\gl(n)_{\beta})\big)$ \cite{wang2020}. The following theorem establishes a sufficient condition under which the system \eqref{bilinear} is structurally accessible.
\begin{theorem}\label{thm2.1}
The system \eqref{bilinear} is structurally accessible on the Lie group ${\rm GL^{+}}(n)$ with respect to the pair of zero patterns $\Sigma_{\rm zero}=\big(\Sigma_{\rm r}(\gl(n)_{\alpha}),\Sigma_{\rm f}(\gl(n)_{\beta})\big)$ if the following conditions hold:
$(i)$ Each weakly connected component of $\mathcal{G}_{\rm contr}^{{}^\beta}$ is strongly connected with at least two nodes;
$(ii)$ The union graph $\mathcal{G}_{\rm drift}^{{}^\alpha} \mcup \mathcal{G}_{\rm contr}^{{}^\beta}$ is strongly connected;
$(iii)$ $\mathcal{G}_{\rm contr}^{{}^\beta}$ has at least one self-loop.
\end{theorem}
In particular, when $\gl(n)_{\alpha}$ is a subset of $\mathpzc{E}$, the condition that $\mathcal{G}_{\rm contr}^{{}^\beta}$ has at least one self-loop can be relaxed.
\begin{theorem}\label{thm2.2}
Suppose $\gl(n)_{\alpha}\subseteq\mathpzc{E}$. Then the system \eqref{bilinear} is structurally accessible on the Lie group ${\rm GL^{+}}(n)$ with respect to the pair of zero patterns $\Sigma_{\rm zero}=\big(\Sigma_{\rm r}(\gl(n)_{\alpha}),\Sigma_{\rm f}(\gl(n)_{\beta})\big)$ if each weakly connected component of $\mathcal{G}_{\rm contr}^{{}^\beta}$ is strongly connected with at least two nodes, and the union graph $\mathcal{G}_{\rm drift}^{{}^\alpha}\mcup \mathcal{G}_{\rm contr}^{{}^\beta}$ is a strongly connected digraph with at least one self-loop. \end{theorem}
\noindent{\bf Example 3.} Consider the system \eqref{bilinear} evolving on ${\rm GL^{+}}(4)$. Let $\gl(4)_\alpha=\{A_1,A_2,A_3\}$ with $A_1=3E_{13}+E_{42}$, $A_2=E_{12}-E_{13}$, and $A_3=E_{33}-E_{44}+2E_{31}$. Let $\gl(4)_\beta=\{E_{11},E_{12},E_{21},E_{34},E_{43}\}$. The drift graph associated with $\Sigma_{\rm r}(\gl(4)_{\alpha})$ and the controlled graph associated with $\Sigma_{\rm f}(\gl(4)_{\beta})$ are shown, respectively, in Figure \ref{fig3}. \begin{figure}\label{fig3}
\caption{The drift graph $\mathcal{G}_{\rm drift}^{{}^\alpha}$ and the controlled graph $\mathcal{G}_{\rm contr}^{{}^\beta}$ in Example 3.}
\end{figure}
(i) It is easily seen that each weakly connected component of $\mathcal{G}_{\rm contr}^{{}^\beta}$ is strongly connected with two nodes. In addition, $\mathcal{G}_{\rm contr}^{{}^\beta}$ has one self-loop and the union graph $\mathcal{G}_{\rm drift}^{{}^\alpha} \mcup \mathcal{G}_{\rm contr}^{{}^\beta}$ is strongly connected. Hence, the graphical condition of Theorem \ref{thm2.1} has been met. Choose $A=A_1+2A_2+A_3\in\Sigma_{\rm r}(\gl(4)_{\alpha})$. By direct computation one can verify $\{A,E_{11},E_{12},E_{21},E_{34},E_{43}\}_{\rm LA}=\gl(4)$. Thus, the system \eqref{bilinear} is structurally accessible on the Lie group ${\rm GL^{+}}(4)$ with respect to the pair of zero patterns $\Sigma_{\rm zero}=\big(\Sigma_{\rm r}(\gl(4)_{\alpha}),\Sigma_{\rm f}(\gl(4)_{\beta})\big)$. This provides a validation of Theorem \ref{thm2.1}.
(ii) Let $\gl(4)_\beta=\{E_{12},E_{21},E_{34},E_{43}\}$. Then $\mathcal{G}_{\rm contr}^{{}^\beta}$ has no self-loops. Note that $\mathcal{G}_{\rm drift}^{{}^\alpha}$ has two self-loops. A direct verification shows that $\{A,E_{12},E_{21},E_{34},E_{43}\}_{\rm LA}=\mathfrak{sl}(4)$ for any $A\in\Sigma_{\rm r}(\gl(4)_{\alpha})$. This is to say, for any integer $m\geq 1$, and for any $\mathsf{B}_0\in \Sigma_{\rm r}(\gl(4)_{\alpha})$, $\mathsf{B}_i\in \Sigma_{\rm f}(\gl(4)_{\beta})$, we have $\{\mathsf{B}_0,\mathsf{B}_1,\dots,\mathsf{B}_m\}_{\rm LA}\neq \gl(4)$. Therefore, the system \eqref{bilinear} is not structurally accessible on the Lie group ${\rm GL^{+}}(4)$ with respect to the pair of zero patterns $\Sigma_{\rm zero}=\big(\Sigma_{\rm r}(\gl(4)_{\alpha}),\Sigma_{\rm f}(\gl(4)_{\beta})\big)$. Generally speaking, the self-loop of $\mathcal{G}_{\rm drift}^{{}^\alpha}$ cannot replace the role of the self-loop of $\mathcal{G}_{\rm contr}^{{}^\beta}$, which is why we require the set $\gl(n)_{\alpha}$ to be a subset of $\mathpzc{E}$ in Theorem \ref{thm2.2}.
$\square$
\noindent{\bf Example 4.} Consider the system \eqref{bilinear} evolving on ${\rm GL^{+}}(4)$. Let $\gl(4)_\alpha=\{E_{12},E_{13},E_{31},E_{33},E_{42}\}$, $\gl(4)_\beta=\{E_{12},E_{21},E_{34},E_{43}\}$. The drift graph associated with $\Sigma_{\rm r}(\gl(4)_{\alpha})$ and the controlled graph associated with $\Sigma_{\rm f}(\gl(4)_{\beta})$ are shown, respectively, in Figure \ref{fig4}. \begin{figure}\label{fig4}
\end{figure} Each weakly connected component of $\mathcal{G}_{\rm contr}^{{}^\beta}$ is strongly connected with two nodes. The union graph $\mathcal{G}_{\rm drift}^{{}^\alpha} \mcup \mathcal{G}_{\rm contr}^{{}^\beta}$ continues to be strongly connected. Moreover, $\mathcal{G}_{\rm drift}^{{}^\alpha}$ has a self-loop. Therefore, the graphical condition of Theorem \ref{thm2.2} is satisfied. For any $A\in\Sigma_{\rm r}(\gl(4)_{\alpha})$, we have $A=l_1E_{12}+l_2E_{13}+l_3E_{31}+l_4E_{33}+l_5E_{42}$ with $l_s\neq0\in\mathbb{R}$ for $s=1,\dots,5$. Write $\widetilde{A}=A-l_1E_{12}$. Then $A':=\widetilde{A}-[[\widetilde{A},E_{21}],E_{12}]=l_3E_{31}+l_4E_{33}$, and $A'-[[A',E_{12}],E_{21}]=l_4E_{33}$. We thus get $E_{33}\in\{A,E_{12},E_{21},E_{34},E_{43}\}_{\rm LA}$. This allows us to further obtain $\{A,E_{12},E_{21},E_{34},E_{43}\}_{\rm LA}=\gl(4)$. As a result, the system \eqref{bilinear} is structurally accessible on the Lie group ${\rm GL^{+}}(4)$ with respect to the pair of zero patterns $\Sigma_{\rm zero}=\big(\Sigma_{\rm r}(\gl(4)_{\alpha}),\Sigma_{\rm f}(\gl(4)_{\beta})\big)$. This provides a validation of Theorem \ref{thm2.2}.
$\square$
\subsection{Structural Controllability over ${\rm SU}(n)$ } The special unitary group, ${\rm SU}(n)$, is the Lie group formed by all $\mathbb{C}^{n\times n}$ unitary matrices with determinant one. The Lie algebra of ${\rm SU}(n)$ is equal to $\su(n)$, the set of all $\mathbb{C}^{n\times n}$ skew-Hermitian matrices with trace zero. The Lie algebra $\su(n)$ is a finite-dimensional vector space over $\mathbb{R}$. Let $\sym(n)$ denote the vector space of all $n\times n$ real traceless symmetric matrices. The Cartan-like decomposition of $\su(n)$ is given by $\su(n)= \so(n)\oplus {\rm i}\sym(n)$, where ${\rm i}$ is the imaginary unit. It follows easily that the dimension of $\su(n)$ is equal to $n^2-1$. Let $C_{ij}={\rm i}(E_{ij}+E_{ji})$, $D_{ij}={\rm i}(E_{ii}-E_{jj})$. Define $\mathpzc{C}=\{C_{ij}: 1\leq i< j\leq n\}$, $\mathpzc{D} = \{D_{ij}: 1\leq i< j\leq n\}$. A maximal linearly independent subset of $\mathpzc{D}$, denoted by $\mathpzc{D}_L$, contains $n-1$ elements. Then $\mathpzc{B}\mcup\mathpzc{C}\mcup\mathpzc{D}_L$ forms a basis of $\su(n)$.
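For example, for $n=2$ this basis consists of $B_{12}=\left(\begin{smallmatrix}0&1\\-1&0\end{smallmatrix}\right)$, $C_{12}=\left(\begin{smallmatrix}0&{\rm i}\\{\rm i}&0\end{smallmatrix}\right)$ and $D_{12}=\left(\begin{smallmatrix}{\rm i}&0\\0&-{\rm i}\end{smallmatrix}\right)$, in agreement with $\dim\su(2)=2^2-1=3$.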
Let $A_1,\dots,A_d$ be $d$ matrices in $\su(n)$. Let ${\rm S}_1\subseteq \mathpzc{B}$, ${\rm S}_2\subseteq \mathpzc{C}$, ${\rm S}_3\subseteq \mathpzc{D}$ be three subsets of $\su(n)$. We introduce $$ \su(n)_{\alpha}:=\{A_{1},\dots,A_{d}\}, \quad \su(n)_{\beta}:={\rm S}_1\mcup{\rm S}_2\mcup{\rm S}_3, $$ so a pair of zero patterns $\Sigma_{\rm zero}:=\big(\Sigma_{\rm r}(\su(n)_{\alpha}),\Sigma_{\rm f}(\su(n)_{\beta})\big)$ is induced. Set $\textbf{C}=\{{\rm Blue, Red, Green}\}$. The graphs corresponding to the zero patterns $\Sigma_{\rm r}(\su(n)_{\alpha})$ and $\Sigma_{\rm f}(\su(n)_{\beta})$ are edge-colored multigraphs. \begin{definition}\label{def6}
$(i)$ The drift graph associated with $\Sigma_{\rm r}(\su(n)_{\alpha})$, denoted by $\mathscr{G}_{\rm drift}^{{}^\alpha}$, is defined as the edge-colored multigraph $\mathscr{G}_{\rm drift}^{{}^\alpha}=\big(\mathrm{V},\mathscr{E}_{\rm drift}^{{}^\alpha}\big)$ with $\mathscr{E}_{\rm drift}^{{}^\alpha}=\mathscr{E}_{\rm Blue}\mcup\mathscr{E}_{\rm Red}\mcup\mathscr{E}_{\rm Green}$, where $\{i,j;{\rm Blue}\}\in \mathscr{E}_{\rm Blue}$ if and only if there exists $s=1,\dots,d$ such that ${\rm Re}([A_s]_{ij})=-{\rm Re}([A_s]_{ji})\neq 0$; $\{i,j;{\rm Red}\}\in \mathscr{E}_{\rm Red}$ if and only if there exists $s=1,\dots,d$ such that ${\rm Im}([A_s]_{ij})={\rm Im}([A_s]_{ji})\neq 0$; and $\{i,i;{\rm Green}\}\in \mathscr{E}_{\rm Green}$ if and only if there exists $s=1,\dots,d$ such that $[A_s]_{ii}\neq 0$. \\
$(ii)$ The controlled graph associated with $\Sigma_{\rm f}(\su(n)_{\beta})$, denoted by $\mathscr{G}_{\rm contr}^{{}^\beta}$, is defined as the edge-colored multigraph $\mathscr{G}_{\rm contr}^{{}^\beta}=\big(\mathrm{V},\mathscr{E}_{\rm contr}^{{}^\beta}\big)$ with $\mathscr{E}_{\rm contr}^{{}^\beta}=\mathscr{E}_{{\rm S}_1}\mcup\mathscr{E}_{{\rm S}_2}\mcup\mathscr{E}_{{\rm S}_3}$, where $\mathscr{E}_{{\rm S}_1}=\big\{\{i,j;{\rm Blue}\}: B_{ij}\in{\rm S}_1\big\}, \mathscr{E}_{{\rm S}_2}=\big\{\{i,j;{\rm Red}\}: C_{ij}\in{\rm S}_2\big\}$, and $\mathscr{E}_{{\rm S}_3}=\big\{\{k,k;{\rm Green}\}: k=i,j, D_{ij}\in{\rm S}_3\big\}$. \end{definition}
Similar to ${\rm SO}(n)$, ${\rm SU}(n)$ is also connected and compact, so the controllability analysis of the system \eqref{bilinear} on ${\rm SU}(n)$ relies on the investigation of the equivalence between the system Lie algebra and the underlying Lie algebra $\su(n)$. In this section we shall look for necessary and sufficient conditions for the system \eqref{bilinear} to be structurally controllable on the Lie group ${\rm SU}(n)$ with respect to the pair of zero patterns $\Sigma_{\rm zero}=\big(\Sigma_{\rm r}(\su(n)_{\alpha}),\Sigma_{\rm f}(\su(n)_{\beta})\big)$.
Note that, for the graph $\mathscr{G}_{\rm contr}^{{}^\beta}$, there are two cases: (a) $\mathscr{G}_{\rm contr}^{{}^\beta}$ is connected; (b) $\mathscr{G}_{\rm contr}^{{}^\beta}$ is not connected. First of all, when $\mathscr{G}_{\rm contr}^{{}^\beta}$ is connected, we have the following theorem.
\begin{theorem}\label{thm4} Suppose $\mathscr{G}_{\rm contr}^{{}^\beta}$ is connected. Then the system \eqref{bilinear} is structurally controllable on the Lie group ${\rm SU}(n)$ with respect to the pair of zero patterns $\Sigma_{\rm zero}=\big(\Sigma_{\rm r}(\su(n)_{\alpha}),\Sigma_{\rm f}(\su(n)_{\beta})\big)$ if and only if the union graph $\mathscr{G}_{\rm drift}^{{}^\alpha}\mcup\mathscr{G}_{\rm contr}^{{}^\beta}$ has a self-loop or a cycle with an odd number of Red edges. \end{theorem}
It is worth pointing out that a multi-edge $\big\{\{i,j;{\rm Blue}\},\{i,j;{\rm Red}\}\big\}$ in $\mathscr{G}_{\rm drift}^{{}^\alpha}\mcup\mathscr{G}_{\rm contr}^{{}^\beta}$ can form a cycle with an odd number of Red edges. We now turn to the case when $\mathscr{G}_{\rm contr}^{{}^\beta}$ is not connected. An obvious necessary condition for structural controllability is that the union graph $\mathscr{G}_{\rm drift}^{{}^\alpha}\mcup\mathscr{G}_{\rm contr}^{{}^\beta}$ must be connected. This condition is however not sufficient.
\begin{theorem}\label{thm5} Suppose $\mathscr{G}_{\rm contr}^{{}^\beta}$ is not connected. Then the system \eqref{bilinear} is structurally controllable on the Lie group ${\rm SU}(n)$ with respect to the pair of zero patterns $\Sigma_{\rm zero}=\big(\Sigma_{\rm r}(\su(n)_{\alpha}),\Sigma_{\rm f}(\su(n)_{\beta})\big)$ if the following conditions hold:
$(i)$ Each connected component of $\mathscr{G}_{\rm contr}^{{}^\beta}$ contains at least three nodes;
$(ii)$ $\mathscr{G}_{\rm drift}^{{}^\alpha}$ has no multi-edges and the union graph $\mathscr{G}_{\rm drift}^{{}^\alpha}\mcup\mathscr{G}_{\rm contr}^{{}^\beta}$ is connected;
$(iii)$ $\mathscr{G}_{\rm drift}^{{}^\alpha}\mcup\mathscr{G}_{\rm contr}^{{}^\beta}$ has a self-loop or a cycle with an odd number of Red edges. \end{theorem}
\noindent{\bf Example 5.} Consider the system \eqref{bilinear} evolving on ${\rm SU}(5)$. Let $\su(5)_\alpha=\{A_{1},A_{2},A_{3}\}$ with $A_1=C_{12}-2B_{25}$, $A_2=2B_{12}+D_{15}+3C_{45}$, and $A_3=B_{34}-C_{34}$. Let $\su(5)_\beta=\{B_{12},C_{13},C_{34},B_{14},B_{15},C_{15},D_{24}\}$. The drift graph associated with $\Sigma_{\rm r}(\su(5)_{\alpha})$ and the controlled graph associated with $\Sigma_{\rm f}(\su(5)_{\beta})$ are shown, respectively, in Figure \ref{fig5}. \begin{figure}\label{fig5}
\caption{The drift graph $\mathscr{G}_{\rm drift}^{{}^\alpha}$ and the controlled graph $\mathscr{G}_{\rm contr}^{{}^\beta}$ in Example 5.}
\end{figure}
(i) It is evident that $\mathscr{G}_{\rm contr}^{{}^\beta}$ is connected. Note that $\mathscr{G}_{\rm contr}^{{}^\beta}$ has two self-loops. An easy computation shows that $\{B_{12},C_{13},B_{14},B_{15},D_{24}\}_{\rm LA}=\su(5)$. On the other hand, $\mathscr{G}_{\rm contr}^{{}^\beta}$ has a cycle containing an odd number of Red edges. This gives $\{B_{12},C_{13},B_{14},B_{15},C_{15}\}_{\rm LA}=\su(5)$. Either of these situations leads to structural controllability of the system \eqref{bilinear} on the Lie group ${\rm SU}(5)$ with respect to the pair of zero patterns $\Sigma_{\rm zero}=\big(\Sigma_{\rm r}(\su(5)_{\alpha}),\Sigma_{\rm f}(\su(5)_{\beta})\big)$.
(ii) Let $\su(5)_\beta=\{B_{12},C_{13},C_{34},B_{14},B_{15}\}$. Then $\mathscr{G}_{\rm contr}^{{}^\beta}$ is still connected, but it has no self-loops, and no cycles with an odd number of Red edges. We have $\{B_{12},C_{13},C_{34},B_{14},B_{15}\}_{\rm LA}\neq\su(5)$, which is easy to check. However, $\mathscr{G}_{\rm drift}^{{}^\alpha}$ has two self-loops. Choose $A=A_1+A_2+A_3\in\Sigma_{\rm r}(\su(5)_{\alpha})$. A trivial verification shows that $A+2[B_{15},B_{12}]-2B_{12}+C_{34}=C_{12}+D_{15}+3C_{45}+B_{34}$. Write $\widetilde{A}=C_{12}+D_{15}+3C_{45}+B_{34}$, and we have $[[\widetilde{A},C_{34}],B_{14}]=2C_{14}$. This implies that $C_{14}\in\{A,B_{12},C_{13},C_{34},B_{14},B_{15}\}_{\rm LA}$. By direct computation one can verify $\big\{\{C_{14}\}\mcup\{B_{12},C_{13},C_{34},B_{14},B_{15}\}\big\}_{\rm LA}=\su(5)$. We thus get $\{A,B_{12},C_{13},C_{34},B_{14},B_{15}\}_{\rm LA}=\su(5)$, and, in consequence, the system \eqref{bilinear} is structurally controllable on the Lie group ${\rm SU}(5)$ with respect to the pair of zero patterns $\Sigma_{\rm zero}=\big(\Sigma_{\rm r}(\su(5)_{\alpha}),\Sigma_{\rm f}(\su(5)_{\beta})\big)$.
If we replace $A_2=2B_{12}+D_{15}+3C_{45}$ with $A_2=2B_{12}+3C_{45}$, then $\mathscr{G}_{\rm drift}^{{}^\alpha}$ has no self-loops. However, $\mathscr{G}_{\rm drift}^{{}^\alpha}\mcup\mathscr{G}_{\rm contr}^{{}^\beta}$ has at least one cycle with an odd number of Red edges. For $A=A_1+A_2+A_3\in\Sigma_{\rm r}(\su(5)_{\alpha})$, we have $A+2[B_{15},B_{12}]-2B_{12}+C_{34}=C_{12}+3C_{45}+B_{34}$ and $[B_{12},C_{12}+3C_{45}+B_{34}]=2D_{12}$. Now $D_{12}\in\{A,B_{12},C_{13},C_{34},B_{14},B_{15}\}_{\rm LA}$, and so $\{A,B_{12},C_{13},C_{34},B_{14},B_{15}\}_{\rm LA}=\su(5)$. Therefore, the system \eqref{bilinear} is also structurally controllable on the Lie group ${\rm SU}(5)$ with respect to the pair of zero patterns $\Sigma_{\rm zero}=\big(\Sigma_{\rm r}(\su(5)_{\alpha}),\Sigma_{\rm f}(\su(5)_{\beta})\big)$. This example provides a validation of Theorem \ref{thm4}.
$\square$
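The bracket identities used in part (ii) of Example 5 can be spot-checked numerically. The sketch below assumes the normalization $B_{ij}=e_{ij}-e_{ji}$, $C_{ij}=\mathrm{i}(e_{ij}+e_{ji})$ and $D_{ij}=\mathrm{i}(e_{ii}-e_{jj})$ for the basis elements; the basis itself is fixed earlier in the paper, and this explicit convention is only an assumption made for the purpose of the check.
\begin{verbatim}
import numpy as np

def E(i, j, n=5):
    M = np.zeros((n, n), dtype=complex); M[i - 1, j - 1] = 1.0; return M

B = lambda i, j: E(i, j) - E(j, i)
C = lambda i, j: 1j * (E(i, j) + E(j, i))
D = lambda i, j: 1j * (E(i, i) - E(j, j))
br = lambda X, Y: X @ Y - Y @ X        # matrix commutator

A1 = C(1, 2) - 2 * B(2, 5)
A2 = 2 * B(1, 2) + D(1, 5) + 3 * C(4, 5)
A3 = B(3, 4) - C(3, 4)
A = A1 + A2 + A3

Atil = C(1, 2) + D(1, 5) + 3 * C(4, 5) + B(3, 4)
print(np.allclose(A + 2 * br(B(1, 5), B(1, 2)) - 2 * B(1, 2) + C(3, 4), Atil))  # True
print(np.allclose(br(br(Atil, C(3, 4)), B(1, 4)), 2 * C(1, 4)))                 # True

# variant with D_15 removed from A_2, as in the last paragraph of the example
print(np.allclose(br(B(1, 2), C(1, 2) + 3 * C(4, 5) + B(3, 4)), 2 * D(1, 2)))   # True
\end{verbatim}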
\noindent{\bf Example 6.} Consider the system \eqref{bilinear} evolving on ${\rm SU}(6)$. Let $\su(6)_\alpha=\{A_{1}, A_{2}, A_{3}\}$ with $A_1=C_{14}+2B_{45}$, $A_2=3B_{15}-2C_{25}+D_{25}$, and $A_3=B_{56}-C_{36}$. Let $\su(6)_\beta=\{B_{12},B_{13},C_{13},C_{23},B_{46},C_{56},B_{56},D_{45}\}$. The drift graph associated with $\Sigma_{\rm r}(\su(6)_{\alpha})$ and the controlled graph associated with $\Sigma_{\rm f}(\su(6)_{\beta})$ are shown, respectively, in Figure \ref{fig6}. \begin{figure}\label{fig6}
\end{figure} Obviously, $\mathscr{G}_{\rm drift}^{{}^\alpha}$ has no multiple edges. Each connected component of $\mathscr{G}_{\rm contr}^{{}^\beta}$ contains at least three nodes and the union graph $\mathscr{G}_{\rm drift}^{{}^\alpha}\mcup \mathscr{G}_{\rm contr}^{{}^\beta}$ is connected. First, note that $\mathscr{G}_{\rm contr}^{{}^\beta}$ has two self-loops. For any $A\in\Sigma_{\rm r}(\su(6)_{\alpha})$, we have $A=l_1A_{1}+l_2A_{2}+l_3A_{3}$ with $l_s\neq0\in\mathbb{R}$ for $s=1,2,3$. An easy computation shows that \begin{equation}\label{ex.1} [[A,B_{13}],B_{12}]=l_3C_{26}. \end{equation} This gives $C_{26}\in\{A,B_{12},B_{13},B_{46},C_{56},D_{45}\}_{\rm LA}$, and we thus get $\{A,B_{12},B_{13},B_{46},C_{56},D_{45}\}_{\rm LA}=\su(6)$. Hence, the system \eqref{bilinear} is structurally controllable on the Lie group ${\rm SU}(6)$ with respect to the pair of zero patterns $\Sigma_{\rm zero}=\big(\Sigma_{\rm r}(\su(6)_{\alpha}),\Sigma_{\rm f}(\su(6)_{\beta})\big)$.
Similarly, it can be observed that $\mathscr{G}_{\rm contr}^{{}^\beta}$ has a cycle with an odd number of Red edges. By \eqref{ex.1} it is immediate that $C_{26}\in\{A,B_{12},B_{13},C_{23},B_{46},C_{56}\}_{\rm LA}$. This clearly forces $\{A,B_{12},B_{13},C_{23},B_{46},C_{56}\}_{\rm LA}=\su(6)$. Thus, one can deduce the structural controllability of the system \eqref{bilinear}. This example provides a validation of Theorem \ref{thm5}.
$\square$
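Identity \eqref{ex.1} can be spot-checked in the same way for randomly chosen nonzero coefficients $l_1,l_2,l_3$, again under the assumed convention $B_{ij}=e_{ij}-e_{ji}$, $C_{ij}=\mathrm{i}(e_{ij}+e_{ji})$, $D_{ij}=\mathrm{i}(e_{ii}-e_{jj})$.
\begin{verbatim}
import numpy as np

n = 6
def E(i, j):
    M = np.zeros((n, n), dtype=complex); M[i - 1, j - 1] = 1.0; return M
B = lambda i, j: E(i, j) - E(j, i)
C = lambda i, j: 1j * (E(i, j) + E(j, i))
D = lambda i, j: 1j * (E(i, i) - E(j, j))
br = lambda X, Y: X @ Y - Y @ X

rng = np.random.default_rng(0)
l1, l2, l3 = rng.uniform(0.5, 2.0, size=3)   # arbitrary nonzero coefficients
A = (l1 * (C(1, 4) + 2 * B(4, 5))
     + l2 * (3 * B(1, 5) - 2 * C(2, 5) + D(2, 5))
     + l3 * (B(5, 6) - C(3, 6)))
print(np.allclose(br(br(A, B(1, 3)), B(1, 2)), l3 * C(2, 6)))   # True
\end{verbatim}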
\section{Conclusions}\label{sec:conc}
We have established graphical conditions for structural controllability and accessibility of drifted bilinear systems evolving over Lie groups. Zero patterns prescribe the admissible configurations of the drift and controlled dynamics with respect to a set of base elements in the corresponding Lie algebra: the drift term is fixed and takes values as a linear combination of its base elements with strictly nonzero coefficients, whereas the controlled terms are free and may have zero coefficients in the configuration. For bilinear systems over the special orthogonal group or the special unitary group, the zero patterns were shown to induce undirected or directed graphs whose connectivity and connected components ensure structural controllability or accessibility. For bilinear systems over the special unitary group, two edge-colored graphs associated with the drift and controlled zero patterns were shown to be the key objects for establishing structural controllability. Future work includes necessary and sufficient graphical conditions for structural controllability and accessibility, as well as a computational complexity analysis of the established connectivity conditions.
\section*{Appendix} \subsection*{A. Proof of Theorem \ref{thm1}} We first recall a few auxiliary lemmas that are used in the proof. Consider $\mathfrak{G}$ as the set of all undirected graphs over the node set $\mathrm{V}$, and $\mathfrak{B}$ as the class of all subsets of $\mathpzc{B}$. By identifying each edge $\{i,j\}$ with the matrix $B_{ij}$, we establish a natural $1$-$1$ correspondence between the elements of $\mathfrak{B}$ and the elements of $\mathfrak{G}$. We denote by $\ell$ the resulting mapping from $\mathfrak{B}$ to $\mathfrak{G}$.
\begin{lemma}\label{so.1} Let $\mathcal {S}$ be a subset of $\mathpzc{B}$ and $\ell(\mathcal {S})$ be the graph associated with $\mathcal {S}$. The Lie algebra generated by $\mathcal {S}$ is equal to $\so(n)$ if and only if $\ell(\mathcal {S})$ is connected. \end{lemma} For any $A\in\so(n)$, we can represent it uniquely in the form $$ A=\sum_{k=1}^{l} a_{k} B_{{i}^\ast_k{j}^\ast_k}, $$ where $a_k\neq 0\in\mathbb{R}$, $i^\ast_k,j^\ast_k\in\mathrm{V}$, and $B_{{i}^\ast_k{j}^\ast_k}\in\mathpzc{B}$. Define $\mathrm{G}_A=(\mathrm{V},\mathrm{E}_A)$ with $\mathrm{E}_{A}=\big\{\{i^\ast_1,j^\ast_1\},\dots,\{i^\ast_l,j^\ast_l\}\big\}$. The following lemma holds \cite{wang2020}.
\begin{lemma}\label{so.2} Consider a subset $\mathcal {S}\subseteq\mathpzc{B}$ with the associated graph $\mathrm{G}_{\mathcal {S}}:=\ell(\mathcal {S})$ and a matrix $A\in\so(n)$ with the associated graph $\mathrm{G}_A$. The following statements hold.
$(i)$ The Lie algebra generated by $\{A\}\mcup\mathcal {S}$ is equal to $\so(n)$ if each connected component of $\mathrm{G}_{\mathcal {S}}$ contains at least three nodes and the union graph $\mathrm{G}_A \mcup \mathrm{G}_{\mathcal {S}}$ is connected.
$(ii)$ If the Lie algebra generated by $\{A\}\mcup\mathcal {S}$ is equal to $\so(n)$, then the union graph $\mathrm{G}_A \mcup \mathrm{G}_{\mathcal {S}}$ is connected. \end{lemma}
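Lemmas \ref{so.1} and \ref{so.2} lend themselves to a direct numerical illustration: one computes the Lie algebra generated by a given set of matrices by iterating brackets and reading off the dimension of the real span as a matrix rank, and compares the result with the connectivity of the associated graph. The sketch below is our own and again assumes $B_{ij}=e_{ij}-e_{ji}$; it confirms that a connected choice of $\mathcal{S}$ generates $\so(4)$ (dimension $6$), while a disconnected choice does not.
\begin{verbatim}
import numpy as np

def lie_closure_dim(gens, tol=1e-9):
    shape = gens[0].shape
    basis = []                       # flattened, linearly independent elements
    def add(M):
        cand = basis + [M.ravel()]
        if np.linalg.matrix_rank(np.vstack(cand), tol=tol) > len(basis):
            basis.append(M.ravel()); return True
        return False
    for g in gens:
        add(np.array(g, dtype=float))
    grew = True
    while grew:                      # bracket until the span stabilises
        grew = False
        mats = [b.reshape(shape) for b in basis]
        for X in mats:
            for Y in mats:
                if add(X @ Y - Y @ X):
                    grew = True
    return len(basis)

def B(i, j, n=4):
    M = np.zeros((n, n)); M[i - 1, j - 1], M[j - 1, i - 1] = 1.0, -1.0
    return M

S_conn = [B(1, 2), B(2, 3), B(3, 4)]     # path graph on 4 nodes: connected
S_disc = [B(1, 2), B(3, 4)]              # two components
print(lie_closure_dim(S_conn))           # 6 = dim so(4)
print(lie_closure_dim(S_disc))           # 2 < dim so(4)
\end{verbatim}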
We are now in a position to show the detailed proof of Theorem \ref{thm1}. Since the union graph $\mathrm{G}_{\rm drift}^{{}^\alpha} \mcup \mathrm{G}_{\rm contr}^{{}^\beta}$ is connected, there must exist a matrix $A\in\Sigma_{\rm r}(\so(n)_{\alpha})$ such that $\mathrm{G}_A \mcup \mathrm{G}_{\rm contr}^{{}^\beta}$ is connected. Moreover, each connected component of $\mathrm{G}_{\rm contr}^{{}^\beta}$ contains at least three nodes. Lemma \ref{so.2}(i) now leads to $\big\{\{A\}\mcup \so(n)_\beta\big\}_{\rm LA}=\so(n)$. Therefore, there exist $A\in\Sigma_{\rm r}(\so(n)_{\alpha})$ and $\so(n)_\beta\subset \Sigma_{\rm f}(\so(n)_{\beta})$ such that the system \eqref{bilinear} is controllable on the Lie group ${\rm SO}(n)$. That is to say, the system \eqref{bilinear} is structurally controllable on the Lie group ${\rm SO}(n)$ with respect to the pair of zero patterns $\Sigma_{\rm zero}=\big(\Sigma_{\rm r}(\so(n)_{\alpha}),\Sigma_{\rm f}(\so(n)_{\beta})\big)$. This is the desired conclusion.
\subsection*{B. Proof of Theorem \ref{thm2.1}} Before presenting the detailed proof for Theorem \ref{thm2.1}, we recall some auxiliary lemmas and definitions which will be useful for the analysis. \begin{lemma}\label{lem1} The Lie bracket of $E_{ij}$ and $E_{kl}$ in $\mathpzc{E}$ satisfies the relation $[E_{ij},E_{kl}] = \delta_{jk}E_{il}-\delta_{li}E_{kj}$, where $\delta$ is the Kronecker delta function, i.e., $\delta_{mn}=1$ if $m=n$ and $\delta_{mn}=0$ otherwise. \end{lemma}
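Assuming that the elements of $\mathpzc{E}$ are the standard matrix units (as the bracket relation indicates), Lemma \ref{lem1} can be verified by a short numerical check; the sketch below is only an illustration.
\begin{verbatim}
import numpy as np
from itertools import product

n = 4
def E(i, j):
    M = np.zeros((n, n)); M[i - 1, j - 1] = 1.0; return M
d = lambda a, b: 1.0 if a == b else 0.0

print(all(np.allclose(E(i, j) @ E(k, l) - E(k, l) @ E(i, j),
                      d(j, k) * E(i, l) - d(l, i) * E(k, j))
          for i, j, k, l in product(range(1, n + 1), repeat=4)))   # True
\end{verbatim}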
\begin{definition}\label{def7} Let $\mathcal{G}$ be a simple digraph. The simple digraph transitive closure mapping, $\mathcal{M}(\cdot)$, is defined as $$ \mathcal{M}(\mathcal{G}) = \Big(\mathrm{V}, \mathcal{E}\mcup\big\{(i,k)\,:\,\exists j \ \text{s.t. }\ (i,j)\in \mathcal{E},~(j,k)\in \mathcal{E},\ i\neq k \big\}\Big). $$ We then recursively define $\mathcal{M}^{k}(\mathcal{G}):=\mathcal{M}(\mathcal{M}^{k-1}(\mathcal{G}))$ with $\mathcal{M}^{0}(\mathcal{G}):=\mathcal{G}$. \end{definition}
\begin{lemma}\label{map.2} Let $\mathcal{G}$ be a simple digraph. There exists an integer $z$ such that the digraph $\mathcal{M}^{z}(\mathcal{G})$ is simple complete if and only if $\mathcal{G}$ is strongly connected. \end{lemma}
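The mapping $\mathcal{M}(\cdot)$ of Definition \ref{def7} and the equivalence in Lemma \ref{map.2} can be illustrated as follows (our own encoding: a simple digraph over the node list \texttt{V} is a set of ordered pairs $(i,j)$ with $i\neq j$).
\begin{verbatim}
def closure_step(E):
    new = {(i, k) for (i, j) in E for (j2, k) in E if j == j2 and i != k}
    return E | new

def iterate_closure(E):
    while True:
        E2 = closure_step(E)
        if E2 == E:
            return E
        E = E2

def reachable(E, s):
    seen, stack = {s}, [s]
    while stack:
        u = stack.pop()
        for a, b in E:
            if a == u and b not in seen:
                seen.add(b); stack.append(b)
    return seen

def strongly_connected(V, E):
    rev = {(b, a) for (a, b) in E}
    return reachable(E, V[0]) == set(V) and reachable(rev, V[0]) == set(V)

V = [1, 2, 3, 4]
cycle = {(1, 2), (2, 3), (3, 4), (4, 1)}   # strongly connected
path = {(1, 2), (2, 3), (3, 4)}            # not strongly connected
for E in (cycle, path):
    complete = len(iterate_closure(E)) == len(V) * (len(V) - 1)
    print(complete, strongly_connected(V, E))   # True True, then False False
\end{verbatim}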
Consider $\mathfrak{D}$ as the set of all digraphs over the node set $\mathrm{V}$, and $\mathfrak{E}$ as the set of all subsets of $\mathpzc{E}$. By identifying each arc $(i,j)$ with the matrix $E_{ij}\in\mathpzc{E}$, we establish a natural $1$-$1$ correspondence between the elements of $\mathfrak{E}$ and the elements of $\mathfrak{D}$. We denote by $\tau$ the resulting mapping from $\mathfrak{E}$ to $\mathfrak{D}$. Denote $\mathpzc{E}_{1}=\{E_{ij}:1\le i\neq j\le n\}$, $\mathpzc{E}_{2}=\{E_{ii}:1\le i\le n\}$. Then with $\mathpzc{E}=\mathpzc{E}_{1}\mcup\mathpzc{E}_{2}$, for any $\mathcal {S}\subseteq \mathpzc{E}_{1}$, the digraph $\tau(\mathcal {S})$ is simple. Lemmas \ref{gl.0}, \ref{gl.1}, and \ref{lem2} below appeared in \cite{arxiv}; we collect them here and omit the details of their proofs.
\begin{lemma}\label{gl.0} Given a subset $\mathcal {S}\subseteq \mathpzc{E}_{1}$, let $\mathcal {S}^0\subseteq \mathcal {S}^1 \subseteq \mathcal {S}^2\cdots$ be an ascending chain of subsets of $\mathpzc{E}_{1}$ such that $\mathcal {S}^0=\mathcal {S}, \mathcal {S}^1 = [\mathcal {S}^0,\mathcal {S}^0]\mcup \mathcal {S}^0, \dots, \mathcal {S}^{k+1} = [\mathcal {S}^k, \mathcal {S}^k]\mcup \mathcal {S}^k,\dots.$ where $[\mathcal {S}^k, \mathcal {S}^k] = \big\{[E_{ij}, E_{jk}]=E_{ik}: E_{ij},E_{jk} \in \mathcal {S}^k ~{\rm and}~ i\neq k\big\}$. Then $\mathcal{M}^{k}(\tau(\mathcal {S}))=\tau(\mathcal {S}^k)$ holds for all $k= 0, 1,\dots$. \end{lemma}
We denote by $\mathfrak{sl}(n)$ the vector space of $n\times n$ real traceless matrices.
\begin{lemma}\label{gl.1} Let $\mathcal {S}$ be a subset of $\mathpzc{E}$ and $\tau(\mathcal {S})$ be the digraph associated with $\mathcal {S}$.
$(i)$ The Lie algebra generated by $\mathcal {S}$ contains $\mathfrak{sl}(n)$ if and only if $\tau(\mathcal {S})$ is strongly connected.
$(ii)$ The Lie algebra generated by $\mathcal {S}$ is equal to $\gl(n)$ if and only if $\tau(\mathcal {S})$ is a strongly connected digraph with at least one self-loop. \end{lemma}
\begin{definition}\label{def8} Let $\mathcal{G}$ be a simple digraph. Given an ordered pair of nodes $\langle i,j\rangle$ with $i, j\in\mathrm{V}$, the digraph $\mathcal{H}_{ij}(\mathcal{G}) := (\mathrm{V},\mathcal{E}_{ij})$ is called the circumjacent closure at $\langle i,j\rangle$ of $\mathcal{G}$ with $ \mathcal{E}_{ij}=\mathcal{E}_{ij}^{1}\mcup\mathcal{E}_{ij}^{2}$, where \begin{align*} \mathcal{E}_{ij}^{1}= \big\{(i,k)\,:\,(j,k)\in \mathcal{E},~k\neq i\big\}, ~~\mathcal{E}_{ij}^{2}= \big\{(k,j)\,:\,(k,i)\in \mathcal{E},~k\neq j\big\}. \end{align*} \end{definition}
\begin{lemma}\label{lem2}
Let $\mathcal{G}=(\mathrm{V},\mathcal{E})$ be a simple directed graph. Suppose for $i,j\in \mathrm{V}$ we have $(i,j), (j,i)\notin \mathcal{E}$ and $\deg^{+}({i})=k$,\ $\deg^{-}({j})=l$. Then $\mathcal{H}_{{i}{j}}(\mathcal{G})= (\mathrm{V},\mathcal{E}_{{i}{j}})$ satisfies
$(i)$ $|\mathcal{E}_{{i}{j}}|=k+l$;
$(ii)$ $\deg^{-}({i})=l$,\ $\deg^{+}({j})=k$;
$(iii)$ all nodes have zero degree except for ${i}$, ${j}$, ${i}$'s in-neighbors and ${j}$'s out-neighbors. \end{lemma} For any $A\in\gl(n)$, we can represent it uniquely in the form $$ A=\sum_{k=1}^{l} a_{k} E_{{i}^\ast_k{j}^\ast_k}, $$ where $a_k\neq 0\in\mathbb{R}$, $i^\ast_k,j^\ast_k\in\mathrm{V}$, and $E_{{i}^\ast_k{j}^\ast_k}\in\mathpzc{E}$. Define the map $\varphi$ that takes a matrix $A\in\gl(n)$ to a digraph $\mathcal{G}_{A}:=(\mathrm{V},\mathcal{E}_{A})$ with $\mathcal{E}_{A}=\big\{(i^\ast_1,j^\ast_1),\dots,(i^\ast_l,j^\ast_l)\big\}$. We will have established Theorem \ref{thm2.1} if we prove the lemma below.
\begin{lemma}\label{gl.2} Consider a subset $\mathcal {S}\subseteq\mathpzc{E}$ with the associated digraph $\tau(\mathcal {S})$ and a matrix $A\in\gl(n)$ with the associated digraph $\mathcal{G}_A=(\mathrm{V},\mathcal{E}_A)$. Let $\mathcal{G}_{\mathcal {S}}=(\mathrm{V},\mathcal{E}_{\mathcal {S}})$ be $\tau(\mathcal {S})$. Then the Lie algebra generated by $\{A\}\mcup\mathcal {S}$ is equal to $\gl(n)$ if the following conditions hold:
$(i)$ Each weakly connected component of $\mathcal{G}_{\mathcal {S}}$ is strongly connected with at least two nodes;
$(ii)$ The digraph $\mathcal{G}_{\mathcal {S}}$ has at least one self-loop;
$(iii)$ The union graph $\mathcal{G}_A \mcup\mathcal{G}_{\mathcal {S}}$ is strongly connected. \end{lemma}
\begin{proof} Let $\widetilde{\mathcal{G}}_{A}=(\mathrm{V},\widetilde{\mathcal{E}}_A)$ and $\widetilde{\mathcal{G}}_{\mathcal {S}}=(\mathrm{V},\widetilde{\mathcal{E}}_{\mathcal {S}})$ be the simple digraphs corresponding to $\mathcal{G}_A$ and $\mathcal{G}_{\mathcal {S}}$ by ignoring the self-loops, respectively. Because the union graph $\mathcal{G}_A \mcup\mathcal{G}_{\mathcal {S}}$ is strongly connected, the union graph $\widetilde{\mathcal{G}}_A \mcup\widetilde{\mathcal{G}}_{\mathcal {S}}$ is strongly connected too. If $\mathcal{G}_{\mathcal {S}}$ is strongly connected, then by Lemma \ref{gl.1} the Lie algebra generated by $\{A\}\mcup\mathcal {S}$ is equal to $\gl(n)$. Now, assume that $\mathcal{G}_{\mathcal {S}}$ is the union of $m$ weakly connected components with $ m\geq2$.
Let $\widetilde{\mathcal{G}}_{\mathcal {S}}^{i}=(\mathrm{V}_{i},\widetilde{\mathcal{E}}_{\mathcal {S}}^{i})$ denote the $i$-th weakly connected component of $\widetilde{\mathcal{G}}_{\mathcal {S}}$ for $i=1,\dots,m$. It is easily seen that each $\widetilde{\mathcal{G}}_{\mathcal {S}}^{i}$ is strongly connected. According to Lemma~\ref{map.2}, there exists an integer $z_{i}$ such that $\mathcal{M}^{z_{i}}(\widetilde{\mathcal{G}}_{\mathcal {S}}^{i})$ is a simple complete digraph for each $i$. Therefore $\mathcal{M}^{z^{*}}(\widetilde{\mathcal{G}}_{\mathcal {S}})=\mcup_{i=1}^{m}\mathcal{M}^{z_{i}}(\widetilde{\mathcal{G}}_{\mathcal {S}}^{i})$, where $z^{*}=\max_i\{z_{i}\}$. For simplicity, we denote $\mathcal{G}^{*}=(\mathrm{V},\mathcal{E}^{*})=\mathcal{M}^{z^{*}}(\widetilde{\mathcal{G}}_{\mathcal {S}})$ with $\mathcal{E}^{*}=\mcup_{i=1}^{m}\{(u,v): u, v\in\mathrm{V}_{i}, u\neq v\}$. Lemma \ref{gl.0} shows that the elements in $\{E_{ij}:(i,j)\in\mathcal{E}^{*}\}$ can be generated by iterated Lie brackets of elements in $\mathcal {S}$.
Define the digraph $\mathcal{G}_{\rm valid}$ by $\mathcal{G}_{\rm valid}:= (\mathrm{V},\mathcal{E}_{\rm valid})$, where $\mathcal{E}_{\rm valid}:=\widetilde{\mathcal{E}}_{A}\setminus \mathcal{E}^{*}$. As the union graph $\widetilde{\mathcal{G}}_A\mcup\widetilde{\mathcal{G}}_{\mathcal {S}}$ is strongly connected while $\widetilde{\mathcal{G}}_{\mathcal {S}}$ is not, we always have $|\mathcal{E}_{\rm valid}|\geq 2$. Because $\widetilde{\mathcal{G}}_A \mcup\widetilde{\mathcal{G}}_{\mathcal {S}}$ is strongly connected, $\mathcal{G}_{\rm valid} \mcup \widetilde{\mathcal{G}}_{\mathcal {S}}$ is also strongly connected by the definition of $\mathcal{G}_{\rm valid}$. In addition, we can deduce that $\mathcal{G}_{\rm valid}$ satisfies: i) all arcs are between different $\mathrm{V}_{i}$ and no arcs within each $\mathrm{V}_{i}$; ii) each $\mathrm{V}_{i}$ has at least one node with out-degree greater than zero; iii) each $\mathrm{V}_{i}$ has at least one node with in-degree greater than zero.
Let $\widetilde{A}=\sum_{({i}^\ast_k,{j}^\ast_k)\in\mathcal{E}_{\rm valid}}a_{k}E_{{i}^\ast_k{j}^\ast_k}.$ It is clear that $\varphi(\widetilde{A})=\mathcal{G}_{\rm valid}$. The Lie bracket of $\widetilde{A}$ and $E_{ij}\in\{E_{ij}:(i,j)\in\mathcal{E}^{*}\}$ satisfies $[\widetilde{A}, E_{ij}]\in\big\{\{A\}\mcup\mathcal {S}\big\}_{\rm LA}$. In addition, the relationship between the Lie bracket $[\widetilde{A},E_{ij}]$ and the circumjacent closure at $\langle i,j\rangle$ of $\mathcal{G}_{\rm valid}$ satisfies \begin{equation}\label{eq1} \varphi\big([\widetilde{A},E_{ij}] \big)=\mathcal{H}_{ij}(\mathcal{G}_{\rm valid}) \end{equation} for $ E_{ij}\in \{E_{ij}:(i,j)\in\mathcal{E}^{*}\}$. To prove the statement, we need to consider the Lie algebra generated by $\{A\}\mcup\mathcal {S}$. Since $\mathcal{G}_{\mathcal {S}}$ has at least one self-loop, without loss of generality we assume that the node $v_{11}\in\mathrm{V}_{1}$ has a self-loop, i.e., $E_{v_{11}v_{11}}\in\mathcal {S}$. The remainder of the proof contains two steps.
\noindent{\it Step 1.} We first prove that if $\mathcal{G}_{\rm valid}$ has arcs from the nodes in $\mathrm{V}_{1}$ to the nodes in $\mathrm{V}_{k}, k\in \{2,\dots,m\}$, then all elements in the set $\{E_{ij}:i\in\mathrm{V}_{1},~ j\in\mathrm{V}_{k}\}$ can be obtained by iterated Lie brackets of elements in $\{\widetilde{A}\}\mcup\mathcal {S}$. Note that for any node $v\in\mathrm{V}_{1}$ in $\mathcal{G}_{\rm valid}$ with $\deg^-(v)>0$, Lemma \ref{gl.1} yields $E_{vv}\in\{\mathcal {S}\}_{\rm LA}$ since $\mathcal{G}_{\mathcal {S}}^{1}$ is a strongly connected digraph with self-loops. We only need to consider the case where the out-degree of node $v_{11}$ is greater than zero. The analysis for the other nodes in $\mathrm{V}_{1}$ whose out-degree is greater than zero can be similarly established.
Let $\deg^-(v_{11})=k>0$, and $v_{i_{1}j_{1}},\dots,v_{i_{k}j_{k}}$ be the out-neighbors of $v_{11}$. Clearly, these out-neighbors are in $\mathrm{V}_{2}\mcup\dots\mcup\mathrm{V}_{m}$. To be specific, let $v_{i_{1}j_{1}},\dots,v_{i_{r}j_{r}}\in\mathrm{V}_{2}$ with $1\leq r\leq k$. Applying Lemma \ref{lem1}, we deduce that the digraph $\varphi\big([\widetilde{A},E_{v_{11}v_{11}}]\big)$ satisfies: i) $\deg^-(v_{11})=k>0$; ii) all other nodes in $\mathrm{V}_{1}$ have zero degree. Fix $v_{12}\in{\rm V}_1$ with $v_{12}\neq v_{11}$. Using Lemma \ref{lem2}, we have \begin{equation*}
\mathcal{H}_{v_{12}v_{11}}\Big(\varphi\big([\widetilde{A},E_{v_{11}v_{11}}]\big)\Big)=\Big(\mathrm{V},\big\{(v_{12},v_{i_{1}j_{1}}),(v_{12},v_{i_{2}j_{2}}),\dots,(v_{12},v_{i_{k}j_{k}})\big\}\Big).
\end{equation*} Let $v_{i_{*}j_{*}}\in \mathrm{V}_{2}$ with $v_{i_{*}j_{*}}\neq v_{i_{1}j_{1}}$. By selecting the node pair $\langle v_{i_{1}j_{1}},v_{i_{*}j_{*}}\rangle$ we can obtain \begin{equation}\label{eq2}
\mathcal{H}_{v_{i_{1}j_{1}}v_{i_{*}j_{*}}}\Big(\mathcal{H}_{v_{12}v_{11}}\big(\varphi([\widetilde{A},E_{v_{11}v_{11}}])\big)\Big)= \Big(\mathrm{V},\big\{(v_{12},v_{i_{*}j_{*}})\big\}\Big). \end{equation} Since $(v_{12},v_{11})$ and $(v_{i_{1}j_{1}},v_{i_{*}j_{*}})$ are in $\mathcal{E}^{*}$, i.e., $E_{v_{12}v_{11}},E_{v_{i_{1}j_{1}}v_{i_{*}j_{*}}}\in\{\mathcal {S}\}_{\rm LA}$, from \eqref{eq1} and \eqref{eq2} we conclude that $$ \varphi\Big([[[\widetilde{A},E_{v_{11}v_{11}}],E_{v_{12}v_{11}}],E_{v_{i_{1}j_{1}}v_{i_{*}j_{*}}}]\Big) =\Big(\mathrm{V},\big\{(v_{12},v_{i_{*}j_{*}})\big\}\Big). $$ This implies that $$[[[\widetilde{A},E_{v_{11}v_{11}}],E_{v_{12}v_{11}}],E_{v_{i_{1}j_{1}}v_{i_{*}j_{*}}}]= a^{*}E_{v_{12}v_{i_{*}j_{*}}},$$ where $a^{*}$ is the coefficient generated during the operation of the Lie brackets.
Therefore, $E_{v_{12}v_{i_{*}j_{*}}}\in\mathpzc{E}_{1}$ can be obtained by iterated Lie brackets of elements in $\{\widetilde{A}\}\mcup\mathcal {S}$. This, together with the strong connectivity of $\mathcal{G}_{\mathcal {S}}^{1}$ and $\mathcal{G}_{\mathcal {S}}^{2}$, implies that all elements in the set $\{E_{ij}:i\in\mathrm{V}_{1}, ~j\in\mathrm{V}_{2}\}$ can be obtained by iterated Lie brackets of elements in $\{\widetilde{A}\}\mcup\mathcal {S}$.
\noindent{\it Step 2.} Analysis similar to that in the proof of Theorem 3 in \cite{arxiv} shows that all elements in the set $\mathcal {S}^*:= \{E_{ij}:i\in\mathrm{V}_{1},~j\in\mathrm{V}_{2}\mcup\dots\mcup\mathrm{V}_{m}\}\mcup\{E_{ij}:i\in\mathrm{V}_{2}\mcup\dots\mcup\mathrm{V}_{m},~ j\in\mathrm{V}_{1}\}$ can be obtained by iterated Lie brackets of elements in $\{\widetilde{A}\}\mcup\mathcal {S}$. Since $\mathcal {S}^*\mcup \mathcal {S}\subseteq\mathpzc{E}$ and $\tau(\mathcal {S}^*\mcup \mathcal {S})$ is a strongly connected digraph with self-loops, we conclude from Lemma \ref{gl.1} that $\{\mathcal {S}^*\mcup \mathcal {S}\}_{\rm LA}=\gl(n)$, and consequently, we have $\big\{\{A\}\mcup\mathcal {S}\big\}_{\rm LA}=\gl(n)$. This completes the proof. \end{proof}
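Lemma \ref{gl.2} can also be checked numerically on a small instance. In the sketch below (our own; the rank-based closure routine is the same as in the sketch following Lemma \ref{so.2} and is repeated only to keep the snippet self-contained), the set $\mathcal{S}$ consists of two strongly connected two-node components together with a self-loop, and the matrix $A$ makes the union strongly connected; the computed dimension jumps from $7$ to $n^2=16$, i.e.\ to $\gl(4)$, once $A$ is added.
\begin{verbatim}
import numpy as np

def lie_closure_dim(gens, tol=1e-9):
    shape = gens[0].shape
    basis = []
    def add(M):
        cand = basis + [M.ravel()]
        if np.linalg.matrix_rank(np.vstack(cand), tol=tol) > len(basis):
            basis.append(M.ravel()); return True
        return False
    for g in gens:
        add(np.array(g, dtype=float))
    grew = True
    while grew:
        grew = False
        mats = [b.reshape(shape) for b in basis]
        for X in mats:
            for Y in mats:
                if add(X @ Y - Y @ X):
                    grew = True
    return len(basis)

n = 4
def E(i, j):
    M = np.zeros((n, n)); M[i - 1, j - 1] = 1.0; return M

S = [E(1, 2), E(2, 1), E(3, 4), E(4, 3), E(1, 1)]   # two strong components + a self-loop
A = E(2, 3) + E(4, 1)                               # makes the union strongly connected
print(lie_closure_dim(S))          # 7  (the components stay separate)
print(lie_closure_dim(S + [A]))    # 16 = n^2, i.e. gl(4)
\end{verbatim}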
\subsection*{C. Proof of Theorem \ref{thm2.2}} The statement in Theorem \ref{thm2.2} will be proved once we prove the lemma below. \begin{lemma}\label{gl.3} Consider two subsets $\mathcal {A}, \mathcal {S}\subseteq \mathpzc{E}$ with the associated graphs $\mathcal{G}_{\mathcal {A}}=\tau(\mathcal {A})$ and $\mathcal{G}_{\mathcal {S}}=\tau(\mathcal {S})$, respectively. There exists a matrix $A\in\Sigma_{\rm r}(\mathcal {A})$ such that the Lie algebra generated by $\{A\}\mcup\mathcal{S}$ is equal to $\gl(n)$ if the following conditions hold:
$(i)$ Each weakly connected component of $\mathcal{G}_{\mathcal {S}}$ is strongly connected with at least two nodes;
$(ii)$ The union graph $\mathcal{G}_{\mathcal {A}}\mcup\mathcal{G}_{\mathcal {S}}$ is a strongly connected digraph with self-loops. \end{lemma} \begin{proof} Note that $\mathcal{G}_{\mathcal {A}}\mcup\mathcal{G}_{\mathcal {S}}$ has self-loops. That is, $\mathcal{G}_{\mathcal {A}}$ or $\mathcal{G}_{\mathcal {S}}$ has self-loops. Using Lemma \ref{gl.2}, for any $A\in\Sigma_{\rm r}(\mathcal {A})$, the generated Lie algebra by $\{A\}\mcup\mathcal{S}$ is equal to $\gl(n)$ if $\mathcal{G}_{\mathcal {S}}$ has self-loops. Thus, in the rest of the proof, we consider the case where only $\mathcal{G}_{\mathcal {A}}$ has self-loops. Set $\mathcal {A}=\{E_{i_1i_1},E_{i_2i_2},\dots,E_{i_li_l},E_{i_{l+1}j_{l+1}},\dots,E_{i_{l+r}j_{l+r}}\}$ with $l\geq 1$. For any $A\in\Sigma_{\rm r}(\mathcal {A})$, $$ A=\sum_{k=1}^l a_{k} E_{i_ki_k}+\sum_{k=l+1}^{l+r} a_{k} E_{i_kj_k}, $$ where $a_{k}\neq 0\in\mathbb{R}$. It follows that $\mathcal{G}_{A}=\mathcal{G}_{\mathcal {A}}$.
If $\mathcal{G}_{\mathcal {S}}$ is strongly connected, then by Lemma \ref{gl.1} the Lie algebra generated by $\mathcal {S}$ contains $\mathfrak{sl}(n)$. Hence, for any $A\in\Sigma_{\rm r}(\mathcal {A})$, there holds $\sum_{k=1}^{l} a_{k} E_{i_ki_k}\in \big\{\{A\}\mcup\mathcal{S}\big\}_{\rm LA}.$ Because $E_{ii}-E_{jj}\in\{\mathcal {S}\}_{\rm LA}$, for $1\leq i\neq j \leq n$, we can obtain \begin{equation}\label{eq3}
\begin{split}
\sum_{k=1}^{l}a_{k}E_{i_{k}i_{k}} & +a_{l}(E_{i_{l-1}i_{l-1}}-E_{i_{l}i_{l}}) \\
& +(a_{l}+a_{l-1})(E_{i_{l-2}i_{l-2}}-E_{i_{l-1}i_{l-1}})\\
& +\cdots+\Big(\sum_{k=2}^{l}a_{k}\Big)(E_{i_{1}i_{1}}-E_{i_{2}i_{2}})\\
& =\Big(\sum_{k=1}^{l}a_{k}\Big)E_{i_{1}i_{1}}\in \big\{\{A\}\mcup\mathcal{S}\big\}_{\rm LA}.
\end{split} \end{equation} Therefore, when ${\rm tr}A\neq 0$, i.e., $\sum_{k=1}^{l}a_{k}\neq0$, we have $E_{i_{1}i_{1}}\in\big\{\{A\}\mcup\mathcal{S}\big\}_{\rm LA}.$ Lemma \ref{gl.1} now yields $\big\{\{A\}\mcup\mathcal{S}\big\}_{\rm LA}=\gl(n)$. Consequently, for all $A\in\Sigma_{\rm r}(\mathcal {A})$ with ${\rm tr}A\neq 0$, the Lie algebra generated by $\{A\}\mcup\mathcal{S}$ is equal to $\gl(n)$.
Now, let $\mathcal{G}_{\mathcal {S}}$ be the union of $m$ weakly connected components with $ m\geq2$. Consider $A\in\Sigma_{\rm r}(\mathcal {A})$ with ${\rm tr}A\neq 0$. We continue to use the definitions of $\widetilde{\mathcal{G}}_{A}$, $\widetilde{\mathcal{G}}_{\mathcal {S}}$, $\mathcal{G}^{*}$, $\mathcal{G}_{\rm valid}$ and $\widetilde{A}$ in the proof of Lemma \ref{gl.2}. By the definition of $\widetilde{A}$, one has
$$\widetilde{A}+\sum_{k=1}^{l} a_{k} E_{i_ki_k}\in \big\{\{A\}\mcup\mathcal{S}\big\}_{\rm LA}.$$
Recall that all the elements in $\{E_{ij}:(i,j)\in\mathcal{E}^{*}\}$ can be generated by iterated Lie brackets of elements in $\mathcal {S}$. Using Lemma \ref{lem1}, we have \begin{equation}\label{eq11}
\{\mathcal{S}\}_{\rm LA}\supset\{E_{ij}:~(i,j)\in\mathcal{E}^{*}\}\mcup\{E_{ii}-E_{jj}:~(i,j)\in\mathcal{E}^{*}\}. \end{equation} Without loss of generality, we assume $\{i_1,\dots,i_{l_1}\}\subseteq\mathrm{V}_1$, $\{i_{l_1+1},\dots,i_{l_2}\}\subseteq\mathrm{V}_2,\dots, \{i_{l_{r}+1},\dots,i_{l_{r+1}}=i_{l}\}\subseteq\mathrm{V}_{r+1}$, with $1\leq r+1\leq m$. Repeating the process in \eqref{eq3} for $\widetilde{A}+\sum_{k=1}^{l} a_{k} E_{i_ki_k}$ gives $$\widetilde{A}+\sum_{k=1}^{r+1} a_{k}^* E_{i_{l_k}i_{l_k}}\in \big\{\{A\}\mcup\mathcal{S}\big\}_{\rm LA},$$ where $a_{k}^*\neq 0$ if and only if $\sum_{j=l_{k-1}+1}^{l_k} a_j\neq0$ (with $l_0:=0$). There must exist a $1\leq k\leq r+1$ such that $a_{k}^*\neq 0$ since ${\rm tr}A\neq 0$. From now on, we will write it simply as $\widetilde{A}+\sum_{k=1}^{r+1} a_{k} E_{i_{k}i_{k}}$ when no confusion can arise. It is worth pointing out that node $i_k$ is in $\mathrm{V}_k$ for $k=1,\dots,r+1.$ The remainder of the proof will be divided into three steps.
\noindent{\it Step 1.} We first prove that the following relation holds: \begin{equation}\label{eq4} \sum_{k=1}^{r+1} a_{k} E_{i_ki_k}\in \big\{\{A\}\mcup\mathcal{S}\big\}_{\rm LA}. \end{equation} To this end, consider $E_{i^*j^*}\in \{E_{ij}:(i,j)\in\mathcal{E}^{*}\}$. The matrix $\widetilde{A}$ can be decomposed into $\widetilde{A}=\widetilde{A}_1+\widetilde{A}_2+\widetilde{A}_3$, where $\widetilde{A}_1$ is the part of the linear combination of elements of the form $E_{ri^*}$ in $\widetilde{A}$, $\widetilde{A}_2$ is the part of the linear combination of elements of the form $E_{j^*t}$ in $\widetilde{A}$, and the remaining part is $\widetilde{A}_3$. The proof of \eqref{eq4} is based on the following computation. \begin{align*}
[[\widetilde{A}+\sum_{k=1}^{r+1} a_{k} E_{i_{k}i_{k}},E_{i^*j^*}],E_{j^*i^*}] =& [[\widetilde{A}_1+\widetilde{A}_2+\widetilde{A}_3+\sum_{k=1}^{r+1} a_{k} E_{i_{k}i_{k}},E_{i^*j^*}],E_{j^*i^*}] \\
= & \widetilde{A}_1+\widetilde{A}_2+\delta_{i_ki^*}a_k(E_{i^*i^*}-E_{j^*j^*})+\delta_{i_kj^*}a_k(E_{j^*j^*}-E_{i^*i^*}). \end{align*} Set $A'=\delta_{i_ki^*}a_k(E_{i^*i^*}-E_{j^*j^*})+\delta_{i_kj^*}a_k(E_{j^*j^*}-E_{i^*i^*})$. Of course, $A'\in \big\{\{A\}\mcup\mathcal{S}\big\}_{\rm LA}$. Then there holds \begin{align*}
& \widetilde{A}+\sum_{k=1}^{r+1} a_{k} E_{i_{k}i_{k}}-\big((\widetilde{A}_1+\widetilde{A}_2+A')-A'\big)\\ =& \widetilde{A}_1+\widetilde{A}_2+\widetilde{A}_3+\sum_{k=1}^{r+1} a_{k} E_{i_{k}i_{k}}- (\widetilde{A}_1+\widetilde{A}_2)\\
= & \widetilde{A}_3+\sum_{k=1}^{r+1} a_{k} E_{i_{k}i_{k}}\in \big\{\{A\}\mcup\mathcal{S}\big\}_{\rm LA}.
\end{align*} The term $\widetilde{A}_3+\sum_{k=1}^{r+1} a_{k} E_{i_{k}i_{k}}$ can be handled in much the same way, the only difference being in the choice of $E_{i^*j^*}$. Repeating this process, we can finally conclude that $\sum_{k=1}^{r+1} a_{k} E_{i_{k}i_{k}}\in \big\{\{A\}\mcup\mathcal{S}\big\}_{\rm LA}$.
\noindent{\it Step 2.} We next prove that for any $v_k\in\mathrm{V}_k, k=1,\dots,r+1$, there holds \begin{equation}\label{eq5} \sum_{k=1}^{r+1} a_{k} E_{v_{k}v_{k}}\in \big\{\{A\}\mcup\mathcal{S}\big\}_{\rm LA}. \end{equation} Recall that $i_k\in\mathrm{V}_k$, for $k=1,\dots,r+1$. Based on \eqref{eq11} and \eqref{eq4}, it can be concluded that \begin{equation*} \sum_{k=1}^{r+1} a_{k} E_{i_{k}i_{k}}+a_1(E_{v_1v_1}-E_{i_1i_1})=a_1E_{v_1v_1}+\sum_{k=2}^{r+1} a_{k} E_{i_{k}i_{k}}\in\big\{\{A\}\mcup\mathcal{S}\big\}_{\rm LA}, \end{equation*} for any $v_1\in\mathrm{V}_{1}$ and $v_1\neq i_1$. Carrying out the same substitution in each of the remaining components shows that \eqref{eq5} holds.
\noindent{\it Step 3.} In this step, we prove that there must exist a matrix $A^*\in\Sigma_{\rm r}(\mathcal {A})$ such that $\big\{\{A^*\}\mcup\mathcal{S}\big\}_{\rm LA}=\gl(n)$. As in {\it Step 1} of the proof of Lemma \ref{gl.2}, we consider the digraph $\mathcal{G}_{\rm valid}$. Let $v_{11}$ be the node in $\mathrm{V}_{1}$ with $\deg^-(v_{11})=l>0$, and $v_{i_{1}j_{1}},\dots,v_{i_{l}j_{l}}$ be the out-neighbors of $v_{11}$. Let $v_{k1}$ and $v_{k2}$ denote two different nodes in $\mathrm{V}_{k}$, for $k=1,\dots,r+1$. From \eqref{eq5} we obtain $$a_1E_{v_{11}v_{11}}+\sum_{k=2}^{r+1} a_{k} E_{v_{k1}v_{k1}}\in \big\{\{A\}\mcup\mathcal{S}\big\}_{\rm LA},$$ and $$a_1E_{v_{11}v_{11}}+\sum_{k=2}^{r+1} a_{k} E_{v_{k2}v_{k2}}\in \big\{\{A\}\mcup\mathcal{S}\big\}_{\rm LA}.$$ Write $\mathcal{G}=\varphi\big([a_1E_{v_{11}v_{11}}+\sum_{k=2}^{r+1} a_{k} E_{v_{k2}v_{k2}},[a_1E_{v_{11}v_{11}}+\sum_{k=2}^{r+1} a_{k} E_{v_{k1}v_{k1}}, \widetilde{A}]]\big)$. The digraph $\mathcal{G}$ satisfies: i) $\deg(v_{12})=0$; ii) all arcs are incident to node $v_{11}$; iii) the set of out-neighbors of node $v_{11}$ is a subset of $\{v_{i_{1}j_{1}},\dots,v_{i_{l}j_{l}}\}$. Let $\mathcal {O}$ be the set of out-neighbors of node $v_{11}$ in $\mathcal{G}$. Whether the sets $\mathcal {O}$ and $\{v_{i_{1}j_{1}},\dots,v_{i_{l}j_{l}}\}$ are equal depends on the coefficients of the linear combination defining $A$. It follows that there must exist a matrix $A^*\in\Sigma_{\rm r}(\mathcal {A})$ such that $\mathcal {O}=\{v_{i_{1}j_{1}},\dots,v_{i_{l}j_{l}}\}$. Proceeding as in the proof of Lemma \ref{gl.2}, we have $\big\{\{A^*\}\mcup\mathcal{S}\big\}_{\rm LA}=\gl(n)$, and the proof is complete. \end{proof}
\subsection*{D. Proof of Proposition \ref{su.1}}
\begin{definition}\label{map.1} $(i)$ Let ${\rm S}_1\subseteq\mathpzc{B}$, $\mathrm{S}_2\subseteq\mathpzc{C}$ and $\mathrm{S}_3\subseteq\mathpzc{D}$. The edge-colored multigraph $\mathscr{G}_{\mathrm{S}_1\mcup\mathrm{S}_2\mcup\mathrm{S}_3}$ is given by $\mathscr{G}_{\mathrm{S}_1\mcup\mathrm{S}_2\mcup\mathrm{S}_3}=(\mathrm{V},\mathscr{E}_{\mathrm{S}_1\mcup\mathrm{S}_2\mcup\mathrm{S}_3})$ with $\mathscr{E}_{\mathrm{S}_1\mcup\mathrm{S}_2\mcup\mathrm{S}_3}=\mathscr{E}_{\mathrm{S}_1}\mcup\mathscr{E}_{\mathrm{S}_2}\mcup\mathscr{E}_{\mathrm{S}_3}$, where $\mathscr{E}_{\mathrm{S}_1}=\big\{\{i,j;{\rm Blue}\}:B_{ij}\in\mathrm{S}_1\big\}$, $\mathscr{E}_{\mathrm{S}_2}=\big\{\{i,j;{\rm Red}\}:C_{ij}\in\mathrm{S}_2\big\}$, and $\mathscr{E}_{\mathrm{S}_3}=\big\{\{k,k;{\rm Green}\}: k=i,j, D_{ij}\in\mathrm{S}_3\big\}$.
$(ii)$ Let $\mathscr {G}$ be an edge-colored multigraph without self-loops, i.e., there are ${\rm S}_1\subseteq\mathpzc{B}$ and $\mathrm{S}_2\subseteq\mathpzc{C}$, such that $\mathscr {G}=\mathscr{G}_{\mathrm{S}_1\mcup\mathrm{S}_2}$. Then the edge-colored multigraph transitive closure mapping, $\mathcal{\mathcal {T}}(\cdot)$, is defined as $$ \mathcal{T}\big(\mathscr{G}_{\mathrm{S}_1\mcup\mathrm{S}_2}\big) = \big(\mathrm{V}, \mathscr{E}_{\mathrm{S}_1\mcup\mathrm{S}_2}\mcup\mathscr{E}_1\mcup\mathscr{E}_2\mcup\mathscr{E}_3\big), $$ where $\mathscr{E}_1=\big\{\{i,k;{\rm Blue}\}:\,\exists j \ \text{s.t. }\ \{i,j;{\rm Blue}\}\in \mathscr{E}_{\mathrm{S}_1}, \{j,k;{\rm Blue}\}\in \mathscr{E}_{\mathrm{S}_1} \big\}$, $\mathscr{E}_2=\big\{\{i,k;{\rm Blue}\}:\,\exists j \ \text{s.t. }\ \{i,j;{\rm Red}\}\in \mathscr{E}_{\mathrm{S}_2}, \{j,k;{\rm Red}\}\in \mathscr{E}_{\mathrm{S}_2} \big\}$ and $\mathscr{E}_3=\big\{\{i,k;{\rm Red}\}:\,\exists j \ \text{s.t. }\ \{i,j;{\rm Red}\}\in \mathscr{E}_{\mathrm{S}_2}, \{j,k;{\rm Blue}\}\in \mathscr{E}_{\mathrm{S}_1}, i\neq k \big\}$. \end{definition} We then recursively define $\mathcal{T}^{k}(\mathscr{G}):=\mathcal{T}(\mathcal{T}^{k-1}(\mathscr{G}))$ with $\mathcal{T}^{0}(\mathscr{G}):=\mathscr{G}$. It is worth pointing out that there exists an integer $z$ such that $K_n$ is a spanning subgraph of $\mathcal{T}^{z}(\mathscr{G})$ if and only if $\mathscr{G}$ is connected, where $K_n$ is the complete graph of $n$ vertices.
\begin{proposition}\label{su.1} Consider a subset ${\rm S}\subseteq\mathpzc{B}\mcup\mathpzc{C}\mcup\mathpzc{D}$ with the associated edge-colored multigraph $\mathscr{G}_{\rm S}$. Then the Lie algebra generated by ${\rm S}$ is equal to $\su(n)$ if and only if $\mathscr{G}_{\rm S}$ satisfies one of the following conditions:
$(i)$ $\mathscr{G}_{\rm S}$ has edges of at least two colors, and all the Blue edges together with the vertex set $\mathrm{V}$ form a connected graph;
$(ii)$ $\mathscr{G}_{\rm S}$ is a connected graph with self-loops;
$(iii) $ $\mathscr{G}_{\rm S}$ is a connected graph with a cycle containing an odd number of Red edges. \end{proposition}
Our subsequent study will be based on the following lemma: \begin{lemma}\label{lem3} The Lie brackets among the basis elements of $\su(n)$ satisfy \[[B_{ij},B_{kl}] = \delta_{jk}B_{il}+\delta_{il}B_{jk}+\delta_{jl}B_{ki}+\delta_{ik}B_{lj};\] \[[C_{ij},C_{kl}] =\delta_{li}B_{kj}+\delta_{ki}B_{lj}+\delta_{lj}B_{ki}+\delta_{kj}B_{li};\] \[[B_{ij},C_{kl}]=\delta_{jk}C_{il}+\delta_{jl}C_{ik}-\delta_{il}C_{kj}-\delta_{ik}C_{lj};\] \[[B_{ij},D_{kl}]=(\delta_{jk}+\delta_{li}-\delta_{ki}-\delta_{jl})C_{ij};\] \[[C_{ij},D_{kl}]=(\delta_{ki}+\delta_{jl}-\delta_{kj}-\delta_{il}) B_{ij};\] and \[[D_{ij},D_{kl}]= 0.\] for all $1\leq i,j,k,l \leq n.$ \end{lemma} It is easy to prove this lemma directly, so we omit the proof.
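The relations of Lemma \ref{lem3} can be spot-checked numerically over all index combinations with $i\neq j$ and $k\neq l$; the sketch below again assumes the convention $B_{ij}=e_{ij}-e_{ji}$, $C_{ij}=\mathrm{i}(e_{ij}+e_{ji})$, $D_{ij}=\mathrm{i}(e_{ii}-e_{jj})$, which is ours and is introduced only for the purpose of this check.
\begin{verbatim}
import numpy as np
from itertools import product

n = 5
d = lambda a, b: 1.0 if a == b else 0.0
def E(i, j):
    M = np.zeros((n, n), dtype=complex); M[i - 1, j - 1] = 1.0; return M
B = lambda i, j: E(i, j) - E(j, i)
C = lambda i, j: 1j * (E(i, j) + E(j, i))
D = lambda i, j: 1j * (E(i, i) - E(j, j))
br = lambda X, Y: X @ Y - Y @ X

ok = True
for i, j, k, l in product(range(1, n + 1), repeat=4):
    if i == j or k == l:
        continue
    ok &= np.allclose(br(B(i, j), B(k, l)),
                      d(j, k)*B(i, l) + d(i, l)*B(j, k) + d(j, l)*B(k, i) + d(i, k)*B(l, j))
    ok &= np.allclose(br(C(i, j), C(k, l)),
                      d(l, i)*B(k, j) + d(k, i)*B(l, j) + d(l, j)*B(k, i) + d(k, j)*B(l, i))
    ok &= np.allclose(br(B(i, j), C(k, l)),
                      d(j, k)*C(i, l) + d(j, l)*C(i, k) - d(i, l)*C(k, j) - d(i, k)*C(l, j))
    ok &= np.allclose(br(B(i, j), D(k, l)),
                      (d(j, k) + d(l, i) - d(k, i) - d(j, l)) * C(i, j))
    ok &= np.allclose(br(C(i, j), D(k, l)),
                      (d(k, i) + d(j, l) - d(k, j) - d(i, l)) * B(i, j))
    ok &= np.allclose(br(D(i, j), D(k, l)), np.zeros((n, n)))
print(ok)   # True
\end{verbatim}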
Notice that, by Lemma \ref{lem3}, $ [B_{ij} , B_{kl}] \neq0$ if and only if there exists a bridging index \begin{equation}\label{eq6}
j = k,~ i = l,~j = l,{~\rm or}~ i = k. \end{equation}
Likewise, $[C_{ij},C_{kl}]\neq 0, [B_{ij},C_{kl}]\neq0, [B_{ij},D_{kl}]\neq0$ and $[C_{ij},D_{kl}]\neq 0$ if and only if \eqref{eq6} holds. In particular, $[B_{ij},C_{ij}]=2D_{ij}$.
\subsubsection*{D.1 Proof of Sufficiency for Proposition \ref{su.1}} Denote ${\rm S}={\rm S}_1\mcup{\rm S}_2\mcup{\rm S}_3$ with ${\rm S}_1\subseteq\mathpzc{B}$, ${\rm S}_2\subseteq\mathpzc{C}$ and ${\rm S}_3\subseteq\mathpzc{D}$. In this part, we show that the Lie algebra generated by ${\rm S}$ is equal to $\su(n)$ under each of the conditions $(i)$, $(ii)$, and $(iii)$.
(i) Suppose $\mathscr{G}_{\rm S}$ has edges of at least two colors, and all the Blue edges together with the vertex set $\mathrm{V}$ form a connected graph. It is clear that the subgraph formed by all Blue edges is the graph $\mathscr{G}_{{\rm S}_1}$. From Lemma \ref{so.1}, we conclude that all the elements in $\mathpzc{B}$ can be generated by iterated Lie brackets of elements in ${\rm S}_1$, i.e., $\{{\rm S}_1\}_{\rm LA}=\so(n)$. Also, $\mathscr{G}_{\rm S}$ has edges of at least two colors, i.e., ${\rm S}_2\mcup{\rm S}_3\neq\emptyset$. Note that $[B_{ij},C_{ij}]=2D_{ij}$, so if ${\rm S}_2\neq\emptyset$, then an element of $\mathpzc{D}$ is obtained by bracketing an element of ${\rm S}_2$ with the corresponding element of $\mathpzc{B}\subset\{{\rm S}_1\}_{\rm LA}$. We therefore only need to show that if ${\rm S}_3\neq\emptyset$, then $\{{\rm S}_1\mcup{\rm S}_3\}_{\rm LA}=\su(n)$.
There is no loss of generality in assuming $D_{12}\in{\rm S}_3$. Notice that $\mathpzc{B}\subset\{{\rm S}_1\}_{\rm LA}$. By Lemma \ref{lem3}, we have $$[B_{12},D_{12}]=-2C_{12},~~[B_{1j},D_{12}]=-C_{1j},~ j=3,\dots,n.$$ Then it is evident that $$\mathpzc{C}_1:=\{C_{1j}:2\leq j\leq n\}\subset\{{\rm S}_1\mcup{\rm S}_3\}_{\rm LA}.$$ In addition, $[B_{2j},D_{12}]=C_{2j}$ for $j=3,\dots,n$. This clearly forces $$\mathpzc{C}_2:=\{C_{2j}:3\leq j\leq n\}\subset\{{\rm S}_1\mcup{\rm S}_3\}_{\rm LA}.$$ Lemma \ref{lem3} now gives $D_{23}\in\{{\rm S}_1\mcup{\rm S}_3\}_{\rm LA}$ since $[B_{23},C_{23}]=2D_{23}$. In the same manner, we can see that $$\mathpzc{C}_3:=\{C_{3j}:4\leq j \leq n\}\subset\{{\rm S}_1\mcup{\rm S}_3\}_{\rm LA}.$$ We continue in this fashion obtaining $\mathpzc{C}_i:=\{C_{ij}:i+1\leq j \leq n\}\subset\{{\rm S}_1\mcup{\rm S}_3\}_{\rm LA}$ for $i=4,\dots,n-1$. Therefore $\mathpzc{C}:=\mcup_{i=1}^{n-1}\mathpzc{C}_i\subset\{{\rm S}_1\mcup{\rm S}_3\}_{\rm LA}.$ Immediately there holds $\mathpzc{D}\subset\{{\rm S}_1\mcup{\rm S}_3\}_{\rm LA}$, and consequently, we have $\{{\rm S}_1\mcup{\rm S}_3\}_{\rm LA}=\su(n)$.
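The chain of inclusions above can be confirmed numerically for a small dimension: together with the full set $\mathpzc{B}$, the single element $D_{12}$ already generates $\su(4)$, whose real dimension is $n^2-1=15$. The sketch below is our own (same assumed basis convention as before; the closure routine works over the reals by separating real and imaginary parts).
\begin{verbatim}
import numpy as np

n = 4
def E(i, j):
    M = np.zeros((n, n), dtype=complex); M[i - 1, j - 1] = 1.0; return M
B = lambda i, j: E(i, j) - E(j, i)
D = lambda i, j: 1j * (E(i, i) - E(j, j))

def lie_closure_dim(gens, tol=1e-9):
    mats, vecs = [], []
    vec = lambda M: np.concatenate([M.ravel().real, M.ravel().imag])
    def add(M):
        if np.linalg.matrix_rank(np.vstack(vecs + [vec(M)]), tol=tol) > len(vecs):
            mats.append(M); vecs.append(vec(M)); return True
        return False
    for g in gens:
        add(np.array(g, dtype=complex))
    grew = True
    while grew:
        grew = False
        for X in list(mats):
            for Y in list(mats):
                if add(X @ Y - Y @ X):
                    grew = True
    return len(mats)

gens = [B(i, j) for i in range(1, n + 1) for j in range(i + 1, n + 1)] + [D(1, 2)]
print(lie_closure_dim(gens))   # 15 = n^2 - 1, i.e. su(4)
\end{verbatim}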
(ii) Let $\mathscr{G}_{\rm S}$ be a connected graph with self-loops. Evidently, the subgraph $\mathscr{G}_{{\rm S}_1\mcup{\rm S}_2}:=\big(\mathrm{V},\mathscr{E}_{{\rm S}_1\mcup{\rm S}_2}\big)$ of $\mathscr{G}_{\rm S}$ is connected, and ${\rm S}_3\neq\emptyset$. We can certainly assume that $D_{12}\in {\rm S}_3$. Based on (i), if we prove that the Lie algebra $\{{\rm S}\}_{\rm LA}$ contains $\so(n)$ as a Lie subalgebra, the statement $\{{\rm S}\}_{\rm LA}=\su(n)$ follows.
It is worth noting from Lemma \ref{lem3} and Definition \ref{map.1} that taking first-order Lie brackets of the elements in ${\rm S}_1\mcup{\rm S}_2$ corresponds to adding edges that connect the endpoints of incident edges in $\mathscr{G}_{{\rm S}_1\mcup{\rm S}_2}$. As $\mathscr{G}_{{\rm S}_1\mcup{\rm S}_2}$ is connected, we conclude that there exists an integer $z$ such that $K_n$ is a spanning subgraph of $\mathcal{T}^z\big(\mathscr{G}_{\mathrm{S}_1\mcup\mathrm{S}_2}\big)$. In other words, for any $1\leq i<j\leq n$, $B_{ij}\in\{{\rm S}_1\mcup{\rm S}_2\}_{\rm LA}$ or $C_{ij}\in\{{\rm S}_1\mcup{\rm S}_2\}_{\rm LA}$.
Fix $i=1$. There are two cases: (a) if for all $2\leq j\leq n$, $B_{1j}\in\{{\rm S}_1\mcup{\rm S}_2\}_{\rm LA}$, then $\ell\big(\{B_{12},\dots,B_{1n}\}\big)$ is connected and thus $\{B_{12},\dots,B_{1n}\}_{\rm LA}=\so(n)$. It follows that $\so(n)\subset \{{\rm S}_1\mcup{\rm S}_2\}_{\rm LA}$; (b) if there is a $2\leq j^*\leq n$ such that $B_{1j^*}\notin\{{\rm S}_1\mcup{\rm S}_2\}_{\rm LA}$, then $C_{1j^*}$ must be in $\{{\rm S}_1\mcup{\rm S}_2\}_{\rm LA}$. Since $D_{12}\in{\rm S}_3$, Lemma \ref{lem3} shows that $B_{1j^*}\in\{{\rm S}_1\mcup{\rm S}_2\mcup{\rm S}_3\}_{\rm LA}$, by taking Lie bracket of $C_{1j^*}$ and $D_{12}$. Hence, for any $2\leq j\leq n$, $B_{1j}\in\{{\rm S}_1\mcup{\rm S}_2\mcup{\rm S}_3\}_{\rm LA}$. This clearly forces $\so(n)\subset\{{\rm S}_1\mcup{\rm S}_2\mcup{\rm S}_3\}_{\rm LA}$.
In conclusion, the Lie algebra generated by ${\rm S}$ contains $\so(n)$ as a Lie subalgebra. Furthermore, with ${\rm S}_3\neq\emptyset$, we have $\{{\rm S}\}_{\rm LA}=\su(n)$.
(iii) Let $\mathscr{G}_{\rm S}$ be a connected graph with a cycle containing an odd number of Red edges. By (ii) it is immediate that if there are self-loops in $\mathscr{G}_{\rm S}$, then $\{{\rm S}\}_{\rm LA}=\su(n)$. Thus, we focus on proving $\{{\rm S}\}_{\rm LA}=\su(n)$ for $\mathscr{G}_{\rm S}$ without self-loops, i.e., ${\rm S}_3=\emptyset$. Let ${\rm P}=\big(\{v_1,v_2\},\{v_2,v_3\},\dots,\{v_l,v_{l+1}=v_1\}\big)$ denote a cycle containing an odd number of Red edges. The set of matrices corresponding to these edges in ${\rm P}$ will be denoted by ${\rm S_P}$. The proof is based on the following lemma. \begin{lemma}\label{su.2} The Lie algebra generated by ${\rm S_P}$ contains at least one element in $\mathpzc{D}$. \end{lemma} \begin{proof} Recall that $[B_{ij},C_{ij}]=2D_{ij}, [B_{ij},B_{jk}]=B_{ik}, [C_{ij},C_{jk}]=-B_{ik}$ and $ [C_{ij},B_{jk}]=C_{ik}$. The proof is by induction on $l$, i.e., the cardinality of the set ${\rm S_P}$. We divide the proof into three steps.
\noindent{\it Step 1.} Notice that the cardinality of set ${\rm S_P}\mcap\mathpzc{C}$ is an odd number. If $l=2$, then ${\rm S_P}=\{B_{v_1v_2},C_{v_1v_2}\}$. It follows easily that $D_{v_1v_2}\in\{{\rm S_P}\}_{\rm LA}$.
\noindent{\it Step 2.} Let $l=3$. There are two possibilities for elements in set ${\rm S_P}$: i) ${\rm S_P}=\{B_{v_1v_2},B_{v_2v_3},C_{v_1v_3}\}$; ii) ${\rm S_P}=\{C_{v_1v_2},C_{v_2v_3},C_{v_1v_3}\}$. In either case, we have $B_{v_1v_3}\in\{{\rm S_P}\}_{\rm LA}$, which implies $D_{v_1v_3}\in\{{\rm S_P}\}_{\rm LA}$. Now we assume
\noindent{\it \textbf{Induction Hypothesis}}. For $l\leq K-1$ with $K>2$, the conclusion holds.
\noindent{\it Step 3.} Let $l=K$ and $|{\rm S_P}\mcap\mathpzc{C}|=q$. Since $q$ is odd, $q\geq 1$. We can assume that $C_{v_1v_2}\in {\rm S_P}$. For $\{v_2,v_3\}\in {\rm P}$, there are two possibilities: i) $B_{v_2v_3}\in{\rm S_P}$; ii) $C_{v_2v_3}\in{\rm S_P}$. \begin{itemize}
\item Proof under Case (i): if $B_{v_2v_3}\in{\rm S_P}$, then $C_{v_1v_3}\in\{{\rm S_P}\}_{\rm LA}$. Define ${\rm S_{P'}}:=\big({\rm S_P}\setminus \{C_{v_1v_2},B_{v_2v_3}\}\big)\mcup\{C_{v_1v_3}\}$ with ${\rm P'}=\big(\{v_1,v_3\},\dots,\{v_l,v_1\}\big)$. We must have $|{\rm S_{P'}}|=K-1$ and $|{\rm S_{P'}}\mcap\mathpzc{C}|=q$. By our induction hypothesis, the Lie algebra generated by ${\rm S_{P'}}$ contains at least one element in $\mathpzc{D}$. In addition, ${\rm S_{P'}}\subset\{{\rm S_P}\}_{\rm LA}$. The desired result follows straightforwardly.
\item Proof of Case (ii): if $C_{v_2v_3}\in{\rm S_P}$, then $B_{v_1v_3}\in\{{\rm S_P}\}_{\rm LA}$ and $q\geq 3$. Define ${\rm S_{P'}}:=\big({\rm S_P}\setminus \{C_{v_1v_2},C_{v_2v_3}\}\big)\mcup\{B_{v_1v_3}\}$ with ${\rm P'}=\big(\{v_1,v_3\},\dots,\{v_l,v_1\}\big)$. We must have $|{\rm S_{P'}}|=K-1$ and $|{\rm S_{P'}}\mcap\mathpzc{C}|=q-2$.
Of course, $q-2$ is odd. The rest of the proof proceeds as in Case (i). \end{itemize} This proves the desired lemma. \end{proof} As in the proof of (ii), Lemma \ref{su.2} gives $\{{\rm S}\}_{\rm LA}=\su(n)$. We have thus proved the sufficiency. \subsubsection*{D.2 Proof of Necessity for Proposition \ref{su.1}} Suppose that none of the conditions in Proposition \ref{su.1} holds. Then for the graph $\mathscr{G}_{\rm S}$, there are three possibilities: i) $\mathscr{G}_{\rm S}$ is not connected; ii) $\mathscr{G}_{\rm S}$ is a connected graph containing only Blue edges; iii) $\mathscr{G}_{\rm S}$ is a connected graph without self-loops and without cycles containing an odd number of Red edges. It is easy to check that when $\mathscr{G}_{\rm S}$ is not connected, the Lie algebra generated by ${\rm S}$ is not equal to $\su(n)$. If $\mathscr{G}_{\rm S}$ is a connected graph containing only Blue edges, then $\{{\rm S}\}_{\rm LA}=\so(n)$. Now let $\mathscr{G}_{\rm S}$ be a connected graph without self-loops and without cycles containing an odd number of Red edges. The proof is completed by showing that $\{{\rm S}\}_{\rm LA}\neq\su(n)$.
First, we establish the relationship between Lie bracket operations and the transitive closure of the edge-colored multigraph $\mathscr{G}_{\rm S}$. Since ${\rm S}_3=\emptyset$, we have ${\rm S}={\rm S}_1\mcup{\rm S}_2$. Let $\mathrm {S}^0\subseteq \mathrm {S}^1 \subseteq \mathrm {S}^2\subseteq\cdots$ be an ascending chain of subsets of $\mathpzc{B}\mcup\mathpzc{C}$ such that $\mathrm {S}^0=\mathrm {S}, \mathrm {S}^1 = [\mathrm {S}^0,\mathrm {S}^0]\mcup \mathrm {S}^0, \dots, \mathrm {S}^{k+1} = [\mathrm {S}^k, \mathrm {S}^k]\mcup \mathrm {S}^k,\dots$, where $[\mathrm {S}^k, \mathrm {S}^k] = \big\{[A,B]: A,B \in \mathrm {S}^k\big\}$. The following lemma holds. \begin{lemma}
$\mathcal{T}^{k}(\mathscr {G}_{\mathrm {S}})=\mathscr {G}_{\mathrm {S}^k}$ holds for all $k= 0, 1,\dots$. \end{lemma} \begin{proof} It is evident that $\mathcal{T}^{0}(\mathscr {G}_{\mathrm {S}})=\mathscr{G}_{\mathrm {S}^0}$. Thus, in the rest of the proof, we focus on proving $\mathcal{T}^{k}(\mathscr{G}_{\mathrm {S}})=\mathscr{G}_{\mathrm {S}^k}$ for $k\geq 1$. By the definition of $\mathcal{T}(\cdot)$ and ${\rm S}^k$, we conclude that $\mathcal{T}(\mathscr{G}_{\mathrm {S}^{k-1}})=\mathscr{G}_{\mathrm {S}^k}$ if and only if $\mathscr{G}_{\mathrm {S}^{k-1}}$ has no multiple edges. From now on, the notation $\mathscr{G}\rhd \mathcal {R}$ means that $\mathscr{G}$ does not have a cycle with an odd number of Red edges.
Recall that $\mathscr{G}_{\mathrm {S}}\rhd \mathcal {R}$. This implies that $\mathscr{G}_{\mathrm {S}}$ has no multiple edges, and thus $\mathcal{T}^{1}(\mathscr{G}_{\mathrm {S}})=\mathscr{G}_{\mathrm {S}^1}$. It can be concluded that $\mathscr{G}_{\mathrm {S}^1}$ has no multiple edges. If the assertion were false, then there would exist a cycle of length three or four containing an odd number of Red edges in the graph $\mathscr{G}_{\mathrm {S}}$. This leads to a contradiction. Therefore, $\mathcal{T}(\mathscr{G}_{\mathrm {S}^1})=\mathscr{G}_{\mathrm {S}^2}$, and, together with $\mathscr{G}_{\mathrm {S}^1}=\mathcal{T}^{1}(\mathscr{G}_{\mathrm {S}})$, we have $\mathcal{T}^{2}(\mathscr{G}_{\mathrm {S}})=\mathscr{G}_{\mathrm {S}^2}$.
Our task now is to prove that $\mathscr{G}_{\mathrm {S}^2}$ has no multiple edges. This result will be proved if we can show that $\mathscr{G}_{\mathrm {S}^1}\rhd \mathcal {R}$. To this end, consider $\mathscr{G}_{\mathrm {S}}$. There are two possibilities for $\mathscr{G}_{\mathrm {S}}$: i) each cycle in $\mathscr{G}_{\mathrm {S}}$ contains an even number of Red edges (possibly none); ii) $\mathscr{G}_{\mathrm {S}}$ has no cycles. \begin{itemize}
\item Proof under Case (i): Let each cycle in $\mathscr{G}_{\mathrm {S}}$ contain an even number of Red edges. Note that $\mathscr{G}_{\mathrm {S}^1}=\mathcal{T}(\mathscr{G}_{\mathrm {S}})$. We only need to consider the newly generated cycles in $\mathscr{G}_{\mathrm {S}^1}$ compared to $\mathscr{G}_{\mathrm {S}}$. Let ${\rm P}=\big(\{v_1,v_2\},\{v_2,v_3\},\dots,\{v_l,v_1\}\big)$ be a cycle of $\mathscr{G}_{\mathrm {S}^1}$. Let us first assume that $\{v_1,v_2\}\in\mathscr{G}_{\mathrm {S}^1}, \{v_1,v_2\}\notin\mathscr{G}_{\mathrm {S}}$. For the colour of edge $\{v_1,v_2\}$, there are two possibilities (i) if $\{v_1,v_2; {\rm Blue}\}\in\mathscr{G}_{\mathrm {S}^1}$, then there must exist a node $v_*$ such that $\{v_1,v_*; {\rm Blue}\}$ and $\{v_*,v_2; {\rm Blue}\}$ in $\mathscr{G}_{\mathrm {S}}$, or, $\{v_1,v_*; {\rm Red}\}$ and $\{v_*,v_2; {\rm Red}\}$ in $\mathscr{G}_{\mathrm {S}}$. Let ${\rm P'}=\big(\{v_1,v_*\},\{v_*,v_2\},\{v_2,v_3\},\dots,\{v_l,v_1\}\big)$. It is easily seen that the number of Red edges contained in ${\rm P'}$ and ${\rm P}$ has the same parity; (ii) if $\{v_1,v_2; {\rm Red}\}\in\mathscr{G}_{\mathrm {S}^1}$, then there must exist a node $v_*$ such that $\{v_1,v_*; {\rm Red}\}$ and $\{v_*,v_2; {\rm Blue}\}$ in $\mathscr{G}_{\mathrm {S}}$. We see at once that ${\rm P'}$ and ${\rm P}$ contain the same number of Red edges.
Therefore, the numbers of Red edges contained in ${\rm P'}$ and ${\rm P}$ always have the same parity. It follows that the number of Red edges in ${\rm P}$ is even if all edges in ${\rm P'}$ are in $\mathscr{G}_{\mathrm {S}}$. If not, we can use the above method to replace the edges of $\mathscr{G}_{\mathrm {S}^1}$ in the cycle ${\rm P'}$ with edges of $\mathscr{G}_{\mathrm {S}}$, until all the edges in the resulting cycle ${\rm P^*}$ are in $\mathscr{G}_{\mathrm {S}}$. Furthermore, the numbers of Red edges contained in ${\rm P}$ and ${\rm P^*}$ have the same parity. Then it can be concluded that the number of Red edges in ${\rm P}$ is even.
On account of the arbitrariness of ${\rm P}$, we have $\mathscr{G}_{\mathrm {S}^1}\rhd \mathcal {R}$.
\item Proof of Case (ii): if $\mathscr{G}_{\mathrm {S}}$ has no cycles, then the edge set of graph $\mathcal{T}(\mathscr{G}_{\mathrm {S}})$ is the union of triangles like $\big\{\{v_i,v_j;{\rm Blue}\},\{v_j,v_k;{\rm Blue}\},\{v_k,v_i;{\rm Blue}\}\big\}$ and $\big\{\{v_i,v_j;{\rm Red}\},\{v_j,v_k;{\rm Red}\},\{v_k,v_i;{\rm Blue}\}\big\}$. It can be easily verified that $\mathcal{T}(\mathscr{G}_{\mathrm {S}})\rhd \mathcal {R}$. This gives $\mathscr{G}_{\mathrm {S}^1}\rhd \mathcal {R}$.
\end{itemize} We have thus proved $\mathcal{T}(\mathscr{G}_{\mathrm {S}})=\mathscr{G}_{\mathrm {S}^{1}}\rhd \mathcal {R}$. In fact, the proof above gives more, namely $\mathcal{T}^{k}(\mathscr{G}_{\mathrm {S}})\rhd \mathcal {R}$ holds for $k=0,1,2,...$. This certainly guarantees $\mathcal{T}^{k}(\mathscr{G}_{\mathrm {S}})$ has no multiple edges for all $k\geq1$, and, in consequence, the proof is completed by an easy induction. \end{proof}
We are now in a position to show that $\{{\rm S}\}_{\rm LA}\neq\su(n)$. Since $\mathscr{G}_{\mathrm {S}}$ has finitely many vertices and edges, the ascending chain of graphs $\mathscr{G}_{\mathrm {S}}\subseteq\mathscr{G}_{\mathrm {S}^1}\subseteq\cdots$ stabilizes after finitely many steps. That is, there exists a nonnegative integer $z$ such that $\mathscr{G}_{\mathrm {S}^z}=\mathscr{G}_{\mathrm {S}^{z+1}}=\cdots$. In addition, $\mathscr{G}_{\mathrm {S}}$ is connected, and $\mathscr{G}_{\mathrm {S}^k}$ has no multiple edges for all $k\geq1$. It follows that $\mathscr{G}_{\mathrm {S}^z}$ is a complete graph with no multiple edges. From the definition of ${\rm S}^k$, we deduce that $\mathrm {S}^z$ spans $\{{\rm S}\}_{\rm LA}$. Hence, $\{{\rm S}\}_{\rm LA}$ is an $n(n-1)/2$-dimensional subalgebra of $\su(n)$, and consequently $\{{\rm S}\}_{\rm LA}\neq\su(n)$. We have now completed the proof of Proposition \ref{su.1}.
\subsection*{E. Proof of Theorem \ref{thm4}}
For any $A\in\su(n)$, we can represent it in the form of $$ A=\sum_{k=1}^{l_1} a_{k} B_{{i}_k{j}_k}+\sum_{k=l_1+1}^{l_2} a_{k} C_{{i}_k{j}_k}+\sum_{k=l_2+1}^{l_3} a_{k} D_{{i}_k{j}_k} $$ where $a_k\neq 0\in\mathbb{R}$, $i_k,j_k\in\mathrm{V}$, and $B_{{i}_k{j}_k}\in\mathpzc{B}, C_{{i}_k{j}_k}\in\mathpzc{C}, D_{{i}_k{j}_k}\in\mathpzc{D}$. Define the map $\psi$ that takes a matrix $A\in\su(n)$ to an edge-colored multigraph $\mathscr{G}_{A}:=(\mathrm{V},\mathscr{E}_{A})$ with $\mathscr{E}_{A}=\mathscr{E}_{\rm Blue}\mcup\mathscr{E}_{\rm Red}\mcup\mathscr{E}_{\rm Green}$, where $\mathscr{E}_{\rm Blue}=\big\{\{i_1,j_1;{\rm Blue}\},\dots,\{i_{l_1},j_{l_1};{\rm Blue}\}\big\}$, $\mathscr{E}_{\rm Red}=\big\{\{i_{l_1+1},j_{l_1+1};{\rm Red}\},\dots,\{i_{l_2},j_{l_2};{\rm Red}\}\big\}$, and $\mathscr{E}_{\rm Green}=\big\{\{i_{l_2+1},i_{l_2+1};{\rm Green}\},\dots,\{i_{l_3},i_{l_3};{\rm Green}\}\big\}$.
\begin{definition}\label{map.un} Let $\mathrm{G}=(\mathrm{V},\mathrm{E})$ be an undirected graph. Given any node pair $(i,j)$ of $\mathrm{G}$, the graph $\mathcal{H}_{ij}(\mathrm{G}) := (\mathrm{V},\mathrm{E}_{ij})$ is called the circumjacent closure at node pair $(i,j)$ of $\mathrm{G}$ with $\mathrm{E}_{ij}=\mathrm{E}_{ij}^{1}\mcup\mathrm{E}_{ij}^{2}$, where \begin{align*} \mathrm{E}_{ij}^{1}&= \big\{\{i,k\}\,:\,\{j,k\}\in \mathrm{E}\big\}, \quad\\ &~~~~ \mathrm{E}_{ij}^{2}= \big\{\{j,k\}\,:\,\{i,k\}\in \mathrm{E}\big\}. \end{align*} \end{definition}
\begin{lemma}\label{lem4}
Suppose the graph $\mathrm{G}=(\mathrm{X\mcup Y},\mathrm{E})$ is a bi-graph with $|\mathrm{X}|\geq3,|\mathrm{Y}|\geq3$ and $|\mathrm{E}|\geq1$. Then there exists a finite sequence of node pairs $(i_1,j_1),\dots,(i_z,j_z)$ for some integer $z\geq 1$ such that
$(i)$ Either $i_{s},j_{s}\in \mathrm{X}$ or $i_{s},j_{s}\in \mathrm{Y}$ for $s=1,2,\dots,z$;
$(ii)$ $\mathcal{H}_{i_{z}j_{z}}\big(\cdots\mathcal{H}_{i_{2}j_{2}}\big(\mathcal{H}_{i_{1}j_{1}}(\mathrm{G})\big)\big):=\big(\mathrm{X\mcup Y},\mathrm{E}_{i_{z}j_{z}}\big)$ is also a bi-graph with $|\mathrm{E}_{i_{z}j_{z}}|=1$. \end{lemma} The proof of this lemma appears in \cite{arxiv}, and therefore we will omit it. In general, we have the following result, and it can be proved in much the same way as Lemma \ref{lem4}. \begin{lemma}\label{lem5}
Let $\mathrm{G}:=(\mathrm{V},\mathrm{E})$ be an undirected graph with $|\mathrm{E}|\geq1$. Then there exists a finite sequence of node pairs $(i_1,j_1),\dots,(i_z,j_z)$ for some integer $z\geq 1$, such that the cardinality of the edge set of graph $\mathcal{H}_{i_{z}j_{z}}\big(\cdots\mathcal{H}_{i_{2}j_{2}}\big(\mathcal{H}_{i_{1}j_{1}}(\mathrm{G})\big)\big)$ is equal to 1. \end{lemma}
If $\mathscr{G}$ is an edge-colored multigraph without self-loops or multiple edges, then $\mathscr{G}$ can be regarded as an undirected graph by disregarding the colors of the edges, and the conclusions of Lemmas \ref{lem4} and \ref{lem5} remain valid for $\mathscr{G}$. The proof of Theorem \ref{thm4} is based on the following lemma.
\begin{lemma}\label{lem6} Consider a subset $\mathrm {S}\subseteq\mathpzc{B}\mcup\mathpzc{C}\mcup\mathpzc{D}$ with the associated multigraph $\mathscr{G}_{\mathrm {S}}=(\mathrm{V},\mathscr{E}_{\mathrm {S}})$ and a matrix $A\in\su(n)$ with the associated multigraph $\mathscr{G}_A=(\mathrm{V},\mathscr{E}_A)$. Let $\mathscr{G}_{\mathrm {S}}$ be connected. Then the Lie algebra generated by $\{A\}\mcup\mathrm {S}$ is equal to $\su(n)$ if and only if $\mathscr{G}_A\mcup\mathscr{G}_{\rm S}$ has a self-loop or a cycle with an odd number of Red edges. \end{lemma}
\begin{proof} (Sufficiency) Since $\mathscr{G}_{\mathrm {S}}$ is connected, Proposition \ref{su.1} shows that the Lie algebra generated by ${\rm S}$ is equal to $\su(n)$ if $\mathscr{G}_{\rm S}$ has a self-loop or a cycle with an odd number of Red edges. Now, assume that $\mathscr{G}_{\rm S}$ has neither a self-loop nor a cycle containing an odd number of Red edges. Analysis similar to that in the proof of necessity for Proposition \ref{su.1} shows that there exists a nonnegative integer $z$ such that $\mathscr{G}_{\mathrm {S}^z}=\mathscr{G}_{\mathrm {S}^{z+1}}=\cdots$. In addition, $\mathscr{G}_{\mathrm {S}^z}=({\rm V}, \mathscr {E}_{\mathrm {S}^z})$ is a complete graph with no self-loops or multiple edges. It follows that for any $1\leqslant i< j\leqslant n$, we have $F_{ij}\in\mathrm {S}^z$, either $F=B$ or $F=C$. Below we respectively prove that $\big\{\{A\}\mcup {\rm S}\big\}_{\rm LA}=\su(n)$ under the condition that $\mathscr{G}_A\mcup\mathscr{G}_{\rm S}$ has a self-loop or a cycle with an odd number of Red edges. The proof falls naturally into two parts.
Let us first show that if $\mathscr{G}_A\mcup\mathscr{G}_{\rm S}$ has self-loops, i.e., $\mathscr{G}_{A}$ has self-loops, then $\big\{\{A\}\mcup {\rm S}\big\}_{\rm LA}=\su(n)$. Define $$\widetilde{A}:=A-\sum_{\{{i}_k,{j}_k;{\rm Blue}\}\in \mathscr{E}_{\mathrm {S}^z}}a_{k}B_{{i}_k{j}_k}-\sum_{\{{i}_k,{j}_k;{\rm Red}\}\in \mathscr{E}_{\mathrm {S}^z}}a_{k}C_{{i}_k{j}_k}-\sum_{\{{i}_k,{i}_k;{\rm Green}\},\{{j}_k,{j}_k;{\rm Green}\} \in\mathscr{E}_{\mathrm {S}^z}}a_{k}D_{{i}_k{j}_k}.$$ It is immediate that $\widetilde{A}\neq0$ and $\mathscr{G}_{\widetilde{A}}=({\rm V},\mathscr{E}_{\widetilde{A}})$ has self-loops. Moreover, we have $\big\{\{A\}\mcup{\rm S}\big\}_{\rm LA}=\big\{\{\widetilde{A}\}\mcup{\rm S}^z\big\}_{\rm LA}$. Without loss of generality we can assume $\widetilde{A}=A_{*}+aD_{12}$, $a\neq 0\in\mathbb{R}$. Consider the graph $\mathscr{G}_{\widetilde{A}}=({\rm V},\mathscr {E}_{\widetilde{A}})$. There are two possibilities (i) $\{1,2\}\notin\mathscr {E}_{\widetilde{A}}$; (ii) $\{1,2\}\in\mathscr {E}_{\widetilde{A}}$. \begin{itemize}
\item Proof under Case (i): if $\{1,2\}\notin\mathscr {E}_{\widetilde{A}}$, then
$$\psi\big([\widetilde{A},F_{12}]\big):=\mathscr{G}=({\rm V},\mathscr {E})$$
has neither self-loops nor multiple edges, and $|\mathscr {E}|\geq 1$. With the notation $\widehat{A}=[\widetilde{A},F_{12}]$, we have $\mathscr{G}=\psi(\widehat{A})$. Applying Lemma \ref{lem5}, there exists a finite sequence of node pairs $(i_1,j_1),\dots,(i_w,j_w)$ for some integer $w\geq 1$, such that the cardinality of the edge set of graph $\mathcal{H}_{i_{w}j_{w}}\big(\cdots\mathcal{H}_{i_{2}j_{2}}\big(\mathcal{H}_{i_{1}j_{1}}(\mathscr{G})\big)\big)$ is equal to one. Let $\mathcal{H}_{i_{w}j_{w}}\big(\cdots\mathcal{H}_{i_{2}j_{2}}\big(\mathcal{H}_{i_{1}j_{1}}(\mathscr{G})\big)\big)=\big({\rm V},\big\{\{i^*,j^*\}\big\}\big)$. We conclude that
\begin{align*}
\psi\big([F_{i_{w}j_{w}},\dots,[F_{i_{2}j_{2}},[\widehat{A},F_{i_{1}j_{1}}]]]\big)
=\big({\rm V},\big\{\{i^*,j^*\}\big\}\big).
\end{align*}
Hence
\begin{equation*}
[F_{i_{w}j_{w}},\dots,[F_{i_{2}j_{2}},[\widehat{A},F_{i_{1}j_{1}}]]]=a^{*}F_{i^{*}j^{*}},
\end{equation*}
where $a^{*}$ is the coefficient generated during the operation of the Lie brackets. Since $F_{i_sj_s}\in{\rm S}^z$ for $s=1,\dots,w$, we have $F_{i^{*}j^{*}}\in\big\{\{\widetilde{A}\}\mcup{\rm S}^z\big\}_{\rm LA}$. In addition, with the definition of $\widetilde{A}$, we have $F_{i^{*}j^{*}}\notin{\rm S}^z$. Therefore, $\mathscr{G}_{\{F_{i^{*}j^{*}}\}\mcup{\rm S}^z}$ has multiple edges. Proposition \ref{su.1} now yields $\big\{\{F_{i^{*}j^{*}}\}\mcup{\rm S}^z\big\}_{\rm LA}=\su(n)$. This implies that $\big\{\{\widetilde{A}\}\mcup{\rm S}^z\big\}_{\rm LA}=\su(n)$, and thus $\big\{\{A\}\mcup{\rm S}\big\}_{\rm LA}=\su(n)$.
\item Proof of Case (ii): in $\mathscr{G}_{\widetilde{A}}$, the nodes adjacent to node $1$ other than node $2$ are denoted by $i_1,\dots,i_m$, and the nodes adjacent to node $2$ other than node $1$ are denoted by $j_1,\dots,j_{m'}$, where $m,m'\geq0$.
If $m=0$, let $i\in{\rm V}$ with $i\notin\{1,2\}$. Then it is evident that the graph $\psi\big([\widetilde{A},F_{1i}]\big)$ has neither self-loops nor multiple edges, and the cardinality of its edge set is greater than or equal to two. This is due to the fact that $\{1,i\}\notin\mathscr {E}_{\widetilde{A}}$. If $m\geq 1$, then the edge set of $\psi\big([\widetilde{A},F_{12}]\big)$ contains the edge $\{2,i_1\}$. Let $i_*\in{\rm V}$ with $i_*\notin\{1,2,i_1\}$. We see at once that the edge $\{i_1,i_*\}$ is not in the edge set of $\psi\big([\widetilde{A},F_{12}]\big)$. Therefore, $\psi\big([[\widetilde{A},F_{12}],F_{i_1i_*}]\big)$ has neither self-loops nor multiple edges, and the cardinality of its edge set is greater than or equal to one.
In summary, there must exist a matrix $\widehat{A}\in\big\{\{\widetilde{A}\}\mcup{\rm S}^z\big\}_{\rm LA}$ such that $\psi(\widehat{A})$ has neither self-loops nor multiple edges, and the cardinality of its edge set is greater than or equal to one. Analysis similar to that in the proof of Case (i) shows that $\big\{\{A\}\mcup{\rm S}\big\}_{\rm LA}=\su(n)$. \end{itemize}
The proof above gives more, namely $\big\{\{\widetilde{A}\}\mcup{\rm S}^z\big\}_{\rm LA}=\su(n)$ if $\widetilde{A}\neq0$.
We next prove that if $\mathscr{G}_A\mcup\mathscr{G}_{\rm S}$ has a cycle with an odd number of Red edges, then $\big\{\{A\}\mcup {\rm S}\big\}_{\rm LA}=\su(n)$. We continue to use the definition of $\widetilde{A}$ given above. Now $\widetilde{A}\neq 0$, which is due to the fact that $\mathscr{G}_{\mathrm {S}^z}$ does not have a cycle with an odd number of Red edges, while $\mathscr{G}_A\mcup\mathscr{G}_{\rm S}$ does. It follows easily that $\big\{\{A\}\mcup{\rm S}\big\}_{\rm LA}=\big\{\{\widetilde{A}\}\mcup{\rm S}^z\big\}_{\rm LA}=\su(n)$.
(Necessity) Assume that $\mathscr{G}_A\mcup\mathscr{G}_{\rm S}$ has neither self-loops nor a cycle with an odd number of Red edges. Proposition \ref{su.1} shows that the Lie algebra generated by ${\rm S}$ is not equal to $\su(n)$. In addition, $\mathscr{G}_{\mathrm {S}^z}$ is complete, and it has neither a self-loop nor a cycle with an odd number of Red edges. We must have $\mathscr{E}_A\subseteq \mathscr{E}_{\mathrm {S}^z}$, because $\mathscr{G}_{A}\mcup\mathscr{G}_{{\rm S}^z}$ has neither a self-loop nor a cycle with an odd number of Red edges. This implies that $\big\{\{A\}\mcup{\rm S}\big\}_{\rm LA}=\{{\rm S}\}_{\rm LA}\neq\su(n)$. Therefore, if the Lie algebra generated by $\{A\}\mcup\mathrm {S}$ is equal to $\su(n)$, then $\mathscr{G}_A\mcup\mathscr{G}_{\rm S}$ must have a self-loop or a cycle with an odd number of Red edges. We have now completed the proof of Lemma \ref{lem6}. \end{proof}
We are now in a position to present the proof of Theorem \ref{thm4}. Let $\mathscr{G}_{\rm contr}^{{}^\beta}$ be connected. Since $\mathscr{G}_{\rm drift}^{{}^\alpha} \mcup \mathscr{G}_{\rm contr}^{{}^\beta}$ has a self-loop or a cycle with an odd number of Red edges, there must exist a matrix $A\in\Sigma_{\rm r}(\su(n)_{\alpha})$ such that $\mathscr{G}_A\mcup \mathscr{G}_{\rm contr}^{{}^\beta}$ has a self-loop or a cycle with an odd number of Red edges. Using Lemma \ref{lem6}, we see that $\big\{\{A\}\mcup\so(n)_\beta\big\}_{\rm LA}=\su(n)$. From this we conclude that the system \eqref{bilinear} is structurally controllable on the Lie group ${\rm SU}(n)$ with respect to the pair of zero patterns $\Sigma_{\rm zero}:=\big(\Sigma_{\rm r}(\su(n)_{\alpha}),\Sigma_{\rm f}(\su(n)_{\beta})\big)$.
\subsection*{F. Proof of Theorem \ref{thm5}} The statement in Theorem \ref{thm5} will be proved once we prove the lemma below. \begin{lemma}\label{su.3} Consider a subset $\mathrm {S}\subseteq\mathpzc{B}\mcup\mathpzc{C}\mcup\mathpzc{D}$ with the associated multigraph $\mathscr{G}_{\mathrm {S}}=(\mathrm{V},\mathscr{E}_{\mathrm {S}})$ and a matrix $A\in\su(n)$ with the associated multigraph $\mathscr{G}_A=(\mathrm{V},\mathscr{E}_A)$. The Lie algebra generated by $\{A\}\mcup\mathrm {S}$ is equal to $\su(n)$ if the following conditions hold:
$(i)$ Each connected component of $\mathscr{G}_{\mathrm {S}}$ contains at least three nodes;
$(ii)$ $\mathscr{G}_{A}$ has no multiple edges and the union graph $\mathscr{G}_A \mcup\mathscr{G}_{\mathrm{S}}$ is connected;
$(iii)$ $\mathscr{G}_A \mcup\mathscr{G}_{\mathrm{S}}$ has a self-loop or a cycle with an odd number of Red edges. \end{lemma}
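Before turning to the proof, a small numerical sanity check of the lemma may be helpful. The Python sketch below is illustrative only: it uses the standard elementary skew-Hermitian generators (denoted \texttt{B} and \texttt{C} in the code, with the \texttt{C}-type edges playing the role of the Red edges), whose normalization may differ from the matrices introduced earlier in the paper, and nodes are indexed from $0$. It computes the real span of iterated Lie brackets and reports its dimension; for $n=3$ the expected value is $n^2-1=8=\dim\su(3)$.
\begin{verbatim}
import numpy as np
from itertools import combinations

n = 3  # work in su(3); dim su(3) = n**2 - 1 = 8

def E(j, k):
    M = np.zeros((n, n), dtype=complex)
    M[j, k] = 1.0
    return M

# Assumed (hypothetical) elementary generators; the paper's B_{uv}, C_{uv}
# may be normalized differently.
def B(u, v):          # real antisymmetric type (non-Red edge {u, v})
    return E(u, v) - E(v, u)

def C(u, v):          # imaginary symmetric type (Red edge {u, v})
    return 1j * (E(u, v) + E(v, u))

def bracket(X, Y):
    return X @ Y - Y @ X

def real_span_dim(mats):
    # Stack real and imaginary parts so the rank is taken over the reals.
    vecs = [np.concatenate([M.real.ravel(), M.imag.ravel()]) for M in mats]
    return np.linalg.matrix_rank(np.array(vecs), tol=1e-9)

def generated_dim(gens, max_iter=10):
    basis = list(gens)
    for _ in range(max_iter):
        new = [bracket(X, Y) for X, Y in combinations(basis, 2)]
        dim_before = real_span_dim(basis)
        basis += new
        if real_span_dim(basis) == dim_before:
            break                 # span is bracket-closed, hence the generated Lie algebra
    return real_span_dim(basis)

# S gives a path 0-1-2 (one component with three nodes); A adds the chord {0,2}
# of C-type, closing a cycle with exactly one Red edge.
S = [B(0, 1), B(1, 2)]
A = C(0, 2)
print(generated_dim(S))        # 3: the two B's alone generate a proper subalgebra
print(generated_dim(S + [A]))  # 8 = dim su(3), as the lemma predicts
\end{verbatim}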
\begin{proof} If $\mathscr{G}_{\rm S}$ is connected, then by Lemma \ref{lem6}, the Lie algebra generated by $\{A\}\mcup\mathrm {S}$ is equal to $\su(n)$. Now assume that $\mathscr{G}_{\rm S}$ has $m$ connected components with $ m\geq2$. Let $\mathscr{G}_{i}=(\mathrm{V}_{i},\mathscr{E}_{i})$ denote the $i$-th connected component of $\mathscr{G}_{\rm S}$ for $i=1,\dots,m$. We continue to use the definition of ${\rm S}^k$ in the proof of Proposition \ref{su.1}. Write $\overline{{\rm S}}=\bigcup_{k=1}^\infty {\rm S}^k$. There must exist a nonnegative integer $z$ such that ${\rm S}^z ={\rm S}^{z+1}=\dots$, which then implies $\overline{{\rm S}}={\rm S}^z$. By the definition of $\overline{{\rm S}}$, we see that ${\rm S}^z$ spans $\{{\rm S}\}_{\rm LA}$. Since $\mathscr{G}_{\rm S}$ has $m$ connected components, $\mathscr{G}_{{\rm S}^z}$ has $m$ connected components too. In addition, for any $u<v\in\mathrm{V}_{i}$, $B_{uv}\in{\rm S}^z$ or $C_{uv}\in{\rm S}^z$.
We continue to use the definition of $\widetilde{A}$ in the proof of Lemma \ref{lem6}. As the union graph $\mathscr{G}_{A} \mcup \mathscr{G}_{{\rm S}^z}$ is connected while $\mathscr{G}_{{\rm S}^z}$ is not, we always have $\mathscr{E}_{\widetilde{A}}\neq\emptyset$, i.e., $\widetilde{A}\neq 0$. This allows us to further conclude that \begin{equation}\label{eq7} \big\{\{A\}\mcup{\rm S}\big\}_{\rm LA}=\big\{\{\widetilde{A}\}\mcup{\rm S}^z\big\}_{\rm LA}. \end{equation} The remainder of the proof is divided into two steps.
\noindent{\it Step 1.} We first prove the statement when $\mathscr{G}_{\rm S}$ contains only two connected components, i.e., $m=2$. Because $\mathscr{G}_{A} \mcup \mathscr{G}_{\rm S}$ is connected, $\mathscr{G}_{\widetilde{A}} \mcup \mathscr{G}_{{\rm S}^z}$ is also connected by the definition of $\mathscr{G}_{\widetilde{A}}$ and $\mathscr{G}_{{\rm S}^z}$. In addition, there must exist an edge $\{u,v\}\in\mathscr{G}_{\widetilde{A}}$ such that $u\in{\rm V}_1,v\in{\rm V}_2$. Let $v_{11}$ be the node in $\mathrm{V}_{1}$ with $\deg(v_{11})=k>0$, and $v_{i_{1}j_{1}},\dots,v_{i_{k}j_{k}}$ be its neighbors. For convenience, we let $v_{i_{1}j_{1}},\dots,v_{i_{r}j_{r}}\in\mathrm{V}_{2}$, $1\leq r\leq k$. Consider the node $v_{12}\in \mathrm{V}_{1}$. We must have $F_{v_{11}v_{12}}\in {\rm S}^z$, where $F=B$ or $F=C$.
Let $v_{i_{*}j_{*}}\in \mathrm{V}_{2}$ with $v_{i_{*}j_{*}}\neq v_{i_{1}j_{1}}$. Then $F_{v_{i_{1}j_{1}}v_{i_{*}j_{*}}}\in {\rm S}^z$. Lemma \ref{lem3} now shows that $$\psi\big([[\widetilde{A},F_{v_{11}v_{12}}],F_{v_{i_{1}j_{1}}v_{i_{*}j_{*}}}]\big):=\mathscr{G}=\big({\rm V}_1\mcup{\rm V}_2, \mathscr {E}\big)$$
is a bi-graph, with $|\mathscr {E}|\geq1$. Write $[[\widetilde{A},F_{v_{11}v_{12}}],F_{v_{i_{1}j_{1}}v_{i_{*}j_{*}}}]=\widehat{A}$. It is immediate that $\mathscr{G}=\psi(\widehat{A})$. From Lemma \ref{lem4}, there is an integer $z\geq 1$ and a finite sequence of node pairs either $v_{s},u_{s}\in \mathrm{V}_{1}$ or $v_{s},u_{s}\in \mathrm{V}_{2}$ for $s=1,2,\dots,z$, such that $\mathcal{H}_{v_{z}u_{z}}\big(\cdots(\mathcal{H}_{v_{1}u_{1}}(\mathscr{G}))\big)=\big(\mathrm{V}_{1}\mcup\mathrm{V}_{2},\mathscr{E}_{v_{z}u_{z}}\big)$ is also a bi-graph with $|\mathscr{E}_{v_{z}u_{z}}|=1$. Let $\mathscr{E}_{v_{z}u_{z}}=\big\{\{v^{*},u^{*}\}\big\}$, where $v^{*}\in\mathrm{V}_{1},u^{*}\in\mathrm{V}_{2}$.
Since $\{v_{s},u_{s}\}\in\mathscr{E}_{{\rm S}^z}$, we have $F_{v_{s}u_{s}}\in {\rm S}^z$ for $s=1,\dots,z$.
Based on Definition \ref{map.2} and Lemma \ref{lem3}, it can be concluded that
\begin{align*} \psi\big([F_{v_{z}u_{z}},\dots,[F_{v_{2}u_{2}},[\widehat{A},F_{v_{1}u_{1}}]]]\big)
=\mathcal{H}_{v_{z}u_{z}}\big(\cdots(\mathcal{H}_{v_{1}u_{1}}(\mathscr{G}))\big)
\end{align*} without considering the color of the edges. This yields
$$[F_{v_{z}u_{z}},\dots,[F_{v_{2}u_{2}},[\widehat{A},F_{v_{1}u_{1}}]]]=a^{*}F_{v^{*}u^{*}},$$ where $a^{*}$ is the coefficient generated during the operation of the Lie brackets. Therefore, $F_{v^{*}u^{*}}\in\big\{\{\widetilde{A}\}\mcup{\rm S}^z\big\}_{\rm LA}$, and, together with the connectivity of $\mathscr{G}_{\{F_{v^{*}u^{*}}\}\mcup{\rm S}^z}$, Lemma \ref{lem6} now leads to $$\big\{\{\widetilde{A}\}\mcup{\rm S}^z\big\}_{\rm LA}=\big\{\{\widetilde{A}\}\mcup\{F_{v^{*}u^{*}}\}\mcup{\rm S}^z\big\}_{\rm LA}=\su(n).$$ By \eqref{eq7} it is obvious that $\big\{\{A\}\mcup{\rm S}\big\}_{\rm LA}=\su(n).$
\noindent{\it Step 2.} In this step, we proceed to establish the result for the general case by induction on the number of connected components of $\mathscr{G}_{\rm S}$.
\noindent{\it \textbf{Induction Hypothesis}}. Assume that the statement holds whenever graph $\mathscr{G}_{\rm S}$ contains $m\geq 2$ connected components, that is, $\big\{\{A\}\mcup{\rm S}\big\}_{\rm LA}=\su(n)$. We now prove it when $\mathscr{G}_{\rm S}$ contains $m+1$ connected components.
To do this, consider $\mathscr{G}_{\widetilde{A}}$. Let $v_{11}$ be a node in $\mathrm{V}_{1}$ with $\deg(v_{11})=k>0$, and let $v_{i_{1}j_{1}},\dots,v_{i_{k}j_{k}}$ denote the nodes that are adjacent to $v_{11}$. There is no loss of generality in assuming $v_{i_{1}j_{1}},\dots,v_{i_{r}j_{r}}\in \mathrm{V}_{2}$, with $1\leq r\leq k$. Analysis similar to that in Step 1 shows that $$\psi\big([[\widetilde{A},F_{v_{11}v_{12}}],F_{v_{i_{1}j_{1}}v_{i_{*}j_{*}}}]\big)=\psi(\widehat{A})=\mathscr{G}_a\mcup\mathscr{G}_b,$$
where $\mathscr{G}_a=({\rm V}_1\mcup{\rm V}_2,\mathscr {E}_a)$ is a bi-graph with $|\mathscr {E}_a|\geq1$ and $\mathscr{G}_b=(\mcup_{i=3}^{m+1}{\rm V}_i,\mathscr {E}_b)$ is an empty graph. Again we apply Lemma \ref{lem4} to $\mathscr{G}_a$ and obtain $F_{v^{*}u^{*}}\in\big\{\{\widetilde{A}\}\mcup{\rm S}^z\big\}_{\rm LA}$ with $v^{*}\in\mathrm{V}_{1},u^{*}\in\mathrm{V}_{2}$. It follows immediately that $F_{v^{*}u^{*}}\in\big\{\{A\}\mcup{\rm S}\big\}_{\rm LA}$. Now $\mathscr{G}_{\{F_{v^{*}u^{*}}\}\mcup{\rm S}}$ has $m$ connected components, and by our induction hypothesis, $\big\{\{A\}\mcup\{F_{v^{*}u^{*}}\}\mcup{\rm S}\big\}_{\rm LA}=\su(n)$. Thus we arrive at the conclusion that $\big\{\{A\}\mcup{\rm S}\big\}_{\rm LA}=\su(n)$, and the proof is complete. \end{proof}
\end{document} |
\begin{document}
\title{\textbf{Answer to an Isomorphism \\Problem in $\mathbb{Z}^2$}}
\author{\textbf{Matt Noble}\\ Department of Mathematics and Statistics\\ Middle Georgia State University\\ [email protected]} \date{} \maketitle
\begin{abstract}
For $S \subset \mathbb{R}^n$ and $d > 0$, denote by $G(S, d)$ the graph with vertex set $S$ with any two vertices being adjacent if and only if they are at a Euclidean distance $d$ apart. Deem such a graph to be ``non-trivial" if $d$ is actually realized as a distance between points of $S$. In a 2015 article, the author asked if there exist distinct $d_1, d_2$ such that the non-trivial graphs $G(\mathbb{Z}^2, d_1)$ and $G(\mathbb{Z}^2, d_2)$ are isomorphic. In our current work, we offer a straightforward geometric construction to show that a negative answer holds for this question.\\[5pt]
\noindent \textbf{Keywords and phrases:} Euclidean distance graph, graph isomorphism, lattice points \end{abstract}
\section{Introduction}
\thispagestyle{empty}
In the \textit{Geombinatorics} tradition, this work will feature a blend of graph theory, geometry, and classical number theory. We will follow the terminology and notation used in most graph theory texts, and for clarification, one could consult \cite{chartrand}. In our discussion, we will also employ a number of concepts from elementary number theory -- divisibility, the Chinese remainder theorem, representations of integers as sums of squares. For a refresher, we recommend \cite{leveque}.
For arbitrary graphs $G_1$ and $G_2$, define $G_1$ to be \textit{isomorphic} to $G_2$, and write $G_1 \simeq G_2$, if and only if there exists a bijection $\varphi: V(G_1) \rightarrow V(G_2)$ such that for any $u, v \in V(G_1)$, $u,v$ are adjacent if and only if $\varphi(u), \varphi(v)$ are adjacent. Let $f$ be any graph parameter. If it is guaranteed for isomorphic graphs $G_1$ and $G_2$ that $f(G_1) = f(G_2)$, then $f$ is said to be a \textit{graph invariant}. A common graph invariant that will play a role in our work is $k(G)$, the number of components of $G$.
The notion of the \textit{Euclidean distance graph} has been a central topic of investigation in \textit{Geombinatorics} articles, both past and present, and that will be the case here as well. Let $S \subset \mathbb{R}^n$ and $d > 0$. Define $G(S, d)$ to be the graph whose vertices are the points of $S$, with any two vertices being adjacent if and only if they are a Euclidean distance $d$ apart. Such a graph is deemed \textit{non-trivial} if $d$ is actually realized as a distance between points of $S$, as otherwise, $G(S, d)$ has an empty edge-set, and is not of interest. Note that $G(S, d_1) \simeq G(S, d_2)$ if and only if there exists an automorphism $\varphi$ of $S$ such that for any points $u,v \in S$, $|u - v| = d_1$ if and only if $|\varphi(u) - \varphi(v)| = d_2$. The \textit{isomorphism classes} of distance graphs on $S$ are formed by partitioning the interval $(0, \infty)$ such that for any set $P$ of the partition, and $d_1, d_2 \in P$, $G(S, d_1) \simeq G(S, d_2)$, and $P$ is maximal with respect to this property. As an easy example, note that the space $\mathbb{R}^n$ has only one isomorphism class, as any graph $G(\mathbb{R}^n, d)$ with $d > 0$ is isomorphic to the unit-distance graph $G(\mathbb{R}^n, 1)$ by an obvious scaling argument. Note also that with this definition, we are playing a little fast and loose with the terminology, as one would normally expect an isomorphism class of graphs to actually consist of a collection of graphs, not a collection of distances. However, considering the natural correspondence of a graph $G(S, d)$ to the distance $d$, there should be no confusion or sacrifice of mathematical precision in this setup.
As a quick warmup problem, we observe that for distinct $d_1,d_2$, the non-trivial graphs $G(\mathbb{Z}, d_1)$ and $G(\mathbb{Z}, d_2)$ are not isomorphic. Consider the set of all distance graphs with vertex set $\mathbb{Z}$. A distance $d$ is realized between distinct points of $\mathbb{Z}$ if and only if $d$ is a positive integer, so the trivial isomorphism class (that is, those distances that produce empty graphs) consists of all $d \in \mathbb{R}^+ \setminus \mathbb{Z}$. For a positive integer $d$, note that $G(\mathbb{Z}, d)$ consists of $d$ components, with each of those components being isomorphic to $G(\mathbb{Z}, 1)$. For distinct $d_1, d_2 \in \mathbb{Z}^+$, we therefore have $k(G(\mathbb{Z}, d_1)) \neq k(G(\mathbb{Z}, d_2))$ and thus $G(\mathbb{Z}, d_1) \not \simeq G(\mathbb{Z}, d_2)$.
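For readers who like to experiment, the component count is easy to confirm computationally on a finite window of $\mathbb{Z}$. The short sketch below (plain Python, a union-find over $\{0,\dots,N-1\}$) merely illustrates that $k(G(\mathbb{Z},d))=d$ and plays no role in the argument.
\begin{verbatim}
def num_components(N, d):
    parent = list(range(N))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]   # path halving
            x = parent[x]
        return x

    for v in range(N - d):
        parent[find(v)] = find(v + d)       # edge between integers at distance d
    return len({find(v) for v in range(N)})

for d in (1, 2, 3, 7):
    print(d, num_components(100, d))        # prints d components in each case
\end{verbatim}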
In a 2015 article \cite{isomorphism}, the author studied isomorphism classes of distance graphs with vertex set $\mathbb{Z}^2$ and posed the following question.
\begin{question} \label{z2question} For distinct $d_1, d_2 > 0$, both realized as distances between points of $\mathbb{Z}^2$, is it possible that the graphs $G(\mathbb{Z}^2, d_1)$ and $G(\mathbb{Z}^2, d_2)$ are isomorphic? \end{question}
\noindent Partial results were obtained in \cite{isomorphism}, and we will present them in the next section as a jumping off point for our present work. These partial conclusions seemed to point to Question \ref{z2question} having a negative answer, but a full resolution was not achieved.
In Section 2, we again take up the mantle on this problem. We will offer a relatively straightforward construction showing that indeed, the question does have a negative answer, or, in other words, every non-trivial isomorphism class of Euclidean distance graphs with vertex set $\mathbb{Z}^2$ consists of a single graph. In Section 3, we conclude with a few thoughts concerning directions for further investigation.
\section{An Isomorphism Problem in $\mathbb{Z}^2$}
In this section, let $\mathcal{P} \subset \mathbb{Z}^+$ be the set of all primes congruent to 1 modulo 4. Also throughout this section, we will assume that any $G(\mathbb{Z}^2, d)$ is non-trivial. In other words, $d = \sqrt{r}$ for some $r \in \mathbb{Z}^+$ with $r$ being representable as a sum of two integer squares. These $r$ are given by a well-known theorem of Euler, which we give for quick reference as Lemma \ref{twosquareslemma}.
\begin{lemma} \label{twosquareslemma} A positive integer $r$ may be written as $r = a^2 + b^2$ for $a, b \in \mathbb{Z}$ if and only if in the prime factorization of $r$, prime factors congruent to 3 modulo 4 each appear to an even degree. \end{lemma}
Given some $r$ which is representable as a sum of two integer squares, a characterization of the possible solutions $a,b$ to $r = a^2 + b^2$ is given in many introductory number theory texts (for example, see Chapter 7 of \cite{leveque}).
\begin{lemma} \label{twosquaresrepslemma} Let $r \in \mathbb{Z}^+$ with prime factorization\\ $r = 2^\gamma {p_1}^{\alpha_1}\cdots {p_m}^{\alpha_m} {q_1}^{2\beta_1} \cdots {q_n}^{2\beta_n}$ where $p_1, \ldots, p_m, q_1, \ldots q_n$ are distinct primes with each $p_i \equiv 1 \pmod 4$ and each $q_j \equiv 3 \pmod 4$. The following are both true. \begin{enumerate} \item[(i)] If $\gamma \leq 1$ and $\beta_1 = \cdots = \beta_n = 0$, then there exist $a,b \in \mathbb{Z}$ such that $r = a^2 + b^2$ and $\gcd(a,b) = 1$.
\item[(ii)] Let $r = c^2 + d^2$. Let $g = \gcd(c,d)$ and $h = 2^{\lfloor\frac{\gamma}{2}\rfloor}{q_1}^{\beta_1} \ldots {q_n}^{\beta_n}$. Then $h | g$. \end{enumerate} \end{lemma}
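Both lemmas are easy to test computationally on small inputs. The following self-contained Python sketch (illustrative only, using naive trial-division factorization) compares the criterion of Lemma \ref{twosquareslemma} with a brute-force search for representations, and checks the divisibility claim in part (ii) of Lemma \ref{twosquaresrepslemma}.
\begin{verbatim}
from math import gcd, isqrt

def factorint(r):                       # naive prime factorization, fine for small r
    f, d = {}, 2
    while d * d <= r:
        while r % d == 0:
            f[d] = f.get(d, 0) + 1
            r //= d
        d += 1
    if r > 1:
        f[r] = f.get(r, 0) + 1
    return f

def representable(r):                   # Lemma 1: primes = 3 (mod 4) occur to even powers
    return all(e % 2 == 0 for p, e in factorint(r).items() if p % 4 == 3)

def representations(r):                 # all (a, b) with 0 <= a <= b and a^2 + b^2 = r
    reps = []
    for a in range(isqrt(r) + 1):
        b2 = r - a * a
        if a * a <= b2 and isqrt(b2) ** 2 == b2:
            reps.append((a, isqrt(b2)))
    return reps

def h(r):                               # the forced common divisor of Lemma 2(ii)
    f = factorint(r)
    out = 2 ** (f.get(2, 0) // 2)
    for p, e in f.items():
        if p % 4 == 3:
            out *= p ** (e // 2)
    return out

for r in range(1, 2000):
    reps = representations(r)
    assert representable(r) == bool(reps)                 # Lemma 1
    assert all(gcd(a, b) % h(r) == 0 for a, b in reps)    # Lemma 2(ii): h divides gcd(c, d)
print("checked r = 1, ..., 1999")
\end{verbatim}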
In \cite{isomorphism}, a version of Lemma \ref{twosquaresrepslemma} was used to determine $k(\mathbb{Z}^2, d)$ for all $d$. Moreover, it was shown that if $r_1$ is such that all of its prime factors are elements of $\mathcal{P}$, and $r_2 = r_1(2)^\gamma {q_1}^{2\beta_1} \cdots {q_n}^{2\beta_n}$ with each $q_j \equiv 3 \pmod 4$, then the graph $G(\mathbb{Z}^2, \sqrt{r_2})$ has each of its components isomorphic to the entire graph $G(\mathbb{Z}^2, \sqrt{r_1})$. This observation was utilized to show that all one needs to do to establish that Question \ref{z2question} has a negative answer is to show that for distinct $r_1, r_2 \in \mathbb{Z}^+$ with $r_1, r_2$ each having prime factorizations consisting solely of elements of $\mathcal{P}$, the corresponding $G_1 = G(\mathbb{Z}^2, \sqrt{r_1})$ and $G_2 = G(\mathbb{Z}^2, \sqrt{r_2})$ are not isomorphic. Furthermore, this was done successfully in \cite{isomorphism} in the case of $r_1, r_2$ both prime, through analysis of a particular graph invariant, the number of closed walks of a specified length containing a vertex $v \in V(G_1)$ and its image $\varphi(v) \in V(G_2)$ under an assumed isomorphism $\varphi: G_1 \rightarrow G_2$. Unfortunately, this line of proof appeared very difficult to navigate when $r_1$ or $r_2$ is composite, as in that situation, the enumeration of closed walks does not appear feasible.
We now consider a different graph invariant. Let $G_1, G_2$ be arbitrary graphs, and suppose $\varphi: G_1 \rightarrow G_2$ is an isomorphism. Let $u,v \in V(G_1)$, and for positive integer $l$, define $f_l(u,v)$ as the number of distinct paths of length $l$ beginning at $u$ and terminating at $v$. Clearly, $f_l(u,v) = f_l(\varphi(u),\varphi(v))$. Now denoting $G_1 = G(\mathbb{Z}^2, \sqrt{r_1})$ and $G_2 = G(\mathbb{Z}^2, \sqrt{r_2})$ with $r_1, r_2$ each having prime factorizations consisting solely of elements of $\mathcal{P}$, our plan will be to assume the existence of an isomorphism $\varphi: G_1 \rightarrow G_2$, and in the following lemmas, develop a few conditions to which $\varphi$ must adhere. We then combine these observations in Theorem \ref{z2result} to obtain a contradiction, thus showing Question \ref{z2question} has a negative answer. Throughout, we may without loss of generality freely assume that $\varphi$ fixes the origin and that $r_1 > r_2$.
\begin{lemma} \label{collinearlemma} Let $C_1, C_2$ be circles centered at the origin and having radii $\sqrt{r_1}, \sqrt{r_2}$, respectively. Let point $p \in C_1 \cap \mathbb{Z}^2$. There exists a point $q \in C_1 \cap \mathbb{Z}^2$ such that for all $n \in \mathbb{Z}^+$, $\varphi(np) = nq$. \end{lemma}
\begin{proof} Designate $u = (0,0)$. Let $n \in \mathbb{Z}^+$, and note that $f_n(u, np) = 1$. Denote by $C$ the circle centered at $u$ and having radius $n\sqrt{r_1}$, and note that for any point $\alpha \in \mathbb{Z}^2$ with $\alpha \not \in C$, $f_n(u, \alpha) \neq 1$. This is straightforward to see, as if $\alpha$ lies outside of $C$, then $f_n(u, \alpha) = 0$. If $\alpha$ lies inside $C$, and there exist $\alpha = v_0, v_1, \ldots, v_n = u$ constituting the vertices of a path of length $n$ in $G_1$, then there exists some $i \in \{0, \ldots, n-2\}$ with $|v_i - v_{i+2}| < 2\sqrt{r_1}$, which implies the existence of a lattice point $w \neq v_{i+1}$ such that $v_0, \ldots, v_i, w, v_{i+2}, \ldots, v_n$ also form a path of length $n$ in $G_1$.
We now proceed by way of induction. Note that the lemma holds in the case of $n = 1$ simply by the definition of $\varphi$ being an isomorphism. Assume it holds for all $n \leq k - 1$. In $G_1$, we have $f_k(u,kp) = 1$, and so in $G_2$, $f_k(u,\varphi(kp)) = 1$ as well. Thus $\varphi(kp)$ lies on a circle $C'$ centered at $u$ and having radius $k\sqrt{r_2}$. However, $\varphi(kp)$ must also be at distance $\sqrt{r_2}$ from $(k-1)q$. A circle $C''$ of radius $\sqrt{r_2}$ and centered at $(k-1)q$ intersects $C'$ at exactly the point $kq$. This completes the induction step and establishes proof of the lemma.
\ensuremath{\Box} \end{proof}
\begin{lemma} \label{anglelemma} There exist vectors $v_\alpha, v_\beta \in \mathbb{Z}^2$, each of length $\sqrt{r_1}$, such that the angle $\theta$ between $v_\alpha, v_\beta$ is not realized between any pair of vectors $w_\alpha, w_\beta \in \mathbb{Z}^2$, each of length $\sqrt{r_2}$. \end{lemma}
\begin{proof} Since by assumption $r_1 > r_2$, for some prime $p \in \mathcal{P}$ and positive integer $n$, it is the case that $p^n$ divides $r_1$ but does not divide $r_2$. By Lemma \ref{twosquaresrepslemma}, there exist $a,b \in \mathbb{Z}^+$ such that $a^2 + b^2 = r_1$ and $\gcd(a,b) = 1$. Note here that $p$ divides neither $a$ nor $b$.
Let $v_\alpha = \langle a,b \rangle$ and $v_\beta = \langle b,a \rangle$. The angle $\theta$ is then given by $\cos(\theta) = \frac{v_\alpha \cdot v_\beta}{|v_\alpha||v_\beta|} = \frac{2ab}{r_1}$. Similarly, letting $w_\alpha, w_\beta \in \mathbb{Z}^2$ be vectors of length $\sqrt{r_2}$ and $\theta'$ the angle between them, we have $\cos(\theta') = \frac{w_\alpha \cdot w_\beta}{r_2}$. Setting $\cos^{-1}(\frac{2ab}{r_1}) = \cos^{-1}(\frac{w_\alpha \cdot w_\beta}{r_2})$ gives a contradiction as it implies $2abr_2 = r_1(w_\alpha \cdot w_\beta)$ where $p^n$ divides $r_1(w_\alpha \cdot w_\beta)$ but $p^n$ does not divide $2abr_2$.
\ensuremath{\Box} \end{proof}
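For a concrete illustration of Lemma \ref{anglelemma} (purely as a sanity check, using the hypothetical small values $r_1=25$ and $r_2=5$), the following exact-arithmetic Python fragment confirms that the cosine $\frac{2ab}{r_1}$ produced in the proof is not attained by any pair of lattice vectors of length $\sqrt{r_2}$.
\begin{verbatim}
from fractions import Fraction
from math import isqrt

r1, r2 = 25, 5        # all prime factors congruent to 1 mod 4, and r1 > r2
a, b = 3, 4           # a^2 + b^2 = r1 with gcd(a, b) = 1

def lattice_vectors(r):
    m = isqrt(r)
    return [(x, y) for x in range(-m, m + 1) for y in range(-m, m + 1)
            if x * x + y * y == r]

cos_theta1 = Fraction(2 * a * b, r1)        # cosine of the angle between <a,b> and <b,a>
cosines_at_r2 = {Fraction(u[0] * v[0] + u[1] * v[1], r2)
                 for u in lattice_vectors(r2) for v in lattice_vectors(r2)}
print(cos_theta1 in cosines_at_r2)          # False: the angle is not realized at length sqrt(r2)
\end{verbatim}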
\begin{lemma} \label{10lemma} Let $r \in \mathbb{Z}^+$ where any prime factor of $r$ is in $\mathcal{P}$. Let $u,v \in \mathbb{Z}^2$ where $|u - v| < \sqrt{r}$. There exists a path in $G(\mathbb{Z}^2, \sqrt{r})$ beginning at $u$ and terminating at $v$ and having length $n < 8r^\frac32$. \end{lemma}
\begin{proof} By Lemma \ref{twosquaresrepslemma}, let $a,b \in \mathbb{Z}^+$ where $a^2 + b^2 = r$ and $\gcd(a,b) = 1$. Since $r$ is odd, one of $a,b$ is odd and the other even, so without loss of generality, assume $a$ even. By the Chinese remainder theorem, there exist $s, t \in \mathbb{Z}^+$ such that $sa - tb = -1$. Note here that $s+t < a+b$. As observed in Theorem 3.2 of \cite{isomorphism}, the following sum of vectors yields $\langle 0, 1 \rangle$.
\begin{center} $\langle a, b \rangle + \frac{a}{2}\left[s \langle a, b \rangle + s \langle a, -b \rangle + t \langle -b, a \rangle + t \langle -b, -a \rangle \right] + \frac{b - 1}{2} \left[s \langle b, a \rangle + s \langle -b, a \rangle + t \langle a, -b \rangle + t \langle -a, -b \rangle \right]$ \end{center}
This sum adds together $(s+t)(a+b-1) + 1$ vectors in total, and by negating or permuting entries of those vectors used, we may also form any of $\langle 1, 0 \rangle$, $\langle -1, 0 \rangle$, or $\langle 0, -1 \rangle$ by summing a similar number of vectors, each of length $\sqrt{r}$. Let $\langle x,y \rangle$ be the vector with initial point $u$ and terminal point $v$. We have each of $|x|,|y|$ less than $\sqrt{r}$ (as well as each of $a,b$ less than $\sqrt{r}$), so the desired path can be found in $G(\mathbb{Z}^2, \sqrt{r})$ of length $(s+t)(a+b-1)(|x| + |y|)$ which is in turn less than $8r^\frac32$.
\ensuremath{\Box} \end{proof}
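The vector bookkeeping in the proof of Lemma \ref{10lemma} is easy to check mechanically. The sketch below instantiates the construction with the illustrative values $r=13$, $(a,b)=(2,3)$ and $(s,t)=(1,1)$ (so that $sa-tb=-1$), and confirms that every listed step has length $\sqrt{r}$, that there are $(s+t)(a+b-1)+1$ of them, and that they sum to $\langle 0,1\rangle$.
\begin{verbatim}
r = 13
a, b = 2, 3                 # a^2 + b^2 = r, gcd(a, b) = 1, a even
s, t = 1, 1                 # s*a - t*b = -1

steps = [(a, b)]
steps += (a // 2) * (s * [(a, b)] + s * [(a, -b)] + t * [(-b, a)] + t * [(-b, -a)])
steps += ((b - 1) // 2) * (s * [(b, a)] + s * [(-b, a)] + t * [(a, -b)] + t * [(-a, -b)])

assert all(x * x + y * y == r for x, y in steps)      # every step has length sqrt(r)
assert len(steps) == (s + t) * (a + b - 1) + 1        # the step count claimed in the proof
print(sum(x for x, _ in steps), sum(y for _, y in steps))   # 0 1
\end{verbatim}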
We remark in passing that the order of the ``$8r^\frac32$" bound in the statement of Lemma \ref{10lemma} can almost certainly be improved. Perhaps that may even merit investigation in some future research program. However, the present form of Lemma \ref{10lemma} is fine for its intended use in the proof of Theorem \ref{z2result}.
\begin{theorem} \label{z2result} Let distinct $r_1, r_2 \in \mathbb{Z}^+$, each having prime factorizations consisting solely of elements of $\mathcal{P}$. Then $G(\mathbb{Z}^2, \sqrt{r_1}) \not \simeq G(\mathbb{Z}^2, \sqrt{r_2})$. \end{theorem}
\begin{proof} As in previous discussion, let $G_1 = G(\mathbb{Z}^2, \sqrt{r_1})$ and \\$G_2 = G(\mathbb{Z}^2, \sqrt{r_2})$ with $r_1 > r_2$, and assume the existence of an isomorphism $\varphi: G_1 \rightarrow G_2$ that fixes the origin. Regarding Lemma \ref{anglelemma}, let $v_\alpha, v_\beta \in \mathbb{Z}^2$ be vectors of length $\sqrt{r_1}$ where the angle $\theta_1$ between $v_\alpha, v_\beta$ is not realized between any pair of vectors in $\mathbb{Z}^2$ having length $\sqrt{r_2}$.
Consider the two sets of collinear points $\{v_\alpha, 2v_\alpha, 3v_\alpha, \ldots\}$ and\\ $\{v_\beta, 2v_\beta, 3v_\beta, \ldots\}$. By Lemma \ref{collinearlemma}, for some vectors $w_\alpha, w_\beta$, each of length $\sqrt{r_2}$, $\varphi(iv_\alpha) = iw_\alpha$ and $\varphi(iv_\beta) = iw_\beta$ for all $i \in \mathbb{Z}^+$. Let $\theta_2$ be the angle between vectors $w_\alpha, w_\beta$. We have $\theta_1 \neq \theta_2$, and we will assume $\theta_1 < \theta_2$, however a similar argument would hold in the case of $\theta_2 < \theta_1$. Note that, since $\theta_1 < \theta_2$, by taking larger and larger $m,n$, we can make the difference $|mw_\alpha - nw_\beta| - |mv_\alpha - nv_\beta|$ arbitrarily large as well.
Let $\ell$ be the ray containing each of $(0,0), v_\beta, 2v_\beta, 3v_\beta, \ldots$, and for each $i \in \mathbb{Z}^+$, let $C_i$ be the circle centered at $iv_\beta$ and having radius $\sqrt{r_1}$. Form support lines $\ell_1, \ell_2$ by translating $\ell$ by vectors $t, -t$ where $t$ is perpendicular to $\ell$ and has length $\frac{\sqrt{3r_1}}{2}$. For a visual reference, see Figure 1. Let $v \in \mathbb{Z}^2$ be a vector of length $\sqrt{r_1}$ where $v \not \in \{v_\alpha, v_\beta\}$, so that we may start at $mv_\alpha$ for some sufficiently large $m$ and form points $mv_\alpha + v, mv_\alpha + 2v, mv_\alpha + 3v, \ldots$ such that the ray containing each of $mv_\alpha, mv_\alpha + v, mv_\alpha + 2v, mv_\alpha + 3v, \ldots$ intersects $\ell$. Such a vector $v$ is guaranteed to exist, and in fact, if one uses $v_\alpha = \langle a,b \rangle, v_\beta = \langle b,a \rangle$ with $a^2 + b^2 = r_1$ and $a,b \in \mathbb{Z}^+$ (as described in the proof of Lemma \ref{anglelemma}), $v = \langle a, -b \rangle$ is suitable.
\begin{figure}
\caption{The ray $\ell$ through $(0,0), v_\beta, 2v_\beta, \ldots$, the circles $C_i$ of radius $\sqrt{r_1}$ centered at the points $iv_\beta$, and the support lines $\ell_1, \ell_2$.}
\label{circlepicture}
\end{figure}
Note that there must be some $k \in \mathbb{Z}^+$ such that the point $p_k = mv_\alpha + kv$ falls between $\ell_1, \ell_2$, and in doing so, we have for some $n$, $|p_k - nv_\beta| < \sqrt{r_1}$. By Lemma \ref{10lemma}, there exists a path in $G_1$, beginning at $p_k$ and terminating at $nv_\beta$, of length less than $8r_1^\frac32$. Letting $z = k + 8r_1^\frac32$, in $G_1$ we therefore have $f_z(mv_\alpha, nv_\beta) > 0$. However, for $m$ taken sufficiently large, $f_z(mw_\alpha, nw_\beta) = 0$, a contradiction that completes the proof of the theorem.
\ensuremath{\Box} \end{proof}
Theorem \ref{z2result}, along with the previously described work of \cite{isomorphism}, give us the following main result.
\begin{theorem} \label{mainresult} Let distinct $d_1, d_2$ be distances realized between points of the integer lattice $\mathbb{Z}^2$. Then the graphs $G(\mathbb{Z}^2, d_1)$ and $G(\mathbb{Z}^2, d_2)$ are not isomorphic. \end{theorem}
\section{Further Work}
Along with Question \ref{z2question}, the following was posed in \cite{isomorphism}.
\begin{question} \label{subgraphquestion} Let $G_1 = G(\mathbb{Z}^2, d_1)$ and $G_2 = G(\mathbb{Z}^2, d_2)$ where $G_1 \not \simeq G_2$. If possible, construct a finite graph $H$ which appears as a subgraph of exactly one of $G_1, G_2$. \end{question}
Of course, the existence of such $H$ would guarantee $G_1 \not \simeq G_2$ without the a priori knowledge of Theorem \ref{mainresult} that the two graphs are not isomorphic. Note also that the qualifier ``If possible" is necessary, as for some instances of $G_1, G_2$, no such $H$ exists. As touched upon in the previous section, consider $G_1 = G(\mathbb{Z}^2, 1)$ and $G_2 = G(\mathbb{Z}^2, \sqrt{2})$. Graph $G_1$ is connected, while $G_2$ is a disconnected graph having two components, each of which is isomorphic to $G_1$. Even without access to Theorem \ref{mainresult}, we then have that $G_1 \not \simeq G_2$, but here, the construction of the desired $H$ is not possible.
Unfortunately, the line of proof given in the previous section, when taken at face value, does not offer much toward a resolution of Question \ref{subgraphquestion}. Really, the only graphs that are explicitly noted in the proof as being subgraphs of $G(\mathbb{Z}^2, \sqrt{r_1})$ are paths and even cycles. Certainly, these are subgraphs of any $G(\mathbb{Z}^2, \sqrt{r_2})$ as well. The characteristics of $G(\mathbb{Z}^2, \sqrt{r_1})$ that were utilized in the proof of Theorem \ref{mainresult} were more geometric in nature, as opposed to being graph or number theoretic. However, perhaps in some future investigation, the method used in Section 2 can be modified to elaborate upon the underlying structural properties of $G(\mathbb{Z}^2, d)$ in a more concrete manner, and hopefully shed some light on Question \ref{subgraphquestion}.
\end{document} |
\begin{document}
\title{Two statements of the Duggan-Schwartz theorem}
\begin{abstract} The Duggan-Schwartz theorem \citep{Duggan1992} is a famous result concerning strategy-proof social choice correspondences, often stated as ``A social choice correspondence that can be manipulated by neither an optimist nor a pessimist has a weak dictator''. However, this formulation is actually due to \citet{Taylor2002}, and the original theorem, at face value, looks rather different. In this note we show that the two are in fact equivalent. \end{abstract}
\section{Definitions}
\begin{definition}
Let $V$ be a finite set of voters, $A$ a finite set of alternatives.
A profile $P$ consists of a linear order over $A$ (also known
as a \emph{preference order} or a \emph{ballot}), $P_i$, for every voter $i$. The set of all profiles of voters $V$ over alternatives $A$ is denoted $\mathcal{P}(V,A)$. We use $P_{-i}$ to refer to the ballots of all voters except $i$. Hence,
$P=P_iP_{-i}$ and $P_i'P_{-i}$ is obtained from profile $P$ by replacing
$P_i$ with $P_i'$.
A \emph{social choice correspondence} produces a nonempty set
of alternatives, $F:\mathcal{P}(V,A)\rightarrow 2^A\backslash\set{\emptyset}$. \end{definition}
\begin{definition}\label{def:sp}
Let $\emptyset\neq W\subseteq A$. We use $\text{best}(P_i,W)$ to denote the best alternative in $W$ according to $P_i$, $\text{worst}(P_i,W)$ the worst.
We extend $\succeq_i$ into two weak orders over $2^A\backslash\set{\emptyset}$:
\begin{enumerate}
\item $X\succeq_i^O Y$ iff $\textnormal{best}(P_i,X)\succeq_i\textnormal{best}(P_i,Y)$.
\item $X\succeq_i^P Y$ iff $\textnormal{worst}(P_i,X)\succeq_i\textnormal{worst}(P_i,Y)$.
\end{enumerate}
A social choice correspondence is \emph{strategy-proof for optimists} (SPO) if for all $P_i'$, whenever $F(P_iP_{-i})=W$ and $F(P_i'P_{-i})=W'$,
$W\succeq_i^O W'$.
A social choice correspondence is \emph{strategy-proof for pessimists} (SPP) if for all $P_i'$, whenever $F(P_iP_{-i})=W$ and $F(P_i'P_{-i})=W'$,
$W\succeq_i^P W'$. \end{definition}
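To make the two notions concrete, the following small Python sketch implements $\textnormal{best}$, $\textnormal{worst}$ and a brute-force manipulability test over all misreports. The correspondence \texttt{tops} (the set of all voters' top choices) is an illustrative choice of ours, not one discussed in this note; for it, neither an optimist nor a pessimist can manipulate, and every voter's top choice is always selected, consistent with the theorems below.
\begin{verbatim}
from itertools import permutations

A = ('a', 'b', 'c')

def best(ballot, W):                 # ballot lists alternatives from most to least preferred
    return min(W, key=ballot.index)

def worst(ballot, W):
    return max(W, key=ballot.index)

def tops(profile):                   # illustrative correspondence: the set of all top choices
    return frozenset(ballot[0] for ballot in profile)

def manipulable(F, profile, i, pick):
    # pick = best for an optimist, worst for a pessimist
    Pi, W = profile[i], F(profile)
    for lie in permutations(A):
        W2 = F(profile[:i] + (lie,) + profile[i + 1:])
        if Pi.index(pick(Pi, W2)) < Pi.index(pick(Pi, W)):
            return True              # some misreport strictly improves the relevant element
    return False

P = (('a', 'b', 'c'), ('c', 'b', 'a'))          # two voters with opposed ballots
print(manipulable(tops, P, 0, best))            # False: no optimist manipulation here
print(manipulable(tops, P, 0, worst))           # False: no pessimist manipulation here
\end{verbatim}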
\begin{definition}
Given a social choice correspondence $F$, a \emph{weak dictator} is some $i\in V$ such that the first choice of $i$ is always in $F(P)$. \end{definition}
\section{Proofs}
\begin{theorem}[\cite{Taylor2002}]\label{thm:Taylor}
Let $F$ be a social choice correspondence that satisfies SPP, SPO and is onto with respect
to singletons. That is, for every $a\in A$ there exists a $P$ such that $F(P)=\set{a}$.
For $|A|\geq 3$, $F$ has a weak dictator. \end{theorem}
\begin{theorem}[\cite{Duggan1992}]
Let $F$ be a social choice correspondence that is onto with respect
to singletons. That is, for every $a\in A$ there exists a $P$ such that $F(P)=\set{a}$.
Let each voter $i$ be equipped with a probability
function $p_i:\mathcal{P}(V,A)\times A\times 2^A\rightarrow [0,1]$ such that $\sum_{x\in X}p_i(P,x,X)=1$ and $p_i(P,a,X)>0$ whenever $a=\textnormal{best}(P_i,X)$
or $a=\textnormal{worst}(P_i,X)$.
Suppose further that for every $u_i$ consistent with $P_i$
($u_i(a)>u_i(b)$ whenever $a\succ_i b$), and for every $P_i'$, the following is true:
$$\sum_{x\in F(P_iP_{-i})}p_i(P_iP_{-i},x,F(P_iP_{-i}))u_i(x)\geq
\sum_{x\in F(P_i'P_{-i})}p_i(P_iP_{-i},x,F(P_i'P_{-i}))u_i(x).$$
For $|A|\geq 3$, $F$ has a weak dictator. \end{theorem}
The notion of manipulation used by \citeauthor{Duggan1992} is obviously more general than that of \citeauthor{Taylor2002}, and one is thus tempted to conclude that the original theorem is weaker than Taylor's reformulation.\footnote{A wider notion of manipulability implies a narrower notion of strategy-proofness, and hence the theorem would apply to fewer functions.}
However, this would be erroneous as the theorems, strictly speaking, are incomparable. Taylor's theorem concerns a social choice correspondence $F$, whereas \citeauthor{Duggan1992}'s theorem applies to $F$ \emph{together} with a set of probability functions, $p_i$. It is entirely plausible that one could find two sets of probability functions such that $F$ and $p_1,\dots,p_n$ satisfy the hypotheses of the Duggan-Schwartz theorem while $F$ and $p_1',\dots,p_n'$ do not. However, $F$ is unchanged -- it either has a weak dictator, or it does not.
To more properly compare the two theorems, then, we need to take an existential projection over the original Duggan-Schwartz theorem.
\begin{theorem}[\cite{Duggan1992}]\label{thm:DSexist}
Let $F$ be a social choice correspondence that is onto with respect
to singletons. That is, for every $a\in A$ there exists a $P$ such that $F(P)=\set{a}$.
Suppose there exist probability
functions $p_i:\mathcal{P}(V,A)\times A\times 2^A\rightarrow [0,1]$ such that $\sum_{x\in X}p_i(P,x,X)=1$ and $p_i(P,a,X)>0$ whenever $a=\textnormal{best}(P_i,X)$
or $a=\textnormal{worst}(P_i,X)$.
Suppose further that for every $u_i$ consistent with $P_i$
($u_i(a)>u_i(b)$ whenever $a\succ_i b$), and for every $P_i'$, the following is true:
$$\sum_{x\in F(P_iP_{-i})}p_i(P_iP_{-i},x,F(P_iP_{-i}))u_i(x)\geq
\sum_{x\in F(P_i'P_{-i})}p_i(P_iP_{-i},x,F(P_i'P_{-i}))u_i(x).$$
For $|A|\geq 3$, $F$ has a weak dictator. \end{theorem}
Now we claim the two theorems are equivalent.
\begin{proposition}
$F$ satisfies the hypotheses of \cref{thm:Taylor} if and only if
$F$ satisfies the hypotheses of \cref{thm:DSexist}. \end{proposition} \begin{proof}
We will first show that if $F$ is manipulable in the sense
of Taylor it is manipulable in the sense of Duggan-Schwartz.
Pay heed to the order of the quantifiers in \cref{thm:DSexist},
as they may appear counter-intuitive:
$F$ is strategy-proof if for \emph{some} choice of
probability functions, for \emph{every} choice of a
utility function, voter $i$ cannot improve his expected
utility. Hence, $F$ is manipulable just if for \emph{every}
choice of probability functions we can construct \emph{some}
utility function giving voter $i$ a profitable deviation.
Suppose $i$ can manipulate optimistically from $P_iP_{-i}$
to $P_i'P_{-i}$. That is:
\begin{align*}
F(P_iP_{-i})=X&,\quad F(P_i'P_{-i})=Y,\\
\textnormal{best}(P_i,X)=a&,\quad\textnormal{best}(P_i,Y)=b,\\
a&\prec_i b.
\end{align*}
Now, let $p_i$ be any probability function in the sense
of \cref{thm:DSexist}. Note that this means that $p_i(P,a,X)=\epsilon$
and $p_i(P,b,Y)=\delta$ are strictly positive. Let $c\in X$ be the next-best alternative after $a$. Observe that an upper bound
on the utility voter $i$ obtains sincerely is $\epsilon u_i(a)+(1-\epsilon)u_i(c)$, whereas the lower bound on
the utility voter $i$ obtains from the deviation is $\delta u_i(b)$.
All we need to do is pick a $u_i$ that satisfies:
$$\delta u_i(b)>\epsilon u_i(a)+(1-\epsilon)u_i(c).$$
It is of course easy to do so as, necessarily, $u_i(b)>u_i(a),u_i(c)$,
and $\epsilon,\delta$ are constants. For example, let $u_i(c)=1$, $u_i(a)=2$ and $u_i(b)=\sfrac{3}{\delta}$ (note that consistency forces $u_i(a)>u_i(c)$, since $a$ is the best element of $X$); this choice is verified in the sketch following the proof.
Suppose $i$ can manipulate pessimistically from $P_iP_{-i}$
to $P_i'P_{-i}$. That is:
\begin{align*}
F(P_iP_{-i})=X&,\quad F(P_i'P_{-i})=Y,\\
\textnormal{worst}(P_i,X)=a&,\quad\textnormal{worst}(P_i,Y)=b,\\
a&\prec_i b.
\end{align*}
As before, let $p_i$ be any probability function in the sense
of \cref{thm:DSexist}. This means that $p_i(P,a,X)=\epsilon$
and $p_i(P,b,Y)=\delta$ are strictly positive. Let $c\in X$ be the best alternative in the set. Observe that an upper bound
on the utility voter $i$ obtains sincerely is $\epsilon u_i(a)+(1-\epsilon)u_i(c)$, whereas the lower bound on
the utility voter $i$ obtains from the deviation is $u_i(b)$.\footnote{$b$ is the worst element in $Y$, so the utility of
any other element must be at least $u_i(b)$, and $p_i(P,y,Y)$ sums to 1.}
All we need to do is pick a $u_i$ that satisfies:
$$u_i(b)>\epsilon u_i(a)+(1-\epsilon)u_i(c).$$
This time it is possible that $u_i(c)>u_i(b)$, however
$1-\epsilon$ is strictly smaller than 1. One possibility
is $u_i(a)=1$,
$u_i(b)=\frac{\sfrac{1}{\epsilon}+\epsilon+1}{1-\epsilon}$, $u_i(c)=\frac{\sfrac{1}{\epsilon}+\epsilon+2}{1-\epsilon}$.
This leads to the following inequality, which can be verified algebraically (see also the sketch following this proof):
$$\frac{\sfrac{1}{\epsilon}+\epsilon+1}{1-\epsilon}>2\epsilon+2+\sfrac{1}{\epsilon}.$$
Now suppose that $F$ is manipulable in the sense of Duggan-Schwartz.
This means for every choice of $p_i$, there is some choice of $u_i$
such that for some choice of $P_iP_{-i}$ and $P_i'P_{-i}$, $i$'s expected
utility is higher in the insincere profile.
Pick a $p_i$ that attaches a probability of $\sfrac{1}{2}$ to the
best alternative in the set and $\sfrac{1}{2}$ to the worst.
In other words, we have the following situation:
\begin{align*}
F(P_iP_{-i})=X&,\quad F(P_i'P_{-i})=Y,\\
\textnormal{best}(P_i,X)=x_1&,\quad\textnormal{best}(P_i,Y)=y_1,\\
\textnormal{worst}(P_i,X)=x_2&,\quad\textnormal{worst}(P_i,Y)=y_2,\\
\frac{u_i(x_1)+u_i(x_2)}{2}&<\frac{u_i(y_1)+u_i(y_2)}{2}.\\
\end{align*}
Clearly, a necessary condition for the above to hold is that
either $u_i(y_1)>u_i(x_1)$ or $u_i(y_2)>u_i(x_2)$. That is
to say, $F$ is manipulable by either an optimist or a pessimist. \end{proof}
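The two explicit utility choices in the proof above can also be checked symbolically. The fragment below (a verification aid only, assuming the SymPy library is available) computes the two relevant gaps; both are strictly positive for $\epsilon\in(0,1)$ and $\delta\in(0,1]$, which is all the argument needs.
\begin{verbatim}
import sympy as sp

eps, delta = sp.symbols('epsilon delta', positive=True)

# Optimist case: u_i(c) = 1, u_i(a) = 2, u_i(b) = 3/delta.
opt_gap = sp.simplify(delta * (3 / delta) - (2 * eps + (1 - eps) * 1))
print(opt_gap)              # 2 - epsilon, positive whenever epsilon <= 1

# Pessimist case: u_i(a) = 1, u_i(b) = (1/eps + eps + 1)/(1 - eps),
#                 u_i(c) = (1/eps + eps + 2)/(1 - eps).
u_b = (1 / eps + eps + 1) / (1 - eps)
u_c = (1 / eps + eps + 2) / (1 - eps)
pess_gap = sp.simplify(u_b - (eps * 1 + (1 - eps) * u_c))
print(sp.factor(pess_gap))  # equals epsilon*(2*epsilon + 1)/(1 - epsilon) > 0 for 0 < epsilon < 1
\end{verbatim}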
\end{document} |
\begin{document}
\title{Exponential Lag Synchronization of Cohen-Grossberg Neural Networks with Discrete and Distributed Delays on Time Scales}
\author[]{Vipin Kumar} \affil[]{Max Planck Institute for Dynamics of Complex Technical Systems, Sandtorstra\ss e 1, 39106 Magdeburg, Germany.\authorcr
\email{[email protected]}, \orcid{0000-0002-7068-5426}}
\author[]{Jan Heiland} \affil[]{Max Planck Institute for Dynamics of Complex Technical Systems, Sandtorstra\ss e 1, 39106 Magdeburg, Germany.\authorcr
\email{[email protected]\}}, \orcid{0000-0003-0228-8522}}
\author[]{Peter Benner} \affil[]{Max Planck Institute for Dynamics of Complex Technical Systems, Sandtorstra\ss e 1, 39106 Magdeburg, Germany.\authorcr
\email{[email protected]}, \orcid{0000-0003-3362-4103}}
\shorttitle{Cohen-Grossberg Neural Networks on Time Scales} \shortauthor{V. Kumar, J. Heiland, P. Benner} \shortdate{}
\keywords{}
\abstract{ In this article, we investigate exponential lag synchronization results for the Cohen-Grossberg neural networks (C-GNNs) with discrete and distributed delays on an arbitrary time domain by applying feedback control. We formulate the problem by using the time scales theory so that the results can be applied to any uniform or non-uniform time domains. Also, we provide a comparison of results which shows that the obtained results unify and generalize the existing results. Mainly, we use the unified matrix-measure theory and Halanay inequality to establish these results. In the last section, we provide two simulated examples for different time domains to show the effectiveness and generality of the obtained analytical results.}
\novelty{This is the first attempt to discuss the exponential lag synchronization results for the generalized C-GNNs with mixed delays on time scales. The results are obtained by applying the novel unified matrix-measure theory and Halanay inequality. A comparison of results shows that these results unify and generalize the existing results. An example with simulation for different time domains is given to illustrate the analytical results.}
\maketitle
\section{Introduction}\label{sec1} Since the 1980s, neural networks (NNs), including recurrent NNs, Hopfield NNs, cellular NNs, and bi-directional associative NNs, have been a subject of intense study because of their large number of potential applications in many fields, such as the classification of patterns, signal and image processing, optimization problems, associative memory, parallel computing, and so on. In $1983$, Cohen-Grossberg \cite{CG} introduced the C-GNNs which are recognized as one of the most important and typical NNs because some other well-known NNs, for example, recurrent NNs, cellular NNs, and Hopfield NNs are special cases of C-GNNs. As a result, these types of networks have attracted considerable research attention and have been extensively studied in terms of their dynamical properties such as state estimation \cite{CG-app-state}, periodicity \cite{CG-app-periodic}, stability \cite{CG-app-stability-2,CG-app-stability-3}, boundedness \cite{CG-app-bound-2}, and synchronization \cite{CG-synchro-1,syn-exponential-1}. Furthermore, due to the importance of discrete-time C-GNNs as discussed in \cite{CG-D-1}, the dynamics of discrete-time C-GNNs have become a popular research topic; see, for example, \cite{CG-D-2, CG-D-3,CG-D-6,CG-D-7}.
Synchronization is one of the most important qualitative properties of dynamic systems and means that two or more dynamic systems lead to a common dynamical behaviour by using some coupling or external forces. The concept of synchronization in drive-response systems was first introduced by Pecora and Carrol \cite{pecora}, and since then, it has been capturing increased attention from both a fundamental and application-driven perspective.
Potential applications of synchronization can be found in many areas of applied sciences, such as harmonic oscillation generation, information science, human heartbeat regulation, chemical and biological systems, and secure communication \cite{app-synchro-1,app-synchro-2,app-synchro-3}. In the last few years, various types of synchronization phenomena have been discovered and investigated, such as exponential synchronization \cite{syn-exponential-2, add-2}, complete synchronization \cite{syn-complete}, finite-time synchronization \cite{syn-finite,syn-finite-2}, lag synchronization \cite{syn-lag}, adaptive synchronization \cite{syn-adaptive,add-1}, and projective synchronization \cite{syn-projective,add-2}. Among them, lag synchronization has been extensively studied \cite{work-lag-synchro-1,work-lag-synchro-2,work-lag-synchro-3,work-lag-synchro-4} due to its relevance in connected electronic networks, where constant time shifts between drive and response systems can make complete synchronization difficult to implement effectively.
In practical applications, both discrete and continuous dynamic systems play a significant role, but results for them are often studied separately.
In $1988$, Hilger \cite{ts-thesis} introduced the so-called \emph{time scale theory (or measure chain theory)}, which unifies the separate analysis of discrete and continuous dynamic systems into a single comprehensive analysis.
However, the study of dynamic systems is not limited to just discrete and continuous-time domains. In fact,
there are many other time domains which can be useful for studying the dynamic behaviours of dynamic systems more accurately. For example, to model the growth process of some species like Magicicada Septendecim, Magicicada Cassini, and Pharaoh Cicada, we need a time domain of the form $\mathbb{T}=\cup_{k=0}^\infty [k(a+b), k(a+b)+b], \ a,b \in (0,\infty)$.
Further, there exist neurons in the brain that follow a pattern of being active during the day and inactive at night. Intuitively, the dynamic behaviour of these neurons can be observed in the time domain
$\mathbb{T} = \bigcup_{l=0}^\infty [24l, 24l+d_l],$ where $d_l$ denotes the number of active hours of the neurons in each day; see \Cref{ex-brain}. \begin{figure}
\caption{Red lines denote the active time of neurons during a day while the gap shows the inactive time of neurons at night}
\label{ex-brain}
\end{figure} Another example is an RLC circuit (see \Cref{RLC}): if the capacitor is discharged for small time units $\delta>0$ at periodic intervals of $l$ time units, the dynamics of such a circuit can be modelled on the time scale $\mathbb{T} = \bigcup_{l=0}^\infty [l, l+1-\delta].$
\begin{figure}
\caption{A simple $RLC$ circuit}
\label{RLC}
\end{figure} These examples require a time domain
which is neither discrete nor continuous.
However, the time scale theory can overcome such difficulties as it gives the freedom to work on the general domain, i.e., the results obtained by using time scales will also be valid for uniform and non-uniform time domains such as non-overlapping closed intervals, a mixture of closed intervals and discrete points, and even a discrete non-uniform time domain. Thus, we can summarize the above and state that ``Unification and Extension" are the two main features of the time scale theory. Therefore, it is worthwhile to investigate dynamic equations on time scales. For more
studies on time scales, one can refer to the monograph \cite{ts-book1}.
In the last few years, the study of dynamic equations on time scales has drawn a tremendous amount of attention across the world and many researchers have found applications in many fields, such as epidemiology, economics, and control theory \cite{ts-app-eco, ts-app-control}. Recently, many authors have also established different types of qualitative behaviours of dynamic systems on time scales, for example, the existence of solutions, stability analysis, stabilization, and synchronization \cite{work-ts-1,work-ts-2,work-ts-3,work-ts-5,work-ts-6}. Also, a few authors have established the existence of periodic, anti-periodic, and almost-periodic solutions of C-GNNs and their stability \cite{work-cgnn-1,work-cgnn-2,work-cgnn-3,E-work-cgnn-1,E-work-cgnn-2,E-work-cgnn-3}. In \cite{E-work-cgnn-1}, the authors studied the existence of an anti-periodic solution and exponential stability for C-GNNs with time-varying delays on time scales. In \cite{E-work-cgnn-2}, the authors established the existence and global exponential stability of almost periodic solutions for C-GNNs with distributed delays on time scales, while in \cite{E-work-cgnn-3}, the authors considered the impulsive C-GNNs with distributed delays on time scales and studied the existence and exponential stability of periodic solutions by using Lyapunov functions, M-matrix theory, and coincidence degree theory.
Despite the growing interest in the study of dynamic equations on time scales,
the synchronization problem of C-GNNs on time scales has not been studied so far to the best of our knowledge. Therefore, to fill this gap, in this work, we establish exponential lag synchronization results for C-GNNs with discrete and distributed time delays on time scales by using feedback control, a novel unified matrix-measure technique
and the Halanay inequality. In short, the
main focus and benefit of this manuscript can be summarized as follows:
\begin{itemize} \item The C-GNNs with discrete and distributed delays on arbitrary time domains are considered to study exponential lag synchronization.
\item The problem is formulated by using the time scales theory and the results are derived based on a unified matrix-measure theory and the Halanay inequality.
\item The results for different special cases are given, showing that the obtained results unify and generalize the existing ones.
\item A simulated example for different time scales including continuous, discrete and non-overlapping closed intervals, is given to verify the obtained analytical outcomes. \end{itemize}
The remaining part of the manuscript is organized as follows: In \Cref{sec:pre}, we recall
basic concepts from matrix theory and time scales that are essential for the subsequent sections. In \Cref{sec:SOP}, we formulate our statement of the problem. In \Cref{sec:Results}, the main results are discussed.
Finally, in \Cref{sec:examples}, two numerical examples with simulation are given to verify the obtained results.
\section{Preliminaries} \label{sec:pre} Throughout this paper, the notations $\mathbb{R}, \mathbb{Z}$ and $\mathbb{N}$ denote the sets of all real numbers, integers, and natural numbers, respectively; $\mathbb{T}$ denotes the time scale; $\emptyset$ denotes the empty set; $\mathbb{R}^n$ and $\mathbb{R}^{n \times m}$
denote the $n$-dimensional Euclidean space and the set of all $n \times m$ matrices, respectively; $\diag\{\ldots\}$ denotes the diagonal matrix; Superscript $*$ denotes the matrix transpose; $\Id$ and $\Zero$ denote the identity and zero matrices of appropriate dimensions, respectively; $[a,b]_\mathbb{T} = [a,b] \cap \mathbb{T}$, denotes the time scale interval. For any $a, b \in \mathbb{R}, C([a,b],\mathbb{R}^n)$ denotes the set of continuous functions from $[a,b]$ into $\mathbb{R}^n$;
$\| \cdot \|_p, \ (p =1,2,\infty)$ is used to denote the $p$-norm for a vector or for a matrix.
Next, we recall some basic definitions and results about time scale calculus.
A \emph{time scale} is an arbitrary non-empty closed subset of the real numbers $\mathbb{R}$ with the topology and ordering inherited from $\mathbb{R}$. $h\mathbb{Z}$ $(h>0)$, $\mathbb{R},$ $\mathbb{P}_{a,b} = \cup_{k=0}^\infty [k(a+b), k(a+b) + a]$ for $a,b \in (0,\infty)$, and any discrete set are some examples of time scales. The \emph{forward and backward jump operators} $\sigma, \rho : \mathbb{T} \to \mathbb{T}$ are defined by $\sigma(t) = \inf \{ s \in \mathbb{T}: s>t\}$ and $\rho(t) = \sup\{ s \in \mathbb{T}: s<t \},$ respectively, with the convention $\sup \mathbb{T} = \inf \emptyset$ and $\inf \mathbb{T} = \sup \emptyset$. Also, the graininess function $\mu:\mathbb{T} \to [0,\infty)$ is given by $\mu(t) = \sigma(t) - t$. A point $t\in \mathbb{T}$ is called \emph{right-dense} if $t<\max\{ \mathbb{T} \}$ and $\sigma(t) = t$, \emph{left-dense} if $t>\min\{ \mathbb{T} \}$ and $\rho(t) = t$, \emph{right-scattered} if $\sigma(t)>t$, and \emph{left-scattered} if $ \rho(t) <t$. If $\mathbb{T}$ has a left-scattered maximum $M$, then we set $\mathbb{T}^k = \mathbb{T}\setminus\{M\}$, otherwise $\mathbb{T}^k = \mathbb{T}$.
\begin{definition} [\cite{work-ts-1}, Def. 1] \label{def:delta} Let $f : \mathbb{T} \to \mathbb{R}$ be a function. Then the \emph{delta derivative} of $f$ at a point $t \in \mathbb{T}^k$ is defined as a number $f^\Delta(t)$ (provided it exists) whenever for each $\epsilon > 0$ there exists a neighborhood $U$ of $t$ such that $$ \vert [f(\sigma(t)) - f(s)] - f^\Delta(t)[\sigma(t) - s] \vert \leq \epsilon \vert \sigma(t) - s \vert \ \text{for all} \ s \in U. $$ Further, if the neighborhood $U$ is replaced by the right-hand sided neighborhood $U^+$, then the delta derivative is called the \emph{upper right Dini-delta-derivative} and denoted by $D_\Delta^+f(t)$. \end{definition}
\begin{remark} \label{add:remark-1} In the above \Cref{def:delta}, if $\mu(t) = 0$, then the delta derivative $f^\Delta(t)$ becomes the ordinary derivative $f^\prime(t)$ and the upper right Dini-delta-derivative $D_\Delta^+f(t)$ becomes the ordinary upper right Dini-derivative $D^+f(t)$. Further, if $\mathbb{T} = h \mathbb{Z}, \ h>0$, then the delta derivative $f^\Delta(t)$ becomes the h-difference operator, i.e., $f^\Delta(t) = \frac{f(t+h)-f(t)}{h}$. \end{remark}
\begin{remark} Let $f : \mathbb{T} \to \mathbb{R}$ is differentiable at $t\in \mathbb{T}^k$, then the forward operator $\sigma$ and the delta derivative of $f$ are related by the formula $f(\sigma(t)) = f(t) + \mu(t) f^\Delta(t)$. \end{remark}
A function $f :\mathbb{T}\to \mathbb{R}$ is called \emph{regressive (or positive regressive)} if $1+\mu(t)f(t) \neq 0$ (respectively $>0$) for all $t\in \mathbb{T}$. Also, $f$ is called \emph{regulated} provided its right-side limit exists (finite) at all right-dense points of $\mathbb{T}$ and its left-side limit exists (finite) at all left-dense points of $\mathbb{T}$. Furthermore, $f$ is called an \emph{rd-continuous function} if it is regulated and continuous at all right-dense points of $\mathbb{T}$. The collections of all rd-continuous functions and of rd-continuous regressive (or rd-continuous positive regressive) functions from $\mathbb{T}$ to $\mathbb{R}$ are denoted, respectively, by $C_{rd}(\mathbb{T},\mathbb{R})$ and $\mathcal{R}$ (or $\mathcal{R}^+$).
\begin{definition} [\cite{work-ts-3}, Def. 2.6] For any $p \in \mathcal{R}$ and $t \in \mathbb{T}^k$, we define $\ominus p$ by $$(\ominus p)(t) = - \dfrac{p(t)}{1+\mu(t) p(t)}.$$ \end{definition}
\begin{remark} If $p \in \mathcal{R}$, then $\ominus p \in \mathcal{R}$. \end{remark}
Next, we define the time scales version of the exponential function. \begin{definition} [\cite{ts-book1}, Def. 2.30] Let $p\in \mathcal{R}$, then we define the exponential function on time scales by $$e_p(t,s)= \exp \left(\int_s^t \zeta_{\mu (z)}(p(z))\Delta z \right) \ \text{for} \ t, s \in \mathbb{T} $$ with \[
\zeta_{\mu (s)}(p(s))= \begin{cases}
\dfrac{1}{\mu ( s )}\log(1+p(s)\mu (s)), & \text{if } \mu ( s ) \neq 0,\\
p(s), & \text{if } \mu (s)=0. \end{cases} \] \end{definition}
Next, we define the delta-integral on time scales.
\begin{definition} [\cite{ts-book1}, Def. 1.71] Let $f: \mathbb{T} \to \mathbb{R}$ be a regulated function, then a function $F : \mathbb{T} \to \mathbb{R}$ is called an \emph{anti-derivative of $f$} if $F^\Delta(t) = f(t)$ holds for all $t \in \mathbb{T}^k$. Also, we define the Cauchy integral by $$\int_{a}^b f(t) \Delta (t) = F(b)-F(a) \ \text{for all \ } a,b, \in \mathbb{T}. $$ \end{definition}
\begin{remark} \label{remark:integral} For any $a, b \in \mathbb{T}$ and $ f \in C_{rd}(\mathbb{T},\mathbb{R})$, if we set $\mathbb{T=R}$, then we have \begin{align*} \int_a^b f(t) \Delta t = \int_a^b f(t) dt. \end{align*} Further, if $[a, b)_\mathbb{T}$ consists of only isolated points, then we have \begin{align*} \int_a^b f(t) \Delta t = \begin{cases} \sum_{t \in [a,b)_\mathbb{T}} \mu(t) f(t) \quad & \text{if} \ a<b,\\ 0 & \text{if} \ a=b,\\
- \sum_{t \in [a,b)_\mathbb{T}} \mu(t) f(t) \quad & \text{if} \ a>b. \end{cases} \end{align*} \end{remark}
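As a quick numerical illustration (not needed later), the next Python fragment evaluates the time-scale exponential on the purely discrete time scale $\mathbb{T}=h\mathbb{Z}$ with a constant regressive $p$, where the cylinder-transformation integral reduces to a finite sum by \Cref{remark:integral}; the result agrees with the closed form $(1+hp)^{t/h}$. The numerical values are illustrative only.
\begin{verbatim}
import math

h, p = 0.25, -1.5                     # T = hZ, constant positively regressive p (1 + h*p > 0)
t_grid = [h * k for k in range(0, 9)]

def e_p(t, s=0.0):
    # On an isolated time scale the integral of the cylinder transformation
    # becomes a sum over the points of [s, t): each point contributes mu * log(1 + mu*p)/mu.
    pts = [tau for tau in t_grid if s <= tau < t]
    return math.exp(sum(h * (math.log(1 + h * p) / h) for _ in pts))

for t in t_grid:
    closed_form = (1 + h * p) ** round(t / h)
    assert abs(e_p(t) - closed_form) < 1e-12
print("e_p(t, 0) on hZ equals (1 + h*p)^(t/h)")
\end{verbatim}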
Next, we recall some basics from matrix-measure theory.
\begin{definition} [\cite{work-ts-6}, Def. 1] \label{mm-real} The \emph{generalized matrix-measure}
and \emph{classical matrix-measure} of a real square matrix $W = (w_{kl})_{n \times n}$ with respect to the $p-$norm $(p=1, 2$ or $\infty)$ are defined by \begin{align*}
\omega_p (W,h) = \dfrac{\| \Id + h W\|_p - 1}{h} \ \text{and} \ \Lambda_p (W) = \lim_{s \to 0^+} \dfrac{\| \Id + s W\|_p - 1}{s}, \end{align*} respectively, where $h>0$. The matrix norms and corresponding classical matrix-measures are given in \Cref{table:Matrix-norm-measure}. \end{definition}
\begin{table}[h]
\begin{center}
\begin{tabular}{cl}
Matrix norm & \quad Matrix-measure \\
\hline
$\|W\|_1 = \max_{j} \sum_{i=1}^n \vert w_{ij} \vert $ & \quad $\Lambda_1(W) = \max_{j} \big( w_{jj} + \sum_{i=1, i \neq j}^n \vert w_{ij}\vert \big)$ \\
$\|W\|_2 = \sqrt{\lambda_{\max}( W^T W)}$ & \quad $\Lambda_2(W) = \dfrac{1}{2}\lambda_{\max}( W^T + W)$\\
$\|W\|_\infty = \max_{i} \sum_{j=1}^n \vert w_{ij} \vert$ & \quad $\Lambda_\infty(W) = \max_{i} \big( w_{ii} + \sum_{j=1, j \neq i}^n \vert w_{ij} \vert \big)$ \\
\hline
\end{tabular}
\caption{Matrix norms and corresponding classical matrix-measures}
\label{table:Matrix-norm-measure}
\end{center}
\end{table}
\begin{definition} [\cite{work-ts-6}, Def. 2 ] \label{mm-ts}
Let $W \in \mathbb R^{n\times n}$ be a real matrix and let $\mathbb{T}$ be an arbitrary time scale. Then the \emph{unified matrix-measure on $\mathbb{T}$ with respect to the $p-$norm} $(p=1, 2$ or $\infty)$ is defined as \begin{align*} M_p(W,\mathbb{T}) =
\begin{cases}
\max\biggl\{ \dfrac{\|\Id + \mu(t) W\|_p - 1}{\mu(t)}\colon t \in
\mathbb{T}\biggr\}, \text{if } \mu(t) > 0, \forall \ t\in \mathbb{T},\\
\max\biggl\{
\Lambda_p(W), \ \max\Bigl\{ \dfrac{\|\Id + \mu(t) W\|_p - 1}{\mu(t)}\colon t \in
\mathbb{T},\mu(t)>0\Bigr\} \biggr\}, \ \text{else.} \end{cases} \end{align*} \end{definition}
Note that for $\mathbb{T}= \mathbb{R}$ and $\mathbb{T} = h\mathbb{Z}$, $h>0$, \Cref{mm-ts} reduces to \Cref{mm-real}.
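For readers who want to experiment with these measures, the following NumPy sketch evaluates the classical measures of \Cref{mm-real} (via the induced norms of \Cref{table:Matrix-norm-measure}) and the unified measure of \Cref{mm-ts}. The matrix and the set of graininess values are illustrative choices, and a time scale is represented here only through its graininess values (a value $0$ standing in for right-dense points).
\begin{verbatim}
import numpy as np

def Lambda(W, p):
    # Classical matrix-measures from the table above.
    if p == 1:
        return max(W[j, j] + sum(abs(W[i, j]) for i in range(len(W)) if i != j)
                   for j in range(len(W)))
    if p == 2:
        return 0.5 * max(np.linalg.eigvalsh(W.T + W))
    if p == np.inf:
        return max(W[i, i] + sum(abs(W[i, j]) for j in range(len(W)) if j != i)
                   for i in range(len(W)))

def omega(W, h, p):
    # Generalized matrix-measure, with the induced p-norm of I + h*W.
    return (np.linalg.norm(np.eye(len(W)) + h * W, p) - 1) / h

def M(W, graininess, p):
    # Unified measure: max over positive graininess values, together with
    # Lambda_p(W) whenever the time scale also has right-dense points.
    vals = [omega(W, h, p) for h in graininess if h > 0]
    if any(h == 0 for h in graininess):
        vals.append(Lambda(W, p))
    return max(vals)

W = np.array([[-3.0, 1.0], [0.5, -2.0]])
print(Lambda(W, 1), Lambda(W, 2), Lambda(W, np.inf))
print(M(W, [0.0, 0.1, 0.5], 1))     # a time scale with both dense points and gaps
\end{verbatim}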
\section{Statement of Problem} \label{sec:SOP} We consider a class of C-GNNs with discrete and distributed delays on time scales of the following form: \begin{align} \label{eq:main-D} \begin{cases} \x^\Delta(t) & = - \G(\x(t))[\U(\x(t)) - \P \f(\x(t)) - \Q \f(\x(t-\t_1)) - \R \int_{t-\t_2}^t \f(\x(s)) \Delta s - I], \ t \in [0,\infty)_\mathbb{T},\\ \x(s) & = \phi(s), \ s \in [-\t , 0]_\mathbb{T}, \end{cases} \end{align} \sloppy where $\x(t) = [\x_1(t),\x_2(t),\ldots,\x_n(t)]^* \in \mathbb{R}^n$ is the state vector; $ \P = (\p_{ij})_{n \times n} \in \mathbb{R}^{n \times n}, \Q= (\q_{ij})_{n \times n} \in \mathbb{R}^{n \times n}$ and $ \R= (\r_{ij})_{n \times n} \in \mathbb{R}^{n \times n}$ are the connection, discrete delay connection and distributed delay
connection strength matrices, respectively; $\t_1(>0)$ and $\t_2(>0) $ are the discrete and distributed delay, respectively, such that $t-\t_1 \in \mathbb{T}$ and $t-\t_2 \in \mathbb{T}$; $\t = \max\{\t_1,\t_2\}$; $\G(\x(t)) = \diag\{\G_1(\x(t)), \G_2(\x(t)), \ldots, \G_n(\x(t))\} \in \mathbb{R}^{n \times n}$ is the state-dependent amplification function; $\U(\x(t)) = [\U_1(\x(t)),\U_2(\x(t)),\ldots,\U_n(\x(t))]^* \in \mathbb{R}^n$ is the appropriate behaviour function; $\f(\x(\cdot)) = [\f_1(\x(\cdot)),\f_2(\x(\cdot)),\ldots,\f_n(\x(\cdot))]^* \in \mathbb{R}^n$ denotes the activation function; $I$ is the external bias term; $\phi \in C_{rd}([-\t,0]_\mathbb{T}, \mathbb{R}^n)$.
In this paper, we shall establish synchronization results by using the drive-response technique. Therefore, we consider system \eqref{eq:main-D} as the drive system and, correspondingly, we consider a
response system described as follows: \begin{align} \label{eq:main-R} \begin{cases} \y^\Delta(t) &= - \G(\y(t))[\U(\y(t)) - \P \f(\y(t)) - \Q \f(\y(t-\t_1)) \\ & \quad - \R \int_{t-\t_2}^t \f(\y(s)) \Delta s - I] + \u(t), \ t \in [0,\infty)_\mathbb{T},\\ \y(s) & = \psi(s), \ s \in [-\t , 0]_\mathbb{T}, \end{cases} \end{align} where $\y(t) \in \mathbb{R}^n$; $\psi \in C_{rd}([-\t,0]_\mathbb{T}, \mathbb{R}^n)$; $\u(t)$ is the control function defined as \begin{align} \label{eq:control} \u(t) = -K (\y(t) - \x(t-\beta)), \end{align}
where $K$ is the feedback gain matrix and $\beta$ is the transmittal delay such that $t-\beta \in \mathbb{T}$. \begin{remark} \label{remark:ts:cases} The considered class of C-GNNs is defined on the general time domain, and hence, it contains the usual continuous-time C-GNNs, discrete-time C-GNNs, and many more. For example, if we consider the \textbf{continuous-time domain}, i.e., $\mathbb{T} = \mathbb{R}$, then, see \Cref{add:remark-1}, the drive system \eqref{eq:main-D} becomes \begin{align} \label{eq:main-D-r} \x^\prime (t) = - \G(\x(t))[\U(\x(t)) - \P \f(\x(t)) - \Q \f(\x(t-\t_1)) - \R \int_{t-\t_2}^t \f(\x(s)) d s - I] \end{align} and the response system \eqref{eq:main-R} becomes \begin{align} \label{eq:main-R-r} \y^\prime(t) = - \G(\y(t))[\U(\y(t)) - \P \f(\y(t)) - \Q \f(\y(t-\t_1)) - \R \int_{t-\t_2}^t \f(\y(s)) d s - I] + \u(t), \end{align} where $t \in [0,\infty)$, and the rest of the parameters are the same as defined previously. Also, if we choose the \textbf{$h$-difference discrete-time domain}, i.e., $\mathbb{T}=h\mathbb{Z}$, $h>0$, then, see \Cref{add:remark-1} and \Cref{remark:integral}, the drive system \eqref{eq:main-D} is converted to \begin{align} \label{eq:main-D-d} \x(t+h) & = \x(t) - h \G(\x(t))\bigg [\U(\x(t)) - \P \f(\x(t)) - \Q \f(\x(t-\t_1)) - \R \sum_{k = \frac{t-\t_2}{h}}^{\frac{t}{h} -1} h\f(\x(kh)) - I \bigg ] \end{align} and the response system \eqref{eq:main-R} is converted to \begin{align} \label{eq:main-R-d}
\y(t+h) & =\y(t) - h \G(\y(t)) \bigg [\U(\y(t)) - \P \f(\y(t)) - \Q \f(\y(t-\t_1)) - \R \sum_{k = \frac{t-\t_2}{h}}^{\frac{t}{h} -1} h\f(\y(kh)) - I \bigg ] + h \u(t), \end{align} where
$t \in [0,\infty)_{h \mathbb{Z}}$. Furthermore, by applying the above mentioned cases to
the \textbf{non-overlapping time domain} $\mathbb{T} =\cup_{i=0}^\infty [i, i+h], 0 <h<1,$ the concrete expression of the drive system \eqref{eq:main-D}
can be derived as \begin{align} \begin{cases} \x^\prime (t) & = - \G(\x(t))[\U(\x(t)) - \P \f(\x(t)) - \Q \f(\x(t-\t_1)) \\ & \quad - \R \int_{t-\t_2}^t \f(\x(s)) d s - I], \ t \in \cup_{i=0}^\infty [i, i+h), \\
\x(t+1-h) & = \x(t) - (1-h)\G(\x(t)) [\U(\x(t)) - \P \f(\x(t)) - \Q \f(\x(t-\t_1)) \\
& \quad - \R \sum_{k = \frac{t-\t_2}{1-h}}^{\frac{t}{1-h} -1} (1-h)\f(\x(k(1-h))) - I ], \ t \in \cup_{i=0}^\infty \{i+h\} \end{cases} \end{align} and the response system \eqref{eq:main-R}
can be derived as \begin{align} \begin{cases} \y^\prime(t) & = - \G(\y(t))[\U(\y(t)) - \P \f(\y(t)) - \Q \f(\y(t-\t_1)) \\ & \quad - \R \int_{t-\t_2}^t \f(\y(s)) d s - I] + \u(t), \ t \in \cup_{i=0}^\infty [i, i+h), \\
\y(t+1-h) & = \y(t) - (1-h)\G(\y(t)) [\U(\y(t)) - \P \f(\y(t)) - \Q \f(\y(t-\t_1)) \\
& \quad - \R \sum_{k = \frac{t-\t_2}{1-h}}^{\frac{t}{1-h} -1} (1-h)\f(\y(k(1-h))) - I ] + (1-h)\u(t), \\
& \qquad \ \ t \in \cup_{i=0}^\infty \{i+h\}. \end{cases} \end{align} \end{remark}
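To make the $h\mathbb{Z}$ case concrete, the following Python sketch iterates the exact recursion \eqref{eq:main-D-d} for the drive system; for concreteness it borrows the coefficient data and the step $h=0.5$ of Case~2 of \Cref{ex-1} in \Cref{sec:examples}, and all variable names are ours.
\begin{verbatim}
import numpy as np

h, tau1, tau2, T_end = 0.5, 0.5, 0.5, 20.0
d1, d2 = int(round(tau1 / h)), int(round(tau2 / h))    # delays counted in steps
P = np.array([[0.8, 0.0], [-0.2, -0.7]])
Q = np.array([[-0.4, 0.1], [-0.2, 0.5]])
R = np.array([[-0.5, 0.6], [-0.6, 0.5]])
I_ext = np.array([0.4, 0.3])

G = lambda x: np.diag([0.4 + 0.2 * np.cos(x[0]), 0.4 - 0.2 * np.sin(x[1])])
U = lambda x: np.array([0.3 + 0.2 * np.sin(x[0]), 0.3 - 0.2 * np.cos(x[1])])
f = lambda x: 0.8 * np.tanh(x)

N, d = int(T_end / h), max(d1, d2)
x = np.zeros((N + d + 1, 2))
x[:d + 1] = np.array([0.5, 1.0])                       # constant history phi

for k in range(d, N + d):
    dist = h * sum(f(x[j]) for j in range(k - d2, k))  # distributed-delay sum
    x[k + 1] = x[k] - h * G(x[k]) @ (U(x[k]) - P @ f(x[k])
                                     - Q @ f(x[k - d1]) - R @ dist - I_ext)
print(x[-1])                                           # state after T_end
\end{verbatim}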
The main idea of synchronization is that the response system \eqref{eq:main-R} utilizes a feasible controller to synchronize itself with the drive system \eqref{eq:main-D}.
Mathematically, this is captured by the following definition. \begin{definition} \label{main:Def:ELS} The drive system \eqref{eq:main-D} and the response system \eqref{eq:main-R} are said to be \emph{exponentially lag-synchronized} in the time-scale sense under the control protocol \eqref{eq:control} if there exist two constants $C >0$ and $\nu > 0$ such that the following inequality holds
$$\| \y(t) - \x(t - \beta) \|_p \leq C e_{\ominus \nu}(t,0), \ t \geq 0.$$ \end{definition}
\begin{remark} In the above \Cref{main:Def:ELS}, if $\beta = 0$, then the drive system \eqref{eq:main-D} and the response system \eqref{eq:main-R} are called exponentially synchronized. \end{remark}
Now, to prove the synchronization results, we define the error between the drive system \eqref{eq:main-D} and the response system \eqref{eq:main-R} by
$\e(t) = \y(t) - \x(t-\beta)$, then the error dynamics can be written as \begin{align} \label{eq:main-E} \e^\Delta(t) & = -K\e(t) - \td\G(\e(t))[\td\U(\e(t)) - \P \td \f(\e(t)) - \Q \td \f(\e(t-\t_1))- \R \int_{t-\t_2}^t \td \f(\e(s)) \Delta s - I], \end{align} where $\e(t)\in \mathbb{R}^n$ and \begin{align*} \td \G(\e(t)) \td \U(\e(t)) & = \G(\y(t))\U(\y(t)) -\G(\x(t-\beta))\U(\x(t-\beta)), \\ \td \G(\e(t)) \P \td \f(\e(t)) & = \G(\y(t)) \P \f(\y(t)) -\G(\x(t-\beta)) \P \f(\x(t-\beta)), \\ \td \G(\e(t)) \Q \td \f(\e(t-\t_1)) & = \G(\y(t)) \Q \f(\y(t-\t_1)) - \G(\x(t-\beta)) \Q \f(\x(t-\beta-\t_1)), \\ \td \G(\e(t)) \R \int_{t-\t_2}^t \td \f(\e(s)) \Delta s & = \G(\y(t)) \R \int_{t-\t_2}^t \f(\y(s)) \Delta s - \G(\x(t-\beta)) \R \int_{t-\t_2}^t \f(\x(s-\beta)) \Delta s, \\ \td \G(\e(t)) I & = \G(\y(t)) I - \G(\x(t-\beta))I. \end{align*}
From the definition of $\e(t)$, it is clear that if the error system \eqref{eq:main-E} is exponentially stable, then the drive system \eqref{eq:main-D} and the response system \eqref{eq:main-R} are exponentially lag-synchronized. Therefore, our goal is to show the exponential stability of the error system \eqref{eq:main-E}.
To deal with the transmittal delay $\beta$, we set $\x(s) = \phi(-\t)$ for all $s \in [-\t-\beta, -\t]_\mathbb{T}$ and \begin{align*} \Psi(s) = \begin{cases} \phi(s), \ s \in [-\t, 0]_\mathbb{T},\\ \phi(-\t), \ s \in [-\t-\beta, -\t]_\mathbb{T}. \end{cases} \end{align*} Then, the initial condition for the error system \eqref{eq:main-E} can be defined as \begin{align*} \e(s) = \psi(s) - \Psi(s-\beta), \ s \in [-\t,0]_\mathbb{T}. \end{align*}
In order to prove the main results, we need the following assumption. \begin{assumption} [\cite{add-1}, Ass. A1,A2]
\label{ass:fjgi-bounded}
The functions $\G, \U$ and $\f$ are Lipschitz continuous and bounded. In particular, for any $\x,\y \in \mathbb{R}^n$, there exist positive constants $L_\G, L_\U, L_\f$ such that \begin{align*}
& \|\G(\x)-\G(\y)\|_p \leq L_{\G} \|\x-\y\|_p, \ \|\U(\x)-\U(\y)\|_p \leq L_{\U} \| \x-\y\|_p, \ \|\f(\x)-\f(\y)\|_p \leq L_{\f} \|\x-\y\|_p. \end{align*} Also, there exist positive constants $M_\G, M_\U, M_\f$ such that \begin{align*}
\|\G(\x)\|_p \leq M_{\G}, \ \|\U(\x)\|_p \leq M_{\U} , \ \|\f(\x)\|_p \leq M_{\f}. \end{align*}
\end{assumption}
We note that typical choices of the activation functions like $\tanh$ or sigmoid fulfil this assumption. Moreover, anticipating the nature of the estimates of the following section, we can state the following relaxation of \Cref{ass:fjgi-bounded}. \begin{remark} \label{add:remark:2} If the states can be confined a priori to a bounded set $\Omega \subset \mathbb{R}^n$, then the Lipschitz and boundedness conditions need to be established on $\Omega$ only. \end{remark}
\section{Exponential Lag Synchronization Results} \label{sec:Results} In this section, we provide the main results of this manuscript. Before that, we give an important lemma which is useful for establishing these results.
\begin{lemma} [\cite{work-ts-6}, Lemma 2] \label{lemma-1} For any real scalars $c$ and $d$ such that $c>d>0$ and $-c \in \mathcal{R}^+$, let $x(t)$ be a non-negative right-dense continuous function satisfying \begin{align*} D^+_\Delta x(t) \leq -c x(t) + d \sup_{s \in [t-\t, t]_\mathbb{T}}x(s), \ t \in [0, \infty)_\mathbb{T}, \end{align*} where $D^+_\Delta x(t)$ is the upper right Dini-delta-derivative of $x$ at $t$. Then the inequality \begin{align*} x(t) \leq \sup_{s \in [t-\t, t]_\mathbb{T}}x(s) e_{\ominus \lambda}(t,0), \end{align*} holds, where $\lambda>0$ is a solution of the inequality $\lambda + d \exp(\lambda \t)<c$. \end{lemma}
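In practice, the best admissible decay rate in \Cref{lemma-1} is easy to compute numerically. The following sketch (function name and the illustrative constants are ours) finds by bisection the largest $\lambda$ with $\lambda + d\exp(\lambda \t) \leq c$; with $c$ and $d$ taken as the constants $\mathcal{M}_1^p$ and $\mathcal{M}_2^p$ (respectively $\mathcal{M}_3^p$ and $\mathcal{M}_4^p$) of the theorems below, this gives the exponential rate of the synchronization error.
\begin{verbatim}
import numpy as np

def halanay_rate(c, d, tau, iters=100):
    """Largest lambda >= 0 with lambda + d*exp(lambda*tau) <= c (needs c > d > 0),
    found by bisection; the left-hand side is increasing in lambda."""
    lo, hi = 0.0, c
    for _ in range(iters):
        mid = 0.5 * (lo + hi)
        if mid + d * np.exp(mid * tau) <= c:
            lo = mid
        else:
            hi = mid
    return lo

# illustrative values close to the (p = infinity) constants of Case 1 below
print(halanay_rate(c=1.0840, d=1.0112, tau=0.8))   # roughly 0.04
\end{verbatim}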
Now, we are ready to give the first main result of this article in the following theorem.
\begin{theorem} \label{main-theorem-1}
Let \Cref{ass:fjgi-bounded} hold. If, for some $p \in \{1,2,\infty\}$, there exist a non-singular matrix $Z$ and a control gain matrix $K$ such that $\mathcal{M}_1^p - \mathcal{M}_2^p >0 $
and $-\mathcal{M}_1^p \in \mathcal{R}^+$, where \begin{align*}
\mathcal{M}_1^p & = - \big ( M_p(-ZKZ^{-1},\mathbb{T}) + \|Z\|_p \|Z^{-1}\|_p ((M_\G L_\U + M_\U L_\G) \\
& \quad + (M_\G L_\f + M_\f L_\G)\| \P\|_p + L_\G \|I\|_p) \big ),\\
\mathcal{M}_2^p & = \|Z\|_p \|Z^{-1}\|_p (M_\G L_\f + M_\f L_\G)(\|\Q\|_p + \t \|\R\|_p) \end{align*} and $M_p(\cdot,\mathbb{T})$ denotes the unified matrix-measure as defined in \Cref{mm-ts}, then the drive system \eqref{eq:main-D} and the response system \eqref{eq:main-R} are exponentially lag-synchronized. \end{theorem}
\begin{proof} For any non-singular matrix $Z$, we define \begin{align*}
V(\e(t)) = \| Z \e(t)\|_p. \end{align*} Now, for any arbitrary point $t \in \mathbb{T}$, from the definition of $\mu(t)$, we have either $\mu(t) =0$ or $\mu(t)>0$. Therefore, we split the proof into the following two steps:\\ \textbf{Step 1:} When $\mu(t)>0$, then for any $t \in \mathbb{T}$, we have \begin{align}
\dfrac{\| Z \e(\sigma(t)) \|_p -\| Z \e(t) \|_p }{\mu(t)} & = \dfrac{1}{\mu(t)} \bigg \{ \|Z \e(t) + \mu(t) Z \e^\Delta(t) \|_p -\| Z \e(t) \|_p \bigg \} \nonumber\\
& = \dfrac{1}{\mu(t)} \bigg \{ \| Z \e(t) + \mu(t) Z(-K\e(t) - \td\G(\e(t))[\td\U(\e(t)) - \P \td \f(\e(t)) \nonumber \\
& \quad \quad - \Q \td \f(\e(t-\t_1)) - \R \int_{t-\t_2}^t \td \f(\e(s)) \Delta s - I] ) \|_p - \|Z\e(t)\|_p \bigg \} \nonumber \\
& \leq \dfrac{1}{\mu(t)} \big \{ \|Z \e(t) + \mu(t)(-ZK)\e(t)\|_p -\| Z \e(t) \|_p \big \} \nonumber\\
& \quad \quad + \|Z \td\G(\e(t))\td\U(\e(t))\|_p + \|Z \td\G(\e(t))\P\td \f(\e(t))\|_p + \|Z \td\G(\e(t)) I\|_p \nonumber \\
& \quad \quad + \|Z \td\G(\e(t)) \Q\td \f(\e(t-\t_1))\|_p
+ \| Z \td\G(\e(t)) \R \int_{t-\t_2}^t \td \f(\e(s))\Delta s \|_p. \label{eq:eq-cal-1} \end{align} Now, from the definition of $\td \G, \td\U, \td \f$ and
\Cref{ass:fjgi-bounded}, we have \begin{align} \label{eq:eq-cal-2}
\|\td \G(\e(t)) \td \U(\e(t))\|_p & = \|\G(\y(t))\U(\y(t)) -\G(\x(t-\beta))\U(\x(t-\beta))\|_p \nonumber \\
& \leq \|\G(\y(t))\U(\y(t)) - \G(\y(t))\U(\x(t-\beta)) \|_p \nonumber \\
&\quad + \|\G(\y(t))\U(\x(t-\beta)) -\G(\x(t-\beta))\U(\x(t-\beta))\|_p \nonumber \\
& \leq (M_\G L_\U + M_\U L_\G) \| \e(t)\|_p . \end{align} Similarly, one can obtain \begin{align} \label{eq:eq-cal-3}
\| \td \G(\e(t)) \P \td \f(\e(t))\|_p & = \| \G(\y(t)) \P \f(\y(t)) -\G(\x(t-\beta)) \P \f(\x(t-\beta))\|_p \nonumber \\
& \leq (M_\G L_\f + M_\f L_\G) \|\P\|_p \| \e(t)\|_p, \end{align} \begin{align} \label{eq:eq-cal-4}
\| \td \G(\e(t)) \Q \td \f(\e(t-\t_1))\|_p \leq (M_\G L_\f + M_\f L_\G) \|\Q\|_p \sup_{s \in [t-\t_1 ,t]_\mathbb{T}} \| \e(s)\|_p, \end{align} \begin{align} \label{eq:eq-cal-5}
\bigg \| \td \G(\e(t)) \R \int_{t-\t_2}^t \td \f(\e(s)) \Delta s \bigg \|_p \leq \t (M_\G L_\f + M_\f L_\G) \|\R\|_p \sup_{s \in [t-\t_2 ,t]_\mathbb{T}} \| \e(s)\|_p \end{align} and \begin{align} \label{eq:eq-cal-6}
\| \td \G(\e(t)) I \|_p \leq L_\G \|I\|_p \| \e(t)\|_p. \end{align} Now, from the inequalities \eqref{eq:eq-cal-1}, \eqref{eq:eq-cal-2}, \eqref{eq:eq-cal-3}, \eqref{eq:eq-cal-4}, \eqref{eq:eq-cal-5} and \eqref{eq:eq-cal-6}, we get \begin{align*}
\dfrac{\|Z \e(\sigma(t)) \|_p -\| Z \e(t) \|_p}{\mu(t)} & \leq \dfrac{\| \Id + \mu(t) (-ZKZ^{-1})\|_p -1 }{\mu(t)} \|Z\e(t) \|_p + \|Z\|_p L_\G \|I\|_p \|\e(t)\|_p \\
& \quad \quad + \|Z\|_p (M_\G L_\U + M_\U L_\G) \| \e(t)\|_p \\
& \quad \quad + \|Z\|_p (M_\G L_\f + M_\f L_\G) \|\P\|_p \|\e(t)\|_p \\
& \quad \quad+ \|Z\|_p (M_\G L_\f + M_\f L_\G) \|\Q\|_p \sup_{s \in [t-\t_1 ,t]_\mathbb{T}} \|\e(s)\|_p \\
& \quad \quad + \t \|Z\|_p (M_\G L_\f + M_\f L_\G) \|\R\|_p \sup_{s \in [t-\t_2 ,t]_\mathbb{T}} \|\e(s)\|_p \\
& \leq \big( M_p(-ZKZ^{-1},\mathbb{T}) + \|Z\|_p L_\G \|I\|_p \|Z^{-1}\|_p \big) \| Z\e(t)\|_p \\
& \quad \quad + \|Z\|_p (M_\G L_\U + M_\U L_\G) \|Z^{-1}\|_p \| Z \e(t)\|_p \\
& \quad \quad + \|Z\|_p (M_\G L_\f + M_\f L_\G) \|\P\|_p \| Z^{-1}\|_p \|Z \e(t)\|_p \\
& \quad \quad + \|Z\|_p (M_\G L_\f + M_\f L_\G) \|\Q\|_p \| Z^{-1}\|_p \sup_{s \in [t-\t_1 ,t]_\mathbb{T}} \|Z \e(s)\|_p \\
& \quad \quad + \t \|Z\|_p (M_\G L_\f + M_\f L_\G) \|\R\|_p \|Z^{-1}\|_p \sup_{s \in [t-\t_2 ,t]_\mathbb{T}} \| Z\e(s)\|_p \\
& \leq -\mathcal{M}_1^p \|Z \e(t)\|_p + \mathcal{M}_2^p \sup_{s \in [t-\t, t]_\mathbb{T}} \|Z \e(s)\|_p. \end{align*} Hence, using \Cref{def:delta}, we get \begin{align} \label{eq:eq-cal-7} D^+_\Delta V(\e(t))
\leq -\mathcal{M}_1^p V(\e(t)) + \mathcal{M}_2^p \sup_{s \in [t-\t, t]_\mathbb{T}} V(\e(s)). \end{align}
\noindent \textbf{Step 2:} When $\mu(t)=0$, the derivative is the classical derivative, therefore, by using the formula $\x(t+h) = \x(t) + \x'(t)h + o(h)$ with $\lim _{h\to 0} \frac{\|o(h)\|_p}{h} = 0$, we can calculate \begin{align*}
\lim_{h \to 0^+} \dfrac{\|Z \e(t+h) \|_p -\| Z \e(t) \|_p }{h} & = \lim_{h \to 0^+} \dfrac{1}{h} \bigg \{ \| Z \e(t) + h Z \e^\Delta(t) + o(h) \|_p -\| Z \e(t) \|_p \bigg \} \\
& = \lim_{h \to 0^+} \dfrac{1}{h} \bigg \{ \|Z \e(t) + h Z(-K\e(t) - \td\G(\e(t))[\td\U(\e(t)) - \P \td \f(\e(t)) \nonumber \\
& \quad \quad - \Q \td \f(\e(t-\t_1)) - \R \int_{t-\t_2}^t \td \f(\e(s)) \Delta s - I] ) + o(h) \|_p - \|Z\e(t)\|_p \bigg \} \nonumber \\
& \leq \big( M_p(-ZKZ^{-1},\mathbb{T}) + \|Z\|_p L_\G \|I\|_p \|Z^{-1}\|_p \big) \| Z\e(t)\|_p \\
& \quad \quad + \|Z\|_p (M_\G L_\U + M_\U L_\G) \|Z^{-1}\|_p \| Z \e(t)\|_p \\
& \quad \quad + \|Z\|_p (M_\G L_\f + M_\f L_\G) \|\P\|_p \| Z^{-1}\|_p \|Z \e(t)\|_p \\
& \quad \quad + \|Z\|_p (M_\G L_\f + M_\f L_\G) \|\Q\|_p \| Z^{-1}\|_p \sup_{s \in [t-\t_1 ,t]_\mathbb{T}} \|Z \e(s)\|_p \\
& \quad \quad + \t \|Z\|_p (M_\G L_\f + M_\f L_\G) \|\R\|_p \|Z^{-1}\|_p \sup_{s \in [t-\t_2 ,t]_\mathbb{T}} \| Z\e(s)\|_p \\
& \leq -\mathcal{M}_1^p \|Z \e(t)\|_p + \mathcal{M}_2^p \sup_{s \in [t-\t, t]_\mathbb{T}} \|Z \e(s)\|_p. \end{align*} Hence, using \Cref{def:delta} again, we get the same inequality as \eqref{eq:eq-cal-7}.
Thus, from the above two steps, for any $t \in \mathbb{T}$, we have \begin{align*} D^+ _\Delta V(\e(t))
\leq -\mathcal{M}_1^p V(\e(t)) + \mathcal{M}_2^p \sup_{s \in [t-\t, t]_\mathbb{T}} V(\e(s)). \end{align*} Therefore, from \Cref{lemma-1}, we get \begin{align*} V(\e(t)) \leq \sup_{s \in [t-\t,t]_\mathbb{T}} V(\e(s)) e_{\ominus \lambda} (t,0), \end{align*} where $\lambda > 0$ is a solution of $\lambda + \mathcal{M}_2^p \exp(\lambda \t) < \mathcal{M}_1^p$. Further, it is clear that \begin{align*}
\| \e(t) \|_p & = \| Z^{-1} Z \e(t) \|_p \\
& \leq \|Z^{-1}\|_p V(\e(t)) \\
& \leq \|Z^{-1}\|_p \sup_{s \in [t-\t,t]_\mathbb{T}} V(\e(s)) e_{\ominus \lambda }(t,0) \\ & \leq C e_{\ominus \lambda }(t,0), \end{align*}
where $C= \| Z\|_p \|Z^{-1}\|_p \sup_{s \in [t-\t,t]_\mathbb{T}} \|\e(s)\|_p>0$.
Hence, from \Cref{main:Def:ELS}, the error
system \eqref{eq:main-E} is exponentially stable, and hence, the drive system \eqref{eq:main-D} and the response system \eqref{eq:main-R} are exponentially lag-synchronized. \end{proof}
\begin{remark} \label{remark:p:id}
By choosing $Z=\Id$, the constants $\mathcal{M}_1^p$ and $\mathcal{M}_2^p$ of \Cref{main-theorem-1} become \begin{align*}
\mathcal{M}_1^p & = - (M_p(-K,\mathbb{T}) + (M_\G L_\U + M_\U L_\G) + (M_\G L_\f + M_\f L_\G)\| \P\|_p + L_\G \|I\|_p), \\
\mathcal{M}_2^p & = (M_\G L_\f + M_\f L_\G)(\|\Q\|_p + \t \|\R\|_p). \end{align*} \end{remark} Next, we consider a particular case of the problem: if we set $\G(\x(t)) = \Id$ and $\U(\x(t)) = \A \x(t)$, where $\A=\diag\{\a_1,\a_2,\ldots,\a_n \} \in \mathbb{R}^{n \times n}$ with $\a_i>0, i=1,2,\ldots,n$,
then the drive system \eqref{eq:main-D} and the response system \eqref{eq:main-R} become \begin{align} \label{eq:main-D-NN} \begin{cases} \x^\Delta(t) & = - \A \x(t) + \P \f(\x(t)) + \Q \f(\x(t-\t_1)) + \R \int_{t-\t_2}^t \f(\x(s)) \Delta s + I, \ t \in [0,\infty)_\mathbb{T},\\ \x(s) & = \phi(s), \ s \in [-\t , 0]_\mathbb{T} \end{cases} \end{align} and \begin{align} \label{eq:main-R-NN} \begin{cases} \y^\Delta(t) & = - \A \y(t) + \P \f(\y(t)) + \Q \f(\y(t-\t_1)) + \R \int_{t-\t_2}^t \f(\y(s)) \Delta s + I + u(t), \ t \in [0,\infty)_\mathbb{T},\\ \y(s) & = \psi(s), \ s \in [-\t , 0]_\mathbb{T}, \end{cases} \end{align} respectively. Also, the error system \eqref{eq:main-E} becomes \begin{align} \label{eq:main-E-NN} \e^\Delta(t) = -(\A+K)\e(t) + \P \hat \f(\e(t)) + \Q \hat \f(\e(t-\t_1)) + \R \int_{t-\t_2}^t \hat \f(\e(s)) \Delta s , \end{align} where $\hat \f(\e(\cdot)) = \f(\y(\cdot)) -\f(\x(\cdot-\beta))$.
\begin{remark} One could have a remark similar to \Cref{remark:ts:cases} for the drive system \eqref{eq:main-D-NN} and the response system \eqref{eq:main-R-NN}. \end{remark}
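To illustrate the drive-response-control loop of the special case \eqref{eq:main-D-NN}--\eqref{eq:main-R-NN}, here is a minimal simulation sketch on $\mathbb{T}=h\mathbb{Z}$ (where the $\Delta$-dynamics is an exact recursion), with $\beta = 0$; all matrices, delays and the gain $K$ below are our own illustrative choices, not taken from \Cref{ex-1} or \Cref{ex-3}.
\begin{verbatim}
import numpy as np

h, tau1, tau2 = 0.1, 0.2, 0.2          # T = h Z, beta = 0 (plain synchronization)
d1, d2 = int(tau1 / h), int(tau2 / h)
d = max(d1, d2)
A_ = np.eye(2)
P_ = np.array([[0.5, -0.1], [0.2, 0.4]])
Q_ = 0.3 * np.eye(2)
R_ = 0.2 * np.eye(2)
I_ = np.zeros(2)
K = 3.0 * np.eye(2)                    # feedback gain
f = np.tanh

N = 200
x = np.zeros((N + d + 1, 2)); x[:d + 1] = [0.2, 0.4]     # drive history
y = np.zeros((N + d + 1, 2)); y[:d + 1] = [-0.4, -0.6]   # response history

def rhs(z, hist, k):
    dist = h * sum(f(hist[j]) for j in range(k - d2, k))
    return -A_ @ z + P_ @ f(z) + Q_ @ f(hist[k - d1]) + R_ @ dist + I_

for k in range(d, N + d):
    u = -K @ (y[k] - x[k])             # control law with beta = 0
    x[k + 1] = x[k] + h * rhs(x[k], x, k)
    y[k + 1] = y[k] + h * (rhs(y[k], y, k) + u)

print(np.linalg.norm(y[d] - x[d]), np.linalg.norm(y[-1] - x[-1]))
# the synchronization error norm decays from its initial value towards zero
\end{verbatim}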
Now, we give sufficient conditions for the exponential lag synchronization of the systems \eqref{eq:main-D-NN}--\eqref{eq:main-R-NN}.
\begin{theorem} \label{main-theorem-1-NN}
Let $\f$ satisfy the Lipschitz and boundedness conditions stated in \Cref{ass:fjgi-bounded}. If, for some $p \in \{1,2,\infty\}$, there exist a non-singular matrix $Z$ and a control gain matrix $K$ such that $\mathcal{M}_3^p - \mathcal{M}_4^p >0 $
and $-\mathcal{M}_3^p \in \mathcal{R}^+$, where \begin{align*}
\mathcal{M}_3^p & = - (M_p(-Z(\A+K)Z^{-1},\mathbb{T}) + \|Z\|_p \|Z^{-1}\|_p \|\P\|_p L_\f ),\\
\mathcal{M}_4^p & = \|Z\|_p \|Z^{-1}\|_p L_\f(\|\Q\|_p + \t \|\R\|_p), \end{align*} then the drive system \eqref{eq:main-D-NN} and response system \eqref{eq:main-R-NN} are exponentially lag-synchronized.
\end{theorem} \begin{proof} For any non-singular matrix $Z$, we define \begin{align*}
V(\e(t)) = \| Z \e(t)\|_p. \end{align*}
Similar to the proof of \Cref{main-theorem-1}, we
consider the following two steps:\\ \textbf{Step 1:} When $\mu(t)>0$, then for any $t \in \mathbb{T}$, we have \begin{align*}
\dfrac{\| Z \e(\sigma(t)) \|_p -\| Z \e(t) \|_p }{\mu(t)}
& = \dfrac{1}{\mu(t)} \bigg \{ \|Z \e(t) + \mu(t) Z \e^\Delta(t) \|_p -\| Z \e(t) \|_p \bigg \} \nonumber\\
& = \dfrac{1}{\mu(t)} \bigg \{ \| Z \e(t) + \mu(t) Z(-(\A+K)\e(t) + \P \hat \f(\e(t)) + \Q \hat \f(\e(t-\t_1)) \\
& \quad \quad + \R \int_{t-\t_2}^t \hat \f(\e(s)) \Delta s ) \|_p - \|Z\e(t)\|_p \bigg \} \nonumber \\
& \leq \dfrac{1}{\mu(t)} \big \{ \|Z \e(t) + \mu(t)(-Z(\A+K))\e(t)\|_p -\| Z \e(t) \|_p \big \} + \|Z \P \hat \f(\e(t)) \|_p\nonumber\\
& \quad \quad + \|Z \Q \hat \f(\e(t-\t_1))\|_p + \|Z \R \int_{t-\t_2}^t \hat \f(\e(s)) \Delta s \|_p \nonumber \\
& \quad \leq -\mathcal{M}_3^p \|Z \e(t)\|_p + \mathcal{M}_4^p \sup_{s \in [t-\t, t]_\mathbb{T}} \|Z \e(s)\|_p. \end{align*} Hence, from \Cref{def:delta}, we get \begin{align} \label{eq:eq-cal-7-NN} D^+_\Delta V(\e(t))
\leq -\mathcal{M}_3^p V(\e(t)) + \mathcal{M}_4^p \sup_{s \in [t-\t, t]_\mathbb{T}} V(\e(s)). \end{align} \noindent \textbf{Step 2:} When $\mu(t)=0$, then, using the same analysis as in Step 1, we get \begin{align*}
\lim_{h \to 0^+} \dfrac{\|Z \e(t+h) \|_p -\| Z \e(t) \|_p }{h}
& = \lim_{h \to 0^+} \dfrac{1}{h} \bigg \{ \| Z \e(t) + h Z \e^\Delta(t) + o(h) \|_p -\| Z \e(t) \|_p \bigg \} \\
& \leq \lim_{h \to 0^+} \dfrac{1}{h} \bigg \{ \|Z \e(t) + h Z(-(\A+K)\e(t) + \P \hat \f(\e(t)) \\
& \quad \quad + \Q \hat \f(\e(t-\t_1)) + \R \int_{t-\t_2}^t \hat \f(\e(s)) \Delta s ) + o(h) \|_p - \|Z\e(t)\|_p \bigg \} \nonumber \\
& \leq -\mathcal{M}_3^p \|Z \e(t)\|_p + \mathcal{M}_4^p \sup_{s \in [t-\t, t]_\mathbb{T}} \|Z \e(s)\|_p. \end{align*} Hence, using \Cref{def:delta} again, we get the same inequality as \eqref{eq:eq-cal-7-NN}.
Thus, from the above two steps, for any $t \in \mathbb{T}$, we have \begin{align*} D^+ _\Delta V(\e(t))
\leq -\mathcal{M}_3^p V(\e(t)) + \mathcal{M}_4^p \sup_{s \in [t-\t, t]_\mathbb{T}} V(\e(s)). \end{align*} Therefore, from \Cref{lemma-1}, we get $ V(\e(t)) \leq \sup_{s \in [t-\t,t]_\mathbb{T}} V(\e(s)) e_{\ominus \lambda} (t,0), $ where $\lambda > 0$ is a solution of $\lambda + \mathcal{M}_4^p \exp(\lambda \t) < \mathcal{M}_3^p$. Further, it is clear that
$\| \e(t) \|_p = \| Z^{-1} Z \e(t) \|_p \leq C e_{\ominus \lambda }(t,0), $
where $C= \| Z\|_p \|Z^{-1}\|_p \sup_{s \in [t-\t,t]_\mathbb{T}} \|\e(s)\|_p>0$.
Hence, from \Cref{main:Def:ELS}, the error
system \eqref{eq:main-E-NN} is exponentially stable, and hence, the drive system \eqref{eq:main-D-NN} and the response system \eqref{eq:main-R-NN} are exponentially lag-synchronized. \end{proof}
\begin{remark} \label{remark:p:id:NN} Similar to \Cref{remark:p:id}, by choosing $Z=\Id$, the constants $\mathcal{M}_3^p$ and $\mathcal{M}_4^p$ of \Cref{main-theorem-1-NN} become \begin{align*}
\mathcal{M}_3^p = - (M_p(-(\A+K),\mathbb{T}) + \|\P\|_p L_\f) , \ \mathcal{M}_4^p = L_\f(\|\Q\|_p + \t \|\R\|_p). \end{align*} \end{remark}
\begin{remark} In the case when there is no distributed time-delay in the systems \eqref{eq:main-D}--\eqref{eq:main-R} (or \eqref{eq:main-D-NN}--\eqref{eq:main-R-NN}), i.e., when $\t_2 = 0$, one can establish all the above results by setting the corresponding terms to zero in the computation of the constants $\mathcal{M}_1^p$ and $\mathcal{M}_2^p$ (or $\mathcal{M}_3^p$ and $\mathcal{M}_4^p$). \end{remark}
\begin{remark} The results of \Cref{main-theorem-1} and \Cref{main-theorem-1-NN} cover the problem in full generality; therefore, one can obtain the results for particular time domains, such as the continuous-time domain (when $\mathbb{T}=\mathbb{R}$) and the discrete-time domain (when $\mathbb{T}= \mathbb{Z}$), by replacing the matrix-measures involved in the constants $\mathcal{M}_1^p,\mathcal{M}_2^p,\mathcal{M}_3^p$ and $\mathcal{M}_4^p$ with the classical matrix-measures of \Cref{mm-real}. \end{remark}
\begin{remark} For the continuous-time domain, a few authors have reported synchronization results for C-GNNs with mixed delays \cite{work-lag-synchro-2,syn-lag, syn-finite, syn-adaptive}. In particular, in \cite{syn-lag}, the authors considered a class of C-GNNs with mixed delays and studied exponential lag synchronization via periodically intermittent control and a mathematical induction technique. In \cite{syn-finite}, the authors studied finite-time synchronization of C-GNNs with mixed delays by using the Lyapunov-Krasovskii functional approach. Furthermore, only a few authors have studied the synchronization problem for discrete-time C-GNNs \cite{CG-D-3,CG-D-7}. In particular, the authors in \cite{CG-D-7} studied exponential synchronization for an array of coupled discrete-time C-GNNs with time-dependent delay by applying the Lyapunov-Krasovskii functional approach, while in \cite{CG-D-3} the authors investigated the existence of a bounded unique solution, exponential stability, and synchronization by using fixed point and inequality techniques. \end{remark}
\begin{remark} The results obtained for continuous-time \cite{work-lag-synchro-2,syn-lag, syn-finite, syn-adaptive} and discrete-time \cite{CG-D-3,CG-D-7} C-GNNs were established separately, and they cannot be directly applied or easily extended to C-GNNs on arbitrary time domains. Moreover, to the best of our knowledge, no work on the continuous-time or discrete-time domain has discussed exponential lag synchronization results for C-GNNs with mixed delays by using the matrix-measure approach and the Halanay inequality; therefore, the results of this manuscript are new even for the continuous case ($\mathbb{T} = \mathbb{R}$) and the discrete case ($\mathbb{T} = \mathbb{Z}$). \end{remark}
\section{Illustrative Examples} \label{sec:examples}
In this section, we provide two examples to illustrate the obtained results for different time domains. Whereas the first example is tailored to best illustrate the potential of our theoretical results with respect to arbitrary time domains, the second example is borrowed from \cite{add-3} to show the general applicability of our methods.
\begin{example} \label{ex-1} Consider the drive system \eqref{eq:main-D} and response system \eqref{eq:main-R} with the following coefficients \begin{align*} & \G(\x(t)) = \begin{bmatrix} 0.4 + 0.2 \cos(\x_1(t)) & 0.0 \\ 0.0 & 0.4-0.2 \sin(\x_2(t)) \end{bmatrix}, \ \U(\x(t)) = \begin{bmatrix} 0.3 + 0.2 \sin(\x_1(t)) \\ 0.3 - 0.2 \cos(\x_2(t)) \end{bmatrix},\\ & \P = \begin{bmatrix} 0.8 & 0.0 \\ -0.2 & -0.7 \end{bmatrix}, \ \Q = \begin{bmatrix} -0.4 & 0.1 \\ -0.2 & 0.5 \end{bmatrix}, \ \R = \begin{bmatrix} -0.5 & 0.6 \\ -0.6 & 0.5 \end{bmatrix}, \ I = \begin{bmatrix} 0.4 \\ 0.3 \end{bmatrix}, \\ & \f(\x(t))= \begin{bmatrix} 0.8 \tanh (\x_1(t)) \\ 0.8 \tanh (\x_2(t)) \end{bmatrix}, \ \phi(s) = \begin{bmatrix} 0.5 \\ 1 \end{bmatrix},\ \psi(s) = \begin{bmatrix} -1 \\ -0.5 \end{bmatrix} \ \text{for } s \in [-\t,0]_\mathbb{T}, \ Z=\Id. \end{align*} \end{example}
One can confirm that for \Cref{ex-1}, $\G, \U$, and $\f$ satisfy \Cref{ass:fjgi-bounded} with $L_\G = L_\U = 0.2, L_\f =M_\f = 0.8, M_\G =0.6, M_\U = 0.5$. Now, we consider the following three different time domains.\\ \noindent \textbf{Case 1.} $\mathbb{T} = \mathbb{R}$.
Let $\t_1 = 0.5, \t_2=0.8$ and $\beta = 0.4$. Here, $\t=0.8$ and the graininess function $\mu(t)=0$ for all $ t \in \mathbb{R}$. The state trajectories and the error trajectories of the systems \eqref{eq:main-D}--\eqref{eq:main-R} without feedback control are shown in Fig. \ref{fig:1} and Fig. \ref{fig:2}, respectively. Clearly, from Fig. \ref{fig:1} and Fig. \ref{fig:2}, the drive system \eqref{eq:main-D} and the response system \eqref{eq:main-R} are not synchronized. \newlength\figureheight \newlength\figurewidth \setlength\figureheight{4cm} \setlength\figurewidth{8cm} \graphicspath{{plot-real/}} \begin{figure}
\caption{Uncoupled synchronization curves \\ when $\mathbb{T} = \mathbb{R}$ }
\label{fig:1}
\caption{Uncoupled synchronization error \\ curves when $\mathbb{T} = \mathbb{R}$}
\label{fig:2}
\end{figure} However, for the control gain matrix $$ K = \begin{bmatrix} 2.2 & 0.0\\ 0.0 & 2.2 \end{bmatrix},$$ we can calculate $$ \mathcal{M}_2^1 = 0.9472, \ \mathcal{M}_2^2 = 0.9542, \ \mathcal{M}_2^\infty = 1.0112 $$ and \begin{align*}
\Lambda_1(-K) &= -2.2000, \ \Lambda_2(-K) = -4.4000,\ \Lambda_\infty(-K) = -2.2000. \end{align*}
Hence,
$$
\mathcal{M}_1^1 = 0.7800 ,\quad \mathcal{M}_1^2 = 3.2242, \quad \text{and } \mathcal{M}_1^\infty = 1.0840.
$$ Therefore, we can see that $\mathcal{M}_1^1 - \mathcal{M}_2^1 = -0.1672 <0,$ $\mathcal{M}_1^2 - \mathcal{M}_2^2 = 2.2700 >0,$ and $\mathcal{M}_1^\infty - \mathcal{M}_2^\infty = 0.0728 >0$. Also, $-\mathcal{M}_1^2, -\mathcal{M}_1^\infty \in \mathcal{R}^+$. Hence, for $p=2, \infty$, all the conditions of \Cref{main-theorem-1} hold, and thus, the systems \eqref{eq:main-D}--\eqref{eq:main-R} with feedback control \eqref{eq:control} are exponentially lag-synchronized, with maximum convergence rates of $1.0366$ and $0.0394$ for $p = 2$ and $p=\infty$, respectively. The synchronization curves and synchronization error curves with feedback control are shown in Fig. \ref{fig:3} and Fig. \ref{fig:4}, respectively. \begin{figure}
\caption{Coupled synchronization curves \\ when $\mathbb{T} = \mathbb{R}$}
\label{fig:3}
\caption{Coupled synchronization error curves \\ when $\mathbb{T} = \mathbb{R}$}
\label{fig:4}
\end{figure}
\noindent \textbf{Case 2.} $\mathbb{T} = 0.5 \mathbb{Z}$.
Let $\t_1=\t_2=\beta = 0.5$. Here, $\t=0.5$ and the graininess function $\mu(t)=0.5$ for all $ t \in 0.5\mathbb{Z}$. The state trajectories and the error trajectories of the systems \eqref{eq:main-D}--\eqref{eq:main-R} without feedback control are shown in Fig. \ref{fig:5} and Fig. \ref{fig:6}, respectively, which show that the systems are clearly not synchronized. \graphicspath{{plot-discrete/}} \begin{figure}
\caption{Uncoupled synchronization curves \\ when $\mathbb{T} = \frac{1}{2}\mathbb{Z}$}
\label{fig:5}
\caption{Uncoupled synchronization error \\ curves when $\mathbb{T} = \frac{1}{2}\mathbb{Z}$}
\label{fig:6}
\end{figure} However, for the control gain matrix $$ K = \begin{bmatrix} 2.0 & 0.0\\ 0.0 & 2.0 \end{bmatrix},$$ we can calculate $$ \mathcal{M}_2^1 = 0.7360, \ \mathcal{M}_2^2 = 0.7430, \ \mathcal{M}_2^\infty = 0.8000 $$ and \begin{align*}
\Lambda_1(-K) &= -2.000, \ \Lambda_2(-K) = -2.000,\ \Lambda_\infty(-K) = -2.000. \end{align*}
Hence,
$$
\mathcal{M}_1^1 = 0.5800 ,\quad \mathcal{M}_1^2 = 0.8242, \quad \text{and } \mathcal{M}_1^\infty = 0.8840.
$$ Therefore, we can see that $\mathcal{M}_1^1 - \mathcal{M}_2^1 = -0.1560 <0,$ $\mathcal{M}_1^2 - \mathcal{M}_2^2 = 0.0812 >0,$ and $\mathcal{M}_1^\infty - \mathcal{M}_2^\infty = 0.0840 >0$. Also, $-\mathcal{M}_1^2, -\mathcal{M}_1^\infty \in \mathcal{R}^+$. Hence, for $p=2, \infty$, all the conditions of \Cref{main-theorem-1} hold, and thus, the systems \eqref{eq:main-D}--\eqref{eq:main-R} with feedback control \eqref{eq:control} are exponentially lag-synchronized, with maximum convergence rates of $0.0583$ and $0.0590$ for $p = 2$ and $p=\infty$, respectively. The synchronization curves and synchronization error curves with feedback control are shown in Fig. \ref{fig:7} and Fig. \ref{fig:8}, respectively. \begin{figure}
\caption{Coupled synchronization curves \\ when $\mathbb{T} = \frac{1}{2}\mathbb{Z}$}
\label{fig:7}
\caption{Coupled synchronization error curves \\ when $\mathbb{T} = \frac{1}{2}\mathbb{Z}$}
\label{fig:8}
\end{figure}
\noindent \textbf{Case 3.} $\mathbb{T} = \mathcal{P} = [-1,0] \cup \bigcup_{i=0}^\infty [i, i+0.7]$.
Let $\t_1=\t_2=\beta = 1$.
Here, $\t=1$ and the graininess function $\mu(t)$ is given by \begin{align*} \mu(t) = \begin{cases} 0, \ t \in [-1,0] \cup \bigcup_{i=0}^\infty [i, i+0.7), \\ 0.3, \ t \in \bigcup_{i=0}^\infty \{i+0.7 \}. \end{cases} \end{align*} The state trajectories and the error trajectories of the systems \eqref{eq:main-D}--\eqref{eq:main-R} without feedback control are shown in Fig. \ref{fig:9} and Fig. \ref{fig:10}, respectively, which show that the systems are clearly not synchronized. \graphicspath{{plot-ts-1/}} \begin{figure}
\caption{Uncoupled synchronization curves \\ when $\mathbb{T} = \mathcal{P}$}
\label{fig:9}
\caption{Uncoupled synchronization error curves when $\mathbb{T} = \mathcal{P}$}
\label{fig:10}
\end{figure} However, for the control gain matrix $$ K = \begin{bmatrix} 2.4 & 0.0\\ 0.0 & 2.4 \end{bmatrix},$$ we can calculate $$ \mathcal{M}_2^1 = 1.0880, \ \mathcal{M}_2^2 = 1.0950, \ \mathcal{M}_2^\infty = 1.1520 $$ and \begin{align*}
\Lambda_1(-K) &= -2.4000, \ \Lambda_2(-K) = -2.4000,\ \Lambda_\infty(-K) = -2.4000. \end{align*}
Hence,
$$
\mathcal{M}_1^1 = 0.9800 ,\quad \mathcal{M}_1^2 = 1.2242, \quad \text{and } \mathcal{M}_1^\infty = 1.2840.
$$ Therefore, we can see that $\mathcal{M}_1^1 - \mathcal{M}_2^1 = -0.1080 <0,$ $\mathcal{M}_1^2 - \mathcal{M}_2^2 = 0.1292 >0,$ and $\mathcal{M}_1^\infty - \mathcal{M}_2^\infty = 0.1320 >0$. Also, $-\mathcal{M}_1^2, -\mathcal{M}_1^\infty \in \mathcal{R}^+$. Hence, for $p=2, \infty$, all the conditions of \Cref{main-theorem-1} hold, and thus, the systems \eqref{eq:main-D}--\eqref{eq:main-R} with feedback control \eqref{eq:control} are exponentially lag-synchronized, with maximum convergence rates of $0.0602$ and $0.0599$ for $p = 2$ and $p=\infty$, respectively. The synchronization curves and synchronization error curves with feedback control are shown in Fig. \ref{fig:11} and Fig. \ref{fig:12}, respectively. \begin{figure}
\caption{Coupled synchronization curves \\ when $\mathbb{T} = \mathcal{P}$}
\label{fig:11}
\caption{Coupled synchronization error curves when $\mathbb{T} = \mathcal{P}$}
\label{fig:12}
\end{figure}
Next, we provide another example to illustrate our second main result, \Cref{main-theorem-1-NN}. \begin{example} \label{ex-3} Consider the continuous-time case of the drive and response systems \eqref{eq:main-D-NN}--\eqref{eq:main-R-NN} with the following coefficients as in \cite[Ex. 2]{add-3} \begin{align*} &\A = \begin{bmatrix} 1 & 0 \\ 0 & 1 \end{bmatrix}, \ \P = \begin{bmatrix} 2.0 & -0.1 \\ -5.0 & 2.8 \end{bmatrix}, \ \Q = \begin{bmatrix} -1.6 & -0.1 \\ -0.3 & -2.5 \end{bmatrix}, \ \R = \begin{bmatrix} 0.5 & 0.6 \\ 0.7 & 0.2 \end{bmatrix}, \ I = \begin{bmatrix} 0.0 \\ 0.0 \end{bmatrix}, \\ & \f(\x(t))= \begin{bmatrix} \tanh(\x_1(t)) \\ \tanh(\x_2(t)) \end{bmatrix}, \ \t_1 = 0.6, \t_2=0.2, \beta= 0, \\ & \phi(s) = \begin{bmatrix} 0.2 \\ 0.4 \end{bmatrix},\ \psi(s) = \begin{bmatrix} -0.4 \\ -0.6 \end{bmatrix} \ \text{for } s \in [-1,0]_\mathbb{T}, \ Z=\Id. \end{align*} \end{example} One can confirm that for \Cref{ex-3}, $\f$ satisfies the Lipschitz condition with $ L_\f =1$. The state trajectories and the error trajectories of the systems \eqref{eq:main-D-NN}--\eqref{eq:main-R-NN} without feedback control are shown in Fig. \ref{fig:19} and Fig. \ref{fig:20}, respectively. Clearly, from Fig. \ref{fig:19} and Fig. \ref{fig:20}, the drive system \eqref{eq:main-D-NN} and the response system \eqref{eq:main-R-NN} are not synchronized. \graphicspath{{plot-ex-3/}} \begin{figure}
\caption{Uncoupled synchronization curves}
\label{fig:19}
\caption{Uncoupled synchronization error \\curves }
\label{fig:20}
\end{figure} However, for the control gain matrix $$ K = \begin{bmatrix} 3.8 & 0.0\\ 0.0 & 3.8 \end{bmatrix},$$ we can calculate $$ \mathcal{M}_4^1 = 3.320, \ \mathcal{M}_4^2 = 3.1331, \ \mathcal{M}_4^\infty = 3.460, $$ and \begin{align*}
\Lambda_1(-(\A+K)) &= -4.80, \ \Lambda_2(-(\A+K)) = -9.60,\ \Lambda_\infty(-(\A+K)) = -4.80. \end{align*}
Hence,
$$
\mathcal{M}_3^1 = -2.200,\quad \mathcal{M}_3^2 = 3.5892, \quad \text{and } \mathcal{M}_3^\infty = -3.0.
$$ Therefore, we see that $\mathcal{M}_3^1 - \mathcal{M}_4^1 = -5.520 <0,$ $\mathcal{M}_3^2 - \mathcal{M}_4^2 = 0.4561 >0,$ and $\mathcal{M}_3^\infty - \mathcal{M}_4^\infty = -6.460 <0$. Also, $-\mathcal{M}_3^2 \in \mathcal{R}^+$. Hence, for $p=2$, all the conditions of \Cref{main-theorem-1-NN} hold, and thus, the systems \eqref{eq:main-D-NN}--\eqref{eq:main-R-NN} with feedback control \eqref{eq:control} are exponentially synchronized with a maximum convergence rate of $0.1533$. The synchronization curves and synchronization error curves with feedback control are shown in Fig. \ref{fig:21} and Fig. \ref{fig:22}, respectively. \begin{figure}
\caption{Coupled synchronization curves }
\label{fig:21}
\caption{Coupled synchronization error curves}
\label{fig:22}
\end{figure}
Comparing the results quantitatively, we note that our approach provides a faster error convergence rate of $0.1533$, compared to the rate of $0.01$ reported in \cite{add-3}.
\begin{remark}
Previous works, such as \cite{work-lag-synchro-2,syn-lag,add-3, syn-finite, syn-adaptive,CG-D-3,CG-D-7,add-1,add-2}, have considered similar types of examples on either continuous-time or discrete-time domains. To the best of our knowledge, there is currently no other example in the literature that addresses lag synchronization of C-GNNs on hybrid-type time domains (as presented in Case 3 of \Cref{ex-1}).
\end{remark}
\section*{Conclusion} We have established exponential lag synchronization results for a new class of C-GNNs with discrete and distributed time delays on arbitrary time domains by using the theory of time scales and a feedback control law. We have also studied some special cases of the considered problem. We mainly used a unified matrix-measure theory and the Halanay inequality to establish these results. The obtained results are illustrated by simulation examples for different time domains, including the continuous-time domain (Case 1 of \Cref{ex-1}, \Cref{ex-3}), the discrete-time domain (Case 2 of \Cref{ex-1}), and the non-overlapping time domain (Case 3 of \Cref{ex-1}).
Possible future research could concern an extension of the results to non-smooth though still bounded functions. Another potential future direction is to further investigate the stability and synchronization results for C-GNNs with delays and impulsive conditions on arbitrary time domains. This could include studying the effects of different types of delays, such as time-varying delays or distributed delays, on the synchronization of C-GNNs.
Additionally, it could be interesting to investigate the robustness and reliability of the synchronization results for C-GNNs with delays and stochastic effects on time scales. This could include studying the effects of random disturbances or noise on the synchronization of C-GNNs, and how the proposed approach can be modified to handle these types of effects.
\end{document}
\begin{document}
\title{Unified Riccati theory for optimal permanent and sampled-data control problems in finite and infinite time horizons}
\begin{abstract} We revisit and extend the Riccati theory, unifying continuous-time linear-quadratic optimal permanent and sampled-data control problems, in finite and infinite time horizons. In a nutshell, we prove that the following diagram commutes: \begin{equation*} \xymatrix@R=2cm@C=4cm { \mathrm{(SD\text{-}DRE)} \hspace{-5cm} & E^{T,\Delta} \ar[r]^{T \to +\infty} \ar[d]_{\Vert \Delta \Vert \to 0} & E^{\infty,\Delta} \ar[d]^{\Vert \Delta \Vert \to 0} & \hspace{-5cm} \mathrm{(SD\text{-}ARE)} \\ \mathrm{(P\text{-}DRE)} \hspace{-5cm} & E^T \ar[r]_{T \to +\infty} & E^\infty & \hspace{-5cm} \mathrm{(P\text{-}ARE)}
} \end{equation*} i.e., that: \begin{itemize} \item[--] when the time horizon $T$ tends to $+\infty$, one passes from the Sampled-Data Difference Riccati Equation~$\mathrm{(SD\text{-}DRE)}$ to the Sampled-Data Algebraic Riccati Equation~$\mathrm{(SD\text{-}ARE)}$, and from the Permanent Differential Riccati Equation~$\mathrm{(P\text{-}DRE)}$ to the Permanent Algebraic Riccati Equation~$\mathrm{(P\text{-}ARE)}$; \item[--] when the maximal step~$\Vert \Delta \Vert$ of the time partition~$\Delta$ tends to~$0$, one passes from~$\mathrm{(SD\text{-}DRE)}$ to~$\mathrm{(P\text{-}DRE)}$, and from~$\mathrm{(SD\text{-}ARE)}$ to~$\mathrm{(P\text{-}ARE)}$. \end{itemize} The notation $E$ in the above diagram (with various superscripts) refers to the solution of each of the Riccati equations listed above. Our notations and analysis provide a unified framework in order to settle all corresponding results. \end{abstract}
\textbf{Keywords:} optimal control; sampled-data control; linear-quadratic (LQ) problems; Riccati theory; feedback control; convergence.
\textbf{AMS Classification:} 49J15; 49N10; 93C05; 93C57; 93C62.
\section{Introduction} Optimal control theory is concerned with acting on controlled dynamical systems by minimizing a given criterion. We speak of a \textit{Linear-Quadratic~(LQ)} optimal control problem when the control system is a linear differential equation and the cost is given by a quadratic integral (see \cite{Kwakernaak}). One of the main results of LQ theory is that the optimal control is expressed as a linear state feedback called \textit{Linear-Quadratic Regulator~(LQR)}. The linear state feedback is described by using the \textit{Riccati matrix} which is the solution to a nonlinear backward matrix Cauchy problem in finite time horizon (DRE: Differential Riccati Equation), and to a nonlinear algebraic matrix equation in infinite time horizon (ARE: Algebraic Riccati Equation). The LQR problem is a fundamental issue in optimal control theory. Since the pioneering works by Maxwell, Lyapunov and Kalman (see the textbooks \cite{Kwakernaak, lee1986, sontag1998}), it has been extended to many contexts, among which: discrete-time~\cite{kuvera1972}, stochastic~\cite{zhu2005}, infinite-dimensional~\cite{curtain1974}, fractional~\cite{li2008}. One of these concerns the case where controls must be piecewise constant, which is particularly important in view of engineering applications. We speak, there, of \textit{sampled-data controls} (or \textit{digital controls}), in contrast to \textit{permanent controls}. Recall that a control problem is said to be \textit{permanent} when the control function is authorized to be modified at any time. In many problems, achieving the corresponding solution trajectory requires a permanent modification of the control. However such a requirement is not conceivable in practice for human beings, even for mechanical or numerical devices. Therefore sampled-data controls, for which only a finite number of modifications is authorized over any compact time interval, are usually considered for engineering issues. The corresponding set of \textit{sampling times} (at which the control value can be modified) is called \textit{time partition}. A vast literature deals with sampled-data control systems, as evidenced by numerous references and books (see, e.g., \cite{acker,acker2,azhm,bami,chen,fada,gero,Iser,land,nesi,raga,souz,toiv,tou} and references therein). One of the first contributions on LQ optimal sampled-data control problems can be found in~\cite{kalman1958}. This field has significantly grown since the 70's, motivated by the electrical and mechanical engineering issues with applications for example to strings of vehicles (see~\cite{astrom1963, dorato1971, levis1968, levis1971, melzer1971, middleton1990, salgado1988}). Sampled-data versions of feedback controls and of Riccati equations have been derived and, like in the fully discrete-time case (see \cite[Remark~2]{liu2014}), these two concepts in the sampled-data control case have various equivalent formulations in the literature, due to different developed approaches: in most of the references, LQ optimal sampled-data control problems are recast as fully discrete-time problems, and then the feedback control and the Riccati equation are obtained by applying the discrete-time dynamical programming principle (see \cite{bini2009,dorato1971,kalman1958}) or by applying a discrete-time version of the Pontryagin maximum principle (see \cite{astrom1963,dorato1971,kleinman1966}).
In the present paper our objective is to provide a mathematical framework in which LQ theories in the permanent and in the sampled-data case can be settled in a unified way. We build on our recent article~\cite{bourdin2017} in which we have developed a novel approach keeping the initial continuous-time formulation of the sampled-data problem, based on a sampled-data version of the Pontryagin maximum principle (see \cite{bourdin2013,bourdin2016}). Analogies between LQ optimal permanent and sampled-data controls have already been noticed in several works (see, e.g.,~\cite{salgado1988} or~\cite[Remark~5.4]{yuz2005}). In this article we gather in a unified setting the main results of LQ optimal control theory in the following four situations: permanent / sampled-data control, finite / infinite time horizon. To this aim, an important tool is the map~$\mathcal{F}$ defined in Section~\ref{secF}, thanks to which we formulate, in the above-mentioned four situations, feedback controls and Riccati equations in Propositions~\ref{thmriccperm}, \ref{thmriccsample}, \ref{thmriccperminf} and~\ref{thmriccsampleinf} (Sections \ref{secfinitehorizon} and \ref{secinfinitehorizon}). Moreover, exploiting the continuity of $\mathcal{F}$, we establish convergence results between the involved Riccati matrices, either as the length of the time partition goes to zero or as the finite time horizon goes to infinity. Four convergence results are summarized in the diagram presented in the abstract, and we refer to our main result, Theorem~\ref{thmmain1} (stated in Section~\ref{secmain}), for the complete mathematical statement. Some of the convergence results are already known, some others are new. Hence, Theorem~\ref{thmmain1} fills some gaps in the existing literature and, in some sense, it closes the loop, which is the meaning of the commutative diagram that conveys the main message of this article.
Theorem~\ref{thmmain1} is proved in Appendix~\ref{app1}. An important role in the proof is played by the \textit{optimizability} property (or \textit{finite cost} property), which is well known in infinite time horizon problems and is related to various notions of controllability and of stabilizability (see \cite{datta2004,terrell2009,weiss2000}). For sampled-data controls, when rewriting the original problem as a fully discrete-time problem, optimizability is formulated on the corresponding discrete-time problem (see \cite[Theorem~3]{dorato1971} or~\cite[p.~348]{levis1971}). Here, we prove in the instrumental Lemma~\ref{lemimportant} that, if the permanent optimizability property is satisfied, then the sampled-data optimizability property is satisfied for all time partitions of sufficiently small length (moreover, a bound of the minimal sampled-data cost is given, uniform with respect to the length of the time partition). This lemma plays a key role in order to prove convergence of the sampled-data Riccati matrix to the permanent one in infinite time horizon when the length of the time partition goes to zero.
\section{Preliminaries on linear-quadratic optimal control problems}\label{secprelim}
Throughout the paper, given any $p \in \mathbb{N}^*$, we denote by $\mathcal{S}^p_+$ (resp., $\mathcal{S}^p_{++}$) the set of all symmetric positive semi-definite (resp., positive definite) matrices of $\mathbb{R}^{p\times p}$. Let $n$, $m \in \mathbb{N}^*$, let $P \in \mathcal{S}^n_+$, and for every $t\in\mathbb{R}$, let~$A(t)\in\mathbb{R}^{n\times n}$, $B(t)\in\mathbb{R}^{n\times m}$, $Q(t)\in\mathcal{S}^n_+$ and $R(t)\in\mathcal{S}^{m}_{++}$ be matrices depending continuously on $t$. Let~$\Phi(\cdot,\cdot)$ be the \textit{state-transition matrix} (\textit{fundamental matrix solution}) associated to~$A(\cdot)$ (see \cite[Appendix~C.4]{sontag1998}).
\begin{definition}\label{defautonomous} We speak of an \textit{autonomous setting} when $A(t)\equiv A \in \mathbb{R}^{n\times n}$, $B(t)\equiv B \in \mathbb{R}^{n\times m}$, $Q(t)\equiv Q \in \mathcal{S}^n_{+}$ and $R(t)\equiv R \in \mathcal{S}^{m}_{++}$ are constant with respect to $t$. \end{definition}
\subsection{Notations for a unified setting}\label{secF}
In this paper we consider four different LQ optimal control problems: permanent control versus sampled-data control, and finite time horizon versus infinite time horizon. To provide a unified presentation of our results (see Propositions~\ref{thmriccperm}, \ref{thmriccsample}, \ref{thmriccperminf} and~\ref{thmriccsampleinf}), we define the map $$ \fonction{\mathcal{F}}{\mathbb{R} \times \mathcal{S}^n_+ \times \mathbb{R}_+}{\mathbb{R}^{n\times n}}{(t,E,h)}{\mathcal{F}(t,E,h) := \mathcal{M}(t,E,h) \mathcal{N}(t,E,h)^{-1} \mathcal{M}(t,E,h)^\top - \mathcal{G}(t,E,h) } $$ where $\mathcal{M}(t,E,h) := \mathcal{M}_1 (t,E,h) + \mathcal{M}_2(t,E,h)$, $\mathcal{N}(t,E,h) := \mathcal{N}_1(t,E,h) + \mathcal{N}_2 (t,E,h) + \mathcal{N}_3(t,E,h)$ and $\mathcal{G} (t,E,h) := \mathcal{G}_1(t,E,h) + \mathcal{G}_2(t,E,h)$, with
\begin{center}
\begin{tabular}{|c|c|c|} \hline
& if $h > 0$ & if $h=0$ \\ \hline & & \\ $\mathcal{M}_1(t,E,h) := $ & $\Phi(t,t-h)^\top E \left( \dfrac{1}{h} \displaystyle \int_{t-h}^t \Phi(t,\tau) B(\tau) \; d\tau \right)$ & $EB(t)$ \\ & & \\ \hline & & \\ $ \mathcal{M}_2(t,E,h) :=$ & $\dfrac{1}{h} \displaystyle \int_{t-h}^t \Phi (\tau,t-h)^\top Q(\tau) \left( \int_{t-h}^\tau \Phi(\tau,\xi) B(\xi) \; d\xi \right) \; d\tau$ & $0_{\mathbb{R}^{n\times m}}$ \\ & & \\ \hline & & \\ $ \mathcal{N}_1(t,E,h) := $ & $\displaystyle \dfrac{1}{h} \int_{t-h}^t R(\tau) \; d\tau$ & $R(t)$ \\ & & \\ \hline & & \\ $ \mathcal{N}_2(t,E,h) := $ & $\displaystyle \dfrac{1}{h} \int_{t-h}^t \left( \int_{t-h}^\tau B(\xi)^\top \Phi(\tau,\xi)^\top \; d\xi \right) Q(\tau) \left( \int_{t-h}^\tau \Phi(\tau,\xi)B(\xi) \; d\xi \right) \; d\tau $ & $0_{\mathbb{R}^{m\times m}}$ \\ & & \\ \hline & & \\ $ \mathcal{N}_3(t,E,h) := $ & $\displaystyle \dfrac{1}{h} \left( \int^t_{t-h} B(\tau)^\top \Phi(t,\tau)^\top \; d\tau \right) E \left( \int^t_{t-h} \Phi(t,\tau)B(\tau) \; d\tau \right)$ & $0_{\mathbb{R}^{m\times m}}$ \\ & & \\ \hline & & \\ $ \mathcal{G}_1(t,E,h) := $ & $\displaystyle \dfrac{1}{h} \int_{t-h}^t \Phi (\tau,t-h)^\top Q(\tau) \Phi(\tau,t-h) \; d\tau $ & $Q(t)$ \\ & & \\ \hline & & \\ $ \mathcal{G}_2(t,E,h) := $ & $\displaystyle \dfrac{1}{h} \Big( \Phi(t,t-h)^\top E \Phi(t,t-h) - E \Big)$ & $A(t)^\top E + E A(t)$ \\ & & \\ \hline \end{tabular} \end{center}
The map~$\mathcal{F}$ is well-defined and is continuous (see Lemma~\ref{lemF} in Appendix~\ref{appprelim}). Moreover, for $h=0$, we have $$ \mathcal{F}(t,E,0) = EB(t)R(t)^{-1} B(t)^\top E - Q(t) - A(t)^\top E - E A(t) \qquad \forall (t,E) \in \mathbb{R} \times \mathcal{S}^n_+. $$ One recognizes here the second member of the Permanent Differential Riccati Equation (see Proposition~\ref{thmriccperm} and Remark~\ref{remanalog}). The map~$\mathcal{F}$ is designed to provide a unified notation for the permanent and sampled-data control settings.
\begin{remark} In the \textit{autonomous setting} (see Definition~\ref{defautonomous}), the state-transition matrix is $\Phi (t,\tau) = e^{(t-\tau)A}$ for all~$(t,\tau) \in \mathbb{R} \times \mathbb{R}$ (see, e.g.,~\cite[Lemma~C.4.1]{sontag1998}) and hence in this case the map~$\mathcal{F}$ does not depend on $t$, and $$ \mathcal{F}(E,h) = \mathcal{M}(E,h) \mathcal{N}(E,h)^{-1} \mathcal{M}(E,h)^\top - \mathcal{G}(E,h)\qquad \forall E \in \mathcal{S}^n_+ \quad\forall h\geq 0 $$ where $\mathcal{M}(E,h) := \mathcal{M}_1 (E,h) + \mathcal{M}_2(E,h)$, $\mathcal{N}(E,h) := \mathcal{N}_1(E,h) + \mathcal{N}_2 (E,h) + \mathcal{N}_3(E,h)$ and $\mathcal{G} (E,h) := \mathcal{G}_1(E,h) + \mathcal{G}_2(E,h)$, with
\begin{center}
\begin{tabular}{|c|c|c|} \hline
& if $h > 0$ & if $h=0$ \\ \hline & & \\ $\mathcal{M}_1(E,h) := $ & $\displaystyle e^{hA^\top} E \left( \dfrac{1}{h} \int_{0}^h e^{\tau A} \; d\tau \right) B$ & $EB$ \\ & & \\ \hline & & \\ $ \mathcal{M}_2(E,h) :=$ & $\displaystyle \dfrac{1}{h} \left( \int_{0}^h e^{\tau A^\top} Q \left( \int_0^\tau e^{\xi A} \; d\xi \right) \; d\tau \right) B$ & $0_{\mathbb{R}^{n\times m}}$ \\ & & \\ \hline & & \\ $ \mathcal{N}_1(E,h) := $ & $R$ & $R$ \\ & & \\ \hline & & \\ $ \mathcal{N}_2(E,h) := $ & $\displaystyle B^\top \left( \dfrac{1}{h} \int_{0}^h \left( \int_{0}^\tau e^{\xi A^\top} \; d\xi \right) Q \left( \int_{0}^\tau e^{\xi A} \; d\xi \right) \; d\tau \right) B $ & $0_{\mathbb{R}^{m\times m}}$ \\ & & \\ \hline & & \\ $ \mathcal{N}_3(E,h) := $ & $\displaystyle B^\top \left( \dfrac{1}{h} \left( \int_0^{h} e^{\tau A^\top} \; d\tau \right) E \left( \int_0^{h}e^{\tau A} \; d\tau \right) \right) B$ & $0_{\mathbb{R}^{m\times m}}$ \\ & & \\ \hline & & \\ $ \mathcal{G}_1(E,h) := $ & $\displaystyle \dfrac{1}{h} \int_{0}^h e^{\tau A^\top} Q e^{\tau A }\; d\tau $ & $Q$ \\ & & \\ \hline & & \\ $ \mathcal{G}_2(E,h) := $ & $\displaystyle \dfrac{1}{h} \Big( e^{hA^\top} E e^{hA} - E \Big)$ & $A^\top E + E A$ \\ & & \\ \hline \end{tabular} \end{center}
In particular, in the autonomous setting and for $h=0$, we have $$ \mathcal{F}(E,0) = EBR^{-1} B^\top E - Q - A^\top E - E A \qquad \forall E \in \mathcal{S}^n_+. $$ \end{remark}
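To give a computational feel for this map, the following Python sketch (using NumPy and SciPy; all function names are ours) evaluates $\mathcal{F}(E,h)$ in the autonomous setting by approximating the integrals above with a simple Riemann sum, and checks numerically that $\mathcal{F}(E,h)$ is close to the closed-form $\mathcal{F}(E,0)$ for small $h$, in line with the continuity of $\mathcal{F}$.
\begin{verbatim}
import numpy as np
from scipy.linalg import expm

def F_autonomous(E, h, A, B, Q, R, quad_pts=400):
    """Evaluate F(E, h) in the autonomous setting; for h = 0 the closed-form
    Riccati right-hand side is returned, for h > 0 the integrals over [0, h]
    are approximated by a Riemann sum with quad_pts nodes."""
    n = A.shape[0]
    if h == 0:
        return E @ B @ np.linalg.solve(R, B.T @ E) - Q - A.T @ E - E @ A
    w = h / quad_pts
    taus = (np.arange(quad_pts) + 0.5) * w
    Phi = [expm(t * A) for t in taus]                 # e^{tau A} at the nodes
    Int = [np.zeros((n, n))]                          # running integral of e^{xi A}
    for k in range(1, quad_pts):
        Int.append(Int[-1] + w * Phi[k - 1])
    ehA = expm(h * A)
    IntB = sum(w * Pk for Pk in Phi)                  # int_0^h e^{tau A} dtau
    M1 = ehA.T @ E @ (IntB / h) @ B
    M2 = (1 / h) * sum(w * Phi[k].T @ Q @ Int[k] for k in range(quad_pts)) @ B
    N2 = B.T @ ((1 / h) * sum(w * Int[k].T @ Q @ Int[k] for k in range(quad_pts))) @ B
    N3 = B.T @ ((1 / h) * IntB.T @ E @ IntB) @ B
    G1 = (1 / h) * sum(w * Phi[k].T @ Q @ Phi[k] for k in range(quad_pts))
    G2 = (1 / h) * (ehA.T @ E @ ehA - E)
    M, N, G = M1 + M2, R + N2 + N3, G1 + G2
    return M @ np.linalg.solve(N, M.T) - G

A = np.array([[0.0, 1.0], [-1.0, -1.0]])
B = np.array([[0.0], [1.0]])
Q, R, E = np.eye(2), np.eye(1), np.eye(2)
print(np.linalg.norm(F_autonomous(E, 1e-3, A, B, Q, R) - F_autonomous(E, 0, A, B, Q, R)))
# small (of order h), illustrating the continuity of F at h = 0
\end{verbatim}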
\subsection{Finite time horizon: permanent / sampled-data control}\label{secfinitehorizon}
Given any $T >0$, we denote by~$\mathrm{AC}([0,T],\mathbb{R}^n)$ the space of absolutely continuous functions defined on $[0,T]$ with values in $\mathbb{R}^n$, and by $\mathrm{L}^2([0,T],\mathbb{R}^m)$ the Lebesgue space of square-integrable functions defined almost everywhere on $[0,T]$ with values in $\mathbb{R}^m$. In what follows $\mathrm{L}^2([0,T],\mathbb{R}^m)$ is the set of \textit{permanent controls}.
A \textit{time partition} of the interval $[0,T]$ is a finite set~$\Delta = \{ t_i \}_{i=0,\ldots,N}$, with $N \in \mathbb{N}^*$, such that~$ 0 = t_0 < t_1 < \ldots < t_{N-1} < t_N = T $. We denote by~$\mathrm{PC}^\Delta ([0,T],\mathbb{R}^m)$ the space of functions defined on $[0,T]$ with values in $\mathbb{R}^m$ that are piecewise constant according to the time partition~$\Delta$, that is $$ \mathrm{PC}^\Delta ([0,T],\mathbb{R}^m) := \{ u : [0,T] \to \mathbb{R}^m \ \mid\ u(t) = u_i\in\mathbb{R}^m\quad\forall t\in[t_i,t_{i+1}), \ i=0,\ldots,N-1 \}. $$ In what follows $\mathrm{PC}^\Delta([0,T],\mathbb{R}^m)$ is the set of \textit{sampled-data controls} according to the time partition $\Delta$ (it is a vector space of dimension $N$).
We denote by $\Vert \Delta \Vert := {\mathrm{max}}\{ h_i,\ i=1,\ldots,N\} > 0$, where $h_i := t_i - t_{i-1} > 0$ for all~$i=1,\ldots,N$. When $h_i = h$ for some~$h > 0$ for every~$i=1,\ldots,N$, the time partition $\Delta$ is said to be \textit{$h$-uniform} (which corresponds to \textit{periodic sampling}, see~\cite[Section~II.A]{bini2014}).
In this section we consider two LQ optimal control problems in finite time horizon: permanent control~$u \in \mathrm{L}^2([0,T],\mathbb{R}^m)$ (Proposition~\ref{thmriccperm}) and sampled-data control $u \in \mathrm{PC}^\Delta([0,T],\mathbb{R}^m)$ (Proposition~\ref{thmriccsample}).
\begin{proposition}[Permanent control in finite time horizon]\label{thmriccperm} Let $T > 0$ and let $x_0 \in \mathbb{R}^n$. The LQ optimal permanent control problem in finite time horizon $T$ given by \begin{equation}\tag{$\mathrm{OCP}^T_{x_0}$} \begin{array}{rl} \text{minimize} & \langle P x(T) , x(T) \rangle_{\mathbb{R}^n} + \displaystyle \int_0^T \Big( \langle Q(\tau) x(\tau) , x(\tau) \rangle_{\mathbb{R}^n} + \langle R(\tau) u(\tau) , u(\tau) \rangle_{\mathbb{R}^m} \Big) \; d\tau \\[18pt] \text{subject to} & \left\lbrace \begin{array}{l} x \in \mathrm{AC}([0,T],\mathbb{R}^n), \qquad u \in \mathrm{L}^2([0,T],\mathbb{R}^m) \\[8pt] \dot{x}(t) = A(t)x(t) + B(t)u(t) \qquad \text{for a.e.}\ t \in [0,T] \\[8pt] x(0)=x_0 \end{array} \right. \end{array} \end{equation} has a unique optimal solution $(x^*,u^*)$. Moreover $u^*$ is the time-varying state feedback $$ u^*(t) = - \mathcal{N}(t,E^T(t),0)^{-1} \mathcal{M}(t,E^T(t),0)^\top x^*(t) \qquad \text{for a.e.}\ t \in [0,T] $$ where $E^T: [0,T] \to \mathcal{S}^{n}_+$ is the unique solution to the Permanent Differential Riccati Equation $\mathrm{(P\text{-}DRE)}$ \begin{equation}\tag{$\mathrm{P\text{-}DRE}$} \left\lbrace \begin{array}{l} \dot{E^T}(t) = \mathcal{F}(t,E^T(t),0) \qquad \forall t \in [0,T] \\[5pt] E^T(T) = P. \end{array} \right. \end{equation} Furthermore, the minimal cost of $(\mathrm{OCP}^T_{x_0})$ is equal to $ \langle E^T(0) x_0 , x_0 \rangle_{\mathbb{R}^{n}}$. \end{proposition}
\begin{proposition}[Sampled-data control in finite time horizon]\label{thmriccsample} Let $T > 0$, let $\Delta = \{ t_i \}_{i=0,\ldots,N}$ be a time partition of the interval $[0,T]$ and let~$x_0 \in \mathbb{R}^n$. The LQ optimal sampled-data control problem in finite time horizon $T$ given by \begin{equation}\tag{$\mathrm{OCP}^{T,\Delta}_{x_0}$} \begin{array}{rl} \text{minimize} & \langle P x(T) , x(T) \rangle_{\mathbb{R}^n} + \displaystyle \int_0^T \Big( \langle Q(\tau) x(\tau) , x(\tau) \rangle_{\mathbb{R}^n} + \langle R(\tau) u(\tau) , u(\tau) \rangle_{\mathbb{R}^m} \Big) \; d\tau \\[18pt] \text{subject to} & \left\lbrace \begin{array}{l} x \in \mathrm{AC}([0,T],\mathbb{R}^n), \qquad u \in \mathrm{PC}^\Delta([0,T),\mathbb{R}^m) \\[8pt] \dot{x}(t) = A(t)x(t) + B(t)u(t) \qquad \text{for a.e.}\ t \in [0,T] \\[8pt] x(0)=x_0 \end{array} \right. \end{array} \end{equation} has a unique optimal solution $(x^*,u^*)$. Moreover $u^*$ is the time-varying state feedback $$ u^*_i = - \mathcal{N}(t_{i+1},E^{T,\Delta}_{i+1},h_{i+1})^{-1} \mathcal{M}(t_{i+1},E^{T,\Delta}_{i+1},h_{i+1} )^\top x^*(t_i) \qquad \forall i=0,\ldots,N-1 $$ where $E^{T,\Delta} = (E^{T,\Delta}_i)_{i=0,\ldots,N} \subset \mathcal{S}^{n}_+$ is the unique solution to the Sampled-Data Difference Riccati Equation~$\mathrm{(SD\text{-}DRE)}$ \begin{equation}\tag{$\mathrm{SD\text{-}DRE}$} \left\lbrace \begin{array}{l} E^{T,\Delta}_{i+1}-E^{T,\Delta}_i = h_{i+1} \mathcal{F} (t_{i+1},E^{T,\Delta}_{i+1},h_{i+1}) \qquad \forall i=0,\ldots,N-1 \\[5pt] E^{T,\Delta}_N = P. \end{array} \right. \end{equation} Furthermore, the minimal cost of $(\mathrm{OCP}^{T,\Delta}_{x_0})$ is equal to $ \langle E^{T,\Delta}_0 x_0 , x_0 \rangle_{\mathbb{R}^{n}}$. \end{proposition}
\begin{remark}\label{remanalog} The mathematical contents of Propositions~\ref{thmriccperm} and~\ref{thmriccsample} are not new. The time-varying state feedback~$u^*$ in Proposition~\ref{thmriccperm} is usually written as $$ u^*(t) = - R(t)^{-1} B(t)^\top E^T(t) x^*(t) \qquad \text{for a.e.}\ t \in [0,T] $$ and $\mathrm{(P\text{-}DRE)}$ is usually written as \begin{equation*} \left\lbrace \begin{array}{l} \dot{E^T}(t) = E^T(t) B(t) R(t)^{-1} B(t)^\top E^T(t) - Q(t) - A(t)^\top E^T(t) - E^T(t)A(t) \qquad \forall t \in [0,T] \\[5pt] E^T(T) = P \end{array} \right. \end{equation*} (see \cite{bressan2007, Kwakernaak, lee1986, sontag1998, trelat2005}). Like in the fully discrete-time case~\cite[Remark~2]{liu2014}, the analogous results in the sampled-data control case have various equivalent formulations in the literature. Using the Duhamel formula, Problem~$(\mathrm{OCP}^{T,\Delta}_{x_0})$ can be recast as a fully discrete-time linear-quadratic optimal control problem. In this way, the time-varying state feedback control $u^*$ in Proposition~\ref{thmriccsample} and $\mathrm{(SD\text{-}DRE)}$ were first obtained in~\cite{kalman1958} by applying the discrete-time dynamical programming principle (method revisited in~\cite[p.~616]{dorato1971} or more recently in~\cite[Theorem~4.1]{bini2009}), while they are derived in~\cite[Appendix~B]{astrom1963} or in \cite[p.~618]{dorato1971} by applying a discrete-time version of the Pontryagin maximum principle (see \cite{kleinman1966}).
In Theorem~\ref{thmmain1} hereafter, we are going to prove convergence of $E^{T,\Delta}$ to $E^T$ when $\Vert \Delta \Vert \to 0$. \end{remark}
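For readers who wish to experiment numerically, the following sketch (our own illustration, not part of the original results; the matrices $A$, $B$, $Q$, $R$, $P$, the horizon $T$ and the initial state $x_0$ are arbitrary choices) integrates the classical form of $\mathrm{(P\text{-}DRE)}$ recalled above backward in time with SciPy and evaluates the minimal cost $\langle E^T(0) x_0 , x_0 \rangle_{\mathbb{R}^n}$ of Proposition~\ref{thmriccperm}.

\begin{verbatim}
# A minimal numerical sketch (illustration only): backward integration of the
# classical form of (P-DRE),
#     dE/dt = E B R^{-1} B^T E - Q - A^T E - E A,   E(T) = P,
# for an arbitrary small example.
import numpy as np
from scipy.integrate import solve_ivp

n, m, T = 2, 1, 5.0
A = np.array([[0.0, 1.0], [-1.0, 0.5]])   # illustrative data
B = np.array([[0.0], [1.0]])
Q = np.eye(n)                              # Q positive definite
R = np.array([[1.0]])                      # R positive definite
P = np.zeros((n, n))                       # terminal weight

def backward_ode(s, y):
    # substitution s = T - t turns the terminal-value problem into an IVP
    E = y.reshape(n, n)
    F = E @ B @ np.linalg.solve(R, B.T) @ E - Q - A.T @ E - E @ A
    return (-F).ravel()

sol = solve_ivp(backward_ode, (0.0, T), P.ravel(), rtol=1e-10, atol=1e-12)
E0 = sol.y[:, -1].reshape(n, n)            # this is E^T(0)
x0 = np.array([1.0, -1.0])
print("E^T(0) =\n", E0)
print("minimal cost <E^T(0) x0, x0> =", x0 @ E0 @ x0)
\end{verbatim}

Given $E^T$, the optimal control can then be implemented through the usual feedback form recalled above.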
\subsection{Infinite time horizon: permanent / sampled-data control (autonomous setting and uniform time partition)}\label{secinfinitehorizon} This section is dedicated to the infinite time horizon case. We denote by~$\mathrm{AC}([0,+\infty),\mathbb{R}^n)$ the space of functions defined on~$[0,+\infty)$ with values in $\mathbb{R}^n$ which are absolutely continuous over all intervals~$[0,T]$ with~$T> 0$, and by $\mathrm{L}^2([0,+\infty),\mathbb{R}^m)$ the Lebesgue space of square-integrable functions defined almost everywhere on $[0,+\infty)$ with values in $\mathbb{R}^m$. Assume that we are in the autonomous setting (see Definition~\ref{defautonomous}). We consider the following assumptions: \begin{enumerate} \item[$\mathrm{(H_1)}$] $Q \in \mathcal{S}^n_{++}$. \item[$\mathrm{(H_2)}$] For every $x_0 \in \mathbb{R}^n$, there exists a pair $(x,u) \in \mathrm{AC}([0,+\infty),\mathbb{R}^n) \times \mathrm{L}^2 ([0,+\infty),\mathbb{R}^m)$ such that $\dot{x}(t) = Ax(t) + Bu(t)$ for almost every $t \geq 0$ and~$x(0)=x_0$, satisfying $$ \displaystyle \int_0^{+\infty} \Big( \langle Q x(\tau) , x(\tau) \rangle_{\mathbb{R}^n} + \langle R u(\tau) , u(\tau) \rangle_{\mathbb{R}^m} \Big) \; d\tau < +\infty. $$ \end{enumerate}
Assumption~$\mathrm{(H_2)}$ is known in the literature as the \textit{optimizability} assumption (or \textit{finite cost} assumption) and is related to various notions of \textit{stabilizability} of linear permanent control systems (see \cite{weiss2000}). A wide literature is devoted to this topic (see \cite{terrell2009} and the references mentioned in \cite[Section~10.10]{datta2004}). Recall that, if the pair~$(A,B)$ satisfies the Kalman condition (see \cite[Theorem~1.2]{zabczyk2008}) or only the weaker Popov-Belevitch-Hautus test condition (see \cite[Theorem~6.2]{terrell2009}), then~$\mathrm{(H_2)}$ is satisfied.
Let $h > 0$. The $h$-uniform time partition of the interval~$[0,+\infty)$ is the sequence $\Delta = \{ t_i \}_{i \in \mathbb{N}}$, where~$t_i := ih$ for every~$i \in \mathbb{N}$. We denote by $\Vert \Delta \Vert = h$ and by~$\mathrm{PC}^\Delta ([0,+\infty),\mathbb{R}^m)$ the space of functions defined on $[0,+\infty)$ with values in $\mathbb{R}^m$ that are piecewise constant according to the time partition $\Delta$, that is $$ \mathrm{PC}^\Delta ([0,+\infty),\mathbb{R}^m) := \{ u : [0,+\infty) \to \mathbb{R}^m\ \mid\ u(t) = u_i\quad \forall t \in [t_i,t_{i+1}), \ i\in\mathbb{N} \}. $$ We also consider the following assumption that we call \textit{$h$-optimizability} assumption: \begin{enumerate} \item[$\mathrm{(H}_2^h\mathrm{)}$] For every $x_0 \in \mathbb{R}^n$, there exists a pair $(x,u) \in \mathrm{AC}([0,+\infty),\mathbb{R}^n) \times \mathrm{PC}^\Delta ([0,+\infty),\mathbb{R}^m)$ such that $\dot{x}(t) = Ax(t) + Bu(t)$ for almost every $t \geq 0$ and $x(0)=x_0$, satisfying $$ \int_0^{+\infty} \Big( \langle Q x(\tau) , x(\tau) \rangle_{\mathbb{R}^n} + \langle R u(\tau) , u(\tau) \rangle_{\mathbb{R}^m} \Big) \; d\tau < +\infty. $$ \end{enumerate} Obviously, if $\mathrm{(H}_2^h\mathrm{)}$ is satisfied for some $h > 0$ then $\mathrm{(H_2)}$ is satisfied. In other words, $\mathrm{(H}_2^h\mathrm{)}$ for a given $h>0$ is stronger than~$\mathrm{(H}_2\mathrm{)}$. Conversely, we will prove in Lemma~\ref{lemimportant} further that, if $\mathrm{(H_1)}$ and~$\mathrm{(H_2)}$ are satisfied, then there exists~$\overline{h} > 0$ such that $\mathrm{(H}_2^h\mathrm{)}$ is satisfied for every $h\in(0,\overline{h}]$.
In this section, in the autonomous setting (see Definition~\ref{defautonomous}), we consider two infinite time horizon LQ optimal control problems: permanent control~$u \in \mathrm{L}^2([0,+\infty),\mathbb{R}^m)$ (Proposition~\ref{thmriccperminf}) and sampled-data control~$u \in \mathrm{PC}^\Delta([0,+\infty),\mathbb{R}^m)$ (Proposition~\ref{thmriccsampleinf}).
\begin{proposition}[Permanent control in infinite time horizon]\label{thmriccperminf} Assume that we are in the autonomous setting (see Definition~\ref{defautonomous}). Let $x_0 \in \mathbb{R}^n$. Under Assumptions $\mathrm{(H_1)}$ and $\mathrm{(H_2)}$, the LQ optimal permanent control problem in infinite time horizon given by \begin{equation}\tag{$\mathrm{OCP}^\infty_{x_0}$} \begin{array}{rl} \text{minimize} & \displaystyle \int_0^{+\infty} \Big( \langle Q x(\tau) , x(\tau) \rangle_{\mathbb{R}^n} + \langle R u(\tau) , u(\tau) \rangle_{\mathbb{R}^m} \Big) \; d\tau \\[18pt] \text{subject to} & \left\lbrace \begin{array}{l} x \in \mathrm{AC}([0,+\infty),\mathbb{R}^n), \quad u \in \mathrm{L}^2([0,+\infty),\mathbb{R}^m) \\[8pt] \dot{x}(t) = Ax(t) + Bu(t) \qquad \text{for a.e.}\ t \geq 0 \\[8pt] x(0)=x_0 \end{array} \right. \end{array} \end{equation} has a unique optimal solution $(x^*,u^*)$. Moreover $u^*$ is the state feedback $$ u^*(t) = - \mathcal{N}(E^\infty,0)^{-1} \mathcal{M}(E^\infty,0) ^\top x^*(t) \qquad \text{for a.e.}\ t \geq 0 $$ where $E^\infty \in \mathcal{S}^{n}_{++}$ is the unique solution to the Permanent Algebraic Riccati Equation~$\mathrm{(P\text{-}ARE)}$ \begin{equation}\tag{$\mathrm{P\text{-}ARE}$} \left\lbrace \begin{array}{l} \mathcal{F}(E^\infty,0) = 0_{\mathbb{R}^{n\times n}} \\[5pt] E^\infty \in \mathcal{S}^n_{+}. \end{array} \right. \end{equation} Furthermore, the minimal cost of $(\mathrm{OCP}^\infty_{x_0})$ is equal to $ \langle E^\infty x_0 , x_0 \rangle_{\mathbb{R}^{n}}$. \end{proposition}
\begin{proposition}[Sampled-data control in infinite time horizon]\label{thmriccsampleinf} Assume that we are in the autonomous setting (see Definition~\ref{defautonomous}). Let $\Delta = \{ t_i \}_{i \in \mathbb{N}} $ be a $h$-uniform time partition of the interval~$[0,+\infty)$ and let~$x_0 \in \mathbb{R}^n$. Under Assumptions~$\mathrm{(H_1)}$ and $\mathrm{(H}^h_2\mathrm{)}$, the LQ optimal sampled-data control problem in infinite time horizon given by \begin{equation}\tag{$\mathrm{OCP}^{\infty,\Delta}_{x_0}$} \begin{array}{rl} \text{minimize} & \displaystyle \int_0^{+\infty} \Big( \langle Q x(\tau) , x(\tau) \rangle_{\mathbb{R}^n} + \langle R u(\tau) , u(\tau) \rangle_{\mathbb{R}^m} \Big) \; d\tau \\[18pt] \text{subject to} & \left\lbrace \begin{array}{l} x \in \mathrm{AC}([0,+\infty),\mathbb{R}^n), \quad u \in \mathrm{PC}^\Delta([0,+\infty),\mathbb{R}^m) \\[8pt] \dot{x}(t) = Ax(t) + Bu(t) \qquad \text{for a.e.}\ t \geq 0 \\[8pt] x(0)=x_0 \end{array} \right. \end{array} \end{equation} has a unique optimal solution $(x^*,u^*)$. Moreover $u^*$ is the state feedback $$ u^*_i = - \mathcal{N}(E^{\infty,\Delta},h)^{-1} \mathcal{M}(E^{\infty,\Delta},h)^\top x^*(t_i) \qquad \forall i \in \mathbb{N} $$ where $E^{\infty,\Delta} \in \mathcal{S}^{n}_{++}$ is the unique solution to the Sampled-Data Algebraic Riccati Equation~$\mathrm{(SD\text{-}ARE)}$ \begin{equation}\tag{$\mathrm{SD\text{-}ARE}$} \left\lbrace \begin{array}{l} \mathcal{F}(E^{\infty,\Delta},h) = 0_{\mathbb{R}^{n\times n}} \\[5pt] E^{\infty,\Delta} \in \mathcal{S}^n_{+}. \end{array} \right. \end{equation} Furthermore, the minimal cost of $(\mathrm{OCP}^{\infty,\Delta}_{x_0})$ is equal to $ \langle E^{\infty,\Delta} x_0 , x_0 \rangle_{\mathbb{R}^{n}}$. \end{proposition}
\begin{remark} The mathematical content of Proposition~\ref{thmriccperminf} is well known in the literature (see \cite{bressan2007, Kwakernaak, lee1986, sontag1998, trelat2005}). The state feedback control $u^*$ in Proposition~\ref{thmriccperminf} is usually written as $$ u^*(t) = - R^{-1} B^\top E^\infty x^*(t) \qquad \text{for a.e.}\ t \geq 0 $$ and~$\mathrm{(P\text{-}ARE)}$ is usually written as \begin{equation*} \left\lbrace \begin{array}{l} E^\infty B R^{-1} B^\top E^\infty - Q - A^\top E^\infty - E^\infty A = 0_{\mathbb{R}^{n\times n}} \\[5pt] E^\infty \in \mathcal{S}^n_{+}. \end{array} \right. \end{equation*} As said in Remark~\ref{remanalog}, our formulation of Proposition~\ref{thmriccperminf}, using the continuous map~$\mathcal{F}$ defined in Section~\ref{secF}, provides a unified presentation in the permanent and sampled-data cases. In Theorem~\ref{thmmain1} hereafter, we are going to prove convergence of $E^{\infty,\Delta}$ to $E^\infty$ when $h=\Vert \Delta \Vert \to 0$. \end{remark}
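As a complement to the previous remark, the classical form of $\mathrm{(P\text{-}ARE)}$ can be solved directly with standard scientific software. The sketch below (ours; $A$, $B$, $Q$, $R$ are arbitrary illustrative choices) uses SciPy, checks the algebraic identity and forms the usual feedback gain $R^{-1} B^\top E^\infty$.

\begin{verbatim}
# Illustration only: solving the classical form of (P-ARE) and checking it.
import numpy as np
from scipy.linalg import solve_continuous_are

A = np.array([[0.0, 1.0], [-1.0, 0.5]])
B = np.array([[0.0], [1.0]])
Q = np.eye(2)
R = np.array([[1.0]])

E_inf = solve_continuous_are(A, B, Q, R)        # unique solution in S^n_+
residual = (E_inf @ B @ np.linalg.solve(R, B.T) @ E_inf
            - Q - A.T @ E_inf - E_inf @ A)
K = np.linalg.solve(R, B.T @ E_inf)             # permanent feedback u = -K x

print("||Riccati residual||  =", np.linalg.norm(residual))        # ~ 1e-14
print("closed-loop spectrum  =", np.linalg.eigvals(A - B @ K))    # stable
\end{verbatim}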
\begin{remark}\label{remanalog2} Similarly to the finite time horizon case (see Remark~\ref{remanalog}), the state feedback control in Proposition~\ref{thmriccsampleinf} and $\mathrm{(SD\text{-}ARE)}$ have various equivalent formulations in the literature (see \cite{bini2014,levis1968,levis1971,melzer1971,middleton1990}) and in most of these references Problem~$(\mathrm{OCP}^{\infty,\Delta}_{x_0})$ is recast as a fully discrete-time LQ optimal control problem with infinite time horizon. In particular the optimizability property for Problem~$(\mathrm{OCP}^{\infty,\Delta}_{x_0})$ is equivalent to the optimizability of the corresponding fully discrete-time problem (see \cite[Theorem~3]{dorato1971} or~\cite[p.348]{levis1971}). In the present work we will prove that, if~$\mathrm{(H_1)}$ and~$\mathrm{(H_2)}$ are satisfied, then there exists~$\overline{h} > 0$ such that the $h$-optimizability assumption~$\mathrm{(H}_2^h\mathrm{)}$ is satisfied for every $h\in(0,\overline{h}]$ (see Lemma~\ref{lemimportant} further). Moreover, in that context, a uniform bound of the minimal cost of Problem~$(\mathrm{OCP}^{\infty,\Delta}_{x_0})$ (independently of~$h\in(0,\overline{h}]$) is obtained. It plays a key role in order to prove convergence of~$E^{\infty,\Delta}$ to~$E^\infty$ when~$h=\Vert \Delta \Vert\to 0$.
We provide in Appendix~\ref{appthmriccsampleinf} a proof of Proposition~\ref{thmriccsampleinf} based on the $h$-optimizability assumption~$\mathrm{(H}^h_2\mathrm{)}$, by keeping the initial continuous-time formulation of Problem~$(\mathrm{OCP}^{\infty,\Delta}_{x_0})$ as in~\cite{bourdin2017}. This proof is an adaptation to the sampled-data control case of the proof of Proposition~\ref{thmriccperminf} (see \cite[p.153]{bressan2007}, \cite[Theorem~7 p.198]{lee1986} or~\cite[Theorem~4.13]{trelat2005}). Moreover it contains in particular the proof of convergence of $E^{T,\Delta}$ to $E^{\infty,\Delta}$ when~$T \to + \infty$. \end{remark}
\section{Main result}\label{secmain}
Propositions~\ref{thmriccperm}, \ref{thmriccsample}, \ref{thmriccperminf} and~\ref{thmriccsampleinf} in Section~\ref{secprelim} give state feedback optimal controls for permanent and sampled-data LQ problems in finite and infinite time horizons. In each case, the optimal control is expressed thanks to a Riccati matrix: $E^T$, $E^{T,\Delta}$, $E^\infty$ and $E^{\infty,\Delta}$ respectively. Our main result (Theorem~\ref{thmmain1} below) asserts that the following diagram commutes: \begin{equation*} \xymatrix@R=2cm@C=4cm { \mathrm{(SD\text{-}DRE)} \hspace{-5cm} & E^{T,\Delta} \ar[r]^{T \to +\infty} \ar[d]_{\Vert \Delta \Vert \to 0} & E^{\infty,\Delta} \ar[d]^{\Vert \Delta \Vert \to 0} & \hspace{-5cm} \mathrm{(SD\text{-}ARE)} \\ \mathrm{(P\text{-}DRE)} \hspace{-5cm} & E^T \ar[r]_{T \to +\infty} & E^\infty & \hspace{-5cm} \mathrm{(P\text{-}ARE)}
} \end{equation*} The precise mathematical meaning of the above convergences is provided in the next theorem which is the main contribution of the present work. Let us first state the following lemma (proved in Appendix~\ref{applemimportant}).
\begin{lemma}\label{lemimportant} In the autonomous setting (see Definition~\ref{defautonomous}), under Assumptions~$\mathrm{(H_1)}$ and~$\mathrm{(H_2)}$, there exist~$\overline{h} > 0$ and $\overline{c} \geq 0$ such that, for all $h$-uniform time partitions $\Delta$ of the interval~$[0,+\infty)$, with~$0 < h \leq \overline{h}$, and for every~$x_0 \in \mathbb{R}^n$, there exists a pair $(x,u) \in \mathrm{AC}([0,+\infty),\mathbb{R}^n) \times \mathrm{PC}^\Delta ([0,+\infty),\mathbb{R}^m)$ such that~$\dot{x}(t) = Ax(t) + Bu(t)$ for almost every $t \geq 0$ and~$x(0)=x_0$, satisfying $$ \int_0^{+\infty} \Big( \langle Q x(\tau) , x(\tau) \rangle_{\mathbb{R}^n} + \langle R u(\tau) , u(\tau) \rangle_{\mathbb{R}^m} \Big) \; d\tau \leq \overline{c} \langle E^\infty x_0 , x_0 \rangle_{\mathbb{R}^n} < +\infty. $$ \end{lemma}
Not only does Lemma~\ref{lemimportant} assert that, if $\mathrm{(H_1)}$ and~$\mathrm{(H_2)}$ are satisfied, then there exists~$\overline{h} > 0$ such that~$\mathrm{(H}_2^h\mathrm{)}$ is satisfied for every $h\in(0,\overline{h}]$, but it also provides a \textit{uniform} $h$-optimizability for all~$0 < h \leq \overline{h}$ (in the sense that the finite right-hand term is independent of $h$). This uniform bound plays a crucial role in deriving the convergence of $E^{\infty,\Delta}$ to $E^\infty$ when $h = \Vert \Delta \Vert \to 0$ (which corresponds to the right arrow of the above diagram and to the fourth item of Theorem~\ref{thmmain1} below). Finally, from the proof of Lemma~\ref{lemimportant} in Appendix~\ref{applemimportant}, note that a lower bound of the threshold~$\overline{h} > 0$ can be expressed as a function of the norms of~$A$, $B$, $Q$, $R$ and~$E^\infty$.
\begin{theorem}[Commutative diagram]\label{thmmain1} We have the following convergence results: \begin{enumerate} \item[\rm{(i)}] \textbf{Left arrow of the diagram:} Given any $T > 0$, we have $$ \lim\limits_{ \Vert \Delta \Vert \to 0} \ \ \underset{i=0,\ldots,N}{{\mathrm{max}}} \Vert E^T(t_i) - E^{T,\Delta}_i \Vert_{\mathbb{R}^{n\times n}} = 0 $$ for all time partitions $\Delta = \{ t_i \}_{i=0,\ldots,N}$ of the interval~$[0,T]$. \item[\rm{(ii)}] \textbf{Bottom arrow of the diagram:} Assume that $P=0_{\mathbb{R}^{n\times n}}$ and that we are in the autonomous setting (see Definition~\ref{defautonomous}). Under Assumptions~$\mathrm{(H_1)}$ and $\mathrm{(H_2)}$, we have $$ \lim\limits_{ T \to +\infty } E^T (t) = E^\infty \qquad \forall t \geq 0. $$ \item[\rm{(iii)}] \textbf{Top arrow of the diagram:} Assume that $P=0_{\mathbb{R}^{n\times n}}$ and that we are in the autonomous setting (see Definition~\ref{defautonomous}). Let $\Delta = \{ t_i \}_{i \in \mathbb{N}} $ be a $h$-uniform time partition of the interval~$[0,+\infty)$. For all~$N \in \mathbb{N}^*$, we denote by~$\Delta_N := \Delta \cap [0,t_N]$ the~$h$-uniform time partition of the interval~$[0,t_N]$. Under Assumptions~$\mathrm{(H_1)}$ and $\mathrm{(H}^h_2\mathrm{)}$, we have $$ \lim\limits_{ N \to +\infty } E^{t_N , \Delta_N }_i = E^{\infty , \Delta} \qquad \forall i \in \mathbb{N}. $$ \item[\rm{(iv)}] \textbf{Right arrow of the diagram:} In the autonomous setting (see Definition~\ref{defautonomous}), under Assumptions~$\mathrm{(H_1)}$ and $\mathrm{(H_2)}$, we have $$ \lim\limits_{ h \to 0} E^{\infty,\Delta} = E^\infty $$ for all $h$-uniform time partitions $\Delta = \{ t_i \}_{i \in \mathbb{N}}$ of the interval $[0,+\infty)$ with $0 < h \leq \overline{h}$ (where $\overline{h} > 0$ is given by Lemma~\ref{lemimportant}). \end{enumerate} \end{theorem}
\begin{remark} The proof of Theorem~\ref{thmmain1} is given in Appendix~\ref{appthmmain1}. Some results similar to the four items of Theorem~\ref{thmmain1} have already been discussed and can be found in the literature. For example, in the autonomous case and with $h$-uniform time partitions, the first item of Theorem~\ref{thmmain1} has been proved in~\cite[Corollary~2.3]{astrom1963} (a second-order convergence has even been derived). The second item of Theorem~\ref{thmmain1} is a well-known fact and follows from the proof of Proposition~\ref{thmriccperminf} (see \cite[p.153]{bressan2007}, \cite[Theorem~7]{lee1986} or \cite[Theorem~4.13]{trelat2005}). The third item of Theorem~\ref{thmmain1} follows from the proof of Proposition~\ref{thmriccsampleinf} given in Appendix~\ref{appthmriccsampleinf} by keeping the initial continuous-time formulation of Problem~$(\mathrm{OCP}^{\infty,\Delta}_{x_0})$. As mentioned in Remarks~\ref{remanalog} and~\ref{remanalog2}, in the literature, the LQ optimal sampled-data control problems are usually rewritten as fully discrete-time LQ optimal control problems. As a consequence, the result of the third item of Theorem~\ref{thmmain1} is usually reduced in the literature to the corresponding result at the discrete level (see \cite[Theorem~3]{dorato1971} or~\cite[p.348]{levis1971}). The last item of Theorem~\ref{thmmain1} is proved in Appendix~\ref{appthmmain1} by using the uniform $h$-optimizability obtained in Lemma~\ref{lemimportant}. Note that a sensitivity analysis of $\mathrm{(SD\text{-}ARE)}$ with respect to $h$ has been explored in~\cite{fukata1979,levis1968,levis1971,melzer1971} by computing its derivative algebraically, with a view to optimizing the sampling period~$h$. Note that the map~$\mathcal{F}$ defined in Section~\ref{secF} is a suitable candidate in order to invoke the classical implicit function theorem and justify the differentiability of~$E^{\infty,\Delta}$ with respect to~$h$. Finally, the contribution of the present work is to provide a framework allowing us to gather Propositions~\ref{thmriccperm}, \ref{thmriccsample}, \ref{thmriccperminf} and~\ref{thmriccsampleinf} in a unified setting, based on the continuous map~$\mathcal{F}$, which moreover allows us to prove several convergence results for Riccati matrices and to summarize them in a single diagram. \end{remark}
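The convergences of the diagram can also be observed numerically. Since the map $\mathcal{F}$ entering $\mathrm{(SD\text{-}DRE)}$ and $\mathrm{(SD\text{-}ARE)}$ is defined earlier in the paper and is not reproduced here, the sketch below (our own illustration) instead uses the fully discrete-time recasting mentioned in Remarks~\ref{remanalog} and~\ref{remanalog2}: exact zero-order-hold discretization of the dynamics together with the crude first-order approximations $hQ$, $hR$ of the interval cost. The resulting discrete Riccati matrices are therefore only a stand-in for $E^{T,\Delta}$ and $E^{\infty,\Delta}$, sufficient to visualize the convergences towards $E^T(0)$ and $E^\infty$ as $h \to 0$; all data are arbitrary choices.

\begin{verbatim}
# Illustration only (not the exact E^{T,Delta}): discrete-time recasting with
# exact ZOH dynamics and first-order cost quadrature, compared with E^T(0)
# (backward integration of P-DRE) and with E^infty (P-ARE).
import numpy as np
from scipy.linalg import expm, solve_continuous_are, solve_discrete_are
from scipy.integrate import solve_ivp

n, m = 2, 1
A = np.array([[0.0, 1.0], [-1.0, 0.5]])
B = np.array([[0.0], [1.0]])
Q, R, P = np.eye(n), np.array([[1.0]]), np.zeros((n, n))

def zoh(h):
    """Exact zero-order-hold discretization: x(t_{i+1}) = Ad x(t_i) + Bd u_i."""
    M = np.zeros((n + m, n + m))
    M[:n, :n], M[:n, n:] = A, B
    E = expm(M * h)
    return E[:n, :n], E[:n, n:]

def E_T0(T):
    """E^T(0) by backward integration of (P-DRE), terminal condition P."""
    def rhs(s, y):                      # s = T - t
        E = y.reshape(n, n)
        F = E @ B @ np.linalg.solve(R, B.T) @ E - Q - A.T @ E - E @ A
        return (-F).ravel()
    return solve_ivp(rhs, (0.0, T), P.ravel(), rtol=1e-10).y[:, -1].reshape(n, n)

def sampled_E0(T, h):
    """Stand-in for E^{T,Delta}_0: discrete Riccati recursion on the ZOH model."""
    Ad, Bd = zoh(h)
    S = P.copy()
    for _ in range(int(round(T / h))):
        G = np.linalg.solve(h * R + Bd.T @ S @ Bd, Bd.T @ S @ Ad)
        S = h * Q + Ad.T @ S @ (Ad - Bd @ G)
    return S

E_inf = solve_continuous_are(A, B, Q, R)
T = 10.0
for h in [0.5, 0.1, 0.02]:
    Ad, Bd = zoh(h)
    S_inf = solve_discrete_are(Ad, Bd, h * Q, h * R)
    print(f"h = {h:5.2f}   ||S_0 - E^T(0)|| = "
          f"{np.linalg.norm(sampled_E0(T, h) - E_T0(T)):.2e}   "
          f"||S_inf - E^inf|| = {np.linalg.norm(S_inf - E_inf):.2e}")
\end{verbatim}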
\appendix
\section{Proofs}\label{app1}
Preliminaries and reminders are gathered in Section~\ref{appprelim}. We prove Proposition~\ref{thmriccsampleinf} in Section~\ref{appthmriccsampleinf}, Lemma~\ref{lemimportant} in Section~\ref{applemimportant} and Theorem~\ref{thmmain1} in Section~\ref{appthmmain1}.
\subsection{Preliminaries}\label{appprelim}
\begin{lemma}[A backward discrete Gr\"onwall lemma]\label{lemgronwall} Let $N \in \mathbb{N}^*$ and $(w_i)_{i=0,\ldots,N}$, $(z_i)_{i=1,\ldots,N}$ and $(\mu_i)_{i=1,\ldots,N}$ be three finite nonnegative real sequences which satisfy $w_N = 0$ and $$ w_i \leq (1+\mu_{i+1})w_{i+1} + z_{i+1} \qquad \forall i = 0,\ldots,N-1. $$ Then $$ w_i \leq \sum_{j=i+1}^N \left( \prod_{q=i+1}^{j-1} (1+\mu_q) \right) z_j \leq \sum_{j=i+1}^N e^{ \sum_{q=i+1}^{j-1} \mu_q } z_j \qquad \forall i=0,\ldots,N-1. $$ \end{lemma}
\begin{proof} The first inequality follows from a backward induction. The second inequality comes from the inequality $1+\mu \leq e^\mu$ for all $\mu \geq 0$. \end{proof}
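A quick numerical sanity check of this bound (ours; random nonnegative sequences, with equality taken in the recursive inequality, which is the extremal case) is given below; it is of course not needed anywhere in the proofs.

\begin{verbatim}
# Sanity check of the backward discrete Gronwall bound on a random instance.
import numpy as np
rng = np.random.default_rng(0)
N = 50
mu = rng.uniform(0.0, 0.1, N + 1)    # mu_1, ..., mu_N (index 0 unused)
z = rng.uniform(0.0, 1.0, N + 1)     # z_1, ..., z_N  (index 0 unused)

w = np.zeros(N + 1)                  # w_N = 0, equality in the recursion
for i in range(N - 1, -1, -1):
    w[i] = (1 + mu[i + 1]) * w[i + 1] + z[i + 1]

bound = [sum(np.exp(mu[i + 1:j].sum()) * z[j] for j in range(i + 1, N + 1))
         for i in range(N)]
assert all(w[i] <= bound[i] + 1e-12 for i in range(N))
print("backward discrete Gronwall bound verified")
\end{verbatim}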
\begin{lemma}[Some reminders on symmetric matrices]\label{lemmatrice} Let $p \in \mathbb{N}^*$. The following properties are satisfied: \begin{enumerate} \item[\rm{(i)}] Let $E \in \mathcal{S}^p_+$ (resp., $E \in \mathcal{S}^p_{++}$). Then all eigenvalues of $E$ are nonnegative (resp., positive) real numbers. \item[\rm{(ii)}] Let $E \in \mathcal{S}^p_{+}$. Then $ \rho_{\mathrm{min}}(E) \Vert y \Vert_{\mathbb{R}^p}^2 \leq \langle Ey,y \rangle_{\mathbb{R}^p} \leq \rho_{\mathrm{max}}(E) \Vert y \Vert_{\mathbb{R}^p}^2 $ for all $y \in \mathbb{R}^p$, where $\rho_{{\mathrm{min}}}(E)$ and~$\rho_{{\mathrm{max}}}(E)$ stand respectively for the smallest and the largest nonnegative eigenvalues of $E$. \item[\rm{(iii)}] Let $E \in \mathcal{S}^p_{++}$. Then $E$ is invertible and $E^{-1} \in \mathcal{S}^p_{++}$. Moreover we have~$\rho_{{\mathrm{min}}}(E^{-1}) = 1/\rho_{\mathrm{max}}(E)$ and~$\rho_{{\mathrm{max}}}(E^{-1}) = 1/\rho_{\mathrm{min}}(E)$. \item[\rm{(iv)}] Let $E \in \mathcal{S}^p_+$. It holds that $\Vert E \Vert_{\mathbb{R}^{p \times p}} = \rho_{{\mathrm{max}}}(E) $. \item[\rm{(v)}] Let $E \in \mathcal{S}^p_+$. If there exists $c \geq 0$ such that $\langle E y , y \rangle_{\mathbb{R}^p} \leq c \Vert y \Vert^2_{\mathbb{R}^p}$ for every $y \in \mathbb{R}^p$, then $\Vert E \Vert_{\mathbb{R}^{p \times p}} \leq c$. \item[\rm{(vi)}] Let $E_1$, $E_2 \in \mathcal{S}^p_+$. If $\langle E_1 y , y \rangle_{\mathbb{R}^p} = \langle E_2 y , y \rangle_{\mathbb{R}^p}$ for every $y \in \mathbb{R}^p$ then $E_1 = E_2$. \item[\rm{(vii)}] Let $(E_k)_{k \in \mathbb{N}}$ be a sequence of matrices in $ \mathcal{S}^p_+$. If $\langle E_k y , y \rangle_{\mathbb{R}^p}$ converges when $k \to +\infty$ for all~$y \in \mathbb{R}^p$ then~$(E_k)_{k \in \mathbb{N}}$ has a limit $E \in \mathcal{S}^p_+$. \end{enumerate} \end{lemma}
\begin{proof} The first four items are classical results (see, e.g.,~\cite{horn2013}). The fifth item follows from the fourth one. The last two items follow from the following fact: if $E \in \mathcal{S}^p_+$, with $E = (e_{ij})_{i,j=1,\ldots,p}$, then $$ e_{ij} = \langle E b_j , b_i \rangle_{\mathbb{R}^p} = \dfrac{1}{2} \Big( \langle E (b_i+b_j) , b_i+b_j \rangle_{\mathbb{R}^p} - \langle E b_i , b_i \rangle_{\mathbb{R}^p} - \langle E b_j , b_j \rangle_{\mathbb{R}^p} \Big) \qquad \forall i, j = 1, \ldots,p $$ where $\{ b_i \}_{i=1,\ldots,p}$ stands for the canonical basis of $\mathbb{R}^p$. \end{proof}
\begin{lemma}[Properties of the function~$\mathcal{F}$]\label{lemF} The three following properties are satisfied: \begin{enumerate}[label=\rm{(\roman*)}] \item The map $\mathcal{F}$ is well-defined on $\mathbb{R} \times \mathcal{S}^n_+ \times \mathbb{R}_+$. \item The map $\mathcal{F}$ is continuous on $\mathbb{R} \times \mathcal{S}^n_+ \times \mathbb{R}_+$. \item If $\mathcal{K}$ is a compact subset of $\mathbb{R} \times \mathcal{S}^n_+ \times \mathbb{R}_+$, then there exists a constant $c \geq 0$ such that $$ \Vert \mathcal{F}(t,E_2,h) - \mathcal{F}(t,E_1,h) \Vert_{\mathbb{R}^{n\times n}} \leq c \Vert E_2-E_1 \Vert_{\mathbb{R}^{n\times n}} $$ for all $(t,E_1,E_2,h)$ such that $(t,E_1,h) \in \mathcal{K}$ and $(t,E_2,h) \in \mathcal{K}$. \end{enumerate} \end{lemma}
\begin{proof} {\rm (i)} For $(t,E,h) \in \mathbb{R} \times \mathcal{S}^n_+ \times \mathbb{R}_+$, note that $\mathcal{N}_1(t,E,h) \in \mathcal{S}^m_{++}$, $\mathcal{N}_2(t,E,h) \in \mathcal{S}^m_+$ and $\mathcal{N}_3(t,E,h) \in \mathcal{S}^m_+$. Hence the sum $\mathcal{N}(t,E,h) $ belongs to~$\mathcal{S}^m_{++}$ and thus is invertible from~(iii) of Lemma~\ref{lemmatrice}.
{\rm (ii)} Since taking the inverse of a matrix is a continuous operation, we only need to prove that $\mathcal{M}$, $\mathcal{N}$ and~$\mathcal{G}$ are continuous over~$\mathbb{R} \times \mathcal{S}^n_+ \times \mathbb{R}_+$. Let~$(t_k,E_k,h_k)_{k\in \mathbb{N}}$ be a sequence of $\mathbb{R} \times \mathcal{S}^n_+ \times \mathbb{R}_+$ which converges to some~$(t,E,h) \in \mathbb{R} \times \mathcal{S}^n_+ \times \mathbb{R}_+$. We need to prove that~$\mathcal{M}(t_k,E_k,h_k)$, $\mathcal{N}(t_k,E_k,h_k)$ and $\mathcal{G}(t_k,E_k,h_k)$ converge respectively to~$\mathcal{M} (t,E,h)$, $\mathcal{N}(t,E,h)$ and $\mathcal{G} (t,E,h)$ when~$k \to +\infty$. The case $h \neq 0$ can be treated using, for instance, the Lebesgue dominated convergence theorem. Let us discuss the case $h=0$ and let us assume, without loss of generality (since~$A$, $B$, $Q$ and $R$ are continuous matrices), that $h_k > 0$ for every $k \in \mathbb{N}$. In that situation we conclude by using in particular the fact that $t$ is a Lebesgue point of all integrands involved in the definitions of the functions~$\mathcal{M}$, $\mathcal{N}$ and $\mathcal{G}$.
{\rm (iii)} It is clear that $\mathcal{F}$ is continuously differentiable over $\mathcal{S}^n_+$ with respect to its second variable. Similarly to the previous item, we can moreover prove that the map $(t,E,h) \mapsto \mathcal{D}_2 \mathcal{F}(t,E,h)$ is continuous over~$\mathbb{R} \times \mathcal{S}^n_+ \times \mathbb{R}_+$. Thus the third item follows by applying the Taylor expansion formula with integral remainder. \end{proof}
\begin{lemma}[A uniform bound for $E^T$ and $E^{T,\Delta}$]\label{lembound} Let $T > 0$. We have
$$ \Vert E^T(t) \Vert_{\mathbb{R}^{n\times n}} \leq \Big( \Vert P \Vert_{\mathbb{R}^{n\times n}} + (T-t) \Vert Q_{|[t,T]} \Vert_{\infty} \Big) e^{2 \Vert A_{|[t,T]} \Vert_{\infty} (T-t)} \qquad \forall t \in [0,T]. $$ If $\Delta = \{ t_i \}_{i=0,\ldots,N}$ is a time partition of the interval $[0,T]$, then
$$ \Vert E^{T,\Delta}_i \Vert_{\mathbb{R}^{n\times n}} \leq \Big( \Vert P \Vert_{\mathbb{R}^{n\times n}} + (T-t_i) \Vert Q_{|[t_i,T]} \Vert_{\infty} \Big) e^{2 \Vert A_{|[t_i,T]} \Vert_{\infty} (T-t_i)} \qquad \forall i=0,\ldots,N. $$ \end{lemma}
\begin{proof} Let us prove the first part of Lemma~\ref{lembound}. We first deal with the case $t=0$. Taking the null control in Problem~$(\mathrm{OCP}^T_y)$ and using the Duhamel formula, we deduce that its minimal cost satisfies
$$ \langle E^T(0)y,y \rangle_{\mathbb{R}^n} \leq \Big( \Vert P \Vert_{\mathbb{R}^{n\times n}} + T \Vert Q_{|[0,T]} \Vert_{\infty} \Big) e^{2T \Vert A_{|[0,T]} \Vert_{\infty} } \Vert y \Vert_{\mathbb{R}^n}^2 \qquad \forall y \in \mathbb{R}^n. $$ The result at $t=0$ then follows from (v) in Lemma~\ref{lemmatrice}. The case $0 < t < T$ can be treated similarly by considering the restriction of Problem~$(\mathrm{OCP}^T_y)$ to the time interval $[t,T]$ (instead of $[0,T]$). Finally the case~$t=T$ is obvious since $E^T(T) = P$. The second part of Lemma~\ref{lembound} is derived in a similar way. \end{proof}
\begin{lemma}[Zero limit of finite cost trajectories at infinite time horizon]\label{leminfzero} In the autonomous setting (see Definition~\ref{defautonomous}), under Assumption $\mathrm{(H_1)}$, for every $(x,u) \in \mathrm{AC}([0,+\infty),\mathbb{R}^n) \times \mathrm{L}^2 ([0,+\infty),\mathbb{R}^m)$ such that~$\dot{x}(t) = Ax(t) + Bu(t)$ for almost every $t \geq 0$ and satisfying $$ \int_0^{+\infty} \Big( \langle Q x(\tau) , x(\tau) \rangle_{\mathbb{R}^n} + \langle R u(\tau) , u(\tau) \rangle_{\mathbb{R}^m} \Big) \; d\tau < +\infty , $$ we have $ \lim_{t \to +\infty} x(t) = 0_{\mathbb{R}^n} $. \end{lemma}
\begin{proof} Since $Q \in \mathcal{S}^n_{++}$, we have $\Vert x(t) \Vert^2_{\mathbb{R}^n} \leq \frac{1}{\rho_{\mathrm{min}} (Q)} \langle Q x(t) , x(t) \rangle_{\mathbb{R}^n}$ for all $t \geq 0$. Using the assumptions we deduce that $x \in \mathrm{L}^2 ([0,+\infty),\mathbb{R}^n)$. Let us introduce $X \in \mathrm{AC}([0,+\infty),\mathbb{R})$ defined by~$X(t) := \Vert x(t) \Vert^2_{\mathbb{R}^n} \geq 0$ for all $t \geq 0$. Since~$\dot{X}(t) = 2 \langle A x(t) + Bu(t), x(t) \rangle_{\mathbb{R}^n} $ for almost every $t \geq 0$, we deduce that~$\dot{X} \in \mathrm{L}^1([0,+\infty),\mathbb{R})$ and thus~$X(t)$ admits a limit $\ell \geq 0$ when $t \to +\infty$. By contradiction let us assume that $\ell > 0$. Then there exists $s \geq 0$ such that $X(t) \geq \frac{\ell}{2} > 0$ for all~$t \geq s$. We get that \begin{multline*} \int_0^{\overline{t}} \Big( \langle Q x(\tau) , x(\tau) \rangle_{\mathbb{R}^n} + \langle R u(\tau) , u(\tau) \rangle_{\mathbb{R}^m} \Big) \; d\tau \geq \rho_{\mathrm{min}} (Q) \left( \int_0^{\overline{t}} X(\tau) \; d\tau \right) \\ = \rho_{\mathrm{min}} (Q) \left( \int_0^{s} X(\tau) \; d\tau + \int_s^{\overline{t}} X(\tau) \; d\tau \right) \geq \rho_{\mathrm{min}} (Q) \left( \int_0^{s} X(\tau) \; d\tau + (\overline{t}-s) \dfrac{\ell}{2} \right) \qquad \forall \overline{t} \geq s. \end{multline*} A contradiction is obtained by letting $\overline{t} \to + \infty$. \end{proof}
\subsection{Proof of Proposition~\ref{thmriccsampleinf}}\label{appthmriccsampleinf}
This proof is inspired by the proof of Proposition~\ref{thmriccperminf} (see \cite[p.153]{bressan2007}, \cite[Theorem~7 p.198]{lee1986} or \cite[Theorem~4.13]{trelat2005}) and is an adaptation to the sampled-data control case. We denote by~$\Delta_N := \Delta \cap [0,t_N]$ the~$h$-uniform time partition of the interval~$[0,t_N]$ for every $N \in \mathbb{N}^*$.
\paragraph{Existence and uniqueness of the optimal solution.} Let $x_0 \in \mathbb{R}^n$. For every $u \in \mathrm{L}^2([0,+\infty),\mathbb{R}^m)$, we denote by $x(\cdot,u) \in \mathrm{AC}([0,+\infty),\mathbb{R}^n)$ the unique solution to the Cauchy problem $$ \left\lbrace \begin{array}{l} \dot{x}(t) = A x(t) + Bu(t) \qquad \text{for a.e.}\ t \geq 0, \\[5pt] x(0)= x_0. \end{array} \right. $$ We define the cost function $$ \fonction{\mathcal{C}}{\mathrm{L}^2([0,+\infty),\mathbb{R}^m)}{\mathbb{R} \cup \{ +\infty \} }{u}{\mathcal{C} (u) := \displaystyle \int_0^{+\infty} \Big( \langle Q x(\tau,u) , x(\tau,u) \rangle_{\mathbb{R}^n} + \langle R u(\tau) , u(\tau) \rangle_{\mathbb{R}^m} \Big) \; d\tau. } $$ Problem~$(\mathrm{OCP}^{\infty,\Delta}_{x_0})$ can be recast as $ {\mathrm{min}} \{ \mathcal{C}(u) \mid u \in \mathrm{PC}^\Delta ([0,+\infty),\mathbb{R}^m)\}$. Since $\mathrm{(H}^h_2\mathrm{)}$ is satisfied, we have $$ \mathcal{C}^* := \inf\{ \mathcal{C}(u) \mid u \in \mathrm{PC}^\Delta ([0,+\infty),\mathbb{R}^m)\} < +\infty. $$ Let us consider a minimizing sequence~$(u_k)_{k \in \mathbb{N}} \subset \mathrm{PC}^\Delta ([0,+\infty),\mathbb{R}^m)$ and, without loss of generality, we assume that $\mathcal{C} (u_k) < +\infty$ for every $k \in \mathbb{N}$. Since $R \in \mathcal{S}^n_{++}$, we deduce that the sequence $(u_k)_{k \in \mathbb{N}}$ is bounded in~$\mathrm{L}^2([0,+\infty),\mathbb{R}^m)$ and thus, up to a subsequence (that we do not relabel), converges weakly to some $u^* \in \mathrm{L}^2([0,+\infty),\mathbb{R}^m)$. Since $\mathrm{PC}^\Delta ([0,+\infty),\mathbb{R}^m)$ is a weakly closed subspace of $\mathrm{L}^2([0,+\infty),\mathbb{R}^m)$, it follows that $u^* \in \mathrm{PC}^\Delta ([0,+\infty),\mathbb{R}^m)$. Moreover, denoting by $x_k := x(\cdot,u_k)$ for every $k \in \mathbb{N}$, the Duhamel formula gives $$ x_k (t) = e^{tA} x_0 + \int_0^t e^{(t-\tau)A} B u_k(\tau) \; d\tau \qquad \forall t \geq 0 \qquad \forall k \in \mathbb{N}. $$ By weak convergence we get that, for every $t\geq 0$, the sequence $(x_k(t))_{k \in \mathbb{N}}$ converges pointwise on $[0,+\infty)$ to $$ x^* (t) := e^{tA} x_0 + \int_0^t e^{(t-\tau)A} B u^*(\tau) \; d\tau . $$ Then, obviously, $x^* = x(\cdot,u^*)$. Moreover, by Fatou's lemma (see, e.g.,~\cite[Lemma~4.1]{brezis2011}) and by weak convergence, we get that \begin{multline*} \mathcal{C}^* = \lim_{k \to + \infty} \mathcal{C}(u_k) = \liminf_{k \to + \infty} \mathcal{C}(u_k) = \liminf_{k \to +\infty} \int_0^{+\infty}\Big( \langle Q x_k(\tau) , x_k(\tau) \rangle_{\mathbb{R}^n} + \langle R u_k(\tau) , u_k(\tau) \rangle_{\mathbb{R}^m} \Big) \; d\tau \\
\geq \liminf_{k \to +\infty} \int_0^{+\infty} \langle Q x_k(\tau) , x_k(\tau) \rangle_{\mathbb{R}^n} \; d\tau + \liminf_{k \to +\infty} \Vert u_k \Vert^2_{\mathrm{L}^2_R} \\
\geq \int_0^{+\infty} \langle Q x^*(\tau) , x^*(\tau) \rangle_{\mathbb{R}^n} \; d\tau + \Vert u^* \Vert^2_{\mathrm{L}^2_R} = \int_0^{+\infty} \left( \langle Q x^*(\tau) , x^*(\tau) \rangle_{\mathbb{R}^n} + \langle R u^*(\tau) , u^*(\tau) \rangle_{\mathbb{R}^m} \right) d\tau = \mathcal{C}(u^*) \end{multline*} where the norm defined by $ \Vert u \Vert_{\mathrm{L}^2_R} := ( \int_0^{+\infty} \langle R u(\tau),u(\tau) \rangle_{\mathbb{R}^m} \; d\tau )^{1/2}$ for every $u \in \mathrm{L}^2([0,+\infty),\mathbb{R}^m)$ is equivalent to the usual one since $R \in \mathcal{S}^m_{++}$. We conclude that $(x^*,u^*)$ is an optimal solution to $(\mathrm{OCP}^{\infty,\Delta}_{x_0})$.
Let us prove uniqueness. Note that $x(\cdot,\lambda u + (1-\lambda) v) = \lambda x(\cdot,u) + (1-\lambda) x(\cdot,v)$ for all $u$, $v \in \mathrm{L}^2([0,+\infty),\mathbb{R}^m)$ and all $\lambda \in [0,1]$. Hence, since moreover $Q \in \mathcal{S}^n_{++}$ and $R \in \mathcal{S}^m_{++}$, the cost function $\mathcal{C}$ is strictly convex and thus the optimal solution to $(\mathrm{OCP}^{\infty,\Delta}_{x_0})$ is unique.
\paragraph{Existence of a solution to $\mathrm{(SD\text{-}ARE)}$.} Let us introduce the sequence $(D_i )_{i \in \mathbb{N}} \subset \mathbb{R}^{n\times n}$ being the solution to the forward matrix induction given by $$ \left\lbrace \begin{array}{l} D_{i+1}-D_i = - h \mathcal{F}(D_{i},h) \qquad \forall i \in \mathbb{N}, \\[5pt] D_0 = 0_{\mathbb{R}^{n\times n}}. \end{array} \right. $$ Taking $P = 0_{\mathbb{R}^{n\times n}}$, one has $D_i = E^{t_N,\Delta_N}_{N-i}$ for every $i = 0, \ldots,N$ and every $N \in \mathbb{N}^*$. Hence the sequence~$ ( D_i )_{i \in \mathbb{N}} $ is well defined and is in $\mathcal{S}^n_+$.
Our aim now is to prove that the sequence $ ( D_i )_{i \in \mathbb{N}}$ converges. Let $x_0 \in \mathbb{R}^n$. We denote by $$ M := \int_0^{+\infty} \Big( \langle Q x(\tau) , x(\tau) \rangle_{\mathbb{R}^n} + \langle R u(\tau) , u(\tau) \rangle_{\mathbb{R}^m} \Big) \; d\tau < +\infty $$ where $(x,u) \in \mathrm{AC}([0,+\infty),\mathbb{R}^n) \times \mathrm{PC}^\Delta ([0,+\infty),\mathbb{R}^m)$ is the pair provided in $\mathrm{(H}_2^h\mathrm{)}$. Since the minimal cost of~$(\mathrm{OCP}^{t_N,\Delta_N}_{x_0})$ (with $P = 0_{\mathbb{R}^{n\times n}}$) is given by $\langle E^{t_N,\Delta_N}_0 x_0 , x_0 \rangle_{\mathbb{R}^n} = \langle D_N x_0,x_0 \rangle_{\mathbb{R}^n}$ and is increasing with respect to $N$, we deduce that $\langle D_N x_0,x_0 \rangle_{\mathbb{R}^n}$ is increasing with respect to $N$. Since it is also bounded by $M$, we deduce that it converges when $N \to +\infty$. By~(vii) of Lemma~\ref{lemmatrice}, we conclude that the sequence $ ( D_i )_{i \in \mathbb{N}}$ in $\mathcal{S}^n_+$ converges to some $D \in \mathcal{S}^n_+$ which satisfies~$\mathcal{F}(D,h) = 0_{\mathbb{R}^{n\times n}}$ by continuity of $\mathcal{F}$ (see Lemma~\ref{lemF}).
\paragraph{Positive definiteness of $D$.} Let $x_0 \in \mathbb{R}^n \backslash \{ 0 \}$. Since $Q \in \mathcal{S}^n_{++}$, the minimal cost of~$(\mathrm{OCP}^{t_N,\Delta_N}_{x_0})$ (with~$P = 0_{\mathbb{R}^{n\times n}}$) given by $\langle E^{t_N,\Delta_N}_0 x_0 , x_0 \rangle_{\mathbb{R}^n} = \langle D_N x_0 , x_0 \rangle_{\mathbb{R}^n} $ for every~$N \in \mathbb{N}^*$ is positive. Since $\langle D_N x_0 , x_0 \rangle_{\mathbb{R}^n}$ is increasing with respect to $N$ and converges to $\langle D x_0 , x_0 \rangle_{\mathbb{R}^n}$, we deduce that $\langle D x_0 , x_0 \rangle_{\mathbb{R}^n} > 0$ and thus~$D \in \mathcal{S}^n_{++}$.
\paragraph{Lower bound of the minimal cost of $(\mathrm{OCP}^{\infty,\Delta}_{x_0})$.} Our aim in this paragraph is to prove that, if~$Z \in \mathcal{S}^n_+$ satisfies $\mathcal{F}(Z,h) = 0_{\mathbb{R}^{n\times n}}$, then $\langle Z x_0 , x_0 \rangle_{\mathbb{R}^n}$ is a lower bound of the minimal cost of $(\mathrm{OCP}^{\infty,\Delta}_{x_0})$ for every~$x_0 \in \mathbb{R}^n$.
Let $x_0 \in \mathbb{R}^n$. Let $(x,u) \in \mathrm{AC}([0,+\infty),\mathbb{R}^n) \times \mathrm{PC}^\Delta([0,+\infty),\mathbb{R}^m)$ be a pair such that~$\dot{x}(t) = Ax(t) + Bu(t)$ for almost every $t \geq 0$ and $x(0) = x_0$. Our objective is to prove that $$ \langle Z x_0 , x_0 \rangle_{\mathbb{R}^n} \leq \int_{0}^{+\infty} \Big( \langle Q x(\tau) , x(\tau) \rangle_{\mathbb{R}^n} + \langle R u(\tau) , u(\tau) \rangle_{\mathbb{R}^m} \Big) \; d\tau. $$ If the integral at the right-hand side is infinite, the result is obvious. Let us assume that the integral is finite. By Lemma~\ref{leminfzero}, $x(t)$ tends to $0_{\mathbb{R}^n}$ when $t \to +\infty$. By Proposition~\ref{thmriccsample}, the minimal cost of~$(\mathrm{OCP}^{t_N,\Delta_N}_{x_0})$ with~$P = Z$ is given by~$\langle E^{t_N,\Delta_N}_0 x_0 , x_0 \rangle_{\mathbb{R}^n}$ for every $N \in \mathbb{N}^*$. Since~$E^{t_N,\Delta_N}_N = Z$ and~$\mathcal{F}(Z,h) = 0_{\mathbb{R}^{n\times n}}$, from the backward matrix induction, we get that $E^{t_N,\Delta_N}_i = Z$ for every $i = 0, \ldots ,N$ and every $N \in \mathbb{N}^*$. In particular the minimal cost of $(\mathrm{OCP}^{t_N,\Delta_N}_{x_0})$ with $P = Z$ is given by $\langle Z x_0 , x_0 \rangle_{\mathbb{R}^n}$ for every $N \in \mathbb{N}^*$. Hence $$ \langle Z x_0 , x_0 \rangle_{\mathbb{R}^n} \leq \langle Z x(t_N),x(t_N) \rangle_{\mathbb{R}^n} + \int_0^{t_N} \Big( \langle Q x(\tau) , x(\tau) \rangle_{\mathbb{R}^n} + \langle R u(\tau) , u(\tau) \rangle_{\mathbb{R}^m} \Big) \; d\tau. $$ Taking the limit $N \to +\infty$, the proof is complete.
\paragraph{Upper bound of the minimal cost of $(\mathrm{OCP}^{\infty,\Delta}_{x_0})$.} Our aim in this paragraph is to prove that, if~$Z \in \mathcal{S}^n_+$ satisfies $\mathcal{F}(Z,h) = 0_{\mathbb{R}^{n\times n}}$, then $\langle Z x_0 , x_0 \rangle_{\mathbb{R}^n}$ is an upper bound of the minimal cost of $(\mathrm{OCP}^{\infty,\Delta}_{x_0})$ for every~$x_0 \in \mathbb{R}^n$. Denote by $\mathcal{M} := \mathcal{M} (Z,h)$, $\mathcal{N} := \mathcal{N}(Z,h)$ and $\mathcal{G} := \mathcal{G}(Z,h)$. We similarly use the notations~$\mathcal{M}_i$, $\mathcal{N}_i$ and~$\mathcal{G}_i$ for $i=1,2,3$ (see Section~\ref{secF} for details).
Let $x_0 \in \mathbb{R}^n$. Let $x \in \mathrm{AC}([0,+\infty),\mathbb{R}^n)$ be the unique solution to $$ \left\lbrace \begin{array}{l} \dot{x}(t) = A x(t) - B \mathcal{N}^{-1} \mathcal{M}^\top x(t_i) \qquad \text{for a.e.}\ t \in [t_i,t_{i+1}) \qquad \forall i \in \mathbb{N} \\[5pt] x(0) = x_0, \end{array} \right. $$ and let $u \in \mathrm{PC}^\Delta([0,+\infty),\mathbb{R}^m)$ defined by $u_i := - \mathcal{N}^{-1} \mathcal{M}^\top x(t_i)$ for every $i \in \mathbb{N}$. In particular~$\dot{x}(t) = Ax(t) + Bu(t)$ for almost every $t \geq 0$ and $x(0) = x_0$.
By the Duhamel formula, we have $x(t) = (\alpha_i(t) - \beta_i(t)) x(t_i)$ for all $t \in [t_i,t_{i+1})$ and every $i \in \mathbb{N}$, where $$ \alpha_i (t) := e^{(t-t_i)A} \quad \text{and} \quad \beta_i (t) := \left( \int_{0}^{t-t_i} e^{\xi A} \; d\xi \right) B \mathcal{N}^{-1} \mathcal{M}^\top \qquad \forall t \in [t_i,t_{i+1}) \qquad \forall i \in \mathbb{N}. $$ Using the above expressions of $\alpha_i$ and $\beta_i$, and after some computations, we get that $$ \int_{t_i}^{t_{i+1}}\Big( \langle Q x(\tau) , x(\tau) \rangle_{\mathbb{R}^n} + \langle R u(\tau) , u(\tau) \rangle_{\mathbb{R}^m} \Big) \; d\tau = h \langle W_1 x(t_i) , x(t_i) \rangle_{\mathbb{R}^n} \qquad \forall i \in \mathbb{N} $$ where $ W_1 := \mathcal{G}_1 + \mathcal{M} \mathcal{N}^{-1} \mathcal{N}_2 \mathcal{N}^{-1} \mathcal{M}^\top - 2 \mathcal{M}_2 \mathcal{N}^{-1} \mathcal{M}^\top + \mathcal{M} \mathcal{N}^{-1} \mathcal{N}_1 \mathcal{N}^{-1} \mathcal{M}^\top $. On the other hand, using again the above expressions of $\alpha_i$ and $\beta_i$, we compute $$ \langle Z x(t_i),x(t_i) \rangle_{\mathbb{R}^n} - \langle Z x(t_{i+1}),x(t_{i+1}) \rangle_{\mathbb{R}^n} = h \langle W_2 x(t_i),x(t_i) \rangle_{\mathbb{R}^n} \qquad \forall i \in \mathbb{N} $$ where $W_2 := - \mathcal{G}_2 + 2 \mathcal{M}_1 \mathcal{N}^{-1} \mathcal{M}^\top - \mathcal{M} \mathcal{N}^{-1} \mathcal{N}_3 \mathcal{N}^{-1} \mathcal{M}^\top$. Using that~$\mathcal{F}(Z,h) = \mathcal{M} \mathcal{N}^{-1} \mathcal{M}^\top - \mathcal{G} = 0_{\mathbb{R}^{n\times n}}$, we obtain $W_2 - W_1 = 0_{\mathbb{R}^{n\times n}}$ and thus $W_2 = W_1$. We deduce that $$ \int_{t_i}^{t_{i+1}} \Big( \langle Q x(\tau) , x(\tau) \rangle_{\mathbb{R}^n} + \langle R u(\tau) , u(\tau) \rangle_{\mathbb{R}^m} \Big) \; d\tau = \langle Z x(t_i),x(t_i) \rangle_{\mathbb{R}^n} - \langle Z x(t_{i+1}),x(t_{i+1}) \rangle_{\mathbb{R}^n} \qquad \forall i \in \mathbb{N}. $$ Summing these equalities and using that $Z \in \mathcal{S}^n_+$, we get $$ \int_{0}^{t_{N}}\Big( \langle Q x(\tau) , x(\tau) \rangle_{\mathbb{R}^n} + \langle R u(\tau) , u(\tau) \rangle_{\mathbb{R}^m} \Big) \; d\tau = \langle Z x_0 , x_0 \rangle_{\mathbb{R}^n} - \langle Z x(t_N) ,x(t_N) \rangle_{\mathbb{R}^n} \leq \langle Z x_0 , x_0 \rangle_{\mathbb{R}^n} \qquad \forall N \in \mathbb{N}^*. $$ Passing to the limit $N \to +\infty$, we finally obtain $$ \int_{0}^{+\infty} \Big( \langle Q x(\tau) , x(\tau) \rangle_{\mathbb{R}^n} + \langle R u(\tau) , u(\tau) \rangle_{\mathbb{R}^m} \Big) \; d\tau \leq \langle Z x_0 , x_0 \rangle_{\mathbb{R}^n}. $$ We deduce that $\langle Z x_0 , x_0 \rangle_{\mathbb{R}^n}$ is an upper bound of the minimal cost of $(\mathrm{OCP}^{\infty,\Delta}_{x_0})$ for every $x_0 \in \mathbb{R}^n$.
\paragraph{Minimal cost of $(\mathrm{OCP}^{\infty,\Delta}_{x_0})$ and state feedback control.} Let $x_0 \in \mathbb{R}^n$. By the previous paragraphs, since $D \in \mathcal{S}^n_{++} \subset \mathcal{S}^n_+$ satisfies $\mathcal{F}(D,h) = 0_{\mathbb{R}^{n\times n}}$, the minimal cost of $(\mathrm{OCP}^{\infty,\Delta}_{x_0})$ is equal to $\langle D x_0 , x_0 \rangle_{\mathbb{R}^n}$. Moreover, by the previous paragraph, denoting by $x \in \mathrm{AC}([0,+\infty),\mathbb{R}^n)$ the unique solution to $$ \left\lbrace \begin{array}{l} \dot{x}(t) = A x(t) - B \mathcal{N}(D,h)^{-1} \mathcal{M}(D,h)^\top x(t_i) \qquad \text{for a.e.}\ t \in [t_i,t_{i+1}) \qquad \forall i \in \mathbb{N} \\[5pt] x(0) = x_0, \end{array} \right. $$ and by $u \in \mathrm{PC}^\Delta([0,+\infty),\mathbb{R}^m)$ the control defined by $u_i := - \mathcal{N}(D,h)^{-1} \mathcal{M}(D,h)^\top x(t_i)$ for every $i \in \mathbb{N}$, we get that $\dot{x}(t) = Ax(t) + Bu(t)$ for almost every $t \geq 0$ and $x(0) = x_0$, and $$ \int_{0}^{+\infty} \Big( \langle Q x(\tau) , x(\tau) \rangle_{\mathbb{R}^n} + \langle R u(\tau) , u(\tau) \rangle_{\mathbb{R}^m} \Big) \; d\tau \leq \langle D x_0 , x_0 \rangle_{\mathbb{R}^n}. $$ Since $\langle D x_0 , x_0 \rangle_{\mathbb{R}^n}$ is the minimal cost of $(\mathrm{OCP}^{\infty,\Delta}_{x_0})$, the above inequality is actually an equality. By uniqueness of the optimal solution~$(x^*,u^*)$, we get that $(x,u) = (x^*,u^*)$ and thus the optimal sampled-data control~$u^*$ is given by $u^*_i = - \mathcal{N}(D,h)^{-1} \mathcal{M}(D,h)^\top x^*(t_i)$ for every $i \in \mathbb{N}$.
\paragraph{Uniqueness of the solution to $\mathrm{(SD\text{-}ARE)}$.} Assume that there exist $Z_1$, $Z_2 \in \mathcal{S}^n_+$ satisfying $\mathcal{F}(Z_1,h) = \mathcal{F}(Z_2,h) = 0_{\mathbb{R}^{n\times n}}$. By the previous paragraphs, the minimal cost of $(\mathrm{OCP}^{\infty,\Delta}_{x_0})$ is equal to $\langle Z_1 x_0 , x_0 \rangle_{\mathbb{R}^n} = \langle Z_2 x_0 , x_0 \rangle_{\mathbb{R}^n} $ for every $x_0 \in \mathbb{R}^n$. By (vi) of Lemma~\ref{lemmatrice}, we conclude that $Z_1 = Z_2$.
\paragraph{End of the proof.} Defining $E^{\infty,\Delta} := D \in \mathcal{S}^n_{++}$, the proof of Proposition~\ref{thmriccsampleinf} is complete.
\subsection{Proof of Lemma~\ref{lemimportant}}\label{applemimportant}
This proof is inspired by the techniques developed in~\cite{nesic1999} for preserving the stabilizing property of controls of nonlinear systems under sampling. We set $W := BR^{-1}B^\top E^\infty \in \mathbb{R}^{n\times n}$ where $E^\infty$ is given by Proposition~\ref{thmriccperminf}. Note that $E^\infty W \in \mathcal{S}^n_+$. Using $\mathrm{(P\text{-}ARE)}$, we obtain $$ 2 \langle E^\infty y , (A-W)y \rangle_{\mathbb{R}^n} = - \langle Q y , y \rangle_{\mathbb{R}^n} - \langle E^\infty W y, y \rangle_{\mathbb{R}^n} \leq -\rho_{\mathrm{min}} (Q) \Vert y \Vert^2_{\mathbb{R}^n} \qquad \forall y \in \mathbb{R}^n $$ where $ \rho_{\mathrm{min}} (Q) > 0 $ since $Q \in \mathcal{S}^n_{++}$. Let $\overline{h} > 0$ be such that $$ h \Vert A - W \Vert_{\mathbb{R}^{n\times n}} e^{h \Vert A \Vert_{\mathbb{R}^{n\times n}}} < 1 \qquad \text{and} \qquad 2 \rho_{\mathrm{max}}(E^\infty W) \dfrac{h \Vert A-W \Vert_{\mathbb{R}^{n\times n}} e^{h \Vert A \Vert_{\mathbb{R}^{n\times n}}}}{1 - h \Vert A-W \Vert_{\mathbb{R}^{n\times n}} e^{h \Vert A \Vert_{\mathbb{R}^{n\times n}}} } \leq \dfrac{\rho_{\mathrm{min}} (Q)}{2} $$ for every $h\in(0,\overline{h}]$.
Now, let $x_0 \in \mathbb{R}^n$ and let $\Delta = \{ t_i \}_{i \in \mathbb{N}}$ be a $h$-uniform time partition of the interval $[0,+\infty)$ satisfying~$h\in(0,\overline{h}]$. Let~$x \in \mathrm{AC}([0,+\infty),\mathbb{R}^n)$ be the unique solution to $$ \left\lbrace \begin{array}{l} \dot{x}(t) = A x(t) - W x(t_i) \qquad \text{for a.e.}\ t \in [t_i,t_{i+1}) \qquad \forall i \in \mathbb{N} \\[5pt] x(0) = x_0, \end{array} \right. $$ and let $u \in \mathrm{PC}^\Delta([0,+\infty),\mathbb{R}^m)$ be defined by $u_i := - R^{-1} B^\top E^\infty x(t_i)$ for every $i \in \mathbb{N}$. In particular~$\dot{x}(t) = Ax(t) + Bu(t)$ for almost every $t \geq 0$ and $x(0) = x_0$.
On the one hand, we have \begin{multline*} \Vert x(t) - x(t_i) \Vert_{\mathbb{R}^n} = \left\Vert \int_{t_i}^t \Big( Ax(\tau) - W x(t_i) \Big) \; d\tau \right\Vert_{\mathbb{R}^n} = \left\Vert \int_{t_i}^t \Big( A(x(\tau)-x(t_i)) + (A- W) x(t_i) \Big) \; d\tau \right\Vert_{\mathbb{R}^n} \\ \leq h \Vert A - W \Vert_{\mathbb{R}^{n\times n}} \Vert x(t_i) \Vert_{\mathbb{R}^n} + \Vert A \Vert_{\mathbb{R}^{n\times n}} \int_{t_i}^t \Vert x(\tau) - x(t_i) \Vert_{\mathbb{R}^n} \; d\tau \end{multline*} and, by the Gr\"onwall lemma (see \cite[Appendix~C.3]{sontag1998}), we get that $$ \Vert x(t) - x(t_i) \Vert_{\mathbb{R}^n} \leq h \Vert A - W \Vert_{\mathbb{R}^{n\times n}} e^{h \Vert A \Vert_{\mathbb{R}^{n\times n}}} \Vert x(t_i) \Vert_{\mathbb{R}^n} \qquad \forall t \in [t_i,t_{i+1}) \qquad \forall i \in \mathbb{N}. $$ Since $\Vert x(t_i) \Vert_{\mathbb{R}^{n\times n}} \leq \Vert x(t)-x(t_i) \Vert_{\mathbb{R}^{n\times n}} + \Vert x(t) \Vert_{\mathbb{R}^{n\times n}}$ and $ h \Vert A - W \Vert_{\mathbb{R}^{n\times n}} e^{h \Vert A \Vert_{\mathbb{R}^{n\times n}}} < 1 $, we get that $$ \Vert x(t_i) \Vert_{\mathbb{R}^{n\times n}} \leq \dfrac{1}{1-h \Vert A - W \Vert_{\mathbb{R}^{n\times n}} e^{h \Vert A \Vert_{\mathbb{R}^{n\times n}}}} \Vert x(t) \Vert_{\mathbb{R}^{n\times n}}, \qquad \forall t \in [t_i,t_{i+1}) \qquad \forall i \in \mathbb{N} $$ and thus $$ \Vert x(t) - x(t_i) \Vert_{\mathbb{R}^{n\times n}} \leq \dfrac{h \Vert A - W \Vert_{\mathbb{R}^{n\times n}} e^{h \Vert A \Vert_{\mathbb{R}^{n\times n}}}}{1-h \Vert A - W \Vert_{\mathbb{R}^{n\times n}} e^{h \Vert A \Vert_{\mathbb{R}^{n\times n}}}} \Vert x(t) \Vert_{\mathbb{R}^{n\times n}}, \qquad \forall t \in [t_i,t_{i+1}) \qquad \forall i \in \mathbb{N}. $$ On the other hand, we have \begin{multline*} \dfrac{d}{dt} \langle E^\infty x(t),x(t) \rangle_{\mathbb{R}^n} = 2 \langle E^\infty x(t) , \dot{x}(t) \rangle_{\mathbb{R}^n} = 2 \langle E^\infty x(t) , A x(t) - W x(t_i) \rangle_{\mathbb{R}^n} \\ = 2 \langle E^\infty x(t) , (A-W) x(t) \rangle_{\mathbb{R}^n} + 2 \langle E^\infty x(t) , W (x(t)-x(t_i)) \rangle_{\mathbb{R}^n} \qquad \text{for a.e.}\ t \in [t_i,t_{i+1}) \qquad \forall i \in \mathbb{N}. \end{multline*} We deduce that \begin{multline*} \dfrac{d}{dt} \langle E^\infty x(t),x(t) \rangle_{\mathbb{R}^n} \leq \left( - \rho_{\mathrm{min}} (Q) + 2 \rho_{\mathrm{max}}(E^\infty W) \dfrac{h \Vert A-W \Vert_{\mathbb{R}^{n\times n}} e^{h \Vert A \Vert_{\mathbb{R}^{n\times n}}}}{1 - h \Vert A-W \Vert_{\mathbb{R}^{n\times n}} e^{h \Vert A \Vert_{\mathbb{R}^{n\times n}}} } \right) \Vert x(t) \Vert^2_{\mathbb{R}^n} \\ \leq - \dfrac{\rho_{\mathrm{min}} (Q)}{2} \Vert x(t) \Vert^2_{\mathbb{R}^n} \leq - \dfrac{\rho_{\mathrm{min}} (Q)}{2 \rho_{\mathrm{max}}(E^\infty) } \langle E^\infty x(t) , x(t) \rangle_{\mathbb{R}^n} \qquad \text{for a.e.}\ t \geq 0. \end{multline*} We deduce from the Gr\"onwall lemma that $$ \Vert x(t) \Vert^2_{\mathbb{R}^n} \leq \dfrac{1}{\rho_{\mathrm{min}} (E^\infty)} \langle E^\infty x(t) , x(t) \rangle_{\mathbb{R}^n} \leq \dfrac{1}{\rho_{\mathrm{min}} (E^\infty)} \langle E^\infty x_0 , x_0 \rangle_{\mathbb{R}^n} e^{- \frac{\rho_{\mathrm{min}} (Q)}{2 \rho_{\mathrm{max}}(E^\infty) } t} \qquad \forall t \geq 0. $$ We deduce that \begin{multline*}
\int_0^{+\infty} \langle Q x(\tau) , x(\tau) \rangle_{\mathbb{R}^n} \; d\tau \leq \dfrac{\rho_{\mathrm{max}}(Q)}{\rho_{\mathrm{min}} (E^\infty)} \langle E^\infty x_0 , x_0 \rangle_{\mathbb{R}^n} \int_0^{+\infty} e^{- \frac{\rho_{\mathrm{min}} (Q)}{2 \rho_{\mathrm{max}}(E^\infty) } \tau} \; d\tau \\
= \dfrac{2 \rho_{\mathrm{max}}(Q) \rho_{\mathrm{max}}(E^\infty)}{\rho_{\mathrm{min}} (Q) \rho_{\mathrm{min}} (E^\infty)} \langle E^\infty x_0 , x_0 \rangle_{\mathbb{R}^n} < +\infty. \end{multline*} Moreover, using that $t_i = ih$ for every $i \in \mathbb{N}$, we have \begin{multline*}
\int_0^{+\infty} \langle R u(\tau) , u(\tau) \rangle_{\mathbb{R}^n} \; d\tau \leq h \rho_{\mathrm{max}}(R) \sum_{i \in \mathbb{N}} \Vert u_i \Vert^2_{\mathbb{R}^m} \leq h \rho_{\mathrm{max}}(R) \Vert R^{-1} B^\top E^\infty \Vert^2_{\mathbb{R}^{m \times n}} \sum_{i \in \mathbb{N}} \Vert x(t_i) \Vert^2_{\mathbb{R}^m} \\
\leq h \dfrac{\rho_{\mathrm{max}}(R)}{\rho_{\mathrm{min}} (E^\infty)} \Vert R^{-1} B^\top E^\infty \Vert^2_{\mathbb{R}^{m \times n}} \langle E^\infty x_0 , x_0 \rangle_{\mathbb{R}^n} \sum_{i \in \mathbb{N}} \Big( e^{- \frac{\rho_{\mathrm{min}} (Q)}{2 \rho_{\mathrm{max}}(E^\infty) } h} \Big)^i \\
= h \dfrac{\rho_{\mathrm{max}}(R)}{\rho_{\mathrm{min}} (E^\infty)} \Vert R^{-1} B^\top E^\infty \Vert^2_{\mathbb{R}^{m \times n}} \langle E^\infty x_0 , x_0 \rangle_{\mathbb{R}^n} \dfrac{1}{1 - e^{- \frac{\rho_{\mathrm{min}} (Q)}{2 \rho_{\mathrm{max}}(E^\infty) } h}} \\
\leq \dfrac{2 \rho_{\mathrm{max}}(R) \rho_{\mathrm{max}}(E^\infty)}{\rho_{\mathrm{min}} ( Q ) \rho_{\mathrm{min}} (E^\infty)} \Vert R^{-1} B^\top E^\infty \Vert^2_{\mathbb{R}^{m \times n}} \langle E^\infty x_0 , x_0 \rangle_{\mathbb{R}^n} e^{\frac{\rho_{\mathrm{min}} (Q)}{2 \rho_{\mathrm{max}}(E^\infty) } \overline{h}} < +\infty. \end{multline*} Taking $$ \overline{c} := \dfrac{2 \rho_{\mathrm{max}}(E^\infty)}{\rho_{\mathrm{min}} ( Q ) \rho_{\mathrm{min}} (E^\infty)} \Big( \rho_{\mathrm{max}} ( Q ) +\rho_{\mathrm{max}} ( R ) \Vert R^{-1} B^\top E^\infty \Vert^2_{\mathbb{R}^{m \times n}} e^{\frac{\rho_{\mathrm{min}} (Q)}{2 \rho_{\mathrm{max}}(E^\infty) } \overline{h}} \Big) \geq 0 ,$$ the proof is complete.
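The construction used in the proof above (hold the permanent feedback $u_i = -R^{-1}B^\top E^\infty x(t_i)$ on each sampling interval) is easy to test numerically. In the sketch below (our own illustration; $A$, $B$, $Q$, $R$, $x_0$ and the horizon are arbitrary choices, and the state-cost integral is approximated by a Riemann sum on a sub-grid), one observes that for small enough $h$ the quantity $\langle E^\infty x(t), x(t)\rangle_{\mathbb{R}^n}$ decays and the total cost remains a moderate multiple of $\langle E^\infty x_0, x_0\rangle_{\mathbb{R}^n}$, in line with Lemma~\ref{lemimportant}; for too large $h$ the decay may fail, reflecting the threshold $\overline{h}$.

\begin{verbatim}
# Illustration only: zero-order hold of the permanent LQR gain, as in the proof.
import numpy as np
from scipy.linalg import expm, solve_continuous_are

A = np.array([[0.0, 1.0], [-1.0, 0.5]])
B = np.array([[0.0], [1.0]])
Q, R = np.eye(2), np.array([[1.0]])
E_inf = solve_continuous_are(A, B, Q, R)
K = np.linalg.solve(R, B.T @ E_inf)            # permanent gain, u = -K x

def hold_cost(h, T_total=40.0, substeps=20):
    """Exact piecewise simulation of xdot = A x + B u_i, u_i = -K x(t_i)."""
    d = h / substeps
    M = np.block([[A, B], [np.zeros((1, 2)), np.zeros((1, 1))]])
    Phi = expm(M * d)                          # exact sub-step propagator
    x = np.array([1.0, -1.0])
    V0 = float(x @ E_inf @ x)
    cost = 0.0
    for _ in range(int(round(T_total / h))):
        u = -K @ x                             # held on [t_i, t_{i+1})
        for _ in range(substeps):
            cost += d * (float(x @ Q @ x) + float(u @ R @ u))  # Riemann sum
            x = Phi[:2, :2] @ x + Phi[:2, 2:] @ u
    return cost, float(x @ E_inf @ x) / V0, V0

for h in [0.5, 0.1, 0.02]:
    c, decay, V0 = hold_cost(h)
    print(f"h = {h:5.2f}   cost / <E x0,x0> = {c / V0:7.3f}   "
          f"<E x(T),x(T)> / <E x0,x0> = {decay:.2e}")
\end{verbatim}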
\subsection{Proof of Theorem~\ref{thmmain1}}\label{appthmmain1}
\paragraph*{First item.} This proof is inspired from the classical Lax theorem in numerical analysis (see \cite[p.73]{polyanin2018}). Let~$\varepsilon > 0$. We define the map $$ \fonction{\varphi}{[0,T] \times [0,T]}{\mathbb{R}^{n\times n}}{(t,h)}{\varphi(t,h) := \mathcal{F}(t,E^T(t),h).} $$ By continuity of $E^T$ on $[0,T]$ and by Lemma~\ref{lemF}, the map $\varphi$ is uniformly continuous on the compact set~$[0,T] \times [0,T]$. Hence there exists $\delta > 0$ such that $$ \Vert \varphi(t_2,h_2) - \varphi(t_1,h_1) \Vert_{\mathbb{R}^{n\times n}} \leq \dfrac{\varepsilon}{2Te^{cT}} $$ for all $(t_1,h_1)$, $ (t_2,h_2) \in [0,T] \times [0,T]$ satisfying $\vert t_2-t_1 \vert + \vert h_2 - h_1 \vert \leq \delta$, where $c \geq 0$ is the constant given in Lemma~\ref{lemF} associated to the compact set $\mathcal{K} := [0,T] \times \mathrm{K} \times [0,T]$ where
$$ \mathrm{K} := \left\lbrace E \in \mathcal{S}^n_+ \mid \Vert E \Vert_{\mathbb{R}^{n\times n}} \leq \Big( \Vert P \Vert_{\mathbb{R}^{n\times n}} + T \Vert Q_{|[0,T]} \Vert_{\infty} \Big) e^{2T \Vert A_{|[0,T]} \Vert_{\infty} } \right\rbrace . $$ In the sequel we consider a time partition $\Delta = \{ t_i \}_{i=0,\ldots,N}$ of the interval~$[0,T]$ such that $0 < \Vert \Delta \Vert \leq \delta$. Note that \begin{multline*}
E^{T,\Delta}_i = E^{T,\Delta}_{i+1} - h_{i+1} \mathcal{F} (t_{i+1},E^{T,\Delta}_{i+1},h_{i+1}) \\
\text{and} \quad E^T(t_i) = E^T(t_{i+1}) - h_{i+1} \mathcal{F}(t_{i+1},E^T(t_{i+1}),h_{i+1}) +\eta_{i+1} \qquad \forall i=0,\ldots,N-1 \end{multline*} where $$ \eta_{i+1} := E^T(t_i)-E^T(t_{i+1}) + h_{i+1} \mathcal{F} (t_{i+1},E^T(t_{i+1}),h_{i+1}) \qquad \forall i=0,\ldots,N-1 .$$ By Lemmas~\ref{lemF} and~\ref{lembound}, we have $$ \Vert E^T(t_i)-E^{T,\Delta}_i \Vert_{\mathbb{R}^{n\times n}} \leq (1+c h_{i+1} ) \Vert E^T(t_{i+1})-E^{T,\Delta}_{i+1} \Vert_{\mathbb{R}^{n\times n}} + \Vert \eta_{i+1} \Vert_{\mathbb{R}^{n\times n}} \qquad \forall i=0,\ldots,N-1 .$$ It follows from the backward discrete Gr\"onwall lemma (see Lemma~\ref{lemgronwall}) that $$ \Vert E^T(t_i)-E^{T,\Delta}_i \Vert_{\mathbb{R}^{n\times n}} \leq \sum_{j=i+1}^N e^{ c \sum_{q=i+1}^{j-1} h_q } \Vert \eta_{j} \Vert_{\mathbb{R}^{n\times n}} \leq e^{cT} \sum_{j=1}^N \Vert \eta_{j} \Vert_{\mathbb{R}^{n\times n}} \qquad \forall i=0,\ldots,N-1 . $$ Since \begin{multline*} \eta_{j} = h_j \Big( \mathcal{F}(t_j,E^T(t_j),h_j) - \mathcal{F}(t_j,E^T(t_j),0) \Big) + \int_{t_{j-1}}^{t_j} \left( \mathcal{F}(t_j,E^T(t_j),0) - \mathcal{F}(\tau,E^T(\tau),0) \right) d\tau \\ = h_j \Big( \varphi(t_j,h_j) - \varphi(t_j,0) \Big) + \int_{t_{j-1}}^{t_j} \left( \varphi(t_j,0) - \varphi(\tau,0) \right) d\tau \qquad \forall j=1,\ldots,N \end{multline*} we obtain, by uniform continuity of $\varphi$ and using that $0 < \Vert \Delta \Vert \leq \delta$, $$ \Vert \eta_{j} \Vert_{\mathbb{R}^{n\times n}} \leq 2 h_j \dfrac{\varepsilon }{2Te^{cT}} = h_j \dfrac{\varepsilon }{Te^{cT}} \qquad \forall j=1,\ldots,N. $$ We conclude that $$ \Vert E^T(t_i)-E^{T,\Delta}_i \Vert_{\mathbb{R}^{n\times n}} \leq e^{cT} \sum_{j=1}^N \Vert \eta_{j} \Vert_{\mathbb{R}^{n\times n}} \leq e^{cT} \sum_{j=1}^N h_j \dfrac{\varepsilon }{Te^{cT}} = \dfrac{\varepsilon}{T} \sum_{j=1}^N h_j = \varepsilon \qquad \forall i=0,\ldots,N-1. $$ The proof is complete.
\paragraph*{Second item.} The second item of Theorem~\ref{thmmain1} is well known and follows from the proof of Proposition~\ref{thmriccperminf} (see \cite[p.153]{bressan2007}, \cite[Theorem~7]{lee1986} or \cite[Theorem~4.13]{trelat2005}).
\paragraph*{Third item.} This result follows from the proof of Proposition~\ref{thmriccsampleinf}. Indeed, using the notations from Appendix~\ref{appthmriccsampleinf}, it is clear that $$ \lim\limits_{N \to + \infty} E^{t_N,\Delta_N}_i = \lim\limits_{N \to + \infty} D_{N-i} = D = E^{\infty,\Delta} \qquad \forall i \in \mathbb{N}. $$
\paragraph*{Fourth item.} By contradiction let us assume that $E^{\infty,\Delta}$ does not converge to $E^\infty$ when $h \to 0$. Then there exists $\varepsilon > 0$ and a positive sequence $(h_k)_{k \in \mathbb{N}}$ converging to $0$ such that $\Vert E^{\infty,\Delta_k} - E^\infty \Vert_{\mathbb{R}^{n\times n}} \geq \varepsilon$ for every $k \in \mathbb{N}$, where $\Delta_k$ stands for the $h_k$-uniform time partition of the interval $[0,+\infty)$. Without loss of generality, we assume that $0 < h_k \leq \overline{h}$ for every $k \in \mathbb{N}$. It follows from Proposition~\ref{thmriccsampleinf} and from Lemma~\ref{lemimportant} that the minimal cost of $(\mathrm{OCP}^{\infty,\Delta_k}_{x_0})$ satisfies $$ \langle E^{\infty,\Delta_k} x_0 , x_0 \rangle_{\mathbb{R}^{n}} \leq \overline{c} \langle E^{\infty} x_0 , x_0 \rangle_{\mathbb{R}^{n}} \leq \overline{c} \Vert E^\infty \Vert_{\mathbb{R}^{n\times n} } \Vert x_0 \Vert_{\mathbb{R}^n}^2 \qquad \forall x_0 \in \mathbb{R}^n. $$ Hence $\Vert E^{\infty,\Delta_k} \Vert_{\mathbb{R}^{n\times n}} \leq \overline{c} \Vert E^{\infty} \Vert_{\mathbb{R}^{n\times n}}$ for every $k \in \mathbb{N}$ by (v) of Lemma~\ref{lemmatrice}. Thus the sequence~$(E^{\infty,\Delta_k})_{k \in \mathbb{N}}$ is bounded in $\mathbb{R}^{n\times n}$ and, up to a subsequence (that we do not relabel), converges to some~$L \in \mathbb{R}^{n\times n}$. In particular $\Vert L - E^\infty \Vert_{\mathbb{R}^{n\times n}} \geq \varepsilon$. Since $E^{\infty,\Delta_k} \in \mathcal{S}^n_{++} \subset \mathcal{S}^n_+$ for every~$k \in \mathbb{N}$, it is clear that~$L \in \mathcal{S}^n_+$. Moreover, by $\mathrm{(SD\text{-}ARE)}$ associated to $h_k$ (see Proposition~\ref{thmriccsampleinf}), we know that~$\mathcal{F}(E^{\infty,\Delta_k},h_k) = 0_{\mathbb{R}^{n\times n}}$ for all $k \in \mathbb{N}$. By continuity of $\mathcal{F}$ (see Lemma~\ref{lemF}), we conclude that~$\mathcal{F}(L,0) = 0_{\mathbb{R}^{n\times n}}$. By uniqueness (see Proposition~\ref{thmriccperminf}) we deduce that $L = E^\infty$ which raises a contradiction with the inequality $\Vert L - E^\infty \Vert_{\mathbb{R}^{n\times n}} \geq \varepsilon$. The proof is complete.
\end{document} |
\begin{document}
\title{ Prime polynomial values of \\ quadratic functions in short intervals}
\author{Sushma Palimar}
\address{ Department of Mathematics,\\ Indian Institute of Science,\\ Bangalore, Karnataka, India. }
\email{[email protected], [email protected].} \subjclass[2010]{11T55(primary),11P55,11N37.} \begin{abstract}
In this paper we establish the function field analogue of the Bateman-Horn conjecture in short intervals in the limit of a large finite field. To this end we start by counting prime polynomials generated by primitive quadratic functions in short intervals. We further work out function field analogues of the cancellation of M\"obius sums and of their correlations (Chowla-type sums), and confirm that square-root cancellation in M\"obius sums is equivalent to square-root cancellation in Chowla-type sums.
\end{abstract} \maketitle
\section{Introduction} The well-known conjectures of Hardy-Littlewood and Bateman-Horn predict how often polynomials take prime values. For example, choose $f_{1}(T),\dots, f_{r}(T)$ to be non-associate irreducible polynomials in $\mbox{$\mathbb{Z}$}[T]$, with the leading coefficient of each $f_{i}$ positive, and suppose that for each prime $p$ there exists $n\in \mbox{$\mathbb{Z}$}$ such that $p\nmid f_{1}(n)\cdots f_{r}(n)$. Let $\pi_{f_1,f_2,\dots,f_r}(x)$ denote the number of positive integers $n\leq x$ such that $f_{1}(n),\dots,f_{r}(n)$ are all primes. The Bateman-Horn conjecture predicts that \begin{equation}\label{eqNo.1}
\pi_{f_1,f_2,\dots,f_r}(x):= \#\{1\leq n\leq x: f_{1}(n),\dots,f_{r}(n) \text{ are all primes} \}\end{equation}
\[ \sim\frac{C (f_{1},f_{2},\dots,f_{r})}{ \prod\limits_{i=1}^{r} \deg f_i}\,\frac{x}{(\log x)^{r} } \] where\[ C (f_{1},f_{2},\dots,f_{r}):=\prod_{p \text{ prime}}\frac{1-\nu(p)/p}{({1-1/p})^{r}}, \] $\nu(p)$ being the number of solutions to $f_{1}(T)\cdots f_{r}(T)\equiv {0}\pmod{p}$ in $\mbox{$\mathbb{Z}$}/p.$
The product $C (f_{1},f_{2},\dots,f_{r})$ is called the Hardy-Littlewood constant associated to $f_{1},\dots,f_{r}$ \cite{kc}.
The only proved case of the Bateman-Horn conjecture is that of a single linear polynomial, which is Dirichlet's theorem on
primes in arithmetic progressions \cite{sb}. For $r=2$, $f_1(T)=T$ and $f_2(T)=T+2$ in (\ref{eqNo.1}), the Bateman-Horn conjecture reduces to the Hardy-Littlewood twin prime conjecture on the density of twin primes,
according to which the number of twin prime pairs less than $x$ is
\[\pi_{2}(x)\sim 2\prod_{p\geq 3} \frac{p(p-2)}{(p-1)^{2}}\frac{x}{(\log x)^{2}}
\]
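As a simple consistency check, consider the case of a single polynomial $f_{1}(T)=T$: here $\nu(p)=1$ for every prime $p$, so the Hardy-Littlewood constant equals $1$, and the prediction $\pi_{f_1}(x)\sim x/\log x$ is just the Prime Number Theorem.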
We derive a function field analogue of the Bateman-Horn conjecture in short intervals, in the limit of a large finite field.
\subsection*{Polynomial Ring and Prime Polynomials} Let $\mbox{$\mathbb{F}$}_{q}[t]$ be the ring of polynomials over the finite field $\mbox{$\mathbb{F}$}_{q}$ with $q$ elements, $q=p^{\nu}, p: \text{ prime}.$
Let $\mathcal{P}_{n}=\{f\in \mbox{$\mathbb{F}$}_{q}[t]| \mathrm{deg} f=n \}$ be the set of all polynomials of degree $n$ and
$\mathcal{M}_{n}\subset\mathcal{P}_{n}$ be the subset of monic polynomials of degree $n$ over $\mbox{$\mathbb{F}$}_{q}$.
The polynomial ring $\mbox{$\mathbb{F}$}_q[t]$ over a finite field $\mbox{$\mathbb{F}$}_q$ shares several properties with the ring of integers and the analogies between
number field and function fields are fundamental in number theory. For instance, as quantitative aspect of this analogy,
we have the Prime Polynomial Theorem.
The prime polynomial theorem states that the number $\pi_{q}(n)$ of monic irreducible polynomials of degree $n$ is \[ \pi_{q}(n)=\frac{q^{n}}{n}+ O\big(\frac{q^{n/2}}{n}\big), \quad q^{n}\rightarrow \infty.\] The prime polynomial theorem for arithmetic progressions asserts that, given a polynomial modulus $Q\in \mbox{$\mathbb{F}$}_{q}[t]$ of positive degree and a polynomial $A$ coprime to $Q$, the number $ \pi_{q}(n; Q, A)$ of primes $P\equiv A\pmod Q$, $P\in \mathcal{M}_{n}$, satisfies \[ \pi_{q}(n; Q, A)=\frac{\pi_{q}(n)}{\Phi(Q)}+O(\mathrm{deg}\, Q\cdot q^{n/2}),\] where $\Phi(Q)$ is the number of coprime residues modulo $Q$. For $q\rightarrow \infty,$ the main term is dominant as long as $\mathrm{deg}\,Q<n/2.$ \subsection*{} In \cite{lbs} Bary-Soroker considered the function field analogue of the Hardy-Littlewood prime tuple conjecture, in the limit of a large finite field, for the functions $F_i=f+h_i$,
$h_i\in \mathbb{F}_q[t]$, $\deg(f)>\deg(h_i), \text{ for } i=1,2,\dots,n$. This result was established previously by
Bender and Pollack \cite{AP} for the case of two polynomials.
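For illustration, in the smallest non-trivial case $q=3$, $n=2$, the monic irreducible quadratics over $\mbox{$\mathbb{F}$}_{3}$ are exactly $t^2+1$, $t^2+t+2$ and $t^2+2t+2$, so that $\pi_{3}(2)=3=(q^{2}-q)/2$, in agreement with the main term $q^{2}/2$ of the prime polynomial theorem.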
\subsection{Prime polynomials in short interval}
Some of the salient problems of prime number theory deal with the distribution of primes
in short intervals and in arithmetic progressions.
To set up an equivalent problem for the polynomial ring $\mbox{$\mathbb{F}$}_{q}[t]$, we define short intervals in function fields. Here we follow \cite{KZ} for notation.
For a nonzero polynomial $f\in \mbox{$\mathbb{F}$}_{q}[t]$, we define its norm by $||f||:= \#\big(\mbox{$\mathbb{F}$}_q[t]/(f)\big)=q^{\deg f}$.
Given a monic polynomial $f\in \mathcal{M}_{n}$ of degree $n$ and $h<n$, the ``short interval'' around $f\in \mathcal{M}_{n}$
of diameter $q^{h}$
is the set
\begin{equation}I(f,h):=\{g\in\mbox{$\mathbb{F}$}_{q}[t]:\mathrm{deg}(f-g)\leq h\}=\{g\in \mbox{$\mathbb{F}$}_{q}[t]:||f-g||\leq q^{h}\} =f+ \mathcal{P}_{\leq h},\end{equation} where $\mathcal{P}_{\leq h}$ denotes the set of polynomials of degree at most $h$. Thus every element of $I(f,h)$ is of the form $f+\sum_{i=0}^{h}a_it^i$ with ${\mathbf {a}}=(a_0,a_1,\dots,a_h)\in\mbox{$\mathbb{F}$}_{q}^{h+1}$. The number of polynomials in this interval is \[H:=\#I(f;h)=q^{h+1}.\]
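For example, for $q=3$, $f=t^{3}$ and $h=1$, the interval $I(t^{3},1)=\{t^{3}+a_1t+a_0 : a_0,a_1\in\mbox{$\mathbb{F}$}_{3}\}$ consists of $H=q^{h+1}=9$ monic cubic polynomials.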
For $h=n-1$, $I(f,n-1)=\mathcal{M}_{n}$ is the set of all monic polynomials of degree $n$. For $h<n$, if $||f-g||\leq q^{h}$, then $f$ is monic if and only if $g$ is monic. Bank, Bary-Soroker and Rosenzweig \cite{Baroli}
obtained a result on counting prime polynomials in the short interval $I(A,h)$
for the primitive linear function $f(t)+g(t)x$.
In \cite{Baro} the function field analogue of the Hardy-Littlewood prime tuple conjecture for these primitive linear functions is resolved
in the short interval case.
\subsection*{Counting Prime polynomials and HIT} To establish the function field analogue of counting prime polynomials in \textit{short intervals} we start with an irreducible quadratic function $F(x,t)=f(t)+x^{2}\cdot g(t) \in \mbox{$\mathbb{F}$}_{q}[t][x]$ with the following properties. Let $f, g\in \mbox{$\mathbb{F}$}_{q}[t]$ be nonzero, relatively prime polynomials, with $g(t)$ monic, the product $f\cdot g $ not a square polynomial and $\mathrm{deg}f<\mathrm{deg}g$. By this choice of $f\text{ and }g$, the function $F(x,t)= f(t)+x^2\cdot g(t)$ is irreducible as a polynomial in $x$. Since $q$ is odd, the derivative of $F(x,t)$ with respect to $x$ is $2xg(t)\neq0$, which implies that $ f(t)+g(t)x^2$, as a polynomial in $x$, is separable over $\mbox{$\mathbb{F}$}_{q}(t)$. \subsection*{} The short interval $I(p,m)$, defined as $\mathrm{h}=p+\mathcal{P}_{\leq m}$ with $\mathrm{deg}\,p>m$, consists of the polynomials \begin{equation}\mathrm{h}(t)=p(t)+\sum_{i=0}^{m}a_it^i,\end{equation} where ${\mathbf{a}}=(a_0,a_1,\dots,a_m)$ are
algebraically independent variables over $\mbox{$\mathbb{F}$}_{q}$.
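For instance, one admissible choice is $f(t)=1$ and $g(t)=t$: these are coprime, $g$ is monic, the product $fg=t$ is not a square and $\mathrm{deg}f=0<1=\mathrm{deg}g$, so $F(x,t)=1+tx^{2}$ satisfies all of the above requirements.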
Technically, the problem of finding prime polynomials in a short interval is
to find the number of tuples $\mathbf{A}=(A_0,\dots,A_m)\in \mbox{$\mathbb{F}$}_{q}^{m+1}$
for which $F(\mathbf{A},t)$ is irreducible in $\mbox{$\mathbb{F}$}_{q}[t]$.
The key tool is the Hilbert Irreducibility Theorem (HIT), which addresses the question of whether the specialization $\mathbf{a}\mapsto\mathbf{A}\in\mbox{$\mathbb{F}$}_{q}^{m+1}$ preserves irreducibility. We have \begin{equation}\label{maineq}
F(x,t)=f(t)+x^2g(t) \text{ for } f(t),g(t)\in \mbox{$\mathbb{F}$}_{q}[t]
\end{equation}
Then, \[F(\mathrm{h},t)=f(t)+g(t)\mathrm{h}^2 = f(t)+g(t)\Big\{p(t)+\sum\limits_{i=0}^{m}a_it^i\Big\}^{2}\]
therefore,\begin{equation} \label{maineq1}F(\mathbf{a},t)= \tilde f(t)+g(t)\Big\{\big(\sum\limits_{j=0}^{m}a_jt^j\big)^2+2p(t)\sum\limits_{j=0}^{m}a_jt^j\Big\}\end{equation}
where \[\tilde f(t)=f(t)+g(t)p(t)^2 \]and\[ n=\mathrm{deg}F=\mathrm{deg}\tilde f=\mathrm{deg}\,g+2\,\mathrm{deg}\,p>\mathrm{deg}\,g.\]
Under the above setup, we get an asymptotic for:
\begin{equation} \pi_{q}(I(p,m))=\#\{ h:=p(t)+\sum\limits_{j=0}^{m}A_jt^j| F(\mathbf{A},t) \text{ is irreducible in } \mbox{$\mathbb{F}$}_{q}[t]\}\end{equation} and we have the following theorem. \begin{theorem}\label{th1} Let $n$ be a fixed positive integer and $q$ an odd prime power.
Then we have $\pi_{q}(I(p,m))=\frac{q^{m+1}}{n}+O_{n}(q^{m+\frac{1}{2}})$ \end{theorem}
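Heuristically, the main term $q^{m+1}/n$ reflects the fact that, by the prime polynomial theorem, a proportion of roughly $1/n$ of the polynomials of degree $n$ is irreducible; equivalently, once the Galois group is the full symmetric group $S_n$, the proportion of $n$-cycles in $S_n$ is exactly $1/n$.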
One of the basic forms of HIT states that if $f(x_1,x_2,\dots,x_r,T_1,T_2,\dots,T_s)\in \mbox{$\mathbb{Q}$}[x_1,\dots,x_r,T_1,\dots,T_s]$ is irreducible, then there exists a specialization $\mathbf{t}=(t_1,\dots,t_s)$ such that $f(x_1,\dots,x_r)=f(x_1,\dots,x_r,t_1,\dots,t_s)$, as a rational polynomial in $x_1,\dots,x_r$, is irreducible in $\mbox{$\mathbb{Q}$}[x_1,\dots,x_r]$. If $r=1$,
consider $f$ as a polynomial in $x$ over the rational function field
$L=\mbox{$\mathbb{Q}$}(T_1,...,T_s)$, having roots $\alpha_1, . . . , \alpha_n$ in the algebraic closure $\bar L.$
If $f$ is irreducible and separable, then these roots are distinct, and we can consider the Galois group $G$ of $f$
over $L$ as a subgroup of the symmetric group $S_n$. Then there exists a specialisation
$ \mathbf{t}\in \mbox{$\mathbb{Q}$}^{s}$ such that the resulting rational polynomial in $x$ still is irreducible and has Galois group
$G$ over $\mbox{$\mathbb{Q}$}$. In fact, if $\mathbf{t}$ is chosen in such a way that the specialized polynomial in $x$
still is of degree $n$, and separable, then its Galois group $G_\mathbf{t}$ over $\mbox{$\mathbb{Q}$}$ is a subgroup of $G$
(well-defined up to conjugation)
and it turns out that ``almost all'' specializations $\mathbf{t}$ preserve the Galois group, i.e.\ $G_\mathbf{t}=G$. Hence, we start by computing the
Galois group of $F(\mathbf{a},t)$ over $\bar \mbox{$\mathbb{F}$}_{q}(\mathbf{a})$.
In the sequel we let $k=\bar \mbox{$\mathbb{F}$}_q$ and prove $\mathrm{Gal}(F({\mathbf{a}},t),k({\mathbf{a}})) = S_{n}$ in Section \ref{gal}.
\section{$\mathrm{Gal}(F({\mathbf{a}},t),k({\mathbf{a}})) = S_{n}$} \label{gal}
\begin{theorem}\label{th2}
Let $k=\bar \mbox{$\mathbb{F}$}_{q}$, let $q$ be an odd prime power and let ${\mathbf{a}}=(a_0,a_1,...,a_m)$ be an $(m+1)$-tuple of variables,
$m\geq 2$.
Then, $\mathrm{Gal}(F({\mathbf{a}},t),k({\mathbf{a}}))$ = $S_{n}$.
\end{theorem} To prove Theorem \ref{th2}, as a first step we show the following: \subsection{$\mathrm{Gal}(F({\mathbf{a}},t),k({\mathbf{a}}))$ is doubly transitive} \begin{proposition}\label{prop1}
The polynomial function $F(\mathbf{a},t)=\tilde f(t)+g(t)\{(\sum\limits_{j=0}^{m}a_jt^j)^2+2p(t)\sum\limits_{j=0}^{m}a_jt^j\}$
is separable in $t$ and irreducible in the ring $k({\mathbf{a}})[t]$. \end{proposition}
\begin{proof}
To prove the irreducibility of $F(\textbf{a},t)$ in $k({\textbf{a}})[t]$,
we consider $F({\textbf{a}},t)$ as a quadratic equation in the variable $a_0$
and show that its discriminant is not a square. From equation (\ref{maineq1}) we have
\[F({\textbf{a}},t)=\tilde f(t)+g(t)\{(\sum\limits_{j=0}^{m}a_jt^j)^2+2p(t)\sum\limits_{j=0}^{m}a_jt^j\}\]
\[F({\textbf{a}},t)=\tilde f(t)+g(t)\big\{l(t)^2+2l(t)p(t)\big\}\text{ where } l(t)=\sum\limits_{i=0}^{m}a_it^i.\]
Writing $ F(\mathbf{a},t)$ as a quadratic equation in $a_0$, we have \[ g(t)a_0^2+\{2g(t)(l_1(t)+p(t))\}a_0+\big\{\tilde f(t)+g(t)\{l_1(t)^2+2p(t)l_1(t)\}\big \}=0\]
\[\text{ where }l_1(t)=\sum_{i=1}^{m}a_it^i\] \text{ The discriminant of the above equation is } \begin{equation}\label{eqdisc} \Delta(F({\mathbf{a}},t))= 4g(t)^2\{l_1(t)+p(t)\}^2-4g(t)\{\tilde f(t)+g(t)\{l_1(t)^2+2p(t)l_1(t)\}\}\end{equation} Substituting, $\tilde f(t)=f(t)+g(t)p(t)^2$, in the second sum of equation (\ref{eqdisc}), we have \[4g(t)^2\{l_1(t)+p(t)\}^2 -4g(t)f(t)-4g(t)^2\{p(t)^2+l_1(t)^2+2p(t)l_1(t)\}\] Hence,
\[\Delta(F({\mathbf{a}},t))=-4g(t)f(t)\neq 0.\] Clearly, $\Delta(F({\mathbf{a}},t))=-4f(t)g(t)$ is not a square, by our choice of $f$ and $g$. Therefore $F(\mathbf{a},t)$ is irreducible in $k[a_1,...,a_m,t][a_0]=k[a_0,...,a_m,t]$. Hence, by Gauss's lemma, $F({\mathbf{a}},t)$ is irreducible in $k(a_0,...,a_m)[t]$. As for the separability of $F(\mathbf{a},t)$ in $t$, we note that
the irreducible polynomial $F(x,t)$ is separable in $x$ (since its first derivative with respect to $x$
is not the zero polynomial, by the choice of $f$ and $g$). Hence the result of Rudnick in \cite{zr} confirms that the polynomial $F(\mathbf{a},t)$ is separable in $t$. \end{proof} In the next proposition we prove that the Galois group of $F({\mathbf{a}},t)$ over $k(a_0,...,a_m)$ is doubly transitive with respect to the action on the roots of $F.$ We briefly recall the notion of double transitivity as given in [\cite{ssa}, p.~119]. Let $K$ be a field. Consider a polynomial $f(y)=y^n+a_1y^{n-1}+...+a_n$ with $a_i\in K.$ We can factor $f$ as $f(y)=(y-\alpha_1)(y-\alpha_2)\cdots(y-\alpha_n)$, where the roots $\alpha_i$ lie in some extension field of $K.$ Let $L=K(\alpha_1,...,\alpha_n)$; then $L$ is called the splitting field of $f$ over $K$. The Galois group of $L$ over $K$, denoted by $\mathrm{Gal}(L/K)$, is the group of all $K$-automorphisms of $L$, i.e., those field automorphisms of $L$ which leave $K$ elementwise fixed. Assuming $L$ to be separable over $K$ and $f$ to have no multiple factors in $K[y]$, every member of $\mathrm{Gal}(L/K)$ permutes $\alpha_1,...,\alpha_n$, and this gives an injective homomorphism of $\mathrm{Gal}(L/K)$ into $S_{n}$ whose image is called the Galois group of $f$ over $K$, denoted $\mathrm{Gal}(f,K)$. The group $\mathrm{Gal}(f,K)$ is transitive if and only if $f$ is irreducible in $K[y]$, and $\mathrm{Gal}(f,K)$, as a subgroup of the symmetric group $S_n$, is $2$-transitive if and only if it is transitive and its one point stabilizer group $G_{\alpha_1}$ is transitive as a subgroup of $S_{n-1}$.
Here, by definition, $G_{\alpha_1}=\{g\in G\mid g(\alpha_1)=\alpha_1\}$ is thought of as a subgroup of the group of all permutations of the roots $\{\alpha_2,...,\alpha_n\}$ of $f.$ Note that if $G$ is transitive, then all the one point stabilizers $G_{\alpha_i},i=1,2,...,n$, are isomorphic to each other. To see the equational analogue of this, consider an irreducible polynomial $f(y)$ in $K[y]$. We throw away a root of $f$, say $\alpha_1$, to get \[ f_1(y)=\frac{f(y)}{(y-\alpha_1)}=y^{n-1}+b_1y^{n-2}+...+b_{n-1}\in K(\alpha_1)[y].\] Then $f$ and $f_1$ are irreducible in $K[y]$ and $K(\alpha_1)[y]$, respectively, if and only if $\mathrm{Gal}(f,K)$ is doubly transitive \cite{ssa1}.
\begin{proposition}\label{prop2}
For, $F(\mathbf{a},t)$ defined above, the Galois group $\mathrm{G}$ of $F({\mathbf{a}},t)$
over $k({\mathbf{a}})$ is doubly transitive with respect to the
action on the roots of $F({\mathbf{a}},t)$. \end{proposition} \begin{proof} Proposition \ref{prop1} implies, the Galois group $\mathrm{G}=\mathrm{Gal}(F({\mathbf{a}},t),k({\mathbf{a}}))$ is transitive. We show that, the Galois group $\mathrm{Gal}(F({\mathbf{a}},t),k({\mathbf{a}}))$ is doubly transitive by specializing $a_0=0.$ Under the specialization $a_0=0$, we have \[\tilde F(a_1,...,a_m,t)=\tilde f(t)+g(t)\{(\sum\limits_{j=1}^{m}a_jt^j)^2+2p(t)\sum\limits_{j=1}^{m}a_jt^j\}\]
Let $\alpha \in k$ be a root of $\tilde f(t)$; by substituting $t+\alpha$ for $t$, we may assume that $\tilde f(0)=0.$ Hence $ f_{0}(t)=\tilde f(t)/t$ is a polynomial. \[\tilde F(a_1,...,a_m,t)= t\big\{f_0(t)+g(t)\sum\limits_{i=1}^{m}\sum\limits_{j=1}^{m}a_ia_jt^{i+j-1}+g(t)2p(t)\sum\limits_{j=1}^{m}a_jt^{j-1}\big\}\] We first show that \begin{equation}\label{irr-dou}
f_0(t)+g(t)\sum\limits_{i=1}^{m}\sum\limits_{j=1}^{m}a_ia_jt^{i+j-1}+g(t)2p(t)\sum\limits_{j=1}^{m}a_jt^{j-1}
\end{equation} is irreducible in $k(a_1,...,a_m)[t]$ and separable in $t$.
Separability of the polynomial in equation (\ref{irr-dou}) follows by applying the result in \cite{zr}. Now we show that equation (\ref{irr-dou}) is irreducible in $k(a_1,...,a_m)[t]$. We prove this by writing equation (\ref{irr-dou}) as a quadratic equation in $a_1$ and showing that the discriminant of this quadratic equation is $-4f(t)g(t)$, which is not a square polynomial. Writing equation (\ref{irr-dou})
as a quadratic equation in $a_1$ we have \begin{equation}\label{disc-dou} \begin{split} &t\cdot g(t)a_1^2+ 2g(t)\{\sum\limits_{j=2}^{m}a_jt^j+p(t)\}a_1 +\\ &f_0(t)+g(t)\{\sum\limits_{i=2}^{m}\sum\limits_{j=2}^{m}a_ia_jt^{i+j-1}+2p(t) \sum\limits_{i=2}^{m}a_it^{i-1}\}=0. \end{split} \end{equation} The discriminant of the quadratic equation (\ref{disc-dou}) in $a_1$ is \[4g(t)^{2}\{\sum\limits_{j=2}^{m}a_jt^j+p(t)\}^2-4tg(t)\big[f_0(t)+g(t)\{\sum\limits_{i=2}^{m}\sum\limits_{j=2}^{m}a_ia_jt^{i+j-1}+2p(t)\sum\limits_{i=2}^{m}a_it^{i-1}\}\big].\] Substituting $f_0(t)=\frac{1}{t}(f(t)+g(t)p(t)^2)$, this equals \[4g(t)^{2}\{\sum\limits_{j=2}^{m}a_jt^j+p(t)\}^2-4f(t)g(t)-4g(t)^2\{\sum\limits_{j=2}^{m}a_jt^j+p(t)\}^2=-4f(t)g(t).\] Hence equation (\ref{irr-dou}) is irreducible in $k(a_1,...,a_m)[t]$ and separable in $t$. Let $\mathrm{G}_t$ be the Galois group of $\tilde F(a_1,...,a_m,t)$ over $k(\alpha,a_1,...,a_m)$.
Hence, from the discussion preceding Proposition \ref{prop2}, it is clear that $\mathrm{G}_t$ is a doubly transitive subgroup of the symmetric group $S_{\deg \tilde f}$. Since $\tilde F(a_1,...,a_m,t)$ is separable, specialization induces an inclusion $\mathrm{G}_t\subset \mathrm{G}$, uniquely determined up to conjugation. Hence the stabilizer of a root of $F$ in $\mathrm{G}$ is transitive, and thus $\mathrm{G}$ is doubly transitive. \end{proof} \subsection*{ Proof of Theorem \ref{th2}} \begin{proof}
We have already seen that the Galois group of $F({\mathbf{a}},t)$ over $k({\mathbf{a}})$ is doubly transitive. Hence it only remains to show that it contains a transposition. To achieve this, we first show that, at some specialization with $a_m=0$, the polynomial $F(a_0,...,a_{m-1},t)$ has one double zero and the remaining $(n-2)$ zeros simple.
$$\text{ Let }\tilde F(\mathbf{a},t)=F({\mathbf{a}},t)|_{a_m=0}.$$
\begin{definition}\label{Morse}
A polynomial $f$ is called a {\it Morse function} \cite{jp} if, denoting by $\beta_1,\beta_2,\dots,\beta_{n-1}$ the zeros of the derivative $f^{\prime}$ of $f$, \begin{enumerate} \item $f(\beta_i)\neq f(\beta_j)$ for $i\neq j$, i.e., the critical values of $f$ are distinct;
\item the zeros $\beta_1,\beta_2,\dots,\beta_{n-1}$ of $f^{\prime}$ are simple, i.e.,
the critical points of $f$ are non-degenerate. \end{enumerate} \end{definition}
It is well known that the discriminant of a monic separable polynomial
is given by \begin{equation}\label{disdef}disc(F)=\pm Res(F,F^{\prime}).\end{equation}
Proposition \ref{prop1} implies that the specialized polynomial $F({\mathbf{a}},t)|_{a_m=0}$ (equation (\ref{speceq}))
is separable in $t$ and irreducible in $k(a_0,a_1,...,a_{m-1})[t]$. We have
\begin{equation}\label{speceq}\tilde F(a_0,...,a_{m-1},t)=\tilde f(t)+g(t)\{(\sum\limits_{j=0}^{m-1}a_jt^j)^2+2p(t)\sum\limits_{j=0}^{m-1}a_jt^j\}\end{equation}
Separability of $\tilde F$ implies that, for $(A_0,A_1,...,A_{m-1}) \in \bar k^{m}$, the system of equations below does not have a solution in the algebraic closure of $k$. \begin{equation}\label{eq2}
\begin{cases}
\tilde F^{\prime}(\rho_i)=0\\
\tilde F^{\prime}(\rho_j)=0\\
\tilde F(\rho_i)=\tilde F(\rho_j) \text{ for some } \rho_i\neq\rho_j \text{ in the algebraic closure of } {k}
\end{cases} \end{equation}
This further implies that the critical values of $\tilde F(a_0,...,a_{m-1},t)$ are distinct, proving condition $(1)$ of Definition {\ref{Morse}}.
A detailed explanation is given in (\cite{CR}, Section 3 and Section 4).
It remains to prove condition $(2)$ of Definition \ref{Morse}, i.e., that the critical points of $\tilde F$ are non-degenerate.
A small calculation shows that $\tilde F^{\prime}(t)$ and $\tilde F^{\prime\prime}(t)$, the first and second derivatives of $\tilde F$ with respect to $t$, have no common root. Thus the critical points of $\tilde F$ are non-degenerate. Hence the function $\tilde F(a_0,...,a_{m-1},t)$ is Morse, and therefore, at a suitable specialization, the polynomial $F(a_0,...,a_{m-1},t)$ has one double zero and the remaining $(n-2)$ zeros simple. Hence a transposition in $G=\mathrm{Gal}(F({\mathbf{a}},t),k({\mathbf{a}}))$ is implied by (\cite{Ho}, Lemma 1), which is stated below. \begin{lemma}\label{lem2}
Let $p$ be a prime number and $\mathfrak{p}$ be a prime ideal in $K$ satisfying $\mathfrak{p}|p.$
If $f(x)\equiv (x-c)^{2}\bar h(x) \pmod{p}$ for some $c\in \mbox{$\mathbb{Z}$}$ and a polynomial $\bar h(x)$ that is separable modulo $p$
and satisfies $\bar h(c)\not\equiv 0\pmod {p}$,
then the inertia group of $\mathfrak{p}$ over $\mbox{$\mathbb{Q}$}$ is either trivial or a group generated by a transposition. \end{lemma} By Proposition \ref{prop2}, the Galois group $G$ of $F({\mathbf{a}},t)$ over $k({\mathbf{a}})$ is doubly transitive. Any finite doubly transitive permutation group containing a transposition is a full symmetric group (\cite{jp}, Lemma 4.4.3). Thus $\mathrm{Gal}(F({\mathbf{a}},t), k({\mathbf{a}}))$ is isomorphic to the full symmetric group $S_{n}$, and the proof of Theorem \ref{th2} is complete. \end{proof} \section{Irreducibility Criteria} Having established that $\mathrm{Gal}(F({\mathbf{a}},t),k({\mathbf{a}}))= S_{n}$, we now obtain an asymptotic for the number of elements $h\in I(p,m)$ for which the specialized polynomial $F({\mathbf{A}},t)$ is irreducible in $\mbox{$\mathbb{F}$}_{q}[t]$, where $\mathbf{A}=(A_0,A_1,...,A_m)\in \mbox{$\mathbb{F}$}_q^{m+1}$. To attain this, we invoke an irreducibility criterion, as in [Lemma 2.8, \cite{lbmj}], which reduces the above problem of finding irreducibles $h\in I(p,m)$ to counting rational points of an absolutely irreducible variety over the finite field $\mbox{$\mathbb{F}$}_{q}$. The required asymptotic then follows by applying the Lang-Weil estimate.
Now, we have the following proposition. \begin{proposition}\label{Theorem2}
Let ${\mathbf{a}}=(a_0,a_1,...,a_m)$ be an $(m+1)$-tuple of variables. Let $F({\mathbf{a}},t)\in \mbox{$\mathbb{F}$}_{q}[a_0,a_1,...,a_m,t]$
be a polynomial that is separable in $t$ and irreducible in the ring $k(\mathbf{a})[t]$ with $\mathrm{deg}_{t}F=n$. Let $L$ be the splitting field of $F({\mathbf{a}},t)$ over $\mbox{$\mathbb{F}$}_{q}(\mathbf{a})$. Let $k$ be an algebraic closure of $\mbox{$\mathbb{F}$}_{q}$. Assume that, $\mathrm{Gal}(F,k(a_0,...,a_m))=S_n.$ Then the number of $\mathbf{A}=(A_0,...,A_m) \in \mbox{$\mathbb{F}$}_{q}^{m+1} $
for which the specialized polynomial $F(\mathbf{A},t)$ is irreducible is $\frac{q^{m+1}}{n}\big(1+O_{n}(q^{-1/2})\big)$ as $q\rightarrow\infty$ with $n$ fixed. \end{proposition} \begin{proof} This is proved in [Lemma 2.1, \cite{lbs}]. \end{proof} \subsection*{Proof of Theorem \ref{th1}} \begin{proof}
Let $n$ be a fixed positive integer, $q$ an odd prime power, and
\[F(\mathbf{a},t)= \tilde f(t)+g(t)(\sum\limits_{j=0}^{m}a_jt^j)^2+2p(t)g(t)\sum\limits_{j=0}^{m}a_jt^j.\]
We have seen, $\mathrm{Gal}(F(\mathbf{a},t),k(\mathbf{a}))=S_n$
and $F(\mathbf{a},t)$ satisfies all assumptions of Proposition \ref{Theorem2}.
Thus the number of $(A_0,...,A_m)\in \mbox{$\mathbb{F}$}_{q}^{m+1}$ for which $ F(\mathbf{A},t)$ is
irreducible in $\mbox{$\mathbb{F}$}_q[t]$ is \[ \frac{q^{m+1}}{n}+O_{n}(q^{m+\frac{1}{2}}).\]
This finishes the proof since this number equals $\pi_q(I(p,m))$. Hence, \[\pi_q(I(p,m))=\frac{\#I(p,m)}{n}+O_{n}(q^{m+\frac{1}{2}}).\] \end{proof} \section{ Cycle structure, Factorization type, Galois groups and Conjugacy classes} In previous sections, we obtained an asymptotic for the number of prime polynomials in the interval $I(p,m)$ for the function $F(x,t)=f(t)+x^2g(t)\in \mbox{$\mathbb{F}$}_{q}[t][x]$. Here, we derive an equidistribution result (Theorem \ref{frob}) by the function field version of Chebotarev Density theorem.
We know that factorization types of polynomials over $\mbox{$\mathbb{F}$}_{q}[t]$ resemble cycle types of permutations; below we state some known results, mainly from \cite{BR} and \cite{anlz}.
By definition, $\mathcal{M}_{n}$,
the collection of monic polynomials
of degree $n$, consists of $q^{n}$ elements. A partition $\tau$ of a positive integer $n$ is a non-increasing sequence of positive integers
$(c_1,...,c_k)$ such that $|\tau|:=c_1+\cdots+c_k=n$.
\begin{definition}
Every monic polynomial $f\in \mbox{$\mathbb{F}$}_{q}[t]$ of degree $n$ has a factorization $f=P_1\cdots P_k$ into irreducible monic polynomials $P_1,...,P_k \in \mbox{$\mathbb{F}$}_{q}[t]$, which is unique up to rearrangement. Taking degrees we obtain a partition $\mathrm{deg} P_1 + \cdots +\mathrm{deg} P_k$ of $n=\mathrm{deg}f$, and
the factorization type of $f$ is given by $$\tau_{f}=(\mathrm{deg}P_1,...,\mathrm{deg}P_k).$$ \end{definition}
\begin{definition}
Every permutation $\sigma \in S_n$ has a cycle decomposition $\sigma=\sigma_1\cdots\sigma_k$ into disjoint cycles $\sigma_1,...,\sigma_k$, which is unique up to rearrangement, where each fixed point of $\sigma$ counts as a cycle of length 1. If $|\sigma_i|$ is the length of the cycle $\sigma_i$, then $|\sigma_1|+\cdots+|\sigma_k|=n$, and the cycle type of $\sigma$ is the partition
$$\tau_{\sigma}=(|\sigma_1|, \cdots ,|\sigma_k|).$$
\end{definition}
For each partition $\tau\vdash n$, write $c_j$ for the number of parts of $\tau$ equal to $j$. The probability that a random permutation of $n$ letters has cycle type $\tau$ is given by Cauchy's formula: \begin{equation} \mathbb{P}(\tau_{\sigma}=\tau)=\frac{\#\{\sigma \in S_n:\tau_{\sigma}=\tau\}}{\# S_n}=\prod\limits_{j=1}^{n}\frac{1}{j^{c_j}\cdot c_j!}\end{equation} As $q\rightarrow \infty$, the distribution of factorization types over $\mathcal{M}_{n}$ tends to the distribution of cycle types in $S_{n}$ \cite{anlz}. \begin{proposition}
For a partition $\tau\vdash n$,
\[ \lim\limits_{q\rightarrow \infty} \mathbb{P}_{f\in \mathcal{M}_{n}}(\tau_{f}=\tau)=\mathbb{P}_{\sigma\in S_n}(\tau_{\sigma}=\tau)\text{ where, } \]$\mathbb{P}_{f\in \mathcal{M}_{n}}(\tau_{f}=\tau):=\frac{1}{q^{n}}\#\{f\in \mathcal{M}_{n}:\tau_{f}=\tau\}$. \end{proposition}
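For instance, for $n=3$ and $\tau=(2,1)$ we have $c_1=c_2=1$, so Cauchy's formula gives $\mathbb{P}(\tau_{\sigma}=\tau)=\frac{1}{1^{1}\cdot 1!}\cdot\frac{1}{2^{1}\cdot 1!}=\frac{1}{2}$; indeed exactly $3$ of the $6$ permutations in $S_3$, namely the transpositions, have this cycle type.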
We consider specializations $F(\mathbf{A},t)$ as described in the previous sections, where $\mathbf{A}=(A_0,A_1,...,A_m)\in \mbox{$\mathbb{F}$}_{q}^{m+1}$. For such an $\mathbf{A}$, denote by $\Theta(F(\mathbf{A},t))$ the conjugacy class in $S_n$ of permutations whose cycle structure $(d_1,d_2,...,d_r)$ is given by the degrees of the irreducible factors of $F(\mathbf{A},t)$; this is the factorization class of $F(\mathbf{A},t)$.
For a separable polynomial $f\in \mbox{$\mathbb{F}$}_{q}[t]$ of degree $n$, the Frobenius map $\mathrm{Fr}_{q}$ given by $y\mapsto y^{q}$ defines a permutation of the roots of $f$, which gives a well defined conjugacy class $\Theta(f)$ of the symmetric group $S_n.$ The degrees of the prime factors of $f$ correspond to the cycle lengths of $\Theta (f)$. In particular, $f$ is irreducible if and only if $\Theta(f)$ is the class of a full cycle. It is known that for any fixed conjugacy class $C$ of $S_n$ the probability that $\Theta(F(\mathbf{A},t))=C$ as $\mathbf{A}$ ranges over $\mbox{$\mathbb{F}$}_{q}^{m+1}$ is determined by the Galois group $G$ of the polynomial $F(\mathbf{a},t)$ over the field $\mbox{$\mathbb{F}$}_{q}(\mathbf{a})$ together with its standard action on the roots, up to an error term of $O_{m, \mathrm{deg}F}(q^{-\frac{1}{2}})$. Hence, we have \begin{theorem}\label{frob}
Let ${\mathbf{a}}=(a_0,a_1,...,a_m)$ be an $(m+1)$-tuple of variables over $\mbox{$\mathbb{F}$}_{q}$.
Let $k$ be an algebraic closure of $\mbox{$\mathbb{F}$}_{q}$. Let $F({\mathbf{a}},t)\in \mbox{$\mathbb{F}$}_{q}[a_0,a_1,...,a_m,t]$
be a polynomial that is separable in $t$ and irreducible in the ring $k(\mathbf{a})[t]$ with $\mathrm{deg}_{t}F=n$. Let $L$ be the splitting field of $F({\mathbf{a}},t)$ over $\mbox{$\mathbb{F}$}_{q}(\mathbf{a})$. Assume that, $G=\mathrm{Gal}(F,k(a_0,...,a_m))=S_n.$ Then for every conjugacy class $C$ in $S_n$
\[\#\{h\in I(p,m)| \Theta(F({\mathbf{A}},t))=C\}= \frac{|C|}{|G|}q^{m+1}(1+O_{n}(q^{-\frac{1}{2}}))\] \end{theorem} \begin{proof}
The proof follows from Theorem 3.1 of \cite{anlz}.
\end{proof} A variant application of Theorem 3.1 of \cite{anlz} is given in (Theorem 2.2, \cite{AE}).
\section{Bateman-Horn conjecture} The classical Bateman-Horn problem is described in the introduction. In \cite{AE}, Entin established an analogue of the Bateman-Horn conjecture under the following set up. Let $F_1,...,F_m\in \mbox{$\mathbb{F}$}_{q}[t][x]$, $\mathrm{deg}_{x}F_i>0$, be non-associate, irreducible and separable over $\mbox{$\mathbb{F}$}_{q}(t)$, and let $n$ be a natural number. Let $a_0,a_1,...,a_n$ be free variables, $\mathrm{f}=a_nt^n+...+a_0\in\mbox{$\mathbb{F}$}_{q}[\mathbf{a},t]$ and $N_i=\mathrm{deg}_{t}F_i(t,\mathrm{f})$. Under the above assumptions, the following theorem is established.
\begin{theorem}\label{entbat}
Let $F_1,...,F_m\in \mbox{$\mathbb{F}$}_{q}[t][x]$, $\mathrm{deg}_{x}F_{i}=r_i>0$, be non associate irreducible polynomials which are separable over
$\mbox{$\mathbb{F}$}_{q}(t)$ (i.e., $F_i\not\in \mbox{$\mathbb{F}$}_q[t][x^p]$) and monic in $x$. Let $n$ be a natural number satisfying $n\geq3$ and $n\geq sl F_i$
for $1\leq i\leq m$. Denote $N_i=r_in$. Denote by $\mu_i$ the number of irreducible factors into which $F_i(t,x)$ splits over $\bar \mbox{$\mathbb{F}$}_q$.
Then \[\#\{f\in \mbox{$\mathbb{F}$}_q[t], \deg f=n\,|\,F_i(t,f)\in \mbox{$\mathbb{F}$}_{q}[t]\text{ is irreducible for $i=1,2,\dots,m$}\}\]
\[=\big(\prod\limits_{i=1}^{m}\frac{\mu_i}{N_i}\big)q^{n+1}(1+O_{m,\mathrm{deg}F_i,n}(q^{\frac{-1}{2}}))\]
\end{theorem} We study the function field version of the Bateman-Horn conjecture for polynomial functions of the form defined in equation (\ref{maineq}), namely $F_i=f_i+g_ix^2 \in \mbox{$\mathbb{F}$}_{q}[t][x]$. We obtain the following result. \begin{theorem}\label{bat}
Let $F_1,...,F_r\in \mbox{$\mathbb{F}$}_{q}[t][x]$ be distinct primitive quadratic functions, each satisfying all conditions of Proposition \ref{Theorem2}.
Then \[\#\{h:=p(t)+\sum\limits_{j=0}^{m}A_jt^j|F_i(\mathbf{A},t)\in \mbox{$\mathbb{F}$}_{q}[t]\text{ is irreducible for $i=1,2,...,r$} \}\]\[=
\big(\prod\limits_{i=1}^{r}\frac{1}{n_i}\big)q^{m+1}(1+O_{n,r}(q^{\frac{-1}{2}}))\]
\[=\big(\prod\limits_{i=1}^{r}\frac{1}{n_i}\big)\#I(p,m)+O_{n,r}(q^{m+{\frac{1}{2}}})\] \end{theorem} \begin{proof}
The proof of this theorem is completed once we show that the Galois group $G$ is the full permutation group $S_{n_{1}}\times...\times S_{n_{r}}$ acting on the roots of the $F_{i}(\mathbf{a},t)$ over $k(\mathbf{a})$. From Theorem \ref{th2} we see that $\mathrm{Gal}(F_i({\mathbf{a}},t), k({\mathbf{a}}))\cong S_{n_i}$. To show that the Galois group $G$ is the full permutation group $S_{n_{1}}\times...\times S_{n_{r}}$ we need to show the multiplicative independence of the $disc_{t}F_i(t,h)$ modulo squares, i.e., that the classes of the $disc_{t}F_{i}(\mathbf{a},t)$ are linearly independent as elements of $k(\mathbf{a})^{\times}/k(\mathbf{a})^{\times{2}}$; this amounts to showing that
the $d_i$ are non-squares and that $d_i, d_j$ for $i\neq j$ are relatively prime in the ring $k(\mathbf{a},t)$. Here $d_i=disc_{t}F_{i}(\mathbf{a},t)$ denotes the discriminant of $F_{i}(\mathbf{a},t)$, considered as a polynomial in $t$. The discriminant of a monic separable polynomial $f(t)$ is defined by the resultant of $f \text{ and } f^{\prime}$: \[disc(f)=\pm Res(f,f^{\prime})=\pm\prod_{j=1}^{\nu}f(\tau_j), \text{ where }f^{\prime}=c\prod_{j=1}^{\nu}(t-\tau_j) \] Since $\mathrm{Gal}(F_i({\mathbf{a}},t), k({\mathbf{a}}))$ is a full symmetric group, $d_i$ is not a square in $k(\mathbf{a})$ for any $i$. If $d_i,d_j$ are not relatively prime in $k({\mathbf{a}})$, then they have a common root. Thus $d_i,d_j$ having a common root gives the following system of equations: \begin{equation}
\begin{cases}
F^{\prime}(\rho_i)=0\\
F^{\prime}(\rho_j)=0\\
F(\rho_i)=F(\rho_j) \text{ for some } \rho_i,\rho_j \in \bar{k}.
\end{cases} \end{equation} But we have seen that this system does not have any solution $\rho_i,\rho_j$ in the algebraic closure of $k$ [page 3, \cite{CR}]. Hence the Galois group is $\mathrm{G}=S_{n_{1}}\times...\times S_{n_{r}}$. The rest of the proof follows from [Theorem 3.1, \cite{Baro}].
Thus \[\#\{h:=p(t)+\sum\limits_{j=0}^{m}A_jt^j |F_i(\mathbf{A},t)\in \mbox{$\mathbb{F}$}_{q}[t]\text{ is irreducible for $i=1,2,...,r$} \}\] \[=
\big(\prod\limits_{i=1}^{r}\frac{1}{n_i}\big)q^{m+1}(1+O_{n,r}(q^{\frac{-1}{2}}))\]
\end{proof}
\section{M$\ddot{\mathrm{o}}$bius sums and Chowla's conjecture} The Mertens function $M(n):=\sum\limits_{k=1}^{n}\mu(k)$, given by the partial sums of the M\"obius function $\mu(k)$, is of great importance in number theory. For example, the Prime Number Theorem is logically equivalent to \begin{equation}\sum\limits_{k=1}^{n}\mu(k)=o(n)\end{equation} and \begin{equation}\sum\limits_{k=1}^{n}\frac{\mu(k)}{k}=o(1),\end{equation} while the Riemann Hypothesis is equivalent to \begin{equation}M(n)=O(n^{\frac{1}{2}+\epsilon})\text{ for all }\epsilon>0.\end{equation} Thus the Riemann Hypothesis asserts that $M(n)$ demonstrates square root cancellation. Keating and Rudnick have established the function field version of square root cancellation of M\"obius sums in short intervals \cite{KZ}. Carmon and Rudnick have resolved the function field version of the conjecture of Chowla on the auto-correlation of the M\"obius function \cite{CR} in the limit of a large finite field and proved the following result.
For $r,n\geq 2$, distinct polynomials $\alpha_1,...,\alpha_r\in\mbox{$\mathbb{F}$}_{q}[X]$ of degree smaller than $n$, $q$ odd, and $(\epsilon_1,...,\epsilon_r)\in\{1,2\}^{r}$ not all even, \begin{equation}
\sum_{\mathrm{deg}\, F=n}\mu(F+\alpha_1)^{\epsilon_1}\cdots\mu(F+\alpha_r)^{\epsilon_r}=O(rnq^{n-\frac{1}{2}}). \end{equation} We show that there is square root cancellation in M\"obius sums, as well as in the auto-correlation type sums appearing in Chowla's conjecture, for the function $F(x,t)=f(t)+g(t)x^2$ on short intervals of the form $I(p,m)$ in the large finite field limit (equation \ref{maineq1}). \newline For polynomials over a finite field $\mbox{$\mathbb{F}$}_q$, the M\"obius function of a nonzero polynomial $F\in \mbox{$\mathbb{F}$}_{q}[x]$ is defined to be $\mu(F)=(-1)^{r}$ if $F=cP_1\cdots P_r$ with $0\neq c\in \mbox{$\mathbb{F}$}_{q}$ and $P_1,...,P_r$ distinct monic irreducible polynomials, and $\mu(F)= 0$ otherwise. The analogue of the full sum $M(n)$ is the sum over all monic polynomials in $\mathcal{M}_n$ of given degree $n$, for which we have \[
\sum_{f\in \mathcal{M}_n}\mu(f) =
\begin{cases}
1, & n=0\\
-q &n=1\\
0, &n\geq 2
\end{cases}
\]
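As a quick check of the case $n=2$: among the $q^{2}$ monic quadratics, the $(q^{2}-q)/2$ irreducible ones contribute $-1$ each, the $\binom{q}{2}$ products of two distinct monic linear polynomials contribute $+1$ each, and the $q$ squares of monic linear polynomials contribute $0$, so the total is $-\frac{q^{2}-q}{2}+\frac{q(q-1)}{2}=0$, as claimed.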
\begin{equation}
\text{ Set } S_{\mu}(p;m):=\sum\limits_{h\in I(p,m)}\mu(f+gh^2)
\end{equation}
In the next theorem we demonstrate that square root cancellation in M\"obius sums is equivalent to square root cancellation in the auto-correlations of M\"obius sums
in the short interval $I(p,m)$, in the large finite field
limit $q\rightarrow \infty$ with $\mathrm{deg}(p)$ fixed.
\begin{theorem}\label{th-mobi-sum}
\begin{enumerate}
\item
Let $F(\mathbf{a},t)$ satisfy the conditions of Proposition \ref{prop1} and $\mathrm{deg}F(\mathbf{a},t)=n$. Then for $m\geq 1$
\begin{equation}\label{mobsum}
\big|S_{\mu}(p,m)\big| \ll_{n}\frac{\#I(p,m)}{\sqrt{q}},\end{equation} where the implied constant depends only on $n=\mathrm{deg}(F)$.
\item Let each of the $F_i$, of degree $n_i$, satisfy all conditions of
Proposition \ref{prop1}. Then for $\epsilon_{i}\in\{1,2\}$,
not all even,
\begin{equation}\label{autocho}
\Big|\sum\limits_{h\in I(p,m)} \mu(F_1(\mathbf{A},t))^{\epsilon_1}\cdots\mu(F_r(\mathbf{A},t))^{\epsilon_r}\Big|\ll_{r,\mathrm{deg}F_i}\frac{\#I(p,m)}{\sqrt{q}}
\end{equation}
\end{enumerate}
\end{theorem}
\begin{proof}
The M\"obius function $\mu(F)$ can be computed in terms of the discriminant $\mathrm{disc}(F)$ of $F(x)$ as (see \cite{kc1}) \(\mu(F)=(-1)^{\mathrm{deg} F}\chi_{2}(\mathrm{disc}(F)), \text{ where } \chi_{2}\) is the quadratic character on $\mbox{$\mathbb{F}$}_{q}. $ Then the sum in equation (\ref{mobsum}) becomes \begin{equation}\label{disc-char} S_{\mu}(p;m):=(-1)^{n}\sum\limits_{h\in I(p,m)}\chi_{2}(\mathrm{disc}(f(t)+g(t)h^2)).\end{equation} To estimate the sum (\ref{disc-char}), we follow the method of \cite{CR}. Since $\mathrm{disc}(F)$ is a polynomial in the coefficients of $F$, equation (\ref{disc-char}) is an $(m+1)$-dimensional character sum, which is evaluated by fixing all but one variable. Writing \begin{equation} \begin{split} &F(\mathbf{a},t)=\tilde F(\mathbf{a},t)+b\\
\text{Set } &D_{F}(b):=disc(\tilde F(\mathbf{a},t)+b)
\end{split}
\end{equation} Here $b:=F(0)$ denotes the constant term of $F(\mathbf{a},t)=f(t)+g(t)h^{2}$, and $D_{F}(b)$ is a polynomial of degree $(n-1)$ in $b$. Therefore we have \begin{equation}
\big|S_{\mu}(p;m)\big| \leq \sum\limits_{\mathbf{A}\in \mbox{$\mathbb{F}$}_{q}^{m}}\Big|\sum\limits_{b\in \mbox{$\mathbb{F}$}_q}\chi_{2}(D_{F}(b))\Big| \end{equation} We use Weil's theorem (the Riemann Hypothesis for curves over a finite field), which implies that for a polynomial $P(t)\in \mbox{$\mathbb{F}$}_{q}[t]$ of positive degree which is not proportional to the square of another polynomial (see \cite{CR}) \begin{equation}\label{cal-mobi}
\Big|\sum\limits_{t\in\mbox{$\mathbb{F}$}_q}\chi_{2}(P(t))\Big| \leq(\mathrm{deg}P-1)\sqrt{q},\quad P(t)\neq cH^{2}(t). \end{equation} Proposition \ref{prop1} implies that $D_{F}(b)$ is not a square. Non-vanishing of $\chi_{2}(D_{F}(b))$ follows from the fact that $F(x,t)$ is separable in $x$. Hence, we have
\begin{equation}
\big|S_{\mu}(p;m)\big| \leq (n-2)q^{m+\frac{1}{2}}
\end{equation} The implied constant depends only on $n=\mathrm{deg}F(\mathbf{a},t)$.
The proof of equation (\ref{autocho}) is based on techniques similar to those used in proving equation (\ref{mobsum}) of Theorem \ref{th-mobi-sum}.
Therefore we have from equation (\ref{cal-mobi}) $$
\sum\limits_{{\mathbf{A}\in\mbox{$\mathbb{F}$}_{q}^{m}}}\Big|\sum\limits_{b\in{\mbox{$\mathbb{F}$}_q }}\chi_2(D_{F_1}(b)^{\epsilon_1}\cdots D_{F_r}(b)^{\epsilon_r})\Big|\leq
\Big(2\sum\limits_{i=1}^{r}(n_i-1)-1\Big)q^{m+\frac{1}{2}} $$
which clearly gives
$$\sum\limits_{{\mathbf{A}\in\mbox{$\mathbb{F}$}_{q}^{m}}}\Big|\sum\limits_{b\in{\mbox{$\mathbb{F}$}_q }}\chi_2(D_{F_1}(b)^{\epsilon_1}\cdots D_{F_r}(b)^{\epsilon_r})\Big|
\ll_{\mathrm{deg}F_i,r}\frac{\#I(p,m)}{\sqrt{q}}.$$
Hence we conclude that square root cancellation in M\"obius sums is equivalent to square root cancellation in Chowla type sums.
\end{proof}
\end{document}
\begin{document}
\title{$(k,k',k'')$-domination in graphs} \begin{abstract} \noindent We first introduce the concept of $(k,k',k'')$-domination numbers in graphs, which is a generalization of many domination parameters. Then we find lower and upper bounds for this parameter, which improve many well-known results in the literature.
\\ {\bf Keywords:} $(k,k',k'')$-domination number, $k$-domination number, restrained domination numbers. \\ {\bf MSC 2000}: 05C69 \end{abstract}
\section{Introduction and preliminaries} Throughout this paper, let $G$ be a finite connected graph with vertex set $V=V(G)$, edge set $E=E(G)$, minimum degree $\delta=\delta(G)$ and maximum degree $\Delta=\Delta(G)$. We use \cite{w} as a reference for terminology and notation which are not defined here. For any vertex $v \in V$, $N(v)=\{u\in V\mid uv\in E(G)\}$ denotes the {\em open neighbourhood} of $v$ in $G$, and $N[v]=N(v)\cup \{v\}$ denotes its {\em closed neighbourhood}.
\noindent There are many domination parameters in graph theory. The diversity of domination parameters and the types of proofs involved are very extensive. We believe that some of the results in this field are similar and the main ideas of their proofs are the same. Therefore we introduce and investigate the concept of $(k,k',k'')$-domination number, as a generalization of many domination parameters, by a simple uniform approach.
\noindent Let $k,k'$ and $k''$ be nonnegative integers. A set $S\subseteq V$ is a {\em $(k,k',k'')$-dominating set} in $G$ if every vertex in $S$ has at least $k$ neighbors in $S$, and every vertex in $V\setminus S$ has at least $k'$ neighbors in $S$ and at least $k''$ neighbors in $V\setminus S$. The {\em $(k,k',k'')$-domination number} $\gamma_{(k,k',k'')}(G)$ is the minimum cardinality of a $(k,k',k'')$-dominating set. We note that every graph with minimum degree at least $k$ has a $(k,k',k'')$-dominating set, since $S=V(G)$ is such a set. Note that \begin{itemize} \item $\gamma_{(0,1,1)}(G)=\gamma_{r}(G)$: {\em Restrained domination number}; \item $\gamma_{(1,1,1)}(G)= \gamma_{t}^r(G)$: {\em Total restrained domination number}; \item $\gamma_{(1,2,1)}(G)=\gamma_{2r}(G)$: {\em Restrained double domination number}; \item $\gamma_{(k,k,k)}(G)=\gamma_{\times k,t}^r(G): k$-{\em Tuple total restrained domination number}; \item $\gamma_{(k,k,0)}(G)=\gamma_{\times k,t}(G): k$-{\em Tuple total domination number}; \item $\gamma_{(k-1,k,0)}(G)=\gamma_{\times k}(G): k$-{\em Tuple domination number}; \item $\gamma_{(0,k,0)}(G)=\gamma_{k}(G): k$-{\em Domination number}. \end{itemize} For the definitions of the parameters above and a comprehensive work on domination in graphs see \cite{cfhv,cr,dhhm,hhs,hhs2,kn,k}.
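For illustration, consider the cycle $C_4$ with vertices $v_1,v_2,v_3,v_4$. The set $S=\{v_1,v_2\}$ is a $(1,1,1)$-dominating set: each of $v_1,v_2$ has a neighbor in $S$, and each of $v_3,v_4$ has one neighbor in $S$ and one neighbor in $V\setminus S$. Hence $\gamma_{(1,1,1)}(C_4)=\gamma_{t}^{r}(C_4)\leq 2$.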
\section {Lower bounds on $(k,k',k'')$-domination numbers}
In this section, we calculate a lower bound on $\gamma_{(k,k',k'')}(G)$, which improves the existing lower bounds on these seven parameters.
\noindent The following result can be found in \cite{cr} and \cite{hjjp2}. \begin{theorem}\label{LB:TR} If $G$ is a graph without isolated vertices of order $n$ and size $m$, then \begin{equation}\label{EQ3}
\gamma_{t}^r(G)\geq 3n/2-m, \end{equation} and this bound is sharp. \end{theorem}
\noindent Also Hattingh et al.\ \cite{hjlpv} found that \begin{equation}\label{EQ4} \gamma_{r}(G)\geq n-2m/3. \end{equation}
The following known result is an immediate consequence of Theorem \ref{LB:TR}. \begin{theorem}\cite{hjjp1} If $T$ is a tree of order $n\geq2$, then \begin{equation}\label{EQ5}
\gamma_{t}^r(T)\geq \lceil\frac{n+2}{2}\rceil.
\end{equation} \end{theorem}
The inequality \begin{equation}\label{EQ6} \gamma_{r}(T)\geq \lceil\frac{n+2}{3}\rceil \end{equation} on the restrained domination number of a tree of order $n\geq1$ was obtained by Domke et al. \cite{dhhm}.
\noindent The author in \cite{k} generalized Theorem \ref{LB:TR} and proved that if $\delta(G)\geq k$, then \begin{equation}\label{EQ7} \gamma_{\times k,t}^r(G)\geq 3n/2-m/k. \end{equation} Moreover the authors in \cite{kn} proved that if $G$ is a graph without isolated vertices, then \begin{equation}\label{EQ8} \gamma_{2r}(G)\geq \frac{5n-2m}{4}. \end{equation}
\noindent We now improve the lower bounds given in $(\ref{EQ3}), (\ref{EQ4}), \ldots,(\ref{EQ8})$. For this purpose we first introduce a notation. Let $G$ be a graph with $\delta(G)\geq k$ and let $S$ be a $(k,k',k'')$-dominating set in $G$. We define $$\delta^{*}=\min\{\deg(v)\mid v\in V(G) \mbox{ and } \deg(v) \geq k'+k'' \}.$$
\noindent It is easy to see that $\deg(v)$ is at least $k'+k''$, and therefore at least $\delta^{*}$, for every vertex $v$ in $V\setminus S$.
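For example, if $k'+k''=4$ and the vertex degrees occurring in $G$ are $2$, $3$ and $5$, then $\delta^{*}=5$, since $5$ is the smallest degree that is at least $k'+k''$.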
\begin{theorem}\label{Th:Lower.Bound} Let $G$ be a graph with $\delta(G)\geq k$. Then $$\gamma_{(k,k',k'')}(G)\geq \frac{(k'+\delta^{*})n-2m}{\delta^{*}+k'-k}.$$ \end{theorem}
\begin{proof}
Let $S$ be a minimum $(k,k',k'')$-dominating set in $G$. Then every vertex $v\in S$ is adjacent to at least $k$ vertices in $S$. Therefore $|E(G[S])|\geq k|S|/2$. Let $E(v)$ be the set of edges at vertex $v$. Now let $v\in V\setminus S$. Since $S$ is a $(k,k',k'')$-dominating set, it follows that $v$ is incident to at least $k'$ edges $e_{1}, \ldots ,e_{k'}$ in $[S,V\setminus S]$ and at least $k''$ edges $e_{k'+1}, \ldots ,e_{k'+k''}$ in $E(G[V\setminus S])$. Since $\deg(v)\geq \delta^{*}\geq k'+k''$, $v$ is incident to at least
$\delta^{*}-k'-k''$ edges in $E(v)\setminus \{e_{i}\}_{i=1}^{k'+k''}$. The value of $|[S,V\setminus S]|+|E(G[V\setminus S])|$ is minimized if the edges in $E(v)\setminus \{e_{i}\}_{i=1}^{k'+k''}$ belong to $E(G[V\setminus S])$. Therefore $$\begin{array}{lcl}
2m&=&2|E(G[S])|+2|[S,V\setminus S]|+2|E(G[V\setminus S])|\\
&\geq &k|S|+2k'(n-|S|)+k''(n-|S|)+(\delta^{*}-k'-k'')(n-|S|). \end{array}$$
This leads to $\gamma_{(k,k',k'')}(G)=|S|\geq \frac{(k'+\delta^{*})n-2m}{\delta^{*}+k'-k}$. \end{proof}
\noindent We note that when $(k,k',k'')=(1,1,1)$, Theorem \ref{Th:Lower.Bound} gives improvements of inequalities (\ref{EQ3}) and (\ref{EQ5}). When $(k,k',k'')=(0,1,1)$, it improves the corresponding results (\ref{EQ4}) and (\ref{EQ6}). Also, if $(k,k',k'')=(k,k,k)$, Theorem \ref{Th:Lower.Bound} improves (\ref{EQ7}), and if $(k,k',k'')=(1,2,1)$, it improves (\ref{EQ8}).
As an immediate result of Theorem \ref{Th:Lower.Bound}, we conclude the following result of Hattingh and Joubert.
\begin{corollary}\cite{hj} If $G$ is a cubic graph of order $n$, then $\gamma_{r}(G)\geq \frac{n}{4}$. \end{corollary} Also, for the total restrained and restrained double domination numbers of a cubic graph $G$, we obtain $\gamma_{t}^r(G)\geq \frac{n}{3}$ and $\gamma_{2r}(G)\geq \frac{n}{2}$ by Theorem \ref{Th:Lower.Bound}, respectively.
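Indeed, for a cubic graph we have $m=3n/2$ and $\delta^{*}=3$, so Theorem \ref{Th:Lower.Bound} yields $\frac{(1+3)n-3n}{3+1-0}=\frac{n}{4}$ for $(k,k',k'')=(0,1,1)$, $\frac{(1+3)n-3n}{3+1-1}=\frac{n}{3}$ for $(1,1,1)$ and $\frac{(2+3)n-3n}{3+2-1}=\frac{n}{2}$ for $(1,2,1)$.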
\noindent Since $\gamma_{\times k}(G)=\gamma_{(k-1,k,0)}(G)$, Theorem \ref{Th:Lower.Bound} is an improvement of the following results. \begin{theorem}\cite{hh} Let $G$ be a graph of order $n$ with $\delta(G)\geq k-1$. Then $$\gamma_{\times k}(G)\geq \frac{2kn-2m}{k+1}$$ and this bound is sharp. \end{theorem}
\begin{theorem}\cite{zwx} Let $G$ be a graph of order $n$ and size $m$ with minimum degree $\delta\geq k$. Then $\gamma_{\times k,t}(G)\geq2(n-\frac{m}{k})$ and this bound is sharp. \end{theorem} Theorem \ref{Th:Lower.Bound} is also an improvement of the following theorem.
\begin{theorem}\cite{fj2} If $G$ is a graph with $n$ vertices and $m$ edges, then $\gamma_{k}(G)\geq n-\frac{m}{k}$ for each $k\geq1$. \end{theorem}
\noindent We note that every graph $G$ with $\delta(G)\geq k$ has a $(k,k',0)$-dominating set, such as $S=V(G)$, and therefore $\gamma_{(k,k',0)}(G)$ is well-defined when $\delta(G)\geq k$.
\begin{theorem}\label{Th:LB2} If $G$ is a graph of order $n$ and $\delta(G)\geq k$, then $\gamma_{(k,k',0)}(G)\geq k'n/(\Delta+k'-k)$. \end{theorem}
\begin{proof}
Let $S$ be a minimum $(k,k',0)$-dominating set in $G$. Then each vertex of $S$ is adjacent to at least $k$ vertices in $S$ and therefore to at most $\Delta-k$ vertices in $V\setminus S$, and so $|[S,V\setminus S]|\leq (\Delta-k)|S|$. On the other hand, every vertex of $V\setminus S$ has at least $k'$ neighbors in $S$, and so $k'(n-|S|)\leq |[S,V\setminus S]|$. Consequently, $\gamma_{(k,k',0)}(G)=|S|\geq k'n/(\Delta+k'-k)$. \end{proof}
\noindent The following corollaries are immediate results of Theorem \ref{Th:LB2}.
\begin{corollary}(\cite{hk,zwx}) If $G$ is a graph of minimum degree at least $k$, then $\gamma_{\times k,t}(G)\geq kn/\Delta$ and this bound is sharp. \end{corollary}
\begin{corollary}\cite{hh} If $G$ is a graph of order $n$ with $\delta(G)\geq k-1$, then $\gamma_{\times k}(G)\geq kn/(\Delta+1)$ and this bound is sharp. \end{corollary}
\begin{corollary}\cite{fj1} If $G$ is a graph of order $n$, then $\gamma_{k}(G)\geq kn/(\Delta+k)$ for every integer $k$. \end{corollary}
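Indeed, these three corollaries follow from Theorem \ref{Th:LB2} with $(k,k',k'')=(k,k,0)$, $(k-1,k,0)$ and $(0,k,0)$, which give the lower bounds $kn/\Delta$, $kn/(\Delta+1)$ and $kn/(\Delta+k)$, respectively.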
\section {Upper bounds on $(k,k',1)$-domination numbers}
\noindent In this section we present an upper bound on $(k,k',1)$-domination numbers and list some of the existing upper bounds which can be derived from this upper bound.
\begin{theorem} Let $G$ be a graph of order $n$ and let $k$ and $k'$ be positive integers. \begin{enumerate} \item If $k'\geq k+1$ and $\delta\geq k'+1$, then $\gamma_{(k,k',1)}(G)\leq n-\delta+ k'-1$.
\item If $k\geq k'$ and $\delta\geq k+2$, then $\gamma_{(k,k',1)}(G)\leq n-\delta+ k$.
\begin{proof} Let $u$ be a vertex in $G$ with $\deg(u)=\delta$.
\noindent {\bf Proof of 1:} Suppose that $v_{1},\ldots,v_{k'}\in N(u)$. Since $\delta\geq k'+1$, it follows that $|N[u]\setminus \{v_{1},\ldots,v_{k'}\}|\geq2$ and therefore $N[u]\setminus \{v_{1},\ldots,v_{k'}\}$ is a nonempty set. Also, it is easy to see that the subgraph induced by $N[u]\setminus \{v_{1},\ldots,v_{k'}\}$ has no isolated vertices. Now let $S=V(G)\setminus (N[u]\setminus \{v_{1},\ldots,v_{k'}\})$. Let $v\in N[u]\setminus \{v_{1},\ldots,v_{k'}\}$. Then $v$ can be joined to at most $\delta-k'$ vertices in $N[u]\setminus \{v_{1},\ldots,v_{k'}\}$. Thus $v$ has at least $k'$ neighbors in $S$. On the other hand, for every vertex $v$ in $S$ we have
$$|N(v)\cap S|=\deg(v)-|N(v)\cap (N[u]\setminus \{v_{1},\ldots,v_{k'}\})|\geq \deg(v)-\delta +k'-1\geq k.$$ Therefore $S$ is a $(k,k',1)$-dominating set in $G$. Hence, $$
\gamma_{(k,k',1)}(G)\leq |S|=|V(G)\setminus (N[u]\setminus \{v_{1},\ldots,v_{k'}\})|=n-\delta+k'-1. $$
\noindent {\bf Proof of 2:} Suppose that $v_{1},\ldots,v_{k+1}\in N(u)$. By assumption,
$|N[u]\setminus \{v_{1},\ldots,v_{k+1}\}|\geq 2$ and the subgraph induced by $N[u]\setminus \{v_{1},\ldots,v_{k+1}\}$ has no isolated vertices. An argument similar to that described in Part 1 shows that $S=V(G)\setminus (N[u]\setminus \{v_{1},\ldots,v_{k+1}\})$ is a $(k,k',1)$-dominating set in $G$. Therefore $$
\gamma_{(k,k',1)}(G)\leq |S|=|V(G)\setminus (N[u]\setminus \{v_{1},\ldots,v_{k+1}\})|=n-\delta+k. $$
\noindent It is easy to see that the upper bounds are sharp for the complete graph $K_{n}$, when $n\geq \max\{k,k'\}+3$. \end{proof} Considering Parts 1 and 2 of Theorem 3.1 we can see that $$\gamma_{(k,k',1)}(G)\leq n-\delta+ \max\{k,k'-1\},$$ when $\delta(G)\geq \max\{k,k'\}+2$.\\
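To verify the sharpness claim for $K_n$: here $\delta=n-1$, so the bounds of Parts 1 and 2 become $k'$ and $k+1$, respectively. Conversely, every $(k,k',1)$-dominating set of $K_n$ other than $V(K_n)$ has at least $k'$ vertices (any vertex outside it needs $k'$ neighbors inside) and at least $k+1$ vertices (any vertex inside needs $k$ neighbors inside), so for $n\geq \max\{k,k'\}+3$ these bounds are attained.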
As an immediate consequence we conclude the following corollary.
\begin{corollary} If $G$ is a graph of order $n$ and minimum degree $\delta\geq3$, then $\gamma_{2r}(G)\leq n-\delta+1$ and the bound is sharp. \end{corollary}
\noindent The authors in \cite{kn} showed that $\gamma_{2r}(G)\leq n-2$ for every graph of order $n$ and minimum degree $\delta(G)\geq 3$. In fact, Corollary 3.2 gives an improvement of this bound. In addition, if $\delta\geq4$, then the upper bound $n-2$ for $\gamma_{2r}(G)$ is not sharp.
\end{document}